wifi: iwlwifi: add iwlmld sub-driver

iwlwifi is the driver of all Intel wifi devices since 2008.
Since then, the hardware has changed a lot, but the firmware
API has changed even more. The need to keep one driver that
supports all those different APIs led us to introduce a new
architecture circa 2012 which allowed us to keep the same
interface to the hardware (DMAs, Tx queues, etc...) with a
new layer to implement the mid-layer between mac80211 and
the firmware. The first component is called the 'transport'
and the latter is called 'operation_mode', a.k.a. op_mode.

In 2013 we took advantage of the new architecture to
introduce iwlmvm which allowed us to implement the, then,
new firmware API. This op_mode supports 7260 and up, those
devices support at least VHT.

Since then, wifi evolved and so did the firmware. It became
much bigger and took a lot of functionality from the driver.
It became increasingly hard to keep the same op_mode for the
newest devices and we experienced frequent regressions on
older devices. In order to avoid those regressions and keep
the code maintainable, we decided it was about time to start
a new op_mode.

iwlmld is a new op_mode that supports BE200 or newer if the
firmware being used is 97.ucode or newer. If the user has
an older device, or a BE200 with .96.ucode, iwlmvm will be
loaded. Of course, this op_mode selection is seamless.

All the features supported in iwlmvm are supported in
iwlmld besides a few seldom used use cases: injection and
Hotspot 2.0. Those are under work.

A few points about the implementation:
 * iwlmld doesn't have any mutexes, it relies on the
   wiphy_lock
 * iwlmld is more "resource oriented": stations, links and
   interfaces are allocated and freed only after all the
   relevant flows are completed.
 * Firmware notifications' sizes are validated in a more
   structured way.

We would love to see this new op_mode merged in 6.15. The
firmware for this new driver (.97.ucode) is not yet publicly
available but it'll be sent very soon.
People eager to get an early version of this firmware can
contact Emmanuel at:
emmanuel.grumbach@intel.com

I've listed the people who directly contributed
code, but many others from various teams have
contributed in other ways.

Co-developed-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Co-developed-by: Avraham Stern <avraham.stern@intel.com>
Signed-off-by: Avraham Stern <avraham.stern@intel.com>
Co-developed-by: Daniel Gabay <daniel.gabay@intel.com>
Signed-off-by: Daniel Gabay <daniel.gabay@intel.com>
Co-developed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Co-developed-by: Anjaneyulu <pagadala.yesu.anjaneyulu@intel.com>
Signed-off-by: Anjaneyulu <pagadala.yesu.anjaneyulu@intel.com>
Co-developed-by: Yedidya Benshimol <yedidya.ben.shimol@intel.com>
Signed-off-by: Yedidya Benshimol <yedidya.ben.shimol@intel.com>
Co-developed-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Co-developed-by: Shaul Triebitz <shaul.triebitz@intel.com>
Signed-off-by: Shaul Triebitz <shaul.triebitz@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://lore.kernel.org/linux-wireless/20250216094321.537988-1-miriam.rachel.korenblit@intel.com/
[fix Kconfig, fix api/phy.h includes, SPDX tag and coding
 style issues, duplicated includes per 0-day robot]
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
This commit is contained in:
Miri Korenblit 2025-02-16 11:42:40 +02:00 committed by Johannes Berg
parent 71f8992e34
commit d1e879ec60
80 changed files with 28178 additions and 11 deletions

View File

@ -81,14 +81,25 @@ config IWLMVM
of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
config IWLMLD
tristate "Intel Wireless WiFi MLD Firmware support"
select WANT_DEV_COREDUMP
depends on MAC80211
depends on PTP_1588_CLOCK_OPTIONAL
help
This is the driver that supports firmwares of MLD capable devices.
The list of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
bool
default y if IWLDVM=m
default y if IWLMVM=m
default y if IWLMLD=m
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLDVM=n && IWLMVM=n
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM or IWLMLD"
depends on IWLDVM=n && IWLMVM=n && IWLMLD=n
menu "Debugging Options"

View File

@ -12,7 +12,8 @@ iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o
iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
@ -20,6 +21,7 @@ iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
iwlwifi-objs += fw/regulatory.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_IWLMLD) += fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
@ -33,6 +35,7 @@ ccflags-y += -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
obj-$(CONFIG_IWLMLD) += mld/
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/

View File

@ -38,6 +38,11 @@
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
#if !IS_ENABLED(CONFIG_IWLMVM)
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
#endif
static const struct iwl_base_params iwl_bz_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,

View File

@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
* Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_phy_h__
#define __iwl_fw_api_phy_h__
#include <linux/types.h>
#include <linux/bits.h>
/**
* enum iwl_phy_ops_subcmd_ids - PHY group commands

View File

@ -653,6 +653,10 @@ extern const struct iwl_cfg iwl_cfg_ma;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
extern const struct iwl_ht_params iwl_bz_ht_params;
extern const struct iwl_ht_params iwl_bz_ht_params;
@ -664,6 +668,6 @@ extern const struct iwl_cfg iwl_cfg_sc2;
extern const struct iwl_cfg iwl_cfg_sc2f;
extern const struct iwl_cfg iwl_cfg_dr;
extern const struct iwl_cfg iwl_cfg_br;
#endif /* CONFIG_IWLMVM */
#endif /* CONFIG_IWLMLD */
#endif /* __IWL_CONFIG_H__ */

View File

@ -75,6 +75,9 @@ struct iwl_drv {
enum {
DVM_OP_MODE,
MVM_OP_MODE,
#if IS_ENABLED(CONFIG_IWLMLD)
MLD_OP_MODE,
#endif
};
/* Protects the table contents, i.e. the ops pointer & drv list */
@ -86,6 +89,9 @@ static struct iwlwifi_opmode_table {
} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
[DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
[MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
#if IS_ENABLED(CONFIG_IWLMLD)
[MLD_OP_MODE] = { .name = "iwlmld", .ops = NULL },
#endif
};
#define IWL_DEFAULT_SCAN_CHANNELS 40
@ -316,6 +322,7 @@ struct iwl_firmware_pieces {
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_mem_tlv;
u32 major;
};
static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
@ -957,19 +964,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
case IWL_UCODE_TLV_FW_VERSION: {
const __le32 *ptr = (const void *)tlv_data;
u32 major, minor;
u32 minor;
u8 local_comp;
if (tlv_len != sizeof(u32) * 3)
goto invalid_tlv_len;
major = le32_to_cpup(ptr++);
pieces->major = le32_to_cpup(ptr++);
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
"%u.%08x.%u %s", major, minor,
"%u.%08x.%u %s", pieces->major, minor,
local_comp, iwl_reduced_fw_name(drv));
break;
}
@ -1468,6 +1475,8 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
}
}
#define IWL_MLD_SUPPORTED_FW_VERSION 97
/*
* iwl_req_fw_callback - callback when firmware was loaded
*
@ -1720,6 +1729,19 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
break;
}
#if IS_ENABLED(CONFIG_IWLMLD)
if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION)
op = &iwlwifi_opmode_table[MLD_OP_MODE];
else
#else
if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION) {
IWL_ERR(drv,
"IWLMLD needs to be compiled to support this firmware\n");
mutex_unlock(&iwlwifi_opmode_table_mtx);
goto out_unbind;
}
#endif
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);

View File

@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
obj-$(CONFIG_IWLMLD) += iwlmld.o
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
iwlmld-y += mld.o notif.o mac80211.o fw.o power.o iface.o link.o rx.o mcc.o session-protect.o phy.o
iwlmld-y += scan.o sta.o tx.o coex.o tlc.o agg.o key.o regulatory.o ap.o thermal.o roc.o stats.o
iwlmld-y += low_latency.o mlo.o ptp.o time_sync.o ftm-initiator.o
iwlmld-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
iwlmld-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmld-$(CONFIG_PM_SLEEP) += d3.o
# non-upstream things
iwlmld-$(CONFIG_IWL_VENDOR_CMDS) += vendor-cmd.o
iwlmld-$(CONFIG_IWLMVM_AX_SOFTAP_TESTMODE) += ax-softap-testmode.o
subdir-ccflags-y += -I$(src)/../

View File

@ -0,0 +1,670 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "agg.h"
#include "sta.h"
#include "hcmd.h"
/* Hand all frames below @nssn from this queue's reorder buffer up to
 * mac80211, then advance the reorder window head to @nssn.
 */
static void
iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
			       struct napi_struct *napi,
			       struct iwl_mld_baid_data *baid_data,
			       struct iwl_mld_reorder_buffer *reorder_buf,
			       u16 nssn)
{
	struct iwl_mld_reorder_buf_entry *entries;
	u16 sn;

	/* this queue's slice of the shared entries array */
	entries = &baid_data->entries[reorder_buf->queue *
				      baid_data->entries_per_queue];

	for (sn = reorder_buf->head_sn; ieee80211_sn_less(sn, nssn);
	     sn = ieee80211_sn_inc(sn)) {
		struct sk_buff_head *frames =
			&entries[sn % baid_data->buf_size].frames;
		struct sk_buff *skb;

		/* A slot holds more than one frame for an A-MSDU; an empty
		 * slot is also fine, since the nssn indicates the frames
		 * were received (possibly on another queue).
		 */
		while ((skb = __skb_dequeue(frames)) != NULL) {
			reorder_buf->num_stored--;
			iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
							reorder_buf->queue,
							sta);
		}
	}

	reorder_buf->head_sn = nssn;
}
/* Release frames up to @nssn for the given BAID on one RX queue.
 *
 * Looks up the BA session data and the owning station under RCU and
 * then releases in-order frames to mac80211. Bails out quietly if the
 * BAID was already freed (can race with a late firmware notification).
 */
static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue)
{
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	u32 sta_id;

	IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!ba_data) {
		/* session may already be torn down - not an error */
		IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
		goto out_unlock;
	}

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
				       reorder_buf, nssn);
out_unlock:
	rcu_read_unlock();
}
/* Handle a frame-release notification from the firmware: validate its
 * size, then release frames up to the reported NSSN for the BAID.
 */
void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
					struct napi_struct *napi,
					struct iwl_rx_packet *pkt, int queue)
{
	u32 len = iwl_rx_packet_payload_len(pkt);
	struct iwl_frame_release *notif = (void *)pkt->data;

	/* don't trust the firmware: validate the notification size */
	if (IWL_FW_CHECK(mld, len < sizeof(*notif),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 len, sizeof(*notif)))
		return;

	iwl_mld_release_frames_from_notif(mld, napi, notif->baid,
					  le16_to_cpu(notif->nssn), queue);
}
/* Handle a BAR (Block Ack Request) frame release notification from the
 * firmware: validate all fields against the driver's BA session state,
 * then release frames up to the BAR's NSSN.
 */
void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
					    struct napi_struct *napi,
					    struct iwl_rx_packet *pkt,
					    int queue)
{
	struct iwl_bar_frame_release *release = (void *)pkt->data;
	struct iwl_mld_baid_data *baid_data;
	unsigned int baid, nssn, sta_id, tid;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	/* don't trust the firmware: validate the notification size */
	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	baid = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_BAID_MASK);
	nssn = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_NSSN_MASK);
	sta_id = le32_get_bits(release->sta_tid,
			       IWL_BAR_FRAME_RELEASE_STA_MASK);
	tid = le32_get_bits(release->sta_tid,
			    IWL_BAR_FRAME_RELEASE_TID_MASK);

	if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAR release: invalid BAID (%x)\n", baid))
		return;

	rcu_read_lock();
	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	/* Fix: IWL_FW_CHECK() returns its condition, so the check must NOT
	 * be negated here - with the previous '!', a NULL baid_data fell
	 * through and was dereferenced below, while a valid session was
	 * skipped entirely.
	 */
	if (IWL_FW_CHECK(mld, !baid_data,
			 "Got valid BAID %d but not allocated, invalid BAR release!\n",
			 baid))
		goto out_unlock;

	/* the session the firmware refers to must match our bookkeeping.
	 * NOTE(review): 'sta_id > num_stations' looks like it should be
	 * '>=', but sta_id == num_stations is still rejected by the
	 * sta_mask BIT() test - confirm against firmware API docs.
	 */
	if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
			 sta_id > mld->fw->ucode_capa.num_stations ||
			 !(baid_data->sta_mask & BIT(sta_id)),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid, sta_id,
			 tid))
		goto out_unlock;

	IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
		       nssn);

	iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);

out_unlock:
	rcu_read_unlock();
}
/* Handle the internal RX-queues-sync DELBA notification on @queue:
 * flush the entire reorder window (releasing head_sn + buf_size covers
 * everything stored) before the BA session data is freed.
 */
void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
		    struct iwl_mld_delba_data *data)
{
	struct iwl_mld_baid_data *ba_data;
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct ieee80211_link_sta *link_sta;
	u8 baid = data->baid;
	u32 sta_id;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out_unlock;

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
				       ba_data, reorder_buf,
				       ieee80211_sn_add(reorder_buf->head_sn,
							ba_data->buf_size));
out_unlock:
	rcu_read_unlock();
}
/* Reorder an incoming MPDU that may belong to an RX BA session.
 *
 * Returns %IWL_MLD_PASS_SKB if the frame should be passed to mac80211
 * now, %IWL_MLD_BUFFERED_SKB if it was stored in the reorder buffer,
 * or %IWL_MLD_DROP_SKB if it is a duplicate/outdated frame that the
 * caller must free.
 */
enum iwl_mld_reorder_result
iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
		int queue, struct ieee80211_sta *sta,
		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct iwl_mld_baid_data *baid_data;
	struct iwl_mld_reorder_buffer *buffer;
	struct iwl_mld_reorder_buf_entry *entries;
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu, last_subframe, is_old_sn, is_dup;
	u8 tid = ieee80211_get_tid(hdr);
	u8 baid;
	u16 nssn, sn;
	u32 sta_mask = 0;
	int index;
	u8 link_id;

	baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);

	/* This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return IWL_MLD_PASS_SKB;

	/* no sta yet */
	if (WARN_ONCE(!sta,
		      "Got valid BAID without a valid station assigned\n"))
		return IWL_MLD_PASS_SKB;

	/* not a data packet */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return IWL_MLD_PASS_SKB;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return IWL_MLD_PASS_SKB;

	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!baid_data) {
		IWL_DEBUG_HT(mld,
			     "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
			     baid, reorder);
		return IWL_MLD_PASS_SKB;
	}

	/* an MLO station may use several FW link-STA IDs */
	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
		sta_mask |= BIT(mld_link_sta->fw_id);

	/* verify the BAID is correctly mapped to the sta and tid */
	if (IWL_FW_CHECK(mld,
			 tid != baid_data->tid ||
			 !(sta_mask & baid_data->sta_mask),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid,
			 sta_mask, tid))
		return IWL_MLD_PASS_SKB;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);

	/* don't start the reorder window on this queue with an old frame */
	if (!buffer->valid && is_old_sn)
		return IWL_MLD_PASS_SKB;

	buffer->valid = true;

	is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));

	/* drop any duplicated or outdated packets */
	if (is_dup || is_old_sn)
		return IWL_MLD_DROP_SKB;

	sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
	nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
	amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (!amsdu || last_subframe)
			buffer->head_sn = nssn;
		return IWL_MLD_PASS_SKB;
	}

	/* release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
		return IWL_MLD_PASS_SKB;
	}

	/* put in reorder buffer */
	index = sn % baid_data->buf_size;
	__skb_queue_tail(&entries[index].frames, skb);
	buffer->num_stored++;

	/* We cannot trust NSSN for AMSDU sub-frames that are not the last. The
	 * reason is that NSSN advances on the first sub-frame, and may cause
	 * the reorder buffer to advance before all the sub-frames arrive.
	 *
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 */
	if (!amsdu || last_subframe)
		iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
					       buffer, nssn);

	return IWL_MLD_BUFFERED_SKB;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);
/* Timer callback for RX BA session inactivity.
 *
 * The timer is armed for 2 * timeout after @last_rx_timestamp was last
 * refreshed; if traffic was seen since, re-arm for the remaining time,
 * otherwise ask mac80211 to tear the session down (delBA flow).
 */
static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mld_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_sta *mld_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);
	if (WARN_ON(!ba_data))
		goto unlock;

	/* the timer is never armed when timeout is 0 */
	if (WARN_ON(!ba_data->timeout))
		goto unlock;

	/* last_rx_timestamp is only refreshed once per @timeout, so the
	 * session is considered live for up to twice that
	 */
	timeout = ba_data->last_rx_timestamp +
		  TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* timer expired, pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);

	/* sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_link_sta pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
		goto unlock;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
				      ba_data->tid);
unlock:
	rcu_read_unlock();
}
static int
iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
{
struct iwl_rx_baid_cfg_cmd cmd = {
.action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
.remove.sta_id_mask =
cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
.remove.tid = cpu_to_le32(tid),
};
int ret;
ret = iwl_mld_send_cmd_pdu(mld,
WIDE_ID(DATA_PATH_GROUP,
RX_BAID_ALLOCATION_CONFIG_CMD),
&cmd);
if (ret)
return ret;
IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");
return ret;
}
/* Ask the firmware to allocate an RX BA session (BAID).
 *
 * Returns the firmware-assigned BAID (>= 0) on success, or a negative
 * error code (command failure, or -EIO/-EINVAL on a bad response).
 */
static int
iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
		       int tid, u16 ssn, u16 buf_size)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
		.alloc.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.alloc.tid = tid,
		.alloc.ssn = cpu_to_le16(ssn),
		.alloc.win_size = cpu_to_le16(buf_size),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_WANT_SKB,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	struct iwl_rx_baid_cfg_resp *resp;
	struct iwl_rx_packet *pkt;
	u32 resp_len;
	int ret, baid;

	/* the response carries only the BAID, returned via 'ret' below */
	BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));

	ret = iwl_mld_send_cmd(mld, &hcmd);
	if (ret)
		return ret;

	pkt = hcmd.resp_pkt;

	/* don't trust the firmware: validate the response length */
	resp_len = iwl_rx_packet_payload_len(pkt);
	if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
			 "BAID_ALLOC_CMD: unexpected response length %d\n",
			 resp_len)) {
		ret = -EIO;
		goto out;
	}

	IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");

	resp = (void *)pkt->data;
	baid = le32_to_cpu(resp->baid);

	/* don't trust the firmware: validate the returned BAID */
	if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
		ret = -EINVAL;
		goto out;
	}

	ret = baid;
out:
	/* CMD_WANT_SKB responses must be freed by the caller */
	iwl_free_resp(&hcmd);
	return ret;
}
/* Initialize the per-queue reorder buffers of a new BA session, starting
 * every window at @ssn with all frame lists empty.
 */
static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data,
					u16 ssn)
{
	for (int q = 0; q < mld->trans->num_rx_queues; q++) {
		struct iwl_mld_reorder_buffer *buf = &data->reorder_buf[q];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[q * data->entries_per_queue];

		buf->queue = q;
		buf->head_sn = ssn;

		for (int slot = 0; slot < data->buf_size; slot++)
			__skb_queue_head_init(&entries[slot].frames);
	}
}
/* Tear down the reorder buffers of a BA session.
 *
 * First sync all RX queues with an internal DELBA notification, which
 * makes each queue release its pending frames; then verify nothing was
 * left behind and purge any leftovers as a last resort.
 */
static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data)
{
	struct iwl_mld_delba_data delba_data = {
		.baid = data->baid,
	};

	iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
			       &delba_data, sizeof(delba_data));

	for (int i = 0; i < mld->trans->num_rx_queues; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		if (likely(!reorder_buf->num_stored))
			continue;

		/* This shouldn't happen in regular DELBA since the RX queues
		 * sync internal DELBA notification should trigger a release
		 * of all frames in the reorder buffer.
		 */
		WARN_ON(1);

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_purge(&entries[j].frames);
	}
}
/* Start an RX BA (aggregation) session for @sta/@tid.
 *
 * Allocates the per-queue reorder buffers, asks the firmware for a
 * BAID, and arms the inactivity timer (2 * @timeout; see struct
 * iwl_mld_baid_data) when @timeout is non-zero.
 *
 * Returns 0 on success, -ENOSPC when the BAID table is exhausted,
 * -EINVAL/-ENOMEM, or a firmware command error.
 */
int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
			   int tid, u16 ssn, u16 buf_size, u16 timeout)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_baid_data *baid_data = NULL;
	u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
	int ret, baid;
	u32 sta_mask;

	lockdep_assert_wiphy(mld->wiphy);

	if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
		IWL_DEBUG_HT(mld,
			     "Max num of RX BA sessions reached; blocking new session\n");
		return -ENOSPC;
	}

	sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
	if (WARN_ON(!sta_mask))
		return -EINVAL;

	/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
	/* The division below will be OK if either the cache line size
	 * can be divided by the entry size (ALIGN will round up) or if
	 * the entry size can be divided by the cache line size, in which
	 * case the ALIGN() will do nothing.
	 */
	BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
		     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

	/* Upward align the reorder buffer size to fill an entire cache
	 * line for each queue, to avoid sharing cache lines between
	 * different queues.
	 */
	reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

	/* Allocate here so if allocation fails we can bail out early
	 * before starting the BA session in the firmware
	 */
	baid_data = kzalloc(sizeof(*baid_data) +
			    mld->trans->num_rx_queues * reorder_buf_size,
			    GFP_KERNEL);
	if (!baid_data)
		return -ENOMEM;

	/* This division is why we need the above BUILD_BUG_ON(),
	 * if that doesn't hold then this will not be right.
	 */
	baid_data->entries_per_queue =
		reorder_buf_size / sizeof(baid_data->entries[0]);

	baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	mld->num_rx_ba_sessions++;
	mld_sta->tid_to_baid[tid] = baid;

	baid_data->baid = baid;
	baid_data->mld = mld;
	baid_data->tid = tid;
	baid_data->buf_size = buf_size;
	baid_data->sta_mask = sta_mask;
	baid_data->timeout = timeout;
	baid_data->last_rx_timestamp = jiffies;
	baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];

	iwl_mld_init_reorder_buffer(mld, baid_data, ssn);

	timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
		    0);
	if (timeout)
		mod_timer(&baid_data->session_timer,
			  TU_TO_EXP_TIME(timeout * 2));

	IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
		     baid_data->sta_mask, tid, baid);

	/* protect the BA data with RCU to cover a case where our
	 * internal RX sync mechanism will timeout (not that it's
	 * supposed to happen) and we will free the session data while
	 * RX is being processed in parallel
	 */
	WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);

	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
/* Stop the RX BA session of @sta/@tid: remove it from the firmware
 * (skipped while restarting the firmware, which no longer knows it),
 * flush the reorder buffers and free the session data (RCU-deferred).
 *
 * Returns 0 on success or a negative error code.
 */
int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
			  int tid)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	int baid = mld_sta->tid_to_baid[tid];
	struct iwl_mld_baid_data *baid_data;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* during firmware restart, do not send the command as the firmware no
	 * longer recognizes the session. instead, only clear the driver BA
	 * session data.
	 */
	if (!mld->fw_status.in_hw_restart) {
		ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
		if (ret)
			return ret;
	}

	if (!WARN_ON(mld->num_rx_ba_sessions == 0))
		mld->num_rx_ba_sessions--;

	baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
	if (WARN_ON(!baid_data))
		return -EINVAL;

	if (timer_pending(&baid_data->session_timer))
		timer_shutdown_sync(&baid_data->session_timer);

	iwl_mld_free_reorder_buffer(mld, baid_data);

	/* stop new RX-path lookups, then free after a grace period */
	RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
	kfree_rcu(baid_data, rcu_head);

	IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);

	return 0;
}
/* Re-point all BA sessions using @old_sta_mask at @new_sta_mask in the
 * firmware and in the driver's bookkeeping (e.g. after the FW link-STA
 * IDs of a station changed).
 *
 * Returns 0 on success or a negative command error.
 */
int iwl_mld_update_sta_baids(struct iwl_mld *mld,
			     u32 old_sta_mask,
			     u32 new_sta_mask)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
		.modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
		.modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
	int baid;

	/* mac80211 will remove sessions later, but we ignore all that */
	if (mld->fw_status.in_hw_restart)
		return 0;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
		struct iwl_mld_baid_data *data;
		int ret;

		data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
		if (!data)
			continue;

		/* only touch sessions owned by the station being updated */
		if (!(data->sta_mask & old_sta_mask))
			continue;

		WARN_ONCE(data->sta_mask != old_sta_mask,
			  "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
			  baid, old_sta_mask, data->sta_mask);

		cmd.modify.tid = cpu_to_le32(data->tid);

		ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
		if (ret)
			return ret;
		data->sta_mask = new_sta_mask;
	}

	return 0;
}

View File

@ -0,0 +1,127 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_agg_h__
#define __iwl_agg_h__

#include "mld.h"
#include "fw/api/rx.h"

/**
 * struct iwl_mld_reorder_buffer - per ra/tid/queue reorder buffer
 * @head_sn: reorder window head sequence number
 * @num_stored: number of MPDUs stored in the buffer
 * @queue: queue of this reorder buffer
 * @valid: true if reordering is valid for this queue
 */
struct iwl_mld_reorder_buffer {
	u16 head_sn;
	u16 num_stored;
	int queue;
	bool valid;
} ____cacheline_aligned_in_smp; /* one cache line per queue */

/**
 * struct iwl_mld_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
 * @frames: list of skbs stored. a list is necessary because in an A-MSDU,
 *	all sub-frames share the same sequence number, so they are stored
 *	together in the same list.
 */
struct iwl_mld_reorder_buf_entry {
	struct sk_buff_head frames;
}
#ifndef __CHECKER__
/* sparse doesn't like this construct: "bad integer constant expression" */
__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
#endif
;

/**
 * struct iwl_mld_baid_data - Block Ack session data
 * @rcu_head: RCU head for freeing this data
 * @sta_mask: station mask for the BAID
 * @tid: tid of the session
 * @baid: baid of the session
 * @buf_size: the reorder buffer size as set by the last ADDBA request
 * @entries_per_queue: number of buffers per queue, this actually gets
 *	aligned up to avoid cache line sharing between queues
 * @timeout: the timeout value specified in the ADDBA request.
 * @last_rx_timestamp: timestamp of the last received packet (in jiffies). This
 *	value is updated only when the configured @timeout has passed since
 *	the last update to minimize cache bouncing between RX queues.
 * @session_timer: timer is set to expire after 2 * @timeout (since we want
 *	to minimize the cache bouncing by updating @last_rx_timestamp only once
 *	after @timeout has passed). If no packets are received within this
 *	period, it informs mac80211 to initiate delBA flow, terminating the
 *	BA session.
 * @rcu_ptr: BA data RCU protected access
 * @mld: mld pointer, needed for timer context
 * @reorder_buf: reorder buffer, allocated per queue
 * @entries: data
 */
struct iwl_mld_baid_data {
	struct rcu_head rcu_head;
	u32 sta_mask;
	u8 tid;
	u8 baid;
	u16 buf_size;
	u16 entries_per_queue;
	u16 timeout;
	struct timer_list session_timer;
	unsigned long last_rx_timestamp;
	struct iwl_mld_baid_data __rcu **rcu_ptr;
	struct iwl_mld *mld;
	struct iwl_mld_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
	/* flexible array: num_rx_queues * entries_per_queue entries */
	struct iwl_mld_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
};

/**
 * struct iwl_mld_delba_data - RX queue sync data for %IWL_MLD_RXQ_NOTIF_DEL_BA
 *
 * @baid: Block Ack id, used to identify the BA session to be removed
 */
struct iwl_mld_delba_data {
	u32 baid;
} __packed;

/**
 * enum iwl_mld_reorder_result - Possible return values for iwl_mld_reorder()
 * indicating how the caller should handle the skb based on the result.
 *
 * @IWL_MLD_PASS_SKB: skb should be passed to upper layer.
 * @IWL_MLD_BUFFERED_SKB: skb has been buffered, don't pass it to upper layer.
 * @IWL_MLD_DROP_SKB: skb should be dropped and freed by the caller.
 */
enum iwl_mld_reorder_result {
	IWL_MLD_PASS_SKB,
	IWL_MLD_BUFFERED_SKB,
	IWL_MLD_DROP_SKB
};

/* mac80211 ampdu_action glue: start/stop an RX BA session */
int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
			   int tid, u16 ssn, u16 buf_size, u16 timeout);
int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
			  int tid);

/* RX path: reorder a single MPDU, see enum iwl_mld_reorder_result */
enum iwl_mld_reorder_result
iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
		int queue, struct ieee80211_sta *sta,
		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc);

/* firmware notification handlers */
void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
					struct napi_struct *napi,
					struct iwl_rx_packet *pkt, int queue);
void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
					    struct napi_struct *napi,
					    struct iwl_rx_packet *pkt,
					    int queue);

/* internal RX-queues-sync DELBA handler */
void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
		    struct iwl_mld_delba_data *data);

/* update BA sessions after a station's FW ID mask changed */
int iwl_mld_update_sta_baids(struct iwl_mld *mld,
			     u32 old_sta_mask,
			     u32 new_sta_mask);

#endif /* __iwl_agg_h__ */

View File

@ -0,0 +1,344 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024 Intel Corporation
*/
#include <linux/crc32.h>
#include <net/mac80211.h>
#include "ap.h"
#include "hcmd.h"
#include "tx.h"
#include "power.h"
#include "key.h"
#include "iwl-utils.h"
#include "fw/api/sta.h"
/* Find the TIM element in a beacon frame and report its offset to the FW.
 *
 * On success, *tim_index is set to the TIM element's byte offset from the
 * start of the frame, so the FW can update the TIM contents in place for
 * every transmitted beacon. If no TIM element is found, *tim_index is
 * left untouched and a warning is logged (a well-formed AP/IBSS beacon
 * must carry one).
 *
 * NOTE(review): assumes @frame_size covers at least the fixed beacon
 * header; @frame_size is u32, so "frame_size - 2" would wrap for a tiny
 * frame -- beacons handed over by mac80211 are always larger.
 */
void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index,
			 u8 *beacon, u32 frame_size)
{
	u32 tim_idx;
	struct ieee80211_mgmt *mgmt = (void *)beacon;

	/* The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2; /* skip id + len + data */

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && beacon[tim_idx] == WLAN_EID_TIM)
		*tim_index = cpu_to_le32(tim_idx);
	else
		IWL_WARN(mld, "Unable to find TIM Element in beacon\n");
}
/* Compute the FW rate/flags byte for a beacon.
 *
 * If the user configured exactly one legacy beacon rate, that rate is
 * used; otherwise the lowest usable rate is picked. The returned value
 * is the FW legacy rate index (0-3 CCK, 0-7 OFDM) OR'ed with
 * IWL_MAC_BEACON_CCK when the rate is a CCK one.
 */
u8 iwl_mld_get_rate_flags(struct iwl_mld *mld,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_vif *vif,
			  struct ieee80211_bss_conf *link,
			  enum nl80211_band band)
{
	u32 legacy = link->beacon_tx_rate.control[band].legacy;
	u32 rate_idx, flags = 0, fw_rate;

	/* if beacon rate was configured try using it */
	if (hweight32(legacy) == 1) {
		struct ieee80211_supported_band *sband =
			mld->hw->wiphy->bands[band];

		rate_idx = sband->bitrates[ffs(legacy) - 1].hw_value;
	} else {
		rate_idx = iwl_mld_get_lowest_rate(mld, info, vif);
	}

	if (rate_idx <= IWL_LAST_CCK_RATE)
		flags = IWL_MAC_BEACON_CCK;

	/* Legacy rates are indexed as follows:
	 * 0 - 3 for CCK and 0 - 7 for OFDM.
	 */
	fw_rate = rate_idx >= IWL_FIRST_OFDM_RATE ?
		rate_idx - IWL_FIRST_OFDM_RATE : rate_idx;

	return fw_rate | flags;
}
int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld,
struct sk_buff *beacon,
struct iwl_mac_beacon_cmd *cmd)
{
struct iwl_host_cmd hcmd = {
.id = BEACON_TEMPLATE_CMD,
};
hcmd.len[0] = sizeof(*cmd);
hcmd.data[0] = cmd;
hcmd.len[1] = beacon->len;
hcmd.data[1] = beacon->data;
hcmd.dataflags[1] = IWL_HCMD_DFL_DUP;
return iwl_mld_send_cmd(mld, &hcmd);
}
/* Fill a beacon template command from the given beacon skb.
 *
 * Sets the FW link id, FILS discovery parameters, rate flags and the
 * offsets of the TIM, broadcast-TWT, CSA and eCSA elements inside the
 * frame, so the FW can modify those fields on its own for every beacon
 * it transmits.
 *
 * Returns 0 on success, -EINVAL if the link has no driver data or no
 * channel context assigned.
 */
static int iwl_mld_fill_beacon_template_cmd(struct iwl_mld *mld,
					    struct ieee80211_vif *vif,
					    struct sk_buff *beacon,
					    struct iwl_mac_beacon_cmd *cmd,
					    struct ieee80211_bss_conf *link)
{
	struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);
	struct ieee80211_chanctx_conf *ctx;
	bool enable_fils;
	u16 flags = 0;

	lockdep_assert_wiphy(mld->wiphy);

	if (WARN_ON(!mld_link))
		return -EINVAL;

	cmd->link_id = cpu_to_le32(mld_link->fw_id);

	ctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
	if (WARN_ON(!ctx || !ctx->def.chan))
		return -EINVAL;

	/* FILS discovery: enabled on PSC channels, and on wide (>= 80 MHz)
	 * 6 GHz channels
	 */
	enable_fils = cfg80211_channel_is_psc(ctx->def.chan) ||
		      (ctx->def.chan->band == NL80211_BAND_6GHZ &&
		       ctx->def.width >= NL80211_CHAN_WIDTH_80);

	if (enable_fils) {
		flags |= IWL_MAC_BEACON_FILS;
		/* short SSID: bit-inverted CRC32 of the SSID */
		cmd->short_ssid = cpu_to_le32(~crc32_le(~0, vif->cfg.ssid,
							vif->cfg.ssid_len));
	}

	cmd->byte_cnt = cpu_to_le16((u16)beacon->len);

	flags |= iwl_mld_get_rate_flags(mld, info, vif, link,
					ctx->def.chan->band);

	cmd->flags = cpu_to_le16(flags);

	/* TIM and broadcast-TWT offsets are only relevant for AP beacons */
	if (vif->type == NL80211_IFTYPE_AP) {
		iwl_mld_set_tim_idx(mld, &cmd->tim_idx,
				    beacon->data, beacon->len);

		cmd->btwt_offset =
			cpu_to_le32(iwl_find_ie_offset(beacon->data,
						       WLAN_EID_S1G_TWT,
						       beacon->len));
	}

	cmd->csa_offset =
		cpu_to_le32(iwl_find_ie_offset(beacon->data,
					       WLAN_EID_CHANNEL_SWITCH,
					       beacon->len));
	cmd->ecsa_offset =
		cpu_to_le32(iwl_find_ie_offset(beacon->data,
					       WLAN_EID_EXT_CHANSWITCH_ANN,
					       beacon->len));

	return 0;
}
/* The beacon template for the AP/GO/IBSS has changed and needs update.
 *
 * Fetches a fresh template from mac80211, fills a BEACON_TEMPLATE_CMD
 * and sends it to the FW.
 *
 * Returns 0 on success (also for a non-transmitting AP, which sends no
 * beacons at all), -EBUSY while a debugfs-injected beacon is active,
 * -ENOMEM if no template could be obtained, or the FW command's error.
 */
int iwl_mld_update_beacon_template(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *link_conf)
{
	struct iwl_mac_beacon_cmd cmd = {};
	struct sk_buff *beacon;
	int ret;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
#endif

	WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		vif->type != NL80211_IFTYPE_ADHOC);

	if (IWL_MLD_NON_TRANSMITTING_AP)
		return 0;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* Don't overwrite a beacon injected through debugfs */
	if (mld_vif->beacon_inject_active) {
		IWL_DEBUG_INFO(mld,
			       "Can't update template, beacon injection's active\n");
		return -EBUSY;
	}
#endif
	beacon = ieee80211_beacon_get_template(mld->hw, vif, NULL,
					       link_conf->link_id);
	if (!beacon)
		return -ENOMEM;

	ret = iwl_mld_fill_beacon_template_cmd(mld, vif, beacon, &cmd,
					       link_conf);

	if (!ret)
		ret = iwl_mld_send_beacon_template_cmd(mld, beacon, &cmd);

	dev_kfree_skb(beacon);

	return ret;
}
/* Drop a key that was stored before the AP was started.
 *
 * Removes @key from the link's early-key slots. Keys parked here were
 * never sent to the FW, so their hw_key_idx must still be
 * STA_KEY_IDX_INVALID; warn and force it back if not.
 */
void iwl_mld_free_ap_early_key(struct iwl_mld *mld,
			       struct ieee80211_key_conf *key,
			       struct iwl_mld_vif *mld_vif)
{
	struct iwl_mld_link *link;

	if (WARN_ON(key->link_id < 0))
		return;

	link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
	if (WARN_ON(!link))
		return;

	for (int slot = 0; slot < ARRAY_SIZE(link->ap_early_keys); slot++) {
		if (link->ap_early_keys[slot] == key) {
			/* Never sent to FW, so must be marked INVALID */
			if (WARN_ON(key->hw_key_idx != STA_KEY_IDX_INVALID))
				key->hw_key_idx = STA_KEY_IDX_INVALID;

			link->ap_early_keys[slot] = NULL;
		}
	}
}
/* Park a key configured before the AP was started.
 *
 * The key can't be sent to the FW yet (the bcast/mcast stations don't
 * exist); store it in the first free per-link slot so
 * iwl_mld_send_ap_early_keys() can program it once the AP is up.
 *
 * Returns 0 on success, -EINVAL on a bad link, -ENOSPC if all slots
 * are taken.
 */
int iwl_mld_store_ap_early_key(struct iwl_mld *mld,
			       struct ieee80211_key_conf *key,
			       struct iwl_mld_vif *mld_vif)
{
	struct iwl_mld_link *link;

	if (WARN_ON(key->link_id < 0))
		return -EINVAL;

	link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
	if (WARN_ON(!link))
		return -EINVAL;

	for (int slot = 0; slot < ARRAY_SIZE(link->ap_early_keys); slot++) {
		if (link->ap_early_keys[slot])
			continue;

		link->ap_early_keys[slot] = key;
		return 0;
	}

	return -ENOSPC;
}
/* Program all keys that were stored before the AP was started.
 *
 * Each stored key is removed from its slot and sent to the FW; stops at
 * the first failure and returns its error code, 0 otherwise.
 */
static int iwl_mld_send_ap_early_keys(struct iwl_mld *mld,
				      struct ieee80211_vif *vif,
				      struct ieee80211_bss_conf *link)
{
	struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
	int ret = 0;

	/* Check the derived pointer, not @link: mld_link is what the loop
	 * below dereferences, and it is NULL when the link carries no
	 * driver data. (The previous "WARN_ON(!link)" could never catch
	 * that, since a NULL @link would already have crashed the helper
	 * above.)
	 */
	if (WARN_ON(!mld_link))
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(mld_link->ap_early_keys); i++) {
		struct ieee80211_key_conf *key = mld_link->ap_early_keys[i];

		if (!key)
			continue;

		/* Take the key out of the slot before programming it */
		mld_link->ap_early_keys[i] = NULL;

		ret = iwl_mld_add_key(mld, vif, NULL, key);
		if (ret)
			break;
	}
	return ret;
}
/* Start an AP/GO/IBSS: push the beacon template, update the link in the
 * FW, add the multicast and broadcast stations, and program any keys
 * configured before the AP was started. On failure, everything added so
 * far is torn down again.
 */
int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  struct ieee80211_bss_conf *link)
{
	struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (vif->type == NL80211_IFTYPE_AP)
		iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);

	ret = iwl_mld_update_beacon_template(mld, vif, link);
	if (ret)
		return ret;

	/* the link should be already activated when assigning chan context,
	 * and LINK_CONTEXT_MODIFY_EHT_PARAMS is deprecated
	 */
	ret = iwl_mld_change_link_in_fw(mld, link,
					LINK_CONTEXT_MODIFY_ALL &
					~(LINK_CONTEXT_MODIFY_ACTIVE |
					  LINK_CONTEXT_MODIFY_EHT_PARAMS));
	if (ret)
		return ret;

	ret = iwl_mld_add_mcast_sta(mld, vif, link);
	if (ret)
		return ret;

	ret = iwl_mld_add_bcast_sta(mld, vif, link);
	if (ret)
		goto rm_mcast;

	/* Those keys were configured by the upper layers before starting the
	 * AP. Now that it is started and the bcast and mcast sta were added to
	 * the FW, we can add the keys too.
	 */
	ret = iwl_mld_send_ap_early_keys(mld, vif, link);
	if (ret)
		goto rm_bcast;

	/* A plain AP (not a P2P GO) is marked latency-sensitive */
	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
		iwl_mld_vif_update_low_latency(mld, vif, true,
					       LOW_LATENCY_VIF_TYPE);

	mld_vif->ap_ibss_active = true;

	/* Tell the P2P Device MAC that a GO was started */
	if (vif->p2p && mld->p2p_device_vif)
		return iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
					     FW_CTXT_ACTION_MODIFY);

	return 0;
rm_bcast:
	iwl_mld_remove_bcast_sta(mld, vif, link);
rm_mcast:
	iwl_mld_remove_mcast_sta(mld, vif, link);
	return ret;
}
void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link)
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
mld_vif->ap_ibss_active = false;
if (vif->p2p && mld->p2p_device_vif)
iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
FW_CTXT_ACTION_MODIFY);
if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
iwl_mld_vif_update_low_latency(mld, vif, false,
LOW_LATENCY_VIF_TYPE);
iwl_mld_remove_bcast_sta(mld, vif, link);
iwl_mld_remove_mcast_sta(mld, vif, link);
}

View File

@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_ap_h__
#define __iwl_ap_h__
#include "mld.h"
#include "iface.h"
#include "fw/api/tx.h"
int iwl_mld_update_beacon_template(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
int iwl_mld_store_ap_early_key(struct iwl_mld *mld,
struct ieee80211_key_conf *key,
struct iwl_mld_vif *mld_vif);
void iwl_mld_free_ap_early_key(struct iwl_mld *mld,
struct ieee80211_key_conf *key,
struct iwl_mld_vif *mld_vif);
u8 iwl_mld_get_rate_flags(struct iwl_mld *mld,
struct ieee80211_tx_info *info,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link,
enum nl80211_band band);
void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index,
u8 *beacon, u32 frame_size);
int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld,
struct sk_buff *beacon,
struct iwl_mac_beacon_cmd *cmd);
#endif /* __iwl_ap_h__ */

View File

@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "fw/api/coex.h"
#include "coex.h"
#include "mld.h"
#include "hcmd.h"
#include "mlo.h"
/* Send the initial BT-coex configuration to the FW */
int iwl_mld_send_bt_init_conf(struct iwl_mld *mld)
{
	struct iwl_bt_coex_cmd cmd = {};

	cmd.mode = cpu_to_le32(BT_COEX_NW);
	cmd.enabled_modules = cpu_to_le32(BT_COEX_MPLUT_ENABLED |
					  BT_COEX_HIGH_BAND_RET);

	return iwl_mld_send_cmd_pdu(mld, BT_CONFIG, &cmd);
}
void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt)
{
const struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
const struct iwl_bt_coex_profile_notif zero_notif = {};
/* zeroed structure means that BT is OFF */
bool bt_is_active = memcmp(notif, &zero_notif, sizeof(*notif));
if (bt_is_active == mld->bt_is_active)
return;
IWL_DEBUG_INFO(mld, "BT was turned %s\n", bt_is_active ? "ON" : "OFF");
mld->bt_is_active = bt_is_active;
iwl_mld_emlsr_check_bt(mld);
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_coex_h__
#define __iwl_mld_coex_h__
#include "mld.h"
int iwl_mld_send_bt_init_conf(struct iwl_mld *mld);
void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
#endif /* __iwl_mld_coex_h__ */

View File

@ -0,0 +1,88 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_constants_h__
#define __iwl_mld_constants_h__
#define IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD 4
#define IWL_MLD_MISSED_BEACONS_THRESHOLD 8
#define IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG 19
#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH 15
#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11
#define IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH -72
#define IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
#define IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
#define IWL_MLD_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MLD_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
/* Was defined twice with the same value; keep a single definition */
#define IWL_MLD_PS_SNOOZE_INTERVAL		25
#define IWL_MLD_PS_SNOOZE_WINDOW 50
#define IWL_MLD_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
#define IWL_MLD_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
#define IWL_MLD_PS_HEAVY_TX_THLD_PERCENT 50
#define IWL_MLD_PS_HEAVY_RX_THLD_PERCENT 50
#define IWL_MLD_PS_HEAVY_TX_THLD_PACKETS 20
#define IWL_MLD_PS_HEAVY_RX_THLD_PACKETS 8
#define IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC 30
#define IWL_MLD_SCAN_EXPIRE_TIME_SEC 20
#define IWL_MLD_TPT_COUNT_WINDOW (5 * HZ)
/* OMI reduced BW thresholds (channel load percentage) */
#define IWL_MLD_OMI_ENTER_CHAN_LOAD 10
#define IWL_MLD_OMI_EXIT_CHAN_LOAD_160 20
#define IWL_MLD_OMI_EXIT_CHAN_LOAD_320 30
/* time (in milliseconds) to let AP "settle" the OMI */
#define IWL_MLD_OMI_AP_SETTLE_DELAY 27
/* time (in milliseconds) to not enter OMI reduced BW after leaving */
#define IWL_MLD_OMI_EXIT_PROTECTION 5000
#define IWL_MLD_DIS_RANDOM_FW_ID false
#define IWL_MLD_D3_DEBUG false
#define IWL_MLD_NON_TRANSMITTING_AP false
#define IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
#define IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
#define IWL_MLD_CONN_LISTEN_INTERVAL 10
#define IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE 0
#define IWL_MLD_AUTO_EML_ENABLE true
#define IWL_MLD_HIGH_RSSI_THRESH_20MHZ -67
#define IWL_MLD_LOW_RSSI_THRESH_20MHZ -72
#define IWL_MLD_HIGH_RSSI_THRESH_40MHZ -64
#define IWL_MLD_LOW_RSSI_THRESH_40MHZ -72
#define IWL_MLD_HIGH_RSSI_THRESH_80MHZ -61
#define IWL_MLD_LOW_RSSI_THRESH_80MHZ -72
#define IWL_MLD_HIGH_RSSI_THRESH_160MHZ -58
#define IWL_MLD_LOW_RSSI_THRESH_160MHZ -72
#define IWL_MLD_ENTER_EMLSR_TPT_THRESH 400
#define IWL_MLD_CHAN_LOAD_THRESH 2 /* in percentage */
#define IWL_MLD_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MLD_FTM_INITIATOR_DYNACK true
#define IWL_MLD_FTM_LMR_FEEDBACK_TERMINATE false
#define IWL_MLD_FTM_TEST_INCORRECT_SAC false
#define IWL_MLD_FTM_R2I_MAX_REP 7
#define IWL_MLD_FTM_I2R_MAX_REP 7
#define IWL_MLD_FTM_R2I_MAX_STS 1
#define IWL_MLD_FTM_I2R_MAX_STS 1
#define IWL_MLD_FTM_R2I_MAX_TOTAL_LTF 3
#define IWL_MLD_FTM_I2R_MAX_TOTAL_LTF 3
#define IWL_MLD_FTM_RESP_NDP_SUPPORT true
#define IWL_MLD_FTM_RESP_LMR_FEEDBACK_SUPPORT true
#define IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7
#define IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
#endif /* __iwl_mld_constants_h__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_d3_h__
#define __iwl_mld_d3_h__
#include "fw/api/d3.h"
/**
 * struct iwl_mld_rekey_data - GTK rekeying material for WoWLAN offload
 *
 * @valid: whether the data below has been set
 * @kck: key confirmation key
 * @kek: key encryption key
 * @kck_len: length of @kck in bytes
 * @kek_len: length of @kek in bytes
 * @replay_ctr: GTK rekey replay counter
 * @akm: AKM suite in use (as provided by mac80211 -- TODO confirm exact
 *	encoding against iwl_mld_set_rekey_data())
 */
struct iwl_mld_rekey_data {
	bool valid;
	u8 kck[NL80211_KCK_EXT_LEN];
	u8 kek[NL80211_KEK_EXT_LEN];
	size_t kck_len;
	size_t kek_len;
	__le64 replay_ctr;
	u32 akm;
};
/**
* struct iwl_mld_wowlan_data - data used by the wowlan suspend flow
*
* @target_ipv6_addrs: IPv6 addresses on this interface for offload
* @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs
* @num_target_ipv6_addrs: number of @target_ipv6_addrs
* @rekey_data: security key data used for rekeying during D3
*/
struct iwl_mld_wowlan_data {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
int num_target_ipv6_addrs;
#endif
struct iwl_mld_rekey_data rekey_data;
};
int iwl_mld_no_wowlan_resume(struct iwl_mld *mld);
int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld);
int iwl_mld_wowlan_suspend(struct iwl_mld *mld,
struct cfg80211_wowlan *wowlan);
int iwl_mld_wowlan_resume(struct iwl_mld *mld);
void iwl_mld_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *data);
#if IS_ENABLED(CONFIG_IPV6)
void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct inet6_dev *idev);
#endif
#endif /* __iwl_mld_d3_h__ */

View File

@ -0,0 +1,823 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "mld.h"
#include "debugfs.h"
#include "iwl-io.h"
#include "hcmd.h"
#include "iface.h"
#include "sta.h"
#include "tlc.h"
#include "power.h"
#include "notif.h"
#include "ap.h"
#include "iwl-utils.h"
#ifdef CONFIG_THERMAL
#include "thermal.h"
#endif
#include "fw/api/rs.h"
#include "fw/api/dhc.h"
#include "fw/api/rfi.h"
#define MLD_DEBUGFS_READ_FILE_OPS(name, bufsz) \
_MLD_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_mld)
#define MLD_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
debugfs_create_file(alias, mode, parent, mld, \
&iwl_dbgfs_##name##_ops)
#define MLD_DEBUGFS_ADD_FILE(name, parent, mode) \
MLD_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
static bool iwl_mld_dbgfs_fw_cmd_disabled(struct iwl_mld *mld)
{
#ifdef CONFIG_PM_SLEEP
return !mld->fw_status.running || mld->fw_status.in_d3;
#else
return !mld->fw_status.running;
#endif /* CONFIG_PM_SLEEP */
}
/* debugfs "fw_dbg_clear": wipe the FW debug monitor buffer */
static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mld *mld,
					    char *buf, size_t count)
{
	/* If the firmware is not running, silently succeed since there is
	 * no data to clear.
	 */
	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return 0;

	iwl_fw_dbg_clear_monitor_buf(&mld->fwrt);

	return count;
}
/* debugfs "fw_nmi": force an NMI in the firmware. Writing "nolog"
 * suppresses the resulting debug dump once.
 */
static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mld *mld, char *buf,
				      size_t count)
{
	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	IWL_ERR(mld, "Triggering an NMI from debugfs\n");

	/* count == 6 matches exactly "nolog\n" */
	if (count == 6 && !strcmp(buf, "nolog\n"))
		mld->fw_status.do_not_dump_once = true;

	iwl_force_nmi(mld->trans);

	return count;
}
/* debugfs "fw_restart": crash the FW on purpose (by sending REPLY_ERROR)
 * to trigger the restart flow. Writing "nolog" suppresses the dump and
 * the command-error report once. Requires the fw_restart module param.
 */
static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mld *mld, char *buf,
					  size_t count)
{
	int __maybe_unused ret;

	if (!iwlwifi_mod_params.fw_restart)
		return -EPERM;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	/* count == 6 matches exactly "nolog\n" */
	if (count == 6 && !strcmp(buf, "nolog\n")) {
		mld->fw_status.do_not_dump_once = true;
		set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mld->trans->status);
	}

	/* take the return value to make compiler happy - it will
	 * fail anyway
	 */
	ret = iwl_mld_send_cmd_empty(mld, WIDE_ID(LONG_GROUP, REPLY_ERROR));

	return count;
}
/* debugfs "send_echo_cmd": ping the FW with an ECHO_CMD */
static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mld *mld, char *buf,
					     size_t count)
{
	int ret;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	ret = iwl_mld_send_cmd_empty(mld, ECHO_CMD);
	return ret ? ret : count;
}
/* Context handed to iwl_mld_sniffer_apply() through the notification
 * waiter.
 */
struct iwl_mld_sniffer_apply {
	struct iwl_mld *mld;
	const u8 *bssid; /* points into the command's bssid field */
	u16 aid;
};
/* Notification-wait callback: commit the new sniffer AID/BSSID in
 * sequence with RX processing (see the comment at the caller).
 */
static bool iwl_mld_sniffer_apply(struct iwl_notif_wait_data *notif_data,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mld_sniffer_apply *apply = data;

	apply->mld->monitor.cur_aid = cpu_to_le16(apply->aid);
	memcpy(apply->mld->monitor.cur_bssid, apply->bssid,
	       sizeof(apply->mld->monitor.cur_bssid));

	/* done waiting */
	return true;
}
/* debugfs "he_sniffer_params" (write): "<aid> <bssid>" -- configure the
 * HE sniffer to follow the given station and record the new AID/BSSID
 * in sync with the RX path.
 */
static ssize_t
iwl_dbgfs_he_sniffer_params_write(struct iwl_mld *mld, char *buf,
				  size_t count)
{
	struct iwl_notification_wait wait;
	struct iwl_he_monitor_cmd he_mon_cmd = {};
	struct iwl_mld_sniffer_apply apply = {
		.mld = mld,
	};
	u16 wait_cmds[] = {
		WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
	};
	u32 aid;
	int ret;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	if (!mld->monitor.on)
		return -ENODEV;

	/* expect exactly "<aid> xx:xx:xx:xx:xx:xx" */
	ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
		     &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
		     &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
		     &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
	if (ret != 7)
		return -EINVAL;

	he_mon_cmd.aid = cpu_to_le16(aid);

	apply.aid = aid;
	apply.bssid = (void *)he_mon_cmd.bssid;

	/* Use the notification waiter to get our function triggered
	 * in sequence with other RX. This ensures that frames we get
	 * on the RX queue _before_ the new configuration is applied
	 * still have mld->cur_aid pointing to the old AID, and that
	 * frames on the RX queue _after_ the firmware processed the
	 * new configuration (and sent the response, synchronously)
	 * get mld->cur_aid correctly set to the new AID.
	 */
	iwl_init_notification_wait(&mld->notif_wait, &wait,
				   wait_cmds, ARRAY_SIZE(wait_cmds),
				   iwl_mld_sniffer_apply, &apply);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(DATA_PATH_GROUP,
					   HE_AIR_SNIFFER_CONFIG_CMD),
				   &he_mon_cmd);

	/* no need to really wait, we already did anyway */
	iwl_remove_notification(&mld->notif_wait, &wait);

	return ret ?: count;
}
/* debugfs "he_sniffer_params" (read): report the currently configured
 * sniffer AID and BSSID in the same format the write handler accepts.
 */
static ssize_t
iwl_dbgfs_he_sniffer_params_read(struct iwl_mld *mld, size_t count,
				 char *buf)
{
	return scnprintf(buf, count,
			 "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
			 le16_to_cpu(mld->monitor.cur_aid),
			 mld->monitor.cur_bssid[0], mld->monitor.cur_bssid[1],
			 mld->monitor.cur_bssid[2], mld->monitor.cur_bssid[3],
			 mld->monitor.cur_bssid[4], mld->monitor.cur_bssid[5]);
}
/* The size computation is as follows:
* each number needs at most 3 characters, number of rows is the size of
* the table; So, need 5 chars for the "freq: " part and each tuple afterwards
* needs 6 characters for numbers and 5 for the punctuation around. 32 bytes
* for feature support message.
*/
#define IWL_RFI_DDR_BUF_SIZE (IWL_RFI_DDR_LUT_INSTALLED_SIZE *\
(5 + IWL_RFI_DDR_LUT_ENTRY_CHANNELS_NUM *\
(6 + 5)) + 32)
#define IWL_RFI_DLVR_BUF_SIZE (IWL_RFI_DLVR_LUT_INSTALLED_SIZE *\
(5 + IWL_RFI_DLVR_LUT_ENTRY_CHANNELS_NUM *\
(6 + 5)) + 32)
#define IWL_RFI_DESENSE_BUF_SIZE IWL_RFI_DDR_BUF_SIZE
/* Extra 32 for "DDR and DLVR table" message */
#define IWL_RFI_BUF_SIZE (IWL_RFI_DDR_BUF_SIZE + IWL_RFI_DLVR_BUF_SIZE +\
IWL_RFI_DESENSE_BUF_SIZE + 32)
WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_nmi, 10);
WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_restart, 10);
WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(he_sniffer_params, 32);
WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_dbg_clear, 10);
WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(send_echo_cmd, 8);
/* debugfs "wifi_6e_enable": show the BIOS DSM value controlling 6 GHz */
static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct iwl_mld *mld,
					     size_t count, u8 *buf)
{
	u32 value;
	int err = iwl_bios_get_dsm(&mld->fwrt, DSM_FUNC_ENABLE_6E, &value);

	if (err)
		return err;

	return scnprintf(buf, count, "0x%08x\n", value);
}
MLD_DEBUGFS_READ_FILE_OPS(wifi_6e_enable, 64);
/* debugfs "inject_packet": feed a hex-encoded RX packet into the normal
 * RX path as if it arrived from the FW. The input must decode to a full
 * iwl_rx_packet whose length field matches the decoded size.
 */
static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mld *mld,
					     char *buf, size_t count)
{
	/* Recover the op_mode wrapper; mld lives in op_mode_specific */
	struct iwl_op_mode *opmode = container_of((void *)mld,
						  struct iwl_op_mode,
						  op_mode_specific);
	struct iwl_rx_cmd_buffer rxb = {};
	struct iwl_rx_packet *pkt;
	int n_bytes = count / 2; /* two hex chars per byte */
	int ret = -EINVAL;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	rxb._page = alloc_pages(GFP_KERNEL, 0);
	if (!rxb._page)
		return -ENOMEM;

	pkt = rxb_addr(&rxb);

	ret = hex2bin(page_address(rxb._page), buf, n_bytes);
	if (ret)
		goto out;

	/* avoid invalid memory access and malformed packet */
	if (n_bytes < sizeof(*pkt) ||
	    n_bytes != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt))
		goto out;

	local_bh_disable();
	iwl_mld_rx(opmode, NULL, &rxb);
	local_bh_enable();
	ret = 0;

out:
	/* iwl_free_rxb releases the page we allocated above */
	iwl_free_rxb(&rxb);

	return ret ?: count;
}
WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(inject_packet, 512);
#ifdef CONFIG_THERMAL
/* debugfs "stop_ctdp": stop cTDP at the current cooling state */
static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mld *mld,
					 char *buf, size_t count)
{
	int ret;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_STOP);
	return ret ? ret : count;
}
WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(stop_ctdp, 8);
/* debugfs "start_ctdp": (re)start cTDP at the current cooling state */
static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mld *mld,
					  char *buf, size_t count)
{
	int ret;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_START);
	return ret ? ret : count;
}
WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(start_ctdp, 8);
#endif /* CONFIG_THERMAL */
/* Register the mld-level debugfs files, plus a convenience symlink from
 * the mac80211 wiphy debugfs dir back to the iwlwifi one.
 */
void
iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
{
	/* Add debugfs files here */

	MLD_DEBUGFS_ADD_FILE(fw_nmi, debugfs_dir, 0200);
	MLD_DEBUGFS_ADD_FILE(fw_restart, debugfs_dir, 0200);
	MLD_DEBUGFS_ADD_FILE(wifi_6e_enable, debugfs_dir, 0400);
	MLD_DEBUGFS_ADD_FILE(he_sniffer_params, debugfs_dir, 0600);
	MLD_DEBUGFS_ADD_FILE(fw_dbg_clear, debugfs_dir, 0200);
	MLD_DEBUGFS_ADD_FILE(send_echo_cmd, debugfs_dir, 0200);
#ifdef CONFIG_THERMAL
	MLD_DEBUGFS_ADD_FILE(start_ctdp, debugfs_dir, 0200);
	MLD_DEBUGFS_ADD_FILE(stop_ctdp, debugfs_dir, 0200);
#endif
	MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200);

	/* Create a symlink with mac80211. It will be removed when mac80211
	 * exits (before the opmode exits which removes the target.)
	 */
	if (!IS_ERR(debugfs_dir)) {
		char buf[100];

		/* "../../" climbs out of the wiphy dir to the device dir */
		snprintf(buf, 100, "../../%pd2", debugfs_dir->d_parent);
		debugfs_create_symlink("iwlwifi", mld->wiphy->debugfsdir,
				       buf);
	}
}
#define VIF_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
WIPHY_DEBUGFS_WRITE_FILE_OPS(vif_##name, bufsz, vif)
#define VIF_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(vif_##name, bufsz, vif) \
#define VIF_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
debugfs_create_file(alias, mode, parent, vif, \
&iwl_dbgfs_vif_##name##_ops)
#define VIF_DEBUGFS_ADD_FILE(name, parent, mode) \
VIF_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
/* debugfs "bf_params": "bf_enable_beacon_filter=<0|1>" -- enable or
 * disable beacon filtering on the vif's first active link.
 */
static ssize_t iwl_dbgfs_vif_bf_params_write(struct iwl_mld *mld, char *buf,
					     size_t count, void *data)
{
	struct ieee80211_vif *vif = data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
	struct ieee80211_bss_conf *link_conf;
	int val;

	/* "bf_enable_beacon_filter=" is 24 characters */
	if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
		if (sscanf(buf + 24, "%d", &val) != 1)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	if (val != 0 && val != 1)
		return -EINVAL;

	link_conf = link_conf_dereference_protected(vif, link_id);
	if (WARN_ON(!link_conf))
		return -ENODEV;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	/* remember the override so later flows don't re-enable it */
	mld_vif->disable_bf = !val;

	if (val)
		return iwl_mld_enable_beacon_filter(mld, link_conf,
						    false) ?: count;
	else
		return iwl_mld_disable_beacon_filter(mld, vif) ?: count;
}
/* debugfs "pm_params": "use_ps_poll=<val>" -- toggle PS-poll usage and
 * re-send the MAC power command.
 */
static ssize_t iwl_dbgfs_vif_pm_params_write(struct iwl_mld *mld,
					     char *buf,
					     size_t count, void *data)
{
	struct ieee80211_vif *vif = data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int val;

	/* "use_ps_poll=" is 12 characters */
	if (!strncmp("use_ps_poll=", buf, 12)) {
		if (sscanf(buf + 12, "%d", &val) != 1)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	mld_vif->use_ps_poll = val;

	return iwl_mld_update_mac_power(mld, vif, false) ?: count;
}
/* debugfs "low_latency" (write): force the debugfs low-latency cause
 * on (1) or off (0) for this vif.
 */
static ssize_t iwl_dbgfs_vif_low_latency_write(struct iwl_mld *mld,
					       char *buf, size_t count,
					       void *data)
{
	struct ieee80211_vif *vif = data;
	u8 enable;
	int err = kstrtou8(buf, 0, &enable);

	if (err)
		return err;

	if (enable > 1)
		return -EINVAL;

	iwl_mld_vif_update_low_latency(mld, vif, enable, LOW_LATENCY_DEBUGFS);

	return count;
}
/* debugfs "low_latency" (read): report each low-latency cause bit and
 * the resulting overall state.
 */
static ssize_t iwl_dbgfs_vif_low_latency_read(struct ieee80211_vif *vif,
					      size_t count, char *buf)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	char format[] = "traffic=%d\ndbgfs=%d\nvif_type=%d\nactual=%d\n";
	u8 ll_causes;

	if (WARN_ON(count < sizeof(format)))
		return -EINVAL;

	/* read once; may be updated concurrently from the stats worker */
	ll_causes = READ_ONCE(mld_vif->low_latency_causes);

	/* all values in format are boolean so the size of format is enough
	 * for holding the result string
	 */
	return scnprintf(buf, count, format,
			 !!(ll_causes & LOW_LATENCY_TRAFFIC),
			 !!(ll_causes & LOW_LATENCY_DEBUGFS),
			 !!(ll_causes & LOW_LATENCY_VIF_TYPE),
			 !!(ll_causes));
}
VIF_DEBUGFS_WRITE_FILE_OPS(pm_params, 32);
VIF_DEBUGFS_WRITE_FILE_OPS(bf_params, 32);
VIF_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 45);
/* Append a hex-encoded element to every valid link's beacon template
 * (or restore the original templates when @restore is true) and push
 * the result to the FW, bypassing the regular beacon update path.
 *
 * While injection is active, iwl_mld_update_beacon_template() refuses
 * regular updates (see mld_vif->beacon_inject_active).
 *
 * The caller is responsible for resetting hw->extra_beacon_tailroom
 * afterwards (both callers below do so on all paths).
 */
static int
_iwl_dbgfs_inject_beacon_ie(struct iwl_mld *mld, struct ieee80211_vif *vif,
			    char *bin, ssize_t len,
			    bool restore)
{
	struct iwl_mld_vif *mld_vif;
	struct iwl_mld_link *mld_link;
	struct iwl_mac_beacon_cmd beacon_cmd = {};
	int n_bytes = len / 2; /* two hex chars per byte */

	/* Element len should be represented by u8 */
	if (n_bytes >= U8_MAX)
		return -EINVAL;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	if (!vif)
		return -EINVAL;

	mld_vif = iwl_mld_vif_from_mac80211(vif);
	mld_vif->beacon_inject_active = true;
	/* reserve room in the template for the injected element */
	mld->hw->extra_beacon_tailroom = n_bytes;

	/* NOTE(review): link_id appears to be provided by the iteration
	 * macro -- confirm against for_each_mld_vif_valid_link().
	 */
	for_each_mld_vif_valid_link(mld_vif, mld_link) {
		u32 offset;
		struct ieee80211_tx_info *info;
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);
		struct ieee80211_chanctx_conf *ctx =
			wiphy_dereference(mld->wiphy, link_conf->chanctx_conf);
		struct sk_buff *beacon =
			ieee80211_beacon_get_template(mld->hw, vif,
						      NULL, link_id);

		if (!beacon)
			return -EINVAL;

		/* decode the element into the reserved tailroom */
		if (!restore && (WARN_ON(!n_bytes || !bin) ||
				 hex2bin(skb_put_zero(beacon, n_bytes),
					 bin, n_bytes))) {
			dev_kfree_skb(beacon);
			return -EINVAL;
		}

		info = IEEE80211_SKB_CB(beacon);

		beacon_cmd.flags =
			cpu_to_le16(iwl_mld_get_rate_flags(mld, info, vif,
							   link_conf,
							   ctx->def.chan->band));
		beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
		beacon_cmd.link_id =
			cpu_to_le32(mld_link->fw_id);

		iwl_mld_set_tim_idx(mld, &beacon_cmd.tim_idx,
				    beacon->data, beacon->len);

		offset = iwl_find_ie_offset(beacon->data,
					    WLAN_EID_S1G_TWT,
					    beacon->len);

		beacon_cmd.btwt_offset = cpu_to_le32(offset);

		iwl_mld_send_beacon_template_cmd(mld, beacon, &beacon_cmd);
		dev_kfree_skb(beacon);
	}

	if (restore)
		mld_vif->beacon_inject_active = false;

	return 0;
}
/* debugfs "inject_beacon_ie": hex-encoded element to append to the
 * beacon template of every valid link.
 */
static ssize_t
iwl_dbgfs_vif_inject_beacon_ie_write(struct iwl_mld *mld,
				     char *buf, size_t count,
				     void *data)
{
	struct ieee80211_vif *vif = data;
	int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, buf,
					      count, false);

	/* tailroom was only needed while building the template */
	mld->hw->extra_beacon_tailroom = 0;

	return ret ?: count;
}
VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512);
/* debugfs "inject_beacon_ie_restore": drop the injected element and
 * push the unmodified beacon templates back to the FW.
 */
static ssize_t
iwl_dbgfs_vif_inject_beacon_ie_restore_write(struct iwl_mld *mld,
					     char *buf,
					     size_t count,
					     void *data)
{
	struct ieee80211_vif *vif = data;
	int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, NULL,
					      0, true);

	/* tailroom was only needed while injection was active */
	mld->hw->extra_beacon_tailroom = 0;

	return ret ?: count;
}
VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
/* debugfs "twt_setup": trigger a TWT session via the debug host command
 * (DHC) interface.
 *
 * Input: "<twt_operation> <target_wake_time> <interval_exp>
 * <interval_mantissa> <min_wake_duration> <trigger> <flow_type>
 * <flow_id> <protection> [<tenth_param>]"
 */
static ssize_t
iwl_dbgfs_vif_twt_setup_write(struct iwl_mld *mld, char *buf, size_t count,
			      void *data)
{
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, DEBUG_HOST_COMMAND),
	};
	struct ieee80211_vif *vif = data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_dhc_twt_operation *dhc_twt_cmd;
	struct iwl_dhc_cmd *cmd __free(kfree); /* freed automatically */
	u64 target_wake_time;
	u32 twt_operation, interval_exp, interval_mantissa, min_wake_duration;
	u8 trigger, flow_type, flow_id, protection, tenth_param;
	u8 twt_request = 1, broadcast = 0;
	int ret;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	ret = sscanf(buf, "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu",
		     &twt_operation, &target_wake_time, &interval_exp,
		     &interval_mantissa, &min_wake_duration, &trigger,
		     &flow_type, &flow_id, &protection, &tenth_param);

	/* the new twt_request parameter is optional for station */
	if ((ret != 9 && ret != 10) ||
	    (ret == 10 && vif->type != NL80211_IFTYPE_STATION &&
	     tenth_param == 1))
		return -EINVAL;

	/* The 10th parameter:
	 * In STA mode - the TWT type (broadcast or individual)
	 * In AP mode - the role (0 responder, 2 unsolicited)
	 */
	if (ret == 10) {
		if (vif->type == NL80211_IFTYPE_STATION)
			broadcast = tenth_param;
		else
			twt_request = tenth_param;
	}

	/* the DHC payload immediately follows the command header */
	cmd = kzalloc(sizeof(*cmd) + sizeof(*dhc_twt_cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	dhc_twt_cmd = (void *)cmd->data;
	dhc_twt_cmd->mac_id = cpu_to_le32(mld_vif->fw_id);
	dhc_twt_cmd->twt_operation = cpu_to_le32(twt_operation);
	dhc_twt_cmd->target_wake_time = cpu_to_le64(target_wake_time);
	dhc_twt_cmd->interval_exp = cpu_to_le32(interval_exp);
	dhc_twt_cmd->interval_mantissa = cpu_to_le32(interval_mantissa);
	dhc_twt_cmd->min_wake_duration = cpu_to_le32(min_wake_duration);
	dhc_twt_cmd->trigger = trigger;
	dhc_twt_cmd->flow_type = flow_type;
	dhc_twt_cmd->flow_id = flow_id;
	dhc_twt_cmd->protection = protection;
	dhc_twt_cmd->twt_request = twt_request;
	dhc_twt_cmd->negotiation_type = broadcast ? 3 : 0;

	/* DHC length is expressed in dwords */
	cmd->length = cpu_to_le32(sizeof(*dhc_twt_cmd) >> 2);
	cmd->index_and_mask =
		cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
			    DHC_INT_UMAC_TWT_OPERATION);

	hcmd.len[0] = sizeof(*cmd) + sizeof(*dhc_twt_cmd);
	hcmd.data[0] = cmd;

	ret = iwl_mld_send_cmd(mld, &hcmd);

	return ret ?: count;
}
VIF_DEBUGFS_WRITE_FILE_OPS(twt_setup, 256);
/* debugfs "twt_operation": send a TWT_OPERATION_CMD built from 21
 * space-separated numeric fields (see the sscanf format below for the
 * order). Only a single active link is supported.
 */
static ssize_t
iwl_dbgfs_vif_twt_operation_write(struct iwl_mld *mld, char *buf, size_t count,
				  void *data)
{
	struct ieee80211_vif *vif = data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_twt_operation_cmd twt_cmd = {};
	int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
	struct iwl_mld_link *mld_link = iwl_mld_link_dereference_check(mld_vif,
								       link_id);
	int ret;

	if (WARN_ON(!mld_link))
		return -ENODEV;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	/* ambiguous which link to use with more than one active */
	if (hweight16(vif->active_links) > 1)
		return -EOPNOTSUPP;

	ret = sscanf(buf,
		     "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu",
		     &twt_cmd.twt_operation, &twt_cmd.target_wake_time,
		     &twt_cmd.interval_exponent, &twt_cmd.interval_mantissa,
		     &twt_cmd.minimum_wake_duration, &twt_cmd.trigger,
		     &twt_cmd.flow_type, &twt_cmd.flow_id,
		     &twt_cmd.twt_protection, &twt_cmd.ndp_paging_indicator,
		     &twt_cmd.responder_pm_mode, &twt_cmd.negotiation_type,
		     &twt_cmd.twt_request, &twt_cmd.implicit,
		     &twt_cmd.twt_group_assignment, &twt_cmd.twt_channel,
		     &twt_cmd.restricted_info_present, &twt_cmd.dl_bitmap_valid,
		     &twt_cmd.ul_bitmap_valid, &twt_cmd.dl_tid_bitmap,
		     &twt_cmd.ul_tid_bitmap);

	if (ret != 21)
		return -EINVAL;

	twt_cmd.link_id = cpu_to_le32(mld_link->fw_id);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(MAC_CONF_GROUP, TWT_OPERATION_CMD),
				   &twt_cmd);
	return ret ?: count;
}
VIF_DEBUGFS_WRITE_FILE_OPS(twt_operation, 256);
/* mac80211 callback: create the per-vif "iwlmld" debugfs directory, its
 * files, and a convenience symlink from the device debugfs dir to it.
 *
 * NOTE(review): the VIF_DEBUGFS_ADD_FILE() macros (defined earlier in
 * this file) implicitly reference local variables of this function.
 */
void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{
	struct dentry *mld_vif_dbgfs =
		debugfs_create_dir("iwlmld", vif->debugfs_dir);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
	/* sized for "../../../" + the %pd3 expansion + "/iwlmld" + NUL */
	char target[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
		    (7 + IFNAMSIZ + 1) + 6 + 1];
	char name[7 + IFNAMSIZ + 1];

	/* Create symlink for convenience pointing to interface specific
	 * debugfs entries for the driver. For example, under
	 * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmld/
	 * find
	 * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmld/
	 */
	snprintf(name, sizeof(name), "%pd", vif->debugfs_dir);
	snprintf(target, sizeof(target), "../../../%pd3/iwlmld",
		 vif->debugfs_dir);
	mld_vif->dbgfs_slink =
		debugfs_create_symlink(name, mld->debugfs_dir, target);

	/* power files only make sense for stations not in always-on (CAM) */
	if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
	    vif->type == NL80211_IFTYPE_STATION) {
		VIF_DEBUGFS_ADD_FILE(pm_params, mld_vif_dbgfs, 0200);
		VIF_DEBUGFS_ADD_FILE(bf_params, mld_vif_dbgfs, 0200);
	}

	/* beacon IE injection is AP-only */
	if (vif->type == NL80211_IFTYPE_AP) {
		VIF_DEBUGFS_ADD_FILE(inject_beacon_ie, mld_vif_dbgfs, 0200);
		VIF_DEBUGFS_ADD_FILE(inject_beacon_ie_restore,
				     mld_vif_dbgfs, 0200);
	}

	VIF_DEBUGFS_ADD_FILE(low_latency, mld_vif_dbgfs, 0600);
	VIF_DEBUGFS_ADD_FILE(twt_setup, mld_vif_dbgfs, 0200);
	VIF_DEBUGFS_ADD_FILE(twt_operation, mld_vif_dbgfs, 0200);
}
/* Per-link debugfs helpers: write handlers run under the wiphy lock and
 * receive the ieee80211_bss_conf as their argument.
 */
#define LINK_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
	WIPHY_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, bss_conf)

/* Add a per-link file; 'link_conf' must be in scope at the use site */
#define LINK_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
	debugfs_create_file(alias, mode, parent, link_conf, \
			    &iwl_dbgfs_link_##name##_ops)
#define LINK_DEBUGFS_ADD_FILE(name, parent, mode) \
	LINK_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
/* mac80211 callback: make sure the link's debugfs dir has an "iwlmld"
 * subdirectory.
 *
 * For non-MLO vifs, the dir of deflink is the same as the vif's one,
 * so if the iwlmld dir already exists, this means that this is deflink.
 * If not, this is a per-link dir of a MLO vif, add the iwlmld dir in it.
 *
 * Fix: debugfs_lookup() returns the dentry with an extra reference that
 * the caller owns; drop it with dput() when we don't create the dir
 * ourselves, otherwise the dentry reference is leaked.
 */
void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_bss_conf *link_conf,
			      struct dentry *dir)
{
	struct dentry *mld_link_dir;

	mld_link_dir = debugfs_lookup("iwlmld", dir);

	if (!mld_link_dir)
		mld_link_dir = debugfs_create_dir("iwlmld", dir);
	else
		dput(mld_link_dir); /* release the lookup reference */
}
/* Write handler for the per-link-sta "fixed_rate" debugfs file.
 *
 * Input: "<rate> [partial]". Sends a TLC debug host command forcing the
 * given (partially-)fixed rate for the station.
 *
 * Fix: sscanf() returns EOF (a negative value) when the buffer contains
 * no convertible input at all, so the old "== 0" check let an empty
 * write fall through and use the uninitialized 'rate'. Require at least
 * one conversion instead.
 */
static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
					  size_t count, void *data)
{
	struct ieee80211_link_sta *link_sta = data;
	struct iwl_mld_link_sta *mld_link_sta;
	u32 rate;
	u32 partial = 0; /* optional second field, defaults to off */
	char pretty_rate[100];
	int ret;
	u8 fw_sta_id;

	mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
	if (WARN_ON(!mld_link_sta))
		return -EINVAL;

	fw_sta_id = mld_link_sta->fw_id;

	/* 'rate' is mandatory, 'partial' optional; < 1 also catches EOF */
	if (sscanf(buf, "%i %i", &rate, &partial) < 1)
		return -EINVAL;

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id,
				   partial ? IWL_TLC_DEBUG_PARTIAL_FIXED_RATE :
					     IWL_TLC_DEBUG_FIXED_RATE,
				   rate);

	rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), rate);

	IWL_DEBUG_RATE(mld, "sta_id %d rate %s partial: %d, ret:%d\n",
		       fw_sta_id, pretty_rate, partial, ret);

	return ret ? : count;
}
/* Write handler for the per-link-sta "tlc_dhc" debugfs file.
 *
 * Input: "<type> <value>"; both fields are mandatory and are forwarded
 * verbatim to the TLC debug host command for this station.
 */
static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf,
				       size_t count, void *data)
{
	struct ieee80211_link_sta *link_sta = data;
	struct iwl_mld_link_sta *mld_link_sta =
		iwl_mld_link_sta_from_mac80211(link_sta);
	u32 type, value;
	u8 fw_sta_id;
	int ret;

	if (WARN_ON(!mld_link_sta))
		return -EINVAL;

	fw_sta_id = mld_link_sta->fw_id;

	if (sscanf(buf, "%i %i", &type, &value) != 2) {
		IWL_DEBUG_RATE(mld, "usage <type> <value>\n");
		return -EINVAL;
	}

	if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
		return -EIO;

	ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id, type, value);

	return ret ?: count;
}
/* Per-link-sta debugfs helpers: the file stores the ieee80211_link_sta
 * pointer and the write handlers run under the wiphy lock.
 */
#define LINK_STA_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
	debugfs_create_file(alias, mode, parent, link_sta, \
			    &iwl_dbgfs_##name##_ops)
#define LINK_STA_DEBUGFS_ADD_FILE(name, parent, mode) \
	LINK_STA_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)

#define LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(name, bufsz) \
	WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, link_sta)

/* instantiate the file_operations for the two per-link-sta files */
LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(tlc_dhc, 64);
LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate, 64);
/* mac80211 callback: populate the per-link-sta debugfs directory */
void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_link_sta *link_sta,
				  struct dentry *dir)
{
	LINK_STA_DEBUGFS_ADD_FILE(fixed_rate, dir, 0200);
	LINK_STA_DEBUGFS_ADD_FILE(tlc_dhc, dir, 0200);
}

View File

@ -0,0 +1,200 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#include "iface.h"
#include "sta.h"
/* Generate a debugfs open() implementation that allocates a per-open
 * context holding the inode's i_private argument plus a buffer of
 * 'buflen' bytes. Mixed read/write opens (O_RDWR) are rejected.
 * The context is freed by _iwl_dbgfs_release().
 */
#define MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)			\
struct dbgfs_##name##_data {						\
	argtype *arg;							\
	bool read_done;							\
	ssize_t rlen;							\
	char buf[buflen];						\
};									\
static int _iwl_dbgfs_##name##_open(struct inode *inode,		\
				    struct file *file)			\
{									\
	struct dbgfs_##name##_data *data;				\
									\
	if ((file->f_flags & O_ACCMODE) == O_RDWR)			\
		return -EOPNOTSUPP;					\
									\
	data = kzalloc(sizeof(*data), GFP_KERNEL);			\
	if (!data)							\
		return -ENOMEM;						\
									\
	data->read_done = false;					\
	data->arg = inode->i_private;					\
	file->private_data = data;					\
									\
	return 0;							\
}
/* Generate a debugfs read() implementation: fill the per-open buffer once
 * on first read via iwl_dbgfs_<name>_read(), then serve (possibly
 * partial) reads from that snapshot.
 */
#define MLD_DEBUGFS_READ_WRAPPER(name)					\
static ssize_t _iwl_dbgfs_##name##_read(struct file *file,		\
					char __user *user_buf,		\
					size_t count, loff_t *ppos)	\
{									\
	struct dbgfs_##name##_data *data = file->private_data;		\
									\
	if (!data->read_done) {						\
		data->read_done = true;					\
		data->rlen = iwl_dbgfs_##name##_read(data->arg,		\
						     sizeof(data->buf),\
						     data->buf);	\
	}								\
									\
	if (data->rlen < 0)						\
		return data->rlen;					\
	return simple_read_from_buffer(user_buf, count, ppos,		\
				       data->buf, data->rlen);		\
}

/* Common release(): free the per-open context allocated in open() */
static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);

	return 0;
}
/* Read-only file_operations built from the open/read wrappers above */
#define _MLD_DEBUGFS_READ_FILE_OPS(name, buflen, argtype)		\
	MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype)			\
	MLD_DEBUGFS_READ_WRAPPER(name)					\
	static const struct file_operations iwl_dbgfs_##name##_ops = {	\
		.read = _iwl_dbgfs_##name##_read,			\
		.open = _iwl_dbgfs_##name##_open,			\
		.llseek = generic_file_llseek,				\
		.release = _iwl_dbgfs_release,				\
	}

/* Adapt a wiphy_locked_debugfs_write() callback to the driver's
 * (mld, buf, count, data) write-handler signature.
 */
#define WIPHY_DEBUGFS_HANDLER_WRAPPER(name)				\
static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy,	\
						struct file *file, char *buf, \
						size_t count, void *data) \
{									\
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);		\
	struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);			\
	return iwl_dbgfs_##name##_write(mld, buf, count, data);		\
}
/* Helpers to resolve the iwl_mld instance from the mac80211 objects that
 * the debugfs files store as their private data.
 */
static inline struct iwl_mld *
iwl_mld_from_link_sta(struct ieee80211_link_sta *link_sta)
{
	/* link_sta -> sta -> vif -> mld */
	struct ieee80211_vif *vif =
		iwl_mld_sta_from_mac80211(link_sta->sta)->vif;
	return iwl_mld_vif_from_mac80211(vif)->mld;
}

static inline struct iwl_mld *
iwl_mld_from_bss_conf(struct ieee80211_bss_conf *link)
{
	return iwl_mld_vif_from_mac80211(link->vif)->mld;
}

static inline struct iwl_mld *iwl_mld_from_vif(struct ieee80211_vif *vif)
{
	return iwl_mld_vif_from_mac80211(vif)->mld;
}
/* Write-only file for an ieee80211_<objtype>: copy the user buffer into
 * an on-stack buffer of 'bufsz' bytes and run the handler under the
 * wiphy lock via wiphy_locked_debugfs_write(). 'objtype' also selects the
 * matching iwl_mld_from_<objtype>() helper above.
 */
#define WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype)		\
WIPHY_DEBUGFS_HANDLER_WRAPPER(name)					\
static ssize_t __iwl_dbgfs_##name##_write(struct file *file,		\
					  const char __user *user_buf,	\
					  size_t count, loff_t *ppos)	\
{									\
	struct ieee80211_##objtype *arg = file->private_data;		\
	struct iwl_mld *mld = iwl_mld_from_##objtype(arg);		\
	char buf[bufsz] = {};						\
									\
	return wiphy_locked_debugfs_write(mld->wiphy, file,		\
					  buf, sizeof(buf),		\
					  user_buf, count,		\
					  iwl_dbgfs_##name##_write_handler, \
					  arg);				\
}

#define WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, objtype)		\
	WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype)		\
	static const struct file_operations iwl_dbgfs_##name##_ops = {	\
		.write = __iwl_dbgfs_##name##_write,			\
		.open = simple_open,					\
		.llseek = generic_file_llseek,				\
	}
/* Like WIPHY_DEBUGFS_HANDLER_WRAPPER but for device-level files whose
 * handlers take only (mld, buf, count) — no per-object argument.
 */
#define WIPHY_DEBUGFS_HANDLER_WRAPPER_MLD(name)				\
static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy,	\
						struct file *file, char *buf, \
						size_t count, void *data) \
{									\
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);		\
	struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);			\
	return iwl_dbgfs_##name##_write(mld, buf, count);		\
}

/* Write path that uses the per-open context buffer (allocated by
 * MLD_DEBUGFS_OPEN_WRAPPER) instead of an on-stack buffer.
 */
#define WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name)				\
WIPHY_DEBUGFS_HANDLER_WRAPPER_MLD(name)					\
static ssize_t __iwl_dbgfs_##name##_write(struct file *file,		\
					  const char __user *user_buf,	\
					  size_t count, loff_t *ppos)	\
{									\
	struct dbgfs_##name##_data *data = file->private_data;		\
	struct iwl_mld *mld = data->arg;				\
									\
	return wiphy_locked_debugfs_write(mld->wiphy, file,		\
					  data->buf, sizeof(data->buf),	\
					  user_buf, count,		\
					  iwl_dbgfs_##name##_write_handler, \
					  NULL);			\
}

/* Write-only device-level file */
#define WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(name, bufsz)			\
	MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld)		\
	WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name)				\
	static const struct file_operations iwl_dbgfs_##name##_ops = {	\
		.write = __iwl_dbgfs_##name##_write,			\
		.open = _iwl_dbgfs_##name##_open,			\
		.llseek = generic_file_llseek,				\
		.release = _iwl_dbgfs_release,				\
	}

/* Read/write device-level file */
#define WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(name, bufsz)		\
	MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld)		\
	WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name)				\
	MLD_DEBUGFS_READ_WRAPPER(name)					\
	static const struct file_operations iwl_dbgfs_##name##_ops = {	\
		.write = __iwl_dbgfs_##name##_write,			\
		.read = _iwl_dbgfs_##name##_read,			\
		.open = _iwl_dbgfs_##name##_open,			\
		.llseek = generic_file_llseek,				\
		.release = _iwl_dbgfs_release,				\
	}
/* Write wrapper for read/write files attached to an ieee80211_<objtype>:
 * the object pointer comes from the per-open context, the staging buffer
 * is on the stack.
 */
#define WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype)	\
WIPHY_DEBUGFS_HANDLER_WRAPPER(name)					\
static ssize_t _iwl_dbgfs_##name##_write(struct file *file,		\
					 const char __user *user_buf,	\
					 size_t count, loff_t *ppos)	\
{									\
	struct dbgfs_##name##_data *data = file->private_data;		\
	struct ieee80211_##objtype *arg = data->arg;			\
	struct iwl_mld *mld = iwl_mld_from_##objtype(arg);		\
	char buf[bufsz] = {};						\
									\
	return wiphy_locked_debugfs_write(mld->wiphy, file,		\
					  buf, sizeof(buf),		\
					  user_buf, count,		\
					  iwl_dbgfs_##name##_write_handler, \
					  arg);				\
}

/* Read/write file for an ieee80211_<objtype> */
#define IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, objtype) \
	MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct ieee80211_##objtype) \
	WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype)	\
	MLD_DEBUGFS_READ_WRAPPER(name)					\
	static const struct file_operations iwl_dbgfs_##name##_ops = {	\
		.write = _iwl_dbgfs_##name##_write,			\
		.read = _iwl_dbgfs_##name##_read,			\
		.open = _iwl_dbgfs_##name##_open,			\
		.llseek = generic_file_llseek,				\
		.release = _iwl_dbgfs_release,				\
	}

View File

@ -0,0 +1,451 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2025 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mld.h"
#include "iface.h"
#include "phy.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"
#include "fw/api/location.h"
#include "ftm-initiator.h"
/* Fill the parts of the range request command common to all targets:
 * initiator flags, request id, timeout, randomized MAC address template
 * and the (optional) TSF-reporting MAC id.
 */
static void iwl_mld_ftm_cmd_common(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_tof_range_req_cmd *cmd,
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	/* fw request id is the (truncated) cfg80211 cookie */
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/* Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(min(req->timeout, 0xfffff));
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

	/* fw mask semantics are inverted relative to cfg80211's */
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mld_vif *mld_vif =
					iwl_mld_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mld_vif->fw_id);
				/* early return keeps the valid tsf_mac_id */
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}
/* Translate the peer's cfg80211 chandef into the fw target description:
 * channel number, frame format + bandwidth, control channel position and
 * band. Returns -EINVAL for bandwidths the fw doesn't support.
 */
static int
iwl_mld_ftm_set_target_chandef(struct iwl_mld *mld,
			       struct cfg80211_pmsr_request_peer *peer,
			       struct iwl_tof_range_req_ap_entry *target)
{
	u32 freq = peer->chandef.chan->center_freq;

	target->channel_num = ieee80211_frequency_to_channel(freq);

	/* format_bw packs the frame format in the low bits and the
	 * bandwidth at LOCATION_BW_POS
	 */
	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		target->format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		target->format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
		target->format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
		break;
	default:
		IWL_ERR(mld, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non EDCA based measurement must use HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		target->format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	target->ctrl_ch_position =
		(peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mld_get_fw_ctrl_pos(&peer->chandef) : 0;

	target->band = iwl_mld_nl80211_band_to_fw(peer->chandef.chan->band);
	return 0;
}
/* Set a flag in target->initiator_ap_flags; relies on a local variable
 * named 'target' being in scope at the use site.
 */
#define FTM_SET_FLAG(flag) (target->initiator_ap_flags |= \
			    cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))

/* Translate the per-peer cfg80211 FTM parameters and the compile-time
 * initiator configuration into the target's initiator_ap_flags.
 */
static void
iwl_mld_ftm_set_target_flags(struct iwl_mld *mld,
			     struct cfg80211_pmsr_request_peer *peer,
			     struct iwl_tof_range_req_ap_entry *target)
{
	target->initiator_ap_flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_SET_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_SET_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_SET_FLAG(CIVIC_REQUEST);

	if (IWL_MLD_FTM_INITIATOR_DYNACK)
		FTM_SET_FLAG(DYN_ACK);

	/* compile-time choice of ranging algorithm */
	if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_SET_FLAG(ALGO_LR);
	else if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_SET_FLAG(ALGO_FFT);

	/* trigger-based and non-trigger-based are mutually exclusive */
	if (peer->ftm.trigger_based)
		FTM_SET_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_SET_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_SET_FLAG(LMR_FEEDBACK);
}
/* Fill the target's fw station id when ranging with our own AP, and set
 * the PMF flag for (non-)trigger-based measurements on MFP connections.
 */
static void iwl_mld_ftm_set_sta(struct iwl_mld *mld, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request_peer *peer,
				struct iwl_tof_range_req_ap_entry *target)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u32 sta_id_mask;

	target->sta_id = IWL_INVALID_STA;

	/* TODO: add ftm_unprotected debugfs support */

	if (!vif->cfg.assoc || !mld_vif->ap_sta)
		return;

	sta_id_mask = iwl_mld_fw_sta_id_mask(mld, mld_vif->ap_sta);
	/* expect exactly one fw station for the AP here */
	if (WARN_ON(hweight32(sta_id_mask) != 1))
		return;

	target->sta_id = __ffs(sta_id_mask);

	if (mld_vif->ap_sta->mfp &&
	    (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
		FTM_SET_FLAG(PMF);
}
/* Fill one fw target entry from a cfg80211 measurement peer: chandef,
 * BSSID, burst scheduling, flags, station id and NDP parameters.
 */
static int
iwl_mld_ftm_set_target(struct iwl_mld *mld, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry *target)
{
	u32 i2r_max_sts;
	int ret;

	ret = iwl_mld_ftm_set_target_chandef(mld, peer, target);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period = cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;

	iwl_mld_ftm_set_target_flags(mld, peer, target);
	iwl_mld_ftm_set_sta(mld, vif, peer, target);

	/* TODO: add secured ranging support */

	/* cap the configured I2R STS to what fits the field */
	i2r_max_sts = IWL_MLD_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MLD_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MLD_FTM_R2I_MAX_REP |
		(IWL_MLD_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MLD_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
	target->i2r_ndp_params = IWL_MLD_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MLD_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);

	/* non-TB overrides the burst period with the inter-measurement cap */
	if (peer->ftm.non_trigger_based) {
		target->min_time_between_msr =
			cpu_to_le16(IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
		target->burst_period =
			cpu_to_le16(IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
	} else {
		target->min_time_between_msr = cpu_to_le16(0);
	}

	/* TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);

	return 0;
}
/* Start an FTM (peer measurement) session: build a TOF_RANGE_REQ_CMD from
 * the cfg80211 request and send it. Only one session may be in flight;
 * on success the request is remembered until the fw response completes
 * or a restart cleans it up.
 */
int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		/* cmd is on the stack - the transport must duplicate it */
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	if (mld->ftm_initiator.req)
		return -EBUSY;

	if (req->n_peers > ARRAY_SIZE(cmd.ap))
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	iwl_mld_ftm_cmd_common(mld, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];

		ret = iwl_mld_ftm_set_target(mld, vif, peer, target);
		if (ret)
			return ret;
	}

	/* TODO: get the status from the response */
	ret = iwl_mld_send_cmd(mld, &hcmd);
	if (!ret) {
		mld->ftm_initiator.req = req;
		mld->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return ret;
}
/* Forget the in-flight measurement request and clear all per-peer
 * response counters. Caller must hold the wiphy lock.
 */
static void iwl_mld_ftm_reset(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	memset(mld->ftm_initiator.responses, 0,
	       sizeof(mld->ftm_initiator.responses));
	mld->ftm_initiator.req_wdev = NULL;
	mld->ftm_initiator.req = NULL;
}
/* Sanity-check a fw range response against the pending request: the
 * request id must match the (truncated) cookie, and the number of APs
 * must not exceed the request's peers or the fw maximum. Returns 0 when
 * valid, -EINVAL (after a fw-check warning) otherwise.
 */
static int iwl_mld_ftm_range_resp_valid(struct iwl_mld *mld, u8 request_id,
					u8 num_of_aps)
{
	if (IWL_FW_CHECK(mld, request_id != (u8)mld->ftm_initiator.req->cookie,
			 "Request ID mismatch, got %u, active %u\n",
			 request_id, (u8)mld->ftm_initiator.req->cookie))
		return -EINVAL;

	if (IWL_FW_CHECK(mld, num_of_aps > mld->ftm_initiator.req->n_peers ||
			 num_of_aps > IWL_TOF_MAX_APS,
			 "FTM range response: invalid num of APs (%u)\n",
			 num_of_aps))
		return -EINVAL;

	return 0;
}
/* Return the index of the request peer whose address matches 'addr',
 * or -ENOENT when no peer matches.
 */
static int iwl_mld_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	int i;

	for (i = 0; i < req->n_peers; i++) {
		if (ether_addr_equal_unaligned(req->peers[i].addr, addr))
			return i;
	}

	return -ENOENT;
}
/* Dump one measurement result to the debug log. The distance estimate is
 * derived from the averaged RTT (rtt * 100 / 6666 — picoseconds of RTT
 * to centimeters of one-way distance at the speed of light).
 */
static void iwl_mld_debug_range_resp(struct iwl_mld *mld, u8 index,
				     struct cfg80211_pmsr_result *res)
{
	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);

	IWL_DEBUG_INFO(mld, "entry %d\n", index);
	IWL_DEBUG_INFO(mld, "\tstatus: %d\n", res->status);
	IWL_DEBUG_INFO(mld, "\tBSSID: %pM\n", res->addr);
	IWL_DEBUG_INFO(mld, "\thost time: %llu\n", res->host_time);
	IWL_DEBUG_INFO(mld, "\tburst index: %d\n", res->ftm.burst_index);
	IWL_DEBUG_INFO(mld, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
	IWL_DEBUG_INFO(mld, "\trssi: %d\n", res->ftm.rssi_avg);
	IWL_DEBUG_INFO(mld, "\trssi spread: %d\n", res->ftm.rssi_spread);
	IWL_DEBUG_INFO(mld, "\trtt: %lld\n", res->ftm.rtt_avg);
	IWL_DEBUG_INFO(mld, "\trtt var: %llu\n", res->ftm.rtt_variance);
	IWL_DEBUG_INFO(mld, "\trtt spread: %llu\n", res->ftm.rtt_spread);
	IWL_DEBUG_INFO(mld, "\tdistance: %lld\n", rtt_avg);
}
/* Handle a TOF range response notification from the fw: translate each AP
 * entry into a cfg80211_pmsr_result and report it; when the fw marks the
 * batch as the last one, complete the measurement and reset the state.
 */
void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
				   struct iwl_rx_packet *pkt)
{
	struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
	u8 num_of_aps, last_in_batch;

	if (IWL_FW_CHECK(mld, !mld->ftm_initiator.req,
			 "FTM response without a pending request\n"))
		return;

	if (iwl_mld_ftm_range_resp_valid(mld, fw_resp->request_id,
					 fw_resp->num_of_aps))
		return;

	num_of_aps = fw_resp->num_of_aps;
	last_in_batch = fw_resp->last_report;

	IWL_DEBUG_INFO(mld, "Range response received\n");
	IWL_DEBUG_INFO(mld, "request id: %llu, num of entries: %u\n",
		       mld->ftm_initiator.req->cookie, num_of_aps);

	for (int i = 0; i < num_of_aps; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
		int peer_idx;

		fw_ap = &fw_resp->ap[i];
		result.final = fw_ap->last_burst;
		result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
		result.ap_tsf_valid = 1;

		/* map the fw entry back to the cfg80211 peer by BSSID */
		peer_idx = iwl_mld_ftm_find_peer(mld->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mld,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

		/* translate the fw per-entry status to nl80211 semantics */
		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}
		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);

		/* TODO: convert the timestamp from the result to systime */
		result.host_time = ktime_get_boottime_ns();

		result.type = NL80211_PMSR_TYPE_FTM;
		/* burst index counts responses seen so far for this peer */
		result.ftm.burst_index = mld->ftm_initiator.responses[peer_idx];
		mld->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
				     mld->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mld->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mld, "RTT confidence: %u\n",
				       fw_ap->rttConfidence);

		iwl_mld_debug_range_resp(mld, i, &result);
	}

	if (last_in_batch) {
		cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
				       mld->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mld_ftm_reset(mld);
	}
}
/* Abort a pending measurement on fw restart: report a final failure
 * result for every requested peer, complete the request towards
 * cfg80211 and reset the initiator state.
 */
void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};

	if (!mld->ftm_initiator.req)
		return;

	for (int i = 0; i < mld->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mld->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);

		cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
				     mld->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
			       mld->ftm_initiator.req, GFP_KERNEL);
	iwl_mld_ftm_reset(mld);
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2025 Intel Corporation
*/
#ifndef __iwl_mld_ftm_initiator_h__
#define __iwl_mld_ftm_initiator_h__
/* Start an FTM measurement session for the given cfg80211 request */
int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req);

/* Handle a TOF range response notification from the firmware */
void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
				   struct iwl_rx_packet *pkt);

/* Fail and complete a pending measurement upon firmware restart */
void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld);
#endif /* __iwl_mld_ftm_initiator_h__ */

View File

@ -0,0 +1,536 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "mld.h"
#include "fw/api/alive.h"
#include "fw/api/scan.h"
#include "fw/api/rx.h"
#include "fw/dbg.h"
#include "fw/pnvm.h"
#include "hcmd.h"
#include "iwl-nvm-parse.h"
#include "power.h"
#include "mcc.h"
#include "led.h"
#include "coex.h"
#include "regulatory.h"
#include "thermal.h"
static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
{
struct iwl_tx_ant_cfg_cmd cmd;
lockdep_assert_wiphy(mld->wiphy);
cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));
IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);
return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
}
/* Configure RSS: hash on TCP/UDP/payload for v4/v6 and spread the
 * traffic over all RX queues except queue 0 (the fallback queue).
 *
 * Fix: with a single RX queue the indirection computation below would be
 * a modulo by zero (num_rx_queues - 1 == 0) — and there is nothing to
 * spread anyway, so skip sending the command in that case.
 */
static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
{
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	lockdep_assert_wiphy(mld->wiphy);

	/* nothing to spread with a single queue; also avoids mod-by-zero */
	if (mld->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mld->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
}
/* Send the scan configuration: advertise the valid TX/RX chains */
static int iwl_mld_config_scan(struct iwl_mld *mld)
{
	struct iwl_scan_config cmd = {
		.tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
		.rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
	};

	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
				    &cmd);
}
/* Record the IMR (isolated memory region) info reported in the alive
 * notification; when IMR is disabled, mark any DRAM IMR debug region as
 * unsupported so the debug code won't try to dump it.
 */
static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
				   const struct iwl_imr_alive_info *imr_info)
{
	struct iwl_imr_data *imr_data = &trans->dbg.imr_data;

	imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
	imr_data->imr_size = le32_to_cpu(imr_info->size);
	imr_data->imr2sram_remainbyte = imr_data->imr_size;
	imr_data->imr_base_addr = imr_info->base_addr;
	imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);

	if (imr_data->imr_enable)
		return;

	for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_fw_ini_region_tlv *reg;

		if (!trans->dbg.active_regions[i])
			continue;

		reg = (void *)trans->dbg.active_regions[i]->data;

		/* We have only one DRAM IMR region, so we
		 * can break as soon as we find the first
		 * one.
		 */
		if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
			trans->dbg.unsupported_region_msk |= BIT(i);
			break;
		}
	}
}
/* Notification-wait callback for UCODE_ALIVE_NTFY.
 *
 * Validates the notification version/size, extracts sku ids, error table
 * pointers and version info, and reports via *data (alive_valid) whether
 * the fw came up with IWL_ALIVE_STATUS_OK. Returns true once handled so
 * the waiter is woken.
 */
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mld *mld =
		container_of(notif_wait, struct iwl_mld, notif_wait);
	struct iwl_trans *trans = mld->trans;
	u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	struct iwl_alive_ntf_v6 *palive;
	bool *alive_valid = data;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	/* NOTE(review): lmac2 is unconditionally assigned below, so the
	 * NULL init and the "if (lmac2)" checks are vestigial/always-true.
	 */
	struct iwl_lmac_alive *lmac2 = NULL;
	u32 lmac_error_event_table;
	u32 umac_error_table;
	u16 status;

	/* only v6/v7 notifications (same struct size) are supported */
	if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
		return false;

	palive = (void *)pkt->data;

	iwl_mld_alive_imr_data(trans, &palive->imr);

	umac = &palive->umac_data;
	lmac1 = &palive->lmac_data[0];
	lmac2 = &palive->lmac_data[1];
	status = le16_to_cpu(palive->status);

	trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
	trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
	trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);

	IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
		     trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);

	if (lmac2)
		trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
		~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table >= trans->cfg->min_umac_error_event_table)
		iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
	else
		IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
			umac_error_table);

	*alive_valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mld,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mld, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mld,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	if (version >= 7)
		IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
			     le16_to_cpu(palive->flags));

	iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);

	return true;
}
/* How long to wait for the fw alive / init-complete notifications */
#define MLD_ALIVE_TIMEOUT		(2 * HZ)
#define MLD_INIT_COMPLETE_TIMEOUT	(2 * HZ)

/* Dump debug state (secure boot status, power/OTP registers and fw
 * program counters) when the alive notification times out.
 */
static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
{
	struct iwl_trans *trans = mld->trans;
	struct iwl_pc_data *pc_data;
	u8 count;

	IWL_ERR(mld,
		"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
		iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
		iwl_read_umac_prph(trans,
				   UMAG_SB_CPU_2_STATUS));
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
#undef IWL_FW_PRINT_REG_INFO

	pc_data = trans->dbg.pc_data;
	for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
		IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
			pc_data->pc_address);
}
/* Load the regular ucode image and wait for the alive notification.
 * On any failure the device is stopped before returning.
 */
static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
{
	const struct fw_img *fw =
		iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	struct iwl_notification_wait alive_wait;
	bool alive_valid = false;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* register the waiter before starting the fw to avoid a race */
	iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_valid);

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	ret = iwl_trans_start_fw(mld->trans, fw, true);
	if (ret) {
		iwl_remove_notification(&mld->notif_wait, &alive_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
				    MLD_ALIVE_TIMEOUT);
	if (ret) {
		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mld->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
		iwl_mld_print_alive_notif_timeout(mld);
		goto alive_failure;
	}

	if (!alive_valid) {
		IWL_ERR(mld, "Loaded firmware is not valid!\n");
		ret = -EIO;
		goto alive_failure;
	}

	iwl_trans_fw_alive(mld->trans, 0);

	return 0;

alive_failure:
	iwl_trans_stop_device(mld->trans);
	return ret;
}
/* Bring the firmware to an operational state: load it, wait for alive,
 * load PNVM, send the extended init config, wait for INIT_COMPLETE and
 * read the NVM (once). On failure the device is stopped.
 */
int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
	struct iwl_notification_wait init_wait;
	struct iwl_init_extended_cfg_cmd init_cfg = {};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw_wait_alive(mld);
	if (ret)
		return ret;

	/* latch whether the device was forced into URM by the PMU */
	mld->trans->step_urm =
		!!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) &
		   CNVI_PMU_STEP_FLOW_FORCE_URM);

	ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
			    &mld->fw->ucode_capa);
	if (ret) {
		IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
		goto init_failure;
	}

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	iwl_init_notification_wait(&mld->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   NULL, NULL);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
				   &init_cfg);
	if (ret) {
		IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		goto init_failure;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
				    MLD_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
		goto init_failure;
	}

	/* the NVM is read once and kept across fw restarts */
	if (!mld->nvm_data) {
		mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
		if (IS_ERR(mld->nvm_data)) {
			ret = PTR_ERR(mld->nvm_data);
			mld->nvm_data = NULL;
			IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
			goto init_failure;
		}
	}

	return 0;

init_failure:
	iwl_trans_stop_device(mld->trans);
	return ret;
}
/* Bring up the transport hardware and run the firmware init sequence;
 * on success, mark the firmware as running.
 */
int iwl_mld_load_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_trans_start_hw(mld->trans);
	if (!ret)
		ret = iwl_mld_run_fw_init_sequence(mld);

	if (!ret)
		mld->fw_status.running = true;

	return ret;
}
/* Stop the firmware and the device.
 * Pending notification waiters are aborted first so nothing keeps
 * waiting for notifications that will never arrive, then debug data
 * collection is stopped and finally the device itself.
 */
void iwl_mld_stop_fw(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	iwl_abort_notification_waits(&mld->notif_wait);
	iwl_fw_dbg_stop_sync(&mld->fwrt);
	iwl_trans_stop_device(mld->trans);

	mld->fw_status.running = false;
}
/* Interface iterator: after a firmware error/restart, ask mac80211 to
 * disconnect station interfaces.
 */
static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	ieee80211_hw_restart_disconnect(vif);
}
/* Send the firmware error recovery command.
 * With %ERROR_RECOVERY_UPDATE_DB, the recovery blob collected upon the
 * NIC error is attached, and the firmware's response status is checked:
 * a non-zero status means the blob was rejected, in which case all
 * station interfaces are told to disconnect.
 * The recovery buffer is freed in all cases.
 */
void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
{
	u32 error_log_size = mld->fw->ucode_capa.error_log_size;
	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};
	int ret;

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated upon NIC error */
		if (!mld->error_recovery_buf)
			return;

		/* attach the blob as a second, no-copy fragment */
		cmd.data[1] = mld->error_recovery_buf;
		cmd.len[1] = error_log_size;
		cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mld_send_cmd(mld, &cmd);

	/* we no longer need the recovery buffer */
	kfree(mld->error_recovery_buf);
	mld->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		struct iwl_rx_packet *pkt = cmd.resp_pkt;
		u32 pkt_len = iwl_rx_packet_payload_len(pkt);
		u32 resp;

		/* validate the fw-provided response length before reading */
		if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
				 "Unexpected recovery cmd response size %u (expected %zu)\n",
				 pkt_len, sizeof(resp)))
			goto out;

		resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
		if (!resp)
			goto out;

		IWL_ERR(mld,
			"Failed to send recovery cmd blob was invalid %d\n",
			resp);

		/* the fw rejected the blob: force station interfaces to
		 * reconnect from scratch
		 */
		ieee80211_iterate_interfaces(mld->hw, 0,
					     iwl_mld_restart_disconnect_iter,
					     NULL);
	}

out:
	iwl_free_resp(&cmd);
}
/* Send the post-alive configuration to the firmware: TX antennas,
 * BT coex, SoC latency, LARI/MCC regulatory, thermal thresholds (and
 * cTDP when thermal support is built in), RX queues, RSS, scan config,
 * device power, PPAG/SAR/SGOM/TAS/UATS tables and the LED.
 * In a hw-restart flow, also replays the error recovery blob and the
 * time-sync configuration.
 */
static int iwl_mld_config_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_fw_disable_dbg_asserts(&mld->fwrt);
	iwl_get_shared_mem_conf(&mld->fwrt);

	ret = iwl_mld_send_tx_ant_cfg(mld);
	if (ret)
		return ret;

	ret = iwl_mld_send_bt_init_conf(mld);
	if (ret)
		return ret;

	ret = iwl_set_soc_latency(&mld->fwrt);
	if (ret)
		return ret;

	iwl_mld_configure_lari(mld);

	ret = iwl_mld_config_temp_report_ths(mld);
	if (ret)
		return ret;

#ifdef CONFIG_THERMAL
	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_START);
	if (ret)
		return ret;
#endif

	ret = iwl_configure_rxq(&mld->fwrt);
	if (ret)
		return ret;

	ret = iwl_mld_send_rss_cfg_cmd(mld);
	if (ret)
		return ret;

	ret = iwl_mld_config_scan(mld);
	if (ret)
		return ret;

	ret = iwl_mld_update_device_power(mld, false);
	if (ret)
		return ret;

	ret = iwl_mld_init_mcc(mld);
	if (ret)
		return ret;

	/* recovery-specific configuration, only after a fw error */
	if (mld->fw_status.in_hw_restart) {
		iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
		iwl_mld_time_sync_fw_config(mld);
	}

	iwl_mld_led_config_fw(mld);

	ret = iwl_mld_init_ppag(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sar(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sgom(mld);
	if (ret)
		return ret;

	iwl_mld_init_tas(mld);
	iwl_mld_init_uats(mld);

	return 0;
}
/* Load the firmware and apply the post-alive configuration.
 * On any failure the firmware and device are stopped again.
 */
int iwl_mld_start_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw(mld);
	if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
		/* collect debug data before tearing everything down */
		iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	IWL_DEBUG_INFO(mld, "uCode started.\n");

	ret = iwl_mld_config_fw(mld);
	if (ret)
		goto error;

	return 0;

error:
	iwl_mld_stop_fw(mld);
	return ret;
}

View File

@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_hcmd_h__
#define __iwl_mld_hcmd_h__
/* Send a host command to the firmware.
 * Synchronous commands require the wiphy lock to be held; no command at
 * all may be sent after entering D3. CMD_SEND_IN_RFKILL is always set
 * because none of the devices this op_mode supports must shut down
 * immediately on rfkill.
 */
static inline int iwl_mld_send_cmd(struct iwl_mld *mld, struct iwl_host_cmd *cmd)
{
	/* No commands, including the d3 related commands, should be sent
	 * after entering d3
	 */
	if (WARN_ON(mld->fw_status.in_d3))
		return -EIO;

	if (!(cmd->flags & CMD_ASYNC))
		lockdep_assert_wiphy(mld->wiphy);

	/* Devices that need to shutdown immediately on rfkill are not
	 * supported, so we can send all the cmds in rfkill
	 */
	cmd->flags |= CMD_SEND_IN_RFKILL;

	return iwl_trans_send_cmd(mld->trans, cmd);
}
/* Build a single-fragment host command with the given payload and send it */
static inline int
__iwl_mld_send_cmd_with_flags_pdu(struct iwl_mld *mld, u32 id,
				  u32 flags, const void *data, u16 len)
{
	struct iwl_host_cmd hcmd = {
		.id = id,
		.flags = flags,
	};

	hcmd.data[0] = data;
	hcmd.len[0] = data ? len : 0;

	return iwl_mld_send_cmd(mld, &hcmd);
}
/* The variadic layering below lets iwl_mld_send_cmd_pdu() be called
 * either as (mld, id, data) - the length then defaults to sizeof(*data) -
 * or as (mld, id, data, len) with an explicit length. The trailing
 * "ignored..." parameter swallows the appended sizeof() default whenever
 * an explicit length was already supplied by the caller.
 */
#define _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len, \
					 ignored...) \
	__iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len)
#define iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len...) \
	_iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, ##len, \
					 sizeof(*(data)))
/* Send a command with payload @data and implicit/explicit length, no flags */
#define iwl_mld_send_cmd_pdu(mld, id, ...) \
	iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, __VA_ARGS__)
/* Send a command that carries no payload at all */
#define iwl_mld_send_cmd_empty(mld, id) \
	iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, NULL, 0)
#endif /* __iwl_mld_hcmd_h__ */

View File

@ -0,0 +1,671 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include <net/cfg80211.h>
#include "iface.h"
#include "hcmd.h"
#include "key.h"
#include "mlo.h"
#include "mac80211.h"
#include "fw/api/context.h"
#include "fw/api/mac.h"
#include "fw/api/time-event.h"
#include "fw/api/datapath.h"
/* Cleanup function for struct iwl_mld_vif, will be called in restart */
void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_link *link;

	/* EMLSR is turned back on during recovery */
	vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;

	/* ROC_NUM_ACTIVITIES marks "no ROC activity in use" */
	mld_vif->roc_activity = ROC_NUM_ACTIVITIES;

	/* link_id is declared by the for_each_mld_vif_valid_link() macro */
	for_each_mld_vif_valid_link(mld_vif, link) {
		iwl_mld_cleanup_link(mld_vif->mld, link);

		/* Correctly allocated primary link in non-MLO mode */
		if (!ieee80211_vif_is_mld(vif) &&
		    link_id == 0 && link == &mld_vif->deflink)
			continue;

		/* links that are still active in mac80211 are kept */
		if (vif->active_links & BIT(link_id))
			continue;

		/* Should not happen as link removal should always succeed */
		WARN_ON(1);
		if (link != &mld_vif->deflink)
			kfree_rcu(link, rcu_head);
		RCU_INIT_POINTER(mld_vif->link[link_id], NULL);
	}

	ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL);

	/* zero out the restart-scoped part of the vif data */
	CLEANUP_STRUCT(mld_vif);
}
/* Send a MAC_CONFIG_CMD to the firmware, logging on failure */
static int iwl_mld_send_mac_cmd(struct iwl_mld *mld,
				struct iwl_mac_config_cmd *cmd)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD),
				   cmd);
	if (ret)
		IWL_ERR(mld, "Failed to send MAC_CONFIG_CMD ret = %d\n", ret);

	return ret;
}
/* Translate a mac80211 interface type into the FW_MAC_TYPE_* value.
 * Unknown types warn once and fall back to FW_MAC_TYPE_BSS_STA.
 */
int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif)
{
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (vif->p2p)
			return FW_MAC_TYPE_P2P_STA;
		return FW_MAC_TYPE_BSS_STA;
	case NL80211_IFTYPE_AP:
		return FW_MAC_TYPE_GO;
	case NL80211_IFTYPE_MONITOR:
		return FW_MAC_TYPE_LISTENER;
	case NL80211_IFTYPE_P2P_DEVICE:
		return FW_MAC_TYPE_P2P_DEVICE;
	case NL80211_IFTYPE_ADHOC:
		return FW_MAC_TYPE_IBSS;
	default:
		break;
	}

	WARN_ON_ONCE(1);
	return FW_MAC_TYPE_BSS_STA;
}
static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
struct ieee80211_vif *vif)
{
const struct ieee80211_supported_band *sband;
const struct ieee80211_sta_he_cap *own_he_cap;
lockdep_assert_wiphy(mld->wiphy);
/* This capability is the same for all bands,
* so take it from one of them.
*/
sband = mld->hw->wiphy->bands[NL80211_BAND_2GHZ];
own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
return own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_ACK_EN);
}
/* fill the common part for all interface types */
static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
					struct ieee80211_vif *vif,
					struct iwl_mac_config_cmd *cmd,
					u32 action)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct ieee80211_bss_conf *link_conf;
	unsigned int link_id;

	lockdep_assert_wiphy(mld->wiphy);

	cmd->id_and_color = cpu_to_le32(mld_vif->fw_id);
	cmd->action = cpu_to_le32(action);

	cmd->mac_type =
		cpu_to_le32(iwl_mld_mac80211_iftype_to_fw(vif));

	memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN);

	/* when HE is administratively disabled, leave all HE/EHT
	 * support fields zeroed
	 */
	if (iwlwifi_mod_params.disable_11ax)
		return;

	cmd->nic_not_ack_enabled =
		cpu_to_le32(!iwl_mld_is_nic_ack_enabled(mld, vif));

	/* If we have MLO enabled, then the firmware needs to enable
	 * address translation for the station(s) we add. That depends
	 * on having EHT enabled in firmware, which in turn depends on
	 * mac80211 in the code below.
	 * However, mac80211 doesn't enable HE/EHT until it has parsed
	 * the association response successfully, so just skip all that
	 * and enable both when we have MLO.
	 */
	if (ieee80211_vif_is_mld(vif)) {
		if (vif->type == NL80211_IFTYPE_AP)
			cmd->he_ap_support = cpu_to_le16(1);
		else
			cmd->he_support = cpu_to_le16(1);

		cmd->eht_support = cpu_to_le32(1);
		return;
	}

	/* non-MLO: advertise HE if any active link supports it */
	for_each_vif_active_link(vif, link_conf, link_id) {
		if (!link_conf->he_support)
			continue;

		if (vif->type == NL80211_IFTYPE_AP)
			cmd->he_ap_support = cpu_to_le16(1);
		else
			cmd->he_support = cpu_to_le16(1);

		/* EHT, if supported, was already set above */
		break;
	}
}
/* Fill the station-mode specific part of MAC_CONFIG_CMD: association
 * state, EMLSR timing parameters, TWT policy and RX filter flags.
 */
static void iwl_mld_fill_mac_cmd_sta(struct iwl_mld *mld,
				     struct ieee80211_vif *vif, u32 action,
				     struct iwl_mac_config_cmd *cmd)
{
	struct ieee80211_bss_conf *link;
	u32 twt_policy = 0;
	int link_id;

	lockdep_assert_wiphy(mld->wiphy);

	WARN_ON(vif->type != NL80211_IFTYPE_STATION);

	/* We always want to hear MCAST frames, if we're not authorized yet,
	 * we'll drop them.
	 */
	cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_GRP);

	/* Adding a MAC ctxt with is_assoc set is not allowed in fw
	 * (and shouldn't happen)
	 */
	if (vif->cfg.assoc && action != FW_CTXT_ACTION_ADD) {
		cmd->client.is_assoc = 1;

		/* keep coex at high priority until we are authorized */
		if (!iwl_mld_vif_from_mac80211(vif)->authorized)
			cmd->client.data_policy |=
				cpu_to_le16(COEX_HIGH_PRIORITY_ENABLE);
	} else {
		/* Allow beacons to pass through as long as we are not
		 * associated
		 */
		cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
	}

	cmd->client.assoc_id = cpu_to_le16(vif->cfg.aid);

	if (ieee80211_vif_is_mld(vif)) {
		u16 esr_transition_timeout =
			u16_get_bits(vif->cfg.eml_cap,
				     IEEE80211_EML_CAP_TRANSITION_TIMEOUT);

		/* clamp the transition timeout to the maximum the API allows */
		cmd->client.esr_transition_timeout =
			min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU,
			      esr_transition_timeout);
		cmd->client.medium_sync_delay =
			cpu_to_le16(vif->cfg.eml_med_sync_delay);
	}

	/* TWT policy is the union over all HE-capable active links */
	for_each_vif_active_link(vif, link, link_id) {
		if (!link->he_support)
			continue;

		if (link->twt_requester)
			twt_policy |= TWT_SUPPORTED;
		if (link->twt_protected)
			twt_policy |= PROTECTED_TWT_SUPPORTED;
		if (link->twt_broadcast)
			twt_policy |= BROADCAST_TWT_SUPPORTED;
	}

	if (!iwlwifi_mod_params.disable_11ax)
		cmd->client.data_policy |= cpu_to_le16(twt_policy);

	if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
		cmd->filter_flags |=
			cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);

	if (vif->p2p)
		cmd->client.ctwin =
			cpu_to_le32(vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
				    IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
}
/* Fill the AP-mode specific part of MAC_CONFIG_CMD */
static void iwl_mld_fill_mac_cmd_ap(struct iwl_mld *mld,
				    struct ieee80211_vif *vif,
				    struct iwl_mac_config_cmd *cmd)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	WARN_ON(vif->type != NL80211_IFTYPE_AP);

	cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);

	/* In AP mode, accept beacons from other APs (needed for HT
	 * protection) - but only while at least one station is associated.
	 * With no associated stations we don't TX anyway, so skip the
	 * beacons to avoid unnecessary wake-ups.
	 */
	if (mld_vif->num_associated_stas)
		cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
}
/* Interface iterator: set *_data (bool) if an active P2P GO exists */
static void iwl_mld_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	bool *go_active = _data;

	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_GO)
		return;

	if (iwl_mld_vif_from_mac80211(vif)->ap_ibss_active)
		*go_active = true;
}
/* Return true when the P2P Device should be discoverable on channels
 * other than its listen channel (i.e. when a P2P GO is active).
 */
static bool iwl_mld_p2p_dev_has_extended_disc(struct iwl_mld *mld)
{
	bool go_active = false;

	/* This flag should be set to true when the P2P Device is
	 * discoverable and there is at least a P2P GO. Setting
	 * this flag will allow the P2P Device to be discoverable on other
	 * channels in addition to its listen channel.
	 * Note that this flag should not be set in other cases as it opens the
	 * Rx filters on all MAC and increases the number of interrupts.
	 */
	ieee80211_iterate_active_interfaces(mld->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    iwl_mld_go_iterator, &go_active);

	return go_active;
}
/* Fill the P2P-Device specific part of MAC_CONFIG_CMD */
static void iwl_mld_fill_mac_cmd_p2p_dev(struct iwl_mld *mld,
					 struct ieee80211_vif *vif,
					 struct iwl_mac_config_cmd *cmd)
{
	lockdep_assert_wiphy(mld->wiphy);

	/* Override the filter flags to accept all management frames. This is
	 * needed to support both P2P device discovery using probe requests and
	 * P2P service discovery using action frames
	 */
	cmd->filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);

	if (iwl_mld_p2p_dev_has_extended_disc(mld))
		cmd->p2p_dev.is_disc_extended = cpu_to_le32(1);
}
static void iwl_mld_fill_mac_cmd_ibss(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct iwl_mac_config_cmd *cmd)
{
lockdep_assert_wiphy(mld->wiphy);
WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON |
MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
MAC_CFG_FILTER_ACCEPT_GRP);
}
static int
iwl_mld_rm_mac_from_fw(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
struct iwl_mac_config_cmd cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.id_and_color = cpu_to_le32(mld_vif->fw_id),
};
return iwl_mld_send_mac_cmd(mld, &cmd);
}
/* Add/modify/remove the firmware MAC context of @vif.
 * For REMOVE only the id is needed; otherwise the command carries the
 * common fields plus the interface-type specific configuration.
 */
int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
			  u32 action)
{
	struct iwl_mac_config_cmd cmd = {};

	lockdep_assert_wiphy(mld->wiphy);

	if (action == FW_CTXT_ACTION_REMOVE)
		return iwl_mld_rm_mac_from_fw(mld, vif);

	iwl_mld_mac_cmd_fill_common(mld, vif, &cmd, action);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		iwl_mld_fill_mac_cmd_sta(mld, vif, action, &cmd);
		break;
	case NL80211_IFTYPE_AP:
		iwl_mld_fill_mac_cmd_ap(mld, vif, &cmd);
		break;
	case NL80211_IFTYPE_MONITOR:
		/* monitor mode: open the RX filters wide */
		cmd.filter_flags =
			cpu_to_le32(MAC_CFG_FILTER_PROMISC |
				    MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
				    MAC_CFG_FILTER_ACCEPT_BEACON |
				    MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
				    MAC_CFG_FILTER_ACCEPT_GRP);
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		iwl_mld_fill_mac_cmd_p2p_dev(mld, vif, &cmd);
		break;
	case NL80211_IFTYPE_ADHOC:
		iwl_mld_fill_mac_cmd_ibss(mld, vif, &cmd);
		break;
	default:
		WARN(1, "not supported yet\n");
		return -EOPNOTSUPP;
	}

	return iwl_mld_send_mac_cmd(mld, &cmd);
}
/* Instantiate the fw-id allocator for vifs.
 * NOTE(review): IWL_MLD_ALLOC_FN() is defined elsewhere; judging by the
 * rollback in iwl_mld_add_vif() it assigns a free fw_id and maps it in
 * mld->fw_id_to_vif - confirm against the macro definition.
 */
IWL_MLD_ALLOC_FN(vif, vif)

/* Constructor function for struct iwl_mld_vif */
static int
iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	mld_vif->mld = mld;
	/* ROC_NUM_ACTIVITIES marks "no ROC activity in use" */
	mld_vif->roc_activity = ROC_NUM_ACTIVITIES;

	ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
	if (ret)
		return ret;

	/* the EMLSR workers survive a hw restart - init them only once */
	if (!mld->fw_status.in_hw_restart) {
		wiphy_work_init(&mld_vif->emlsr.unblock_tpt_wk,
				iwl_mld_emlsr_unblock_tpt_wk);
		wiphy_delayed_work_init(&mld_vif->emlsr.check_tpt_wk,
					iwl_mld_emlsr_check_tpt);
		wiphy_delayed_work_init(&mld_vif->emlsr.prevent_done_wk,
					iwl_mld_emlsr_prevent_done_wk);
		wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk,
					iwl_mld_emlsr_tmp_non_bss_done_wk);
	}

	return 0;
}
/* Initialize the driver data for a new vif and add its MAC context to
 * the firmware. If the firmware rejects the MAC context, the fw_id
 * mapping created during init is rolled back.
 */
int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_init_vif(mld, vif);
	if (ret)
		return ret;

	ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_ADD);
	if (ret)
		/* undo the fw_id -> vif mapping */
		RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);

	return ret;
}
/* Remove the vif's MAC context from the firmware, clear the fw_id
 * mapping and cancel any notifications still queued for this vif.
 */
int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);

	/* sanity-check the fw_id before using it as an array index */
	if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->fw_id_to_vif)))
		return -EINVAL;

	RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);

	iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_VIF,
					       mld_vif->fw_id);

	return ret;
}
/* Push the association state of every active link to the firmware and
 * recalculate the multicast filter.
 */
void iwl_mld_set_vif_associated(struct iwl_mld *mld,
				struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *link;
	unsigned int link_id;

	for_each_vif_active_link(vif, link, link_id) {
		int ret = iwl_mld_link_set_associated(mld, vif, link);

		if (ret)
			IWL_ERR(mld, "failed to update link %d\n", link_id);
	}

	iwl_mld_recalc_multicast_filter(mld);
}
/* Interface iterator: OR the fw_id of each non-P2P client vif into the
 * u8 bitmap passed via _data.
 */
static void iwl_mld_get_fw_id_bss_bitmap_iter(void *_data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	u8 *fw_id_bitmap = _data;

	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
		*fw_id_bitmap |= BIT(iwl_mld_vif_from_mac80211(vif)->fw_id);
}
/* Return a bitmap of the fw ids of all non-P2P client (BSS) vifs */
u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld)
{
	u8 fw_id_bitmap = 0;

	ieee80211_iterate_interfaces(mld->hw,
				     IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
				     iwl_mld_get_fw_id_bss_bitmap_iter,
				     &fw_id_bitmap);

	return fw_id_bitmap;
}
/* Handle the probe response data notification: propagate the CSA
 * countdown to mac80211 and, for P2P, store the NoA attribute for use
 * when building probe responses.
 */
void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld,
					  struct iwl_rx_packet *pkt)
{
	const struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
	struct iwl_probe_resp_data *old_data, *new_data;
	struct ieee80211_vif *vif;
	struct iwl_mld_link *mld_link;

	IWL_DEBUG_INFO(mld, "Probe response data notif: noa %d, csa %d\n",
		       notif->noa_active, notif->csa_counter);

	/* validate the fw-provided mac id before using it as index */
	if (IWL_FW_CHECK(mld, le32_to_cpu(notif->mac_id) >=
			 ARRAY_SIZE(mld->fw_id_to_vif),
			 "mac id is invalid: %d\n",
			 le32_to_cpu(notif->mac_id)))
		return;

	vif = wiphy_dereference(mld->wiphy,
				mld->fw_id_to_vif[le32_to_cpu(notif->mac_id)]);

	/* the firmware gives us the mac_id (and not the link_id), mac80211
	 * gets a vif and not a link, bottom line, this flow is not MLD ready
	 * yet.
	 */
	if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif))
		return;

	if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
	    notif->csa_counter >= 1)
		ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);

	/* the NoA handling below is P2P-only */
	if (!vif->p2p)
		return;

	mld_link = &iwl_mld_vif_from_mac80211(vif)->deflink;

	new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
	if (!new_data)
		return;

	memcpy(&new_data->notif, notif, sizeof(new_data->notif));

	/* noa_attr contains 1 reserved byte, need to subtract it */
	new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
			    sizeof(new_data->notif.noa_attr) - 1;

	/*
	 * If it's a one time NoA, only one descriptor is needed,
	 * adjust the length according to len_low.
	 */
	if (new_data->notif.noa_attr.len_low ==
	    sizeof(struct ieee80211_p2p_noa_desc) + 2)
		new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);

	/* publish the new data and free the old one after a grace period */
	old_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data);
	rcu_assign_pointer(mld_link->probe_resp_data, new_data);

	if (old_data)
		kfree_rcu(old_data, rcu_head);
}
/* Handle the notification that the AP misbehaves w.r.t. U-APSD:
 * just log the offending AP's BSSID.
 */
void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld,
					       struct iwl_rx_packet *pkt)
{
	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
	struct ieee80211_vif *vif;

	/* validate the fw-provided mac id before using it as index */
	if (IWL_FW_CHECK(mld, notif->mac_id >= ARRAY_SIZE(mld->fw_id_to_vif),
			 "mac id is invalid: %d\n", notif->mac_id))
		return;

	vif = wiphy_dereference(mld->wiphy, mld->fw_id_to_vif[notif->mac_id]);

	/* the notification carries a mac_id, not a link - not MLD ready */
	if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif))
		return;

	IWL_WARN(mld, "uapsd misbehaving AP: %pM\n", vif->bss_conf.bssid);
}
/* Handle the datapath monitor notification.
 * On an extended-CCA report for a 2.4 GHz connection of 40 MHz or more,
 * work around the overloaded environment by stripping the 40 MHz HT/HE
 * capabilities and disconnecting, so the reconnection uses 20 MHz.
 */
void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
	struct ieee80211_bss_conf *link;
	struct ieee80211_supported_band *sband;
	const struct ieee80211_sta_he_cap *he_cap;
	struct ieee80211_vif *vif;
	struct iwl_mld_vif *mld_vif;

	if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
		return;

	link = iwl_mld_fw_id_to_link_conf(mld, notif->link_id);
	if (WARN_ON(!link))
		return;

	vif = link->vif;
	if (WARN_ON(!vif) || vif->type != NL80211_IFTYPE_STATION ||
	    !vif->cfg.assoc)
		return;

	/* the workaround only applies to 2.4 GHz at >= 40 MHz */
	if (!link->chanreq.oper.chan ||
	    link->chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
	    link->chanreq.oper.width < NL80211_CHAN_WIDTH_40)
		return;

	mld_vif = iwl_mld_vif_from_mac80211(vif);

	/* this shouldn't happen *again*, ignore it */
	if (mld_vif->cca_40mhz_workaround != CCA_40_MHZ_WA_NONE)
		return;

	mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RECONNECT;

	/*
	 * This capability manipulation isn't really ideal, but it's the
	 * easiest choice - otherwise we'd have to do some major changes
	 * in mac80211 to support this, which isn't worth it. This does
	 * mean that userspace may have outdated information, but that's
	 * actually not an issue at all.
	 */
	sband = mld->wiphy->bands[NL80211_BAND_2GHZ];

	WARN_ON(!sband->ht_cap.ht_supported);
	WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
	sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;

	he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
	if (he_cap) {
		/* we know that ours is writable */
		struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;

		WARN_ON(!he->has_he);
		WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
			  IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
		he->he_cap_elem.phy_cap_info[0] &=
			~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
	}

	/* reconnect with the reduced capabilities */
	ieee80211_disconnect(vif, true);
}
/* Advance/unwind the 40 MHz CCA workaround state on disconnection.
 * On the first (workaround-triggered) disconnect, the state moves to
 * RESET; on a subsequent real disconnect the HT/HE 40 MHz capabilities
 * stripped by iwl_mld_handle_datapath_monitor_notif() are restored.
 */
void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld,
					struct ieee80211_vif *vif)
{
	struct ieee80211_supported_band *sband;
	const struct ieee80211_sta_he_cap *he_cap;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_NONE)
		return;

	/* Now we are just reconnecting with the new capabilities,
	 * but remember to reset the capabilities when we disconnect for real
	 */
	if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_RECONNECT) {
		mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RESET;
		return;
	}

	/* Now cca_40mhz_workaround == CCA_40_MHZ_WA_RESET */
	sband = mld->wiphy->bands[NL80211_BAND_2GHZ];

	sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;

	he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
	if (he_cap) {
		/* we know that ours is writable */
		struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;

		he->he_cap_elem.phy_cap_info[0] |=
			IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
	}

	mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_NONE;
}
/* Return the single BSS (client) vif, or NULL if there is not exactly one */
struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld)
{
	unsigned long fw_id_bitmap = iwl_mld_get_fw_bss_vifs_ids(mld);

	if (hweight8(fw_id_bitmap) != 1)
		return NULL;

	return wiphy_dereference(mld->wiphy,
				 mld->fw_id_to_vif[__ffs(fw_id_bitmap)]);
}

View File

@ -0,0 +1,234 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_iface_h__
#define __iwl_mld_iface_h__
#include <net/mac80211.h>
#include "link.h"
#include "session-protect.h"
#include "d3.h"
/**
 * enum iwl_mld_cca_40mhz_wa_status - status of the 40 MHz CCA workaround
 * @CCA_40_MHZ_WA_NONE: the workaround is not active
 * @CCA_40_MHZ_WA_RESET: reconnected with reduced capabilities; the 40 MHz
 *	capabilities must be restored on the next real disconnect
 * @CCA_40_MHZ_WA_RECONNECT: capabilities were stripped and a reconnection
 *	was triggered
 */
enum iwl_mld_cca_40mhz_wa_status {
	CCA_40_MHZ_WA_NONE,
	CCA_40_MHZ_WA_RESET,
	CCA_40_MHZ_WA_RECONNECT,
};
/**
 * enum iwl_mld_emlsr_blocked - defines reasons for which EMLSR is blocked
 *
 * These blocks are applied/stored per-VIF. The values are bit flags and
 * may be OR'ed together.
 *
 * @IWL_MLD_EMLSR_BLOCKED_PREVENTION: Prevent repeated EMLSR enter/exit
 * @IWL_MLD_EMLSR_BLOCKED_WOWLAN: WOWLAN is preventing EMLSR
 * @IWL_MLD_EMLSR_BLOCKED_FW: FW did not recommend MLO
 * @IWL_MLD_EMLSR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
 * @IWL_MLD_EMLSR_BLOCKED_NON_BSS: An active non-BSS interface's link is
 *	preventing EMLSR
 * @IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's
 *	link is preventing EMLSR. This is a temporary blocking that is set when
 *	there is an indication that a non-BSS interface is to be added.
 * @IWL_MLD_EMLSR_BLOCKED_TPT: throughput is too low to make EMLSR worthwhile
 */
enum iwl_mld_emlsr_blocked {
	IWL_MLD_EMLSR_BLOCKED_PREVENTION	= 0x1,
	IWL_MLD_EMLSR_BLOCKED_WOWLAN		= 0x2,
	IWL_MLD_EMLSR_BLOCKED_FW		= 0x4,
	IWL_MLD_EMLSR_BLOCKED_ROC		= 0x8,
	IWL_MLD_EMLSR_BLOCKED_NON_BSS		= 0x10,
	IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS	= 0x20,
	IWL_MLD_EMLSR_BLOCKED_TPT		= 0x40,
};
/**
 * enum iwl_mld_emlsr_exit - defines reasons for exiting EMLSR
 *
 * Reasons to exit EMLSR may be either link specific or even specific to a
 * combination of links. The values are bit flags and may be OR'ed together.
 *
 * @IWL_MLD_EMLSR_EXIT_BLOCK: Exit due to a block reason being set
 * @IWL_MLD_EMLSR_EXIT_MISSED_BEACON: Exit due to missed beacons
 * @IWL_MLD_EMLSR_EXIT_FAIL_ENTRY: FW failed to enter EMLSR
 * @IWL_MLD_EMLSR_EXIT_CSA: EMLSR prevented due to channel switch on link
 * @IWL_MLD_EMLSR_EXIT_EQUAL_BAND: EMLSR prevented as both links share the band
 * @IWL_MLD_EMLSR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links are
 *	not equal
 * @IWL_MLD_EMLSR_EXIT_LOW_RSSI: Link RSSI is unsuitable for EMLSR
 * @IWL_MLD_EMLSR_EXIT_LINK_USAGE: Exit EMLSR due to low TPT on secondary link
 * @IWL_MLD_EMLSR_EXIT_BT_COEX: Exit EMLSR due to BT coexistence
 * @IWL_MLD_EMLSR_EXIT_CHAN_LOAD: Exit EMLSR because the primary channel is not
 *	loaded enough to justify EMLSR.
 * @IWL_MLD_EMLSR_EXIT_RFI: Exit EMLSR due to RFI
 */
enum iwl_mld_emlsr_exit {
	IWL_MLD_EMLSR_EXIT_BLOCK		= 0x1,
	IWL_MLD_EMLSR_EXIT_MISSED_BEACON	= 0x2,
	IWL_MLD_EMLSR_EXIT_FAIL_ENTRY		= 0x4,
	IWL_MLD_EMLSR_EXIT_CSA			= 0x8,
	IWL_MLD_EMLSR_EXIT_EQUAL_BAND		= 0x10,
	IWL_MLD_EMLSR_EXIT_BANDWIDTH		= 0x20,
	IWL_MLD_EMLSR_EXIT_LOW_RSSI		= 0x40,
	IWL_MLD_EMLSR_EXIT_LINK_USAGE		= 0x80,
	IWL_MLD_EMLSR_EXIT_BT_COEX		= 0x100,
	IWL_MLD_EMLSR_EXIT_CHAN_LOAD		= 0x200,
	IWL_MLD_EMLSR_EXIT_RFI			= 0x400,
};
/**
 * struct iwl_mld_emlsr - per-VIF data about EMLSR operation
 *
 * The fields in the zeroed_on_not_authorized group are cleared together;
 * NOTE(review): presumably when the station loses the authorized state -
 * confirm against the CLEANUP/clearing site, which is not in this file.
 *
 * @primary: The current primary link
 * @selected_primary: Primary link as selected during the last link selection
 * @selected_links: Links as selected during the last link selection
 * @blocked_reasons: Reasons preventing EMLSR from being enabled
 * @last_exit_reason: Reason for the last EMLSR exit
 * @last_exit_ts: Time of the last EMLSR exit (if @last_exit_reason is non-zero)
 * @exit_repeat_count: Number of times EMLSR was exited for the same reason
 * @unblock_tpt_wk: Unblock EMLSR because the throughput limit was reached
 * @check_tpt_wk: a worker to check if IWL_MLD_EMLSR_BLOCKED_TPT should be
 *	added, for example if there is no longer enough traffic.
 * @prevent_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_PREVENTION
 * @tmp_non_bss_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS
 */
struct iwl_mld_emlsr {
	struct_group(zeroed_on_not_authorized,
		u8 primary;

		u8 selected_primary;
		u16 selected_links;

		enum iwl_mld_emlsr_blocked blocked_reasons;

		enum iwl_mld_emlsr_exit last_exit_reason;
		unsigned long last_exit_ts;
		u8 exit_repeat_count;
	);

	struct wiphy_work unblock_tpt_wk;
	struct wiphy_delayed_work check_tpt_wk;

	struct wiphy_delayed_work prevent_done_wk;
	struct wiphy_delayed_work tmp_non_bss_done_wk;
};
/**
 * struct iwl_mld_vif - virtual interface (MAC context) configuration parameters
 *
 * @fw_id: fw id of the mac context.
 * @session_protect: session protection parameters
 * @ap_sta: pointer to AP sta, for easier access to it.
 *	Relevant only for STA vifs.
 * @authorized: indicates the AP station was set to authorized
 * @bigtks: BIGTKs of the AP, for beacon protection.
 *	Only valid for STA. (FIXME: needs to be per link)
 * @num_associated_stas: number of associated STAs. Relevant only for AP mode.
 * @ap_ibss_active: whether the AP/IBSS was started
 * @roc_activity: the id of the roc_activity running. Relevant for p2p device
 *	only. Set to %ROC_NUM_ACTIVITIES when not in use.
 * @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
 *	environment is too loaded, we work around this by reconnecting to the
 *	same AP with 20 MHz. This manages the status of the workaround.
 * @beacon_inject_active: indicates an active debugfs beacon ie injection
 * @low_latency_causes: bit flags, indicating the causes for low-latency,
 *	see @iwl_mld_low_latency_cause.
 * @mld: pointer to the mld structure.
 * @deflink: default link data, for use in non-MLO,
 * @link: reference to link data for each valid link, for use in MLO.
 * @emlsr: information related to EMLSR
 * @wowlan_data: data used by the wowlan suspend flow
 * @use_ps_poll: use ps_poll frames
 * @disable_bf: disable beacon filter
 * @dbgfs_slink: debugfs symlink for this interface
 */
struct iwl_mld_vif {
	/* Add here fields that need clean up on restart */
	struct_group(zeroed_on_hw_restart,
		u8 fw_id;
		struct iwl_mld_session_protect session_protect;
		struct ieee80211_sta *ap_sta;
		bool authorized;
		struct ieee80211_key_conf __rcu *bigtks[2];
		u8 num_associated_stas;
		bool ap_ibss_active;
		u32 roc_activity;
		enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
#ifdef CONFIG_IWLWIFI_DEBUGFS
		bool beacon_inject_active;
#endif
		u8 low_latency_causes;
	);
	/* And here fields that survive a fw restart */
	struct iwl_mld *mld;
	struct iwl_mld_link deflink;
	struct iwl_mld_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];

	struct iwl_mld_emlsr emlsr;

	/* was "#if CONFIG_PM_SLEEP": an object-like #if on a Kconfig macro
	 * that may be undefined trips -Wundef; use #ifdef like the other
	 * conditionals in this struct
	 */
#ifdef CONFIG_PM_SLEEP
	struct iwl_mld_wowlan_data wowlan_data;
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
	bool use_ps_poll;
	bool disable_bf;
	struct dentry *dbgfs_slink;
#endif
};
/* Retrieve the driver's per-vif data from mac80211's drv_priv area */
static inline struct iwl_mld_vif *
iwl_mld_vif_from_mac80211(struct ieee80211_vif *vif)
{
	return (struct iwl_mld_vif *)vif->drv_priv;
}
/* Dereference a link pointer, valid under RCU or with the wiphy mutex held */
#define iwl_mld_link_dereference_check(mld_vif, link_id) \
	rcu_dereference_check((mld_vif)->link[link_id], \
			      lockdep_is_held(&mld_vif->mld->wiphy->mtx))

/* Iterate over all allocated links of @mld_vif, assigning each valid one
 * to @mld_link. The loop variable link_id (int) is declared by the macro
 * and is visible inside the loop body.
 */
#define for_each_mld_vif_valid_link(mld_vif, mld_link) \
	for (int link_id = 0; link_id < ARRAY_SIZE((mld_vif)->link); \
	     link_id++) \
		if ((mld_link = iwl_mld_link_dereference_check(mld_vif, link_id)))
/* Retrieve pointer to mld link from mac80211 structures */
static inline struct iwl_mld_link *
iwl_mld_link_from_mac80211(struct ieee80211_bss_conf *bss_conf)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);

	return iwl_mld_link_dereference_check(mld_vif, bss_conf->link_id);
}
int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif);
/* Cleanup function for struct iwl_mld_vif, will be called in restart */
void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
u32 action);
int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
void iwl_mld_set_vif_associated(struct iwl_mld *mld,
struct ieee80211_vif *vif);
u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld);
void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld,
struct ieee80211_vif *vif);
/* Return true if any low-latency cause is currently set for this vif */
static inline bool iwl_mld_vif_low_latency(const struct iwl_mld_vif *mld_vif)
{
	return mld_vif->low_latency_causes != 0;
}
struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld);
#endif /* __iwl_mld_iface_h__ */

View File

@ -0,0 +1,358 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024 Intel Corporation
*/
#include "key.h"
#include "iface.h"
#include "sta.h"
#include "fw/api/datapath.h"
/* Build the IWL_SEC_KEY_FLAG_* bitmask for a key: multicast/pairwise,
 * cipher type and size, MFP and SPP A-MSDU handling.
 */
static u32 iwl_mld_get_key_flags(struct iwl_mld *mld,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *key)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
	/* key indices 4 and 5 are used for IGTKs */
	bool igtk = key->keyidx == 4 || key->keyidx == 5;
	u32 flags = 0;

	if (!pairwise)
		flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP;
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
	case WLAN_CIPHER_SUITE_CCMP:
		/* BIP-CMAC shares the CCMP cipher type in the fw API */
		flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP;
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		/* 256-bit variants additionally set the key-size flag */
		flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;
		fallthrough;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP;
		break;
	}

	if (!sta && vif->type == NL80211_IFTYPE_STATION)
		sta = mld_vif->ap_sta;

	/* If we are installing an iGTK (in AP or STA mode), we need to tell
	 * the firmware this key will en/decrypt MGMT frames.
	 * Same goes if we are installing a pairwise key for an MFP station.
	 * In case we're installing a groupwise key (which is not an iGTK),
	 * then, we will not use this key for MGMT frames.
	 */
	if ((sta && sta->mfp && pairwise) || igtk)
		flags |= IWL_SEC_KEY_FLAG_MFP;

	if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
		flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;

	return flags;
}
/* Compute the firmware station-ID bitmask that @key applies to.
 * Returns 0 (with a WARN where the state is inconsistent) when the
 * target station(s) cannot be resolved; callers treat 0 as an error.
 */
static u32 iwl_mld_get_key_sta_mask(struct iwl_mld *mld,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    struct ieee80211_key_conf *key)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct ieee80211_link_sta *link_sta;
	int sta_id;

	lockdep_assert_wiphy(mld->wiphy);

	/* AP group keys are per link and should be on the mcast/bcast STA */
	if (vif->type == NL80211_IFTYPE_AP &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mld_link *link = NULL;

		if (key->link_id >= 0)
			link = iwl_mld_link_dereference_check(mld_vif,
							      key->link_id);

		if (WARN_ON(!link))
			return 0;

		/* In this stage we should have both the bcast and mcast STAs */
		if (WARN_ON(link->bcast_sta.sta_id == IWL_INVALID_STA ||
			    link->mcast_sta.sta_id == IWL_INVALID_STA))
			return 0;

		/* IGTK/BIGTK to bcast STA */
		if (key->keyidx >= 4)
			return BIT(link->bcast_sta.sta_id);

		/* GTK for data to mcast STA */
		return BIT(link->mcast_sta.sta_id);
	}

	/* for client mode use the AP STA also for group keys */
	if (!sta && vif->type == NL80211_IFTYPE_STATION)
		sta = mld_vif->ap_sta;

	/* STA should be non-NULL now */
	if (WARN_ON(!sta))
		return 0;

	/* Key is not per-link, get the full sta mask */
	if (key->link_id < 0)
		return iwl_mld_fw_sta_id_mask(mld, sta);

	/* The link_sta shouldn't be NULL now, but this is checked in
	 * iwl_mld_fw_sta_id_mask
	 */
	link_sta = link_sta_dereference_check(sta, key->link_id);

	sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
	if (sta_id < 0)
		return 0;

	return BIT(sta_id);
}
/* Send a SEC_KEY_CMD (ADD) installing @key for the stations in @sta_mask.
 * For TKIP, the RX/TX MIC halves are copied from the mac80211 key
 * material into their dedicated command fields.
 * Returns 0 on success or a negative error code.
 */
static int iwl_mld_add_key_to_fw(struct iwl_mld *mld, u32 sta_mask,
				 u32 key_flags, struct ieee80211_key_conf *key)
{
	struct iwl_sec_key_cmd cmd = {
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.u.add.sta_mask = cpu_to_le32(sta_mask),
		.u.add.key_id = cpu_to_le32(key->keyidx),
		.u.add.key_flags = cpu_to_le32(key_flags),
		.u.add.tx_seq = cpu_to_le64(atomic64_read(&key->tx_pn)),
	};
	bool tkip = key->cipher == WLAN_CIPHER_SUITE_TKIP;
	int max_key_len = sizeof(cmd.u.add.key);

	if (WARN_ON(!sta_mask))
		return -EINVAL;

	/* never let the key material overflow the command buffer */
	if (WARN_ON(key->keylen > max_key_len))
		return -EINVAL;

	memcpy(cmd.u.add.key, key->key, key->keylen);

	if (tkip) {
		memcpy(cmd.u.add.tkip_mic_rx_key,
		       key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
		       8);
		memcpy(cmd.u.add.tkip_mic_tx_key,
		       key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
		       8);
	}

	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD),
				    &cmd);
}
/* Send a SEC_KEY_CMD (REMOVE) deleting key @keyidx from the stations in
 * @sta_mask. A zero mask indicates a bug in the caller and is rejected.
 */
static void iwl_mld_remove_key_from_fw(struct iwl_mld *mld, u32 sta_mask,
				       u32 key_flags, u32 keyidx)
{
	struct iwl_sec_key_cmd cmd = {};

	if (WARN_ON(!sta_mask))
		return;

	cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	cmd.u.remove.sta_mask = cpu_to_le32(sta_mask);
	cmd.u.remove.key_id = cpu_to_le32(keyidx);
	cmd.u.remove.key_flags = cpu_to_le32(key_flags);

	iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD), &cmd);
}
/* Remove @key from the firmware and update the driver's IGTK bookkeeping.
 *
 * If the key is an IGTK (key index 4/5) and it is the one currently
 * installed in the firmware, the per-link tracking pointer and the global
 * IGTK count are updated together. Previously the count was decremented
 * unconditionally, which could underflow num_igtks when mac80211 removed
 * an IGTK that was never handed to the firmware.
 */
void iwl_mld_remove_key(struct iwl_mld *mld, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *key)
{
	u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key);
	u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!sta_mask)
		return;

	if (key->keyidx == 4 || key->keyidx == 5) {
		struct iwl_mld_link *mld_link;
		unsigned int link_id = 0;

		/* set to -1 for non-MLO right now */
		if (key->link_id >= 0)
			link_id = key->link_id;

		mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);
		if (WARN_ON(!mld_link))
			return;

		/* Only drop the accounting for the IGTK that is actually
		 * tracked as installed in the firmware; keep the pointer
		 * and counter consistent with each other.
		 */
		if (mld_link->igtk == key) {
			mld_link->igtk = NULL;
			mld->num_igtks--;
		}
	}

	iwl_mld_remove_key_from_fw(mld, sta_mask, key_flags, key->keyidx);

	/* no longer in HW */
	key->hw_key_idx = STA_KEY_IDX_INVALID;
}
/* Install @key into the firmware.
 *
 * For IGTKs (key index 4/5): the firmware supports only one IGTK per link,
 * so an existing one is removed first; -EOPNOTSUPP is returned when the
 * global IGTK budget (IWL_MAX_NUM_IGTKS) is exhausted.
 * Returns 0 on success or a negative error code.
 */
int iwl_mld_add_key(struct iwl_mld *mld,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta,
		    struct ieee80211_key_conf *key)
{
	u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key);
	u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *mld_link = NULL;
	bool igtk = key->keyidx == 4 || key->keyidx == 5;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	if (!sta_mask)
		return -EINVAL;

	if (igtk) {
		if (mld->num_igtks == IWL_MAX_NUM_IGTKS)
			return -EOPNOTSUPP;

		u8 link_id = 0;

		/* set to -1 for non-MLO right now */
		if (key->link_id >= 0)
			link_id = key->link_id;

		mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);

		if (WARN_ON(!mld_link))
			return -EINVAL;

		/* fw holds a single IGTK; evict the old one first */
		if (mld_link->igtk) {
			IWL_DEBUG_MAC80211(mld, "remove old IGTK %d\n",
					   mld_link->igtk->keyidx);
			iwl_mld_remove_key(mld, vif, sta, mld_link->igtk);
		}

		WARN_ON(mld_link->igtk);
	}

	ret = iwl_mld_add_key_to_fw(mld, sta_mask, key_flags, key);
	if (ret)
		return ret;

	/* only set on success, after the old IGTK (if any) was removed */
	if (mld_link) {
		mld_link->igtk = key;
		mld->num_igtks++;
	}

	/* We don't really need this, but need it to be not invalid,
	 * so we will know if the key is in fw.
	 */
	key->hw_key_idx = 0;

	return 0;
}
/* Context for iwl_mld_remove_ap_keys_iter():
 * @link_id: only keys belonging to this link are removed
 * @sta: the station the keys are mapped to in the firmware
 */
struct remove_ap_keys_iter_data {
	u8 link_id;
	struct ieee80211_sta *sta;
};

/* Key iterator callback: remove a group key of the AP (client mode) from
 * the firmware, if it is installed there and belongs to the given link.
 */
static void iwl_mld_remove_ap_keys_iter(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					struct ieee80211_key_conf *key,
					void *_data)
{
	struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
	struct remove_ap_keys_iter_data *data = _data;

	/* skip keys that were never installed in the firmware */
	if (key->hw_key_idx == STA_KEY_IDX_INVALID)
		return;

	/* All the pairwise keys should have been removed by now */
	if (WARN_ON(sta))
		return;

	if (key->link_id >= 0 && key->link_id != data->link_id)
		return;

	iwl_mld_remove_key(mld, vif, data->sta, key);
}

/* Remove the AP's group keys for @link_id from the firmware.
 * Only valid for station (client) interfaces.
 */
void iwl_mld_remove_ap_keys(struct iwl_mld *mld, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, unsigned int link_id)
{
	struct remove_ap_keys_iter_data iter_data = {
		.link_id = link_id,
		.sta = sta,
	};

	if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION))
		return;

	ieee80211_iter_keys(mld->hw, vif,
			    iwl_mld_remove_ap_keys_iter,
			    &iter_data);
}
/* Context for iwl_mld_update_sta_key_iter():
 * @sta: only keys belonging to this station are modified
 * @old_sta_mask: fw station-ID mask the keys are currently mapped to
 * @new_sta_mask: fw station-ID mask the keys should be moved to
 * @err: last error returned by the firmware, 0 if all commands succeeded
 *
 * Note: renamed from the iwlmvm-prefixed leftover so all identifiers in
 * this driver use the iwl_mld_ namespace.
 */
struct iwl_mld_sta_key_update_data {
	struct ieee80211_sta *sta;
	u32 old_sta_mask;
	u32 new_sta_mask;
	int err;
};

/* Key iterator callback: send a SEC_KEY_CMD (MODIFY) re-mapping one
 * pairwise key from the old station mask to the new one.
 */
static void iwl_mld_update_sta_key_iter(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					struct ieee80211_key_conf *key,
					void *_data)
{
	struct iwl_mld_sta_key_update_data *data = _data;
	struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
	struct iwl_sec_key_cmd cmd = {
		.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
		.u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask),
		.u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask),
		.u.modify.key_id = cpu_to_le32(key->keyidx),
		.u.modify.key_flags =
			cpu_to_le32(iwl_mld_get_key_flags(mld, vif, sta, key)),
	};
	int err;

	/* only need to do this for pairwise keys (link_id == -1) */
	if (sta != data->sta || key->link_id >= 0)
		return;

	err = iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD),
				   &cmd);

	if (err)
		data->err = err;
}

/* Re-map all of @sta's pairwise keys in the firmware from @old_sta_mask
 * to @new_sta_mask (used when the set of active links changes).
 * Returns 0 on success or the last firmware error.
 */
int iwl_mld_update_sta_keys(struct iwl_mld *mld,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta,
			    u32 old_sta_mask,
			    u32 new_sta_mask)
{
	struct iwl_mld_sta_key_update_data data = {
		.sta = sta,
		.old_sta_mask = old_sta_mask,
		.new_sta_mask = new_sta_mask,
	};

	ieee80211_iter_keys(mld->hw, vif, iwl_mld_update_sta_key_iter,
			    &data);

	return data.err;
}

View File

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_key_h__
#define __iwl_mld_key_h__
#include "mld.h"
#include <net/mac80211.h>
#include "fw/api/sta.h"
void iwl_mld_remove_key(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
int iwl_mld_add_key(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
void iwl_mld_remove_ap_keys(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
unsigned int link_id);
int iwl_mld_update_sta_keys(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u32 old_sta_mask,
u32 new_sta_mask);
/* Key iterator callback: mark @key as no longer installed in the
 * firmware. Used to reset the driver's key state, e.g. when cleaning up
 * after a firmware restart.
 */
static inline void
iwl_mld_cleanup_keys_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key, void *data)
{
	key->hw_key_idx = STA_KEY_IDX_INVALID;
}
#endif /* __iwl_mld_key_h__ */

View File

@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024 Intel Corporation
*/
#include <linux/leds.h>
#include <net/mac80211.h>
#include "fw/api/led.h"
#include "mld.h"
#include "led.h"
#include "hcmd.h"
/* Send an asynchronous LEDS_CMD switching the LED on or off.
 * Must only be called while the firmware is running; failures are
 * logged but otherwise ignored (LED state is best-effort).
 */
static void iwl_mld_send_led_fw_cmd(struct iwl_mld *mld, bool on)
{
	struct iwl_led_cmd led_cmd = {
		.status = cpu_to_le32(on),
	};
	int err;

	if (WARN_ON(!mld->fw_status.running))
		return;

	err = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(LONG_GROUP,
							   LEDS_CMD),
					      CMD_ASYNC, &led_cmd);

	if (err)
		IWL_WARN(mld, "LED command failed: %d\n", err);
}
static void iwl_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct iwl_mld *mld = container_of(led_cdev, struct iwl_mld, led);
if (!mld->fw_status.running)
return;
iwl_mld_send_led_fw_cmd(mld, brightness > 0);
}
/* Register the driver LED with the LED class subsystem.
 *
 * Honors the led_mode module parameter: blink mode is unsupported and
 * falls back to RF-state, IWL_LED_DISABLE skips registration entirely
 * (leaving mld->led.name NULL, which the other LED helpers check).
 * Returns 0 on success or when disabled, a negative error otherwise.
 */
int iwl_mld_leds_init(struct iwl_mld *mld)
{
	int mode = iwlwifi_mod_params.led_mode;
	int ret;

	switch (mode) {
	case IWL_LED_BLINK:
		IWL_ERR(mld, "Blink led mode not supported, used default\n");
		fallthrough;
	case IWL_LED_DEFAULT:
	case IWL_LED_RF_STATE:
		mode = IWL_LED_RF_STATE;
		break;
	case IWL_LED_DISABLE:
		IWL_INFO(mld, "Led disabled\n");
		return 0;
	default:
		return -EINVAL;
	}

	mld->led.name = kasprintf(GFP_KERNEL, "%s-led",
				  wiphy_name(mld->hw->wiphy));
	if (!mld->led.name)
		return -ENOMEM;

	mld->led.brightness_set = iwl_led_brightness_set;
	mld->led.max_brightness = 1;

	if (mode == IWL_LED_RF_STATE)
		mld->led.default_trigger =
			ieee80211_get_radio_led_name(mld->hw);

	ret = led_classdev_register(mld->trans->dev, &mld->led);
	if (ret) {
		/* reset name so later LED helpers see the LED as unused */
		kfree(mld->led.name);
		mld->led.name = NULL;
		IWL_INFO(mld, "Failed to enable led\n");
	}

	return ret;
}
/* Push the current LED state to a (re)started firmware, if the LED was
 * ever registered.
 */
void iwl_mld_led_config_fw(struct iwl_mld *mld)
{
	if (mld->led.name)
		iwl_mld_send_led_fw_cmd(mld, mld->led.brightness > 0);
}
/* Unregister the LED class device and release its name, if LED support
 * was registered (registration may have been skipped or failed).
 */
void iwl_mld_leds_exit(struct iwl_mld *mld)
{
	if (mld->led.name) {
		led_classdev_unregister(&mld->led);
		kfree(mld->led.name);
		mld->led.name = NULL;
	}
}

View File

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_led_h__
#define __iwl_mld_led_h__

#include "mld.h"

#ifdef CONFIG_IWLWIFI_LEDS
/* Register/unregister the LED class device, and sync its state to the fw */
int iwl_mld_leds_init(struct iwl_mld *mld);
void iwl_mld_leds_exit(struct iwl_mld *mld);
void iwl_mld_led_config_fw(struct iwl_mld *mld);
#else
/* LED support compiled out: all operations are no-ops */
static inline int iwl_mld_leds_init(struct iwl_mld *mld)
{
	return 0;
}

static inline void iwl_mld_leds_exit(struct iwl_mld *mld)
{
}

static inline void iwl_mld_led_config_fw(struct iwl_mld *mld)
{
}
#endif

#endif /* __iwl_mld_led_h__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,153 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_link_h__
#define __iwl_mld_link_h__
#include <net/mac80211.h>
#include "mld.h"
#include "sta.h"
/**
 * struct iwl_probe_resp_data - data for NoA (Notice of Absence)/CSA updates
 * @rcu_head: used for freeing the data on update
 * @notif: notification data
 * @noa_len: length of NoA attribute, calculated from the notification
 */
struct iwl_probe_resp_data {
	struct rcu_head rcu_head;
	struct iwl_probe_resp_data_notif notif;
	int noa_len;
};

/**
 * struct iwl_mld_link - link configuration parameters
 *
 * @rcu_head: RCU head for freeing this data.
 * @fw_id: the fw id of the link.
 * @active: if the link is active or not.
 * @queue_params: QoS data from mac80211. This is updated with a call to
 *	drv_conf_tx per each AC, and then notified once with BSS_CHANGED_QOS.
 *	So we store it here and then send one link cmd for all the ACs.
 * @chan_ctx: pointer to the channel context assigned to the link. If a link
 *	has an assigned channel context it means that it is active.
 * @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked.
 * @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two.
 *	This tracks the one IGTK that currently exists in FW.
 * @vif: the vif this link belongs to
 * @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
 * @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
 * @aux_sta: station used for remain on channel. Used in P2P device.
 * @link_id: over the air link ID
 * @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs,
 *	but higher layers work differently, so we store the keys here for
 *	later installation.
 * @silent_deactivation: next deactivation needs to be silent.
 * @probe_resp_data: data from FW notification to store NOA related data to be
 *	inserted into probe response.
 * @rx_omi: data for BW reduction with OMI
 * @rx_omi.bw_in_progress: update is in progress (indicates target BW)
 * @rx_omi.exit_ts: timestamp of last exit
 * @rx_omi.finished_work: work for the delayed reaction to the firmware saying
 *	the change was applied, and for then applying a new mode if it was
 *	updated while waiting for firmware/AP settle delay.
 * @rx_omi.desired_bw: desired bandwidth
 * @rx_omi.last_max_bw: last maximum BW used by firmware, for AP BW changes
 */
struct iwl_mld_link {
	struct rcu_head rcu_head;

	/* Add here fields that need clean up on restart */
	struct_group(zeroed_on_hw_restart,
		u8 fw_id;
		bool active;
		struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
		struct ieee80211_chanctx_conf __rcu *chan_ctx;
		bool he_ru_2mhz_block;
		struct ieee80211_key_conf *igtk;
	);
	/* And here fields that survive a fw restart */
	struct ieee80211_vif *vif;
	struct iwl_mld_int_sta bcast_sta;
	struct iwl_mld_int_sta mcast_sta;
	struct iwl_mld_int_sta aux_sta;
	u8 link_id;

	struct {
		struct wiphy_delayed_work finished_work;
		unsigned long exit_ts;
		enum ieee80211_sta_rx_bandwidth bw_in_progress,
						desired_bw,
						last_max_bw;
	} rx_omi;

	/* we can only have 2 GTK + 2 IGTK + 2 BIGTK active at a time */
	struct ieee80211_key_conf *ap_early_keys[6];

	bool silent_deactivation;

	struct iwl_probe_resp_data __rcu *probe_resp_data;
};
/* Cleanup function for struct iwl_mld_link, will be called in restart */
static inline void
iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link)
{
	struct iwl_probe_resp_data *probe_data;

	/* drop any pending probe-response (NoA) data; freed after RCU grace */
	probe_data = wiphy_dereference(mld->wiphy, link->probe_resp_data);
	RCU_INIT_POINTER(link->probe_resp_data, NULL);
	if (probe_data)
		kfree_rcu(probe_data, rcu_head);

	/* NOTE(review): presumably this zeroes only the zeroed_on_hw_restart
	 * group, so the internal STAs below are still valid — see
	 * CLEANUP_STRUCT's definition to confirm.
	 */
	CLEANUP_STRUCT(link);
	if (link->bcast_sta.sta_id != IWL_INVALID_STA)
		iwl_mld_free_internal_sta(mld, &link->bcast_sta);

	if (link->mcast_sta.sta_id != IWL_INVALID_STA)
		iwl_mld_free_internal_sta(mld, &link->mcast_sta);

	if (link->aux_sta.sta_id != IWL_INVALID_STA)
		iwl_mld_free_internal_sta(mld, &link->aux_sta);
}
/* Convert a percentage from [0,100] to [0,255].
 * Scale by 255 (not 256) so 100% maps exactly to 255; the former
 * "* 256 / 100" form produced 256 for 100%, outside the stated range.
 */
#define NORMALIZE_PERCENT_TO_255(percentage) (((percentage) * 255) / 100)
int iwl_mld_add_link(struct iwl_mld *mld,
struct ieee80211_bss_conf *bss_conf);
int iwl_mld_remove_link(struct iwl_mld *mld,
struct ieee80211_bss_conf *bss_conf);
int iwl_mld_activate_link(struct iwl_mld *mld,
struct ieee80211_bss_conf *link);
void iwl_mld_deactivate_link(struct iwl_mld *mld,
struct ieee80211_bss_conf *link);
int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
struct ieee80211_bss_conf *link,
enum ieee80211_sta_rx_bandwidth bw);
int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
struct ieee80211_bss_conf *link, u32 changes);
void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt,
u32 removed_link_id);
int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld,
struct ieee80211_bss_conf *link_conf);
unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld,
struct ieee80211_bss_conf *link_conf);
int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld,
struct ieee80211_bss_conf *link_conf,
bool expect_active_link);
void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld);
void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld);
void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
struct ieee80211_bss_conf *link_conf,
enum ieee80211_sta_rx_bandwidth bw);
#endif /* __iwl_mld_link_h__ */

View File

@ -0,0 +1,335 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "mld.h"
#include "iface.h"
#include "low_latency.h"
#include "hcmd.h"
#include "power.h"
#define MLD_LL_WK_INTERVAL_MSEC 500
#define MLD_LL_PERIOD (HZ * MLD_LL_WK_INTERVAL_MSEC / 1000)
#define MLD_LL_ACTIVE_WK_PERIOD (HZ * 10)
/* packets/MLD_LL_WK_PERIOD seconds */
#define MLD_LL_ENABLE_THRESH 100
/* Evaluate per-MAC low-latency state from the vo/vi packet counters.
 *
 * For each MAC that has seen traffic, sum the voice/video counters over
 * all RX queues. Above MLD_LL_ENABLE_THRESH, low latency turns on
 * immediately; it only turns off after a full MLD_LL_ACTIVE_WK_PERIOD
 * window expired below the threshold (counters reset at window expiry).
 *
 * Returns true if any MAC ends up in low-latency state.
 */
static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
				     unsigned long timestamp)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	bool global_low_latency = false;
	u8 num_rx_q = mld->trans->num_rx_queues;

	for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
		u32 total_vo_vi_pkts = 0;
		bool ll_period_expired;

		/* If it's not initialized yet, it means we have not yet
		 * received/transmitted any vo/vi packet on this MAC.
		 */
		if (!ll->window_start[mac_id])
			continue;

		ll_period_expired =
			time_after(timestamp, ll->window_start[mac_id] +
				   MLD_LL_ACTIVE_WK_PERIOD);

		if (ll_period_expired)
			ll->window_start[mac_id] = timestamp;

		for (int q = 0; q < num_rx_q; q++) {
			struct iwl_mld_low_latency_packets_counters *counters =
				&mld->low_latency.pkts_counters[q];

			/* lock out the data path that increments vo_vi */
			spin_lock_bh(&counters->lock);

			total_vo_vi_pkts += counters->vo_vi[mac_id];

			if (ll_period_expired)
				counters->vo_vi[mac_id] = 0;

			spin_unlock_bh(&counters->lock);
		}

		/* enable immediately with enough packets but defer
		 * disabling only if the low-latency period expired and
		 * below threshold.
		 */
		if (total_vo_vi_pkts > MLD_LL_ENABLE_THRESH)
			mld->low_latency.result[mac_id] = true;
		else if (ll_period_expired)
			mld->low_latency.result[mac_id] = false;

		global_low_latency |= mld->low_latency.result[mac_id];
	}

	return global_low_latency;
}
/* Interface iterator: sync each vif's traffic-based low-latency cause
 * with the freshly computed per-MAC result.
 */
static void iwl_mld_low_latency_iter(void *_data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct iwl_mld *mld = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool had_traffic_cause, has_traffic_now;

	if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result)))
		return;

	had_traffic_cause = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC;
	has_traffic_now = mld->low_latency.result[mld_vif->fw_id];

	if (had_traffic_cause != has_traffic_now)
		iwl_mld_vif_update_low_latency(mld, vif, has_traffic_now,
					       LOW_LATENCY_TRAFFIC);
}
/* Deferred worker: recompute the low-latency state from the packet
 * counters and propagate any change to all active interfaces.
 */
static void iwl_mld_low_latency_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
					   low_latency.work.work);
	unsigned long timestamp = jiffies;
	bool low_latency_active;

	/* counters/state are reset separately on restart; nothing to do */
	if (mld->fw_status.in_hw_restart)
		return;

	/* It is assumed that the work was scheduled only after checking
	 * at least MLD_LL_PERIOD has passed since the last update.
	 */

	low_latency_active = iwl_mld_calc_low_latency(mld, timestamp);

	/* Update the timestamp now after the low-latency calculation */
	mld->low_latency.timestamp = timestamp;

	/* If low-latency is active we need to force re-evaluation after
	 * 10 seconds, so that we can disable low-latency when
	 * the low-latency traffic ends.
	 *
	 * Otherwise, we don't need to run the work because there is nothing to
	 * disable.
	 *
	 * Note that this has no impact on the regular scheduling of the
	 * updates triggered by traffic - those happen whenever the
	 * MLD_LL_PERIOD timeout expire.
	 */
	if (low_latency_active)
		wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
					 MLD_LL_ACTIVE_WK_PERIOD);

	ieee80211_iterate_active_interfaces(mld->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mld_low_latency_iter, mld);
}
/* Allocate the per-RX-queue vo/vi packet counters and initialize the
 * low-latency detection worker. Returns 0 on success or -ENOMEM.
 */
int iwl_mld_low_latency_init(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	unsigned long ts = jiffies;

	ll->pkts_counters = kcalloc(mld->trans->num_rx_queues,
				    sizeof(*ll->pkts_counters), GFP_KERNEL);
	if (!ll->pkts_counters)
		return -ENOMEM;

	for (int q = 0; q < mld->trans->num_rx_queues; q++)
		spin_lock_init(&ll->pkts_counters[q].lock);

	wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);

	ll->timestamp = ts;

	/* The low-latency window_start will be initialized per-MAC on
	 * the first vo/vi packet received/transmitted.
	 */

	return 0;
}
/* Release the per-queue packet counters; NULL the pointer so a stray
 * data-path call after teardown is caught by the WARN in the counter path.
 */
void iwl_mld_low_latency_free(struct iwl_mld *mld)
{
	kfree(mld->low_latency.pkts_counters);
	mld->low_latency.pkts_counters = NULL;
}
void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
{
struct iwl_mld_low_latency *ll = &mld->low_latency;
ll->timestamp = jiffies;
memset(ll->window_start, 0, sizeof(ll->window_start));
memset(ll->result, 0, sizeof(ll->result));
for (int q = 0; q < mld->trans->num_rx_queues; q++)
memset(ll->pkts_counters[q].vo_vi, 0,
sizeof(ll->pkts_counters[q].vo_vi));
}
/* Tell the firmware whether @mac_id is in low-latency mode; RX and TX
 * directions are always set together. Returns 0 or a negative error.
 */
static int iwl_mld_send_low_latency_cmd(struct iwl_mld *mld, bool low_latency,
					u16 mac_id)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id)
	};
	u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD);
	int ret;

	if (low_latency) {
		/* Currently we don't care about the direction */
		cmd.low_latency_rx = 1;
		cmd.low_latency_tx = 1;
	}

	ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
	if (ret)
		IWL_ERR(mld, "Failed to send low latency command\n");

	return ret;
}
/* Record or clear a single low-latency cause bit on the vif */
static void iwl_mld_vif_set_low_latency(struct iwl_mld_vif *mld_vif, bool set,
					enum iwl_mld_low_latency_cause cause)
{
	mld_vif->low_latency_causes = set ?
		(mld_vif->low_latency_causes | cause) :
		(mld_vif->low_latency_causes & ~cause);
}
/* Set or clear one low-latency @cause on @vif and, if the combined state
 * changed, inform the firmware. On command failure the cause bit is
 * rolled back. Entering low latency also leaves OMI BW reduction, and
 * P2P client interfaces get their MAC power settings re-evaluated.
 */
void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
				    struct ieee80211_vif *vif,
				    bool low_latency,
				    enum iwl_mld_low_latency_cause cause)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	bool prev;

	prev = iwl_mld_vif_low_latency(mld_vif);
	iwl_mld_vif_set_low_latency(mld_vif, low_latency, cause);

	/* other causes may keep the effective state unchanged */
	low_latency = iwl_mld_vif_low_latency(mld_vif);
	if (low_latency == prev)
		return;

	if (iwl_mld_send_low_latency_cmd(mld, low_latency, mld_vif->fw_id)) {
		/* revert to previous low-latency state */
		iwl_mld_vif_set_low_latency(mld_vif, prev, cause);
		return;
	}

	if (low_latency)
		iwl_mld_leave_omi_bw_reduction(mld);

	/* power settings only need updating for P2P clients */
	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_CLIENT)
		return;

	iwl_mld_update_mac_power(mld, vif, false);
}
/* Check whether @hdr is a QoS-data frame on a voice or video access
 * category. Returns false for non-QoS frames or out-of-range TIDs.
 */
static bool iwl_mld_is_vo_vi_pkt(struct ieee80211_hdr *hdr)
{
	u8 tid;
	static const u8 tid_to_mac80211_ac[] = {
		IEEE80211_AC_BE,
		IEEE80211_AC_BK,
		IEEE80211_AC_BK,
		IEEE80211_AC_BE,
		IEEE80211_AC_VI,
		IEEE80211_AC_VI,
		IEEE80211_AC_VO,
		IEEE80211_AC_VO,
	};

	if (!hdr || !ieee80211_is_data_qos(hdr->frame_control))
		return false;

	tid = ieee80211_get_tid(hdr);
	if (tid >= IWL_MAX_TID_COUNT)
		return false;

	/* mac80211 numbers the ACs AC_VO=0 < AC_VI=1 < AC_BE < AC_BK, so
	 * both voice and video match "<= IEEE80211_AC_VI". The previous
	 * strict "<" matched only AC_VO, silently ignoring video traffic
	 * even though TIDs 4/5 map to AC_VI in the table above.
	 */
	return tid_to_mac80211_ac[tid] <= IEEE80211_AC_VI;
}
/* Data-path hook: account one packet of @sta's vif for low-latency
 * detection if it is a vo/vi QoS-data frame, and kick the evaluation
 * worker at most once per MLD_LL_PERIOD.
 * @queue: counter-bucket index; must be below num_rx_queues.
 */
void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
					 struct ieee80211_hdr *hdr,
					 struct ieee80211_sta *sta,
					 u8 queue)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
	struct iwl_mld_low_latency_packets_counters *counters;
	/* jiffies can theoretically be 0; use 1 so that a started window
	 * is never mistaken for "not initialized"
	 */
	unsigned long ts = jiffies ? jiffies : 1;
	u8 fw_id = mld_vif->fw_id;

	/* we should have failed op mode init if NULL */
	if (WARN_ON_ONCE(!mld->low_latency.pkts_counters))
		return;

	if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
			 queue >= mld->trans->num_rx_queues))
		return;

	/* counting is suspended, e.g. around a firmware restart */
	if (mld->low_latency.stopped)
		return;

	if (!iwl_mld_is_vo_vi_pkt(hdr))
		return;

	counters = &mld->low_latency.pkts_counters[queue];

	spin_lock_bh(&counters->lock);
	counters->vo_vi[fw_id]++;
	spin_unlock_bh(&counters->lock);

	/* Initialize the window_start on the first vo/vi packet */
	if (!mld->low_latency.window_start[fw_id])
		mld->low_latency.window_start[fw_id] = ts;

	if (time_is_before_jiffies(mld->low_latency.timestamp + MLD_LL_PERIOD))
		wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
					 0);
}
void iwl_mld_low_latency_stop(struct iwl_mld *mld)
{
lockdep_assert_wiphy(mld->wiphy);
mld->low_latency.stopped = true;
wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work);
}
/* Resume low-latency tracking after iwl_mld_low_latency_stop():
 * reset all windows and counters, and if any MAC was still marked
 * low-latency, schedule a re-evaluation (there may be no traffic to
 * trigger one otherwise).
 */
void iwl_mld_low_latency_restart(struct iwl_mld *mld)
{
	struct iwl_mld_low_latency *ll = &mld->low_latency;
	bool low_latency = false;
	unsigned long ts = jiffies;

	lockdep_assert_wiphy(mld->wiphy);

	ll->timestamp = ts;
	mld->low_latency.stopped = false;

	for (int mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		ll->window_start[mac] = 0;
		low_latency |= ll->result[mac];

		for (int q = 0; q < mld->trans->num_rx_queues; q++) {
			spin_lock_bh(&ll->pkts_counters[q].lock);
			ll->pkts_counters[q].vo_vi[mac] = 0;
			spin_unlock_bh(&ll->pkts_counters[q].lock);
		}
	}

	/* if low latency is active, force re-evaluation to cover the case of
	 * no traffic.
	 */
	if (low_latency)
		wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD);
}

View File

@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_low_latency_h__
#define __iwl_mld_low_latency_h__
/**
 * struct iwl_mld_low_latency_packets_counters - Packets counters
 * @lock: synchronize the counting in data path against the worker
 * @vo_vi: per-mac, counts the number of TX and RX voice and video packets
 */
struct iwl_mld_low_latency_packets_counters {
	spinlock_t lock;
	u32 vo_vi[NUM_MAC_INDEX_DRIVER];
} ____cacheline_aligned_in_smp; /* one instance per RX queue; aligned to
				 * avoid false sharing between queues */

/**
 * enum iwl_mld_low_latency_cause - low-latency set causes
 *
 * Causes are independent bits; low latency stays set on a vif as long as
 * at least one cause is present.
 *
 * @LOW_LATENCY_TRAFFIC: indicates low-latency traffic was detected
 * @LOW_LATENCY_DEBUGFS: low-latency mode set from debugfs
 * @LOW_LATENCY_VIF_TYPE: low-latency mode set because of vif type (AP)
 */
enum iwl_mld_low_latency_cause {
	LOW_LATENCY_TRAFFIC = BIT(0),
	LOW_LATENCY_DEBUGFS = BIT(1),
	LOW_LATENCY_VIF_TYPE = BIT(2),
};

/**
 * struct iwl_mld_low_latency - Manage low-latency detection and activation.
 * @work: this work is used to detect low-latency by monitoring the number of
 *	voice and video packets transmitted in a period of time. If the
 *	threshold is reached, low-latency is activated. When active,
 *	it is deactivated if the threshold is not reached within a
 *	10-second period.
 * @timestamp: timestamp of the last update.
 * @window_start: per-mac, timestamp of the start of the current window. when
 *	the window is over, the counters are reset.
 * @pkts_counters: per-queue array voice/video packet counters
 * @result: per-mac latest low-latency result
 * @stopped: if true, ignore the requests to update the counters
 */
struct iwl_mld_low_latency {
	struct wiphy_delayed_work work;
	unsigned long timestamp;
	unsigned long window_start[NUM_MAC_INDEX_DRIVER];
	struct iwl_mld_low_latency_packets_counters *pkts_counters;
	bool result[NUM_MAC_INDEX_DRIVER];
	bool stopped;
};
int iwl_mld_low_latency_init(struct iwl_mld *mld);
void iwl_mld_low_latency_free(struct iwl_mld *mld);
void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld);
void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
struct ieee80211_vif *vif,
bool low_latency,
enum iwl_mld_low_latency_cause cause);
void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
struct ieee80211_hdr *hdr,
struct ieee80211_sta *sta,
u8 queue);
void iwl_mld_low_latency_stop(struct iwl_mld *mld);
void iwl_mld_low_latency_restart(struct iwl_mld *mld);
#endif /* __iwl_mld_low_latency_h__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_mac80211_h__
#define __iwl_mld_mac80211_h__
#include "mld.h"
int iwl_mld_register_hw(struct iwl_mld *mld);
void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld);
#endif /* __iwl_mld_mac80211_h__ */

View File

@ -0,0 +1,329 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024 Intel Corporation
*/
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <fw/dbg.h>
#include <iwl-nvm-parse.h>
#include "mld.h"
#include "hcmd.h"
#include "mcc.h"
/* It is the caller's responsibility to free the pointer returned here */
/* Duplicate a v8 MCC_UPDATE response out of the RX packet.
 * It is the caller's responsibility to free the pointer returned here.
 */
static struct iwl_mcc_update_resp_v8 *
iwl_mld_parse_mcc_update_resp_v8(const struct iwl_rx_packet *pkt)
{
	const struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (const void *)pkt->data;
	int n_channels = __le32_to_cpu(mcc_resp_v8->n_channels);
	struct iwl_mcc_update_resp_v8 *resp_cp;
	int notif_len = struct_size(resp_cp, channels, n_channels);

	/* a bogus firmware n_channels cannot match the actual payload
	 * length, so this also rejects inconsistent sizes
	 */
	if (iwl_rx_packet_payload_len(pkt) != notif_len)
		return ERR_PTR(-EINVAL);

	resp_cp = kmemdup(mcc_resp_v8, notif_len, GFP_KERNEL);
	if (!resp_cp)
		return ERR_PTR(-ENOMEM);

	return resp_cp;
}
/* It is the caller's responsibility to free the pointer returned here */
static struct iwl_mcc_update_resp_v8 *
iwl_mld_parse_mcc_update_resp_v5_v6(const struct iwl_rx_packet *pkt)
{
const struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (const void *)pkt->data;
struct iwl_mcc_update_resp_v8 *resp_cp;
int n_channels = __le32_to_cpu(mcc_resp_v4->n_channels);
int resp_len;
if (iwl_rx_packet_payload_len(pkt) !=
struct_size(mcc_resp_v4, channels, n_channels))
return ERR_PTR(-EINVAL);
resp_len = struct_size(resp_cp, channels, n_channels);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
if (!resp_cp)
return ERR_PTR(-ENOMEM);
resp_cp->status = mcc_resp_v4->status;
resp_cp->mcc = mcc_resp_v4->mcc;
resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap));
resp_cp->source_id = mcc_resp_v4->source_id;
resp_cp->geo_info = mcc_resp_v4->geo_info;
resp_cp->n_channels = mcc_resp_v4->n_channels;
memcpy(resp_cp->channels, mcc_resp_v4->channels,
n_channels * sizeof(__le32));
return resp_cp;
}
/* It is the caller's responsibility to free the pointer returned here */
static struct iwl_mcc_update_resp_v8 *
iwl_mld_update_mcc(struct iwl_mld *mld, const char *alpha2,
enum iwl_mcc_source src_id)
{
int resp_ver = iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP,
MCC_UPDATE_CMD, 0);
struct iwl_mcc_update_cmd mcc_update_cmd = {
.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
.source_id = (u8)src_id,
};
struct iwl_mcc_update_resp_v8 *resp_cp;
struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = MCC_UPDATE_CMD,
.flags = CMD_WANT_SKB,
.data = { &mcc_update_cmd },
.len[0] = sizeof(mcc_update_cmd),
};
int ret;
u16 mcc;
IWL_DEBUG_LAR(mld, "send MCC update to FW with '%c%c' src = %d\n",
alpha2[0], alpha2[1], src_id);
ret = iwl_mld_send_cmd(mld, &cmd);
if (ret)
return ERR_PTR(ret);
pkt = cmd.resp_pkt;
/* For Wifi-7 radios, we get version 8
* For Wifi-6E radios, we get version 6
* For Wifi-6 radios, we get version 5, but 5, 6, and 4 are compatible.
*/
switch (resp_ver) {
case 5:
case 6:
resp_cp = iwl_mld_parse_mcc_update_resp_v5_v6(pkt);
break;
case 8:
resp_cp = iwl_mld_parse_mcc_update_resp_v8(pkt);
break;
default:
IWL_FW_CHECK_FAILED(mld, "Unknown MCC_UPDATE_CMD version %d\n", resp_ver);
resp_cp = ERR_PTR(-EINVAL);
}
if (IS_ERR(resp_cp))
goto exit;
mcc = le16_to_cpu(resp_cp->mcc);
IWL_FW_CHECK(mld, !mcc, "mcc can't be 0: %d\n", mcc);
IWL_DEBUG_LAR(mld,
"MCC response status: 0x%x. new MCC: 0x%x ('%c%c')\n",
le32_to_cpu(resp_cp->status), mcc, mcc >> 8, mcc & 0xff);
exit:
iwl_free_resp(&cmd);
return resp_cp;
}
/* It is the caller's responsibility to free the pointer returned here */
struct ieee80211_regdomain *
iwl_mld_get_regdomain(struct iwl_mld *mld,
const char *alpha2,
enum iwl_mcc_source src_id,
bool *changed)
{
struct ieee80211_regdomain *regd = NULL;
struct iwl_mcc_update_resp_v8 *resp;
u8 resp_ver = iwl_fw_lookup_notif_ver(mld->fw, IWL_ALWAYS_LONG_GROUP,
MCC_UPDATE_CMD, 0);
IWL_DEBUG_LAR(mld, "Getting regdomain data for %s from FW\n", alpha2);
lockdep_assert_wiphy(mld->wiphy);
resp = iwl_mld_update_mcc(mld, alpha2, src_id);
if (IS_ERR(resp)) {
IWL_DEBUG_LAR(mld, "Could not get update from FW %ld\n",
PTR_ERR(resp));
resp = NULL;
goto out;
}
if (changed) {
u32 status = le32_to_cpu(resp->status);
*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
status == MCC_RESP_ILLEGAL);
}
IWL_DEBUG_LAR(mld, "MCC update response version: %d\n", resp_ver);
regd = iwl_parse_nvm_mcc_info(mld->trans->dev, mld->cfg,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
le32_to_cpu(resp->cap), resp_ver);
if (IS_ERR(regd)) {
IWL_DEBUG_LAR(mld, "Could not get parse update from FW %ld\n",
PTR_ERR(regd));
goto out;
}
IWL_DEBUG_LAR(mld, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
regd->alpha2, regd->alpha2[0],
regd->alpha2[1], resp->source_id);
mld->mcc_src = resp->source_id;
if (!iwl_puncturing_is_allowed_in_bios(mld->bios_enable_puncturing,
le16_to_cpu(resp->mcc)))
ieee80211_hw_set(mld->hw, DISALLOW_PUNCTURING);
else
__clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mld->hw->flags);
out:
kfree(resp);
return regd;
}
/* It is the caller's responsibility to free the pointer returned here */
static struct ieee80211_regdomain *
iwl_mld_get_current_regdomain(struct iwl_mld *mld,
bool *changed)
{
return iwl_mld_get_regdomain(mld, "ZZ",
MCC_SOURCE_GET_CURRENT, changed);
}
void iwl_mld_update_changed_regdomain(struct iwl_mld *mld)
{
struct ieee80211_regdomain *regd;
bool changed;
regd = iwl_mld_get_current_regdomain(mld, &changed);
if (IS_ERR_OR_NULL(regd))
return;
if (changed)
regulatory_set_wiphy_regd(mld->wiphy, regd);
kfree(regd);
}
/* Replay the last known MCC (and its source) to a freshly started firmware.
 * Returns 0 on success or a negative errno.
 */
static int iwl_mld_apply_last_mcc(struct iwl_mld *mld,
				  const char *alpha2)
{
	/* keep a copy: the query below overwrites mld->mcc_src */
	u32 saved_src = mld->mcc_src;
	struct ieee80211_regdomain *regd;
	bool changed;
	int ret = 0;

	/* Notify the firmware we support wifi location updates */
	regd = iwl_mld_get_current_regdomain(mld, NULL);
	if (!IS_ERR_OR_NULL(regd))
		kfree(regd);

	/* Now set our last stored MCC and source */
	regd = iwl_mld_get_regdomain(mld, alpha2, saved_src, &changed);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	/* update cfg80211 if the regdomain was changed */
	if (changed)
		ret = regulatory_set_wiphy_regd_sync(mld->wiphy, regd);

	kfree(regd);
	return ret;
}
/* Initialize the regulatory domain at firmware start: replay the previously
 * set MCC if there is one, otherwise use the firmware's current MCC, possibly
 * overridden by a BIOS-provided MCC. Returns 0 or a negative errno.
 */
int iwl_mld_init_mcc(struct iwl_mld *mld)
{
	const struct ieee80211_regdomain *prev;
	struct ieee80211_regdomain *regd;
	char bios_mcc[3];
	int ret;

	/* try to replay the last set MCC to FW */
	prev = wiphy_dereference(mld->wiphy, mld->wiphy->regd);
	if (prev)
		return iwl_mld_apply_last_mcc(mld, prev->alpha2);

	regd = iwl_mld_get_current_regdomain(mld, NULL);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	/* a BIOS MCC, when present, takes precedence over the FW default */
	if (!iwl_bios_get_mcc(&mld->fwrt, bios_mcc)) {
		kfree(regd);
		regd = iwl_mld_get_regdomain(mld, bios_mcc, MCC_SOURCE_BIOS,
					     NULL);
		if (IS_ERR_OR_NULL(regd))
			return -EIO;
	}

	ret = regulatory_set_wiphy_regd_sync(mld->wiphy, regd);
	kfree(regd);
	return ret;
}
/* Interface iterator: set *data (bool) if @vif is an associated station */
static void iwl_mld_find_assoc_vif_iterator(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	bool *any_assoc = data;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (vif->cfg.assoc)
		*any_assoc = true;
}
/* Return true if any active station interface is currently associated */
static bool iwl_mld_is_a_vif_assoc(struct iwl_mld *mld)
{
	bool found = false;

	ieee80211_iterate_active_interfaces_atomic(mld->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mld_find_assoc_vif_iterator,
						   &found);
	return found;
}
void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
{
struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
enum iwl_mcc_source src;
char mcc[3];
struct ieee80211_regdomain *regd;
bool changed;
lockdep_assert_wiphy(mld->wiphy);
if (iwl_mld_is_a_vif_assoc(mld) &&
notif->source_id == MCC_SOURCE_WIFI) {
IWL_DEBUG_LAR(mld, "Ignore mcc update while associated\n");
return;
}
mcc[0] = le16_to_cpu(notif->mcc) >> 8;
mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
mcc[2] = '\0';
src = notif->source_id;
IWL_DEBUG_LAR(mld,
"RX: received chub update mcc cmd (mcc '%s' src %d)\n",
mcc, src);
regd = iwl_mld_get_regdomain(mld, mcc, src, &changed);
if (IS_ERR_OR_NULL(regd))
return;
if (changed)
regulatory_set_wiphy_regd(mld->hw->wiphy, regd);
kfree(regd);
}

View File

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2024 Intel Corporation
 */
#ifndef __iwl_mld_mcc_h__
#define __iwl_mld_mcc_h__

/* Set up the regulatory domain (MCC) when the firmware starts */
int iwl_mld_init_mcc(struct iwl_mld *mld);

/* Handle an MCC update notification from the firmware */
void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt);

/* Re-query the FW regdomain and forward it to cfg80211 if it changed */
void iwl_mld_update_changed_regdomain(struct iwl_mld *mld);

/* Query the FW regdomain for @alpha2; caller must free the returned pointer */
struct ieee80211_regdomain *
iwl_mld_get_regdomain(struct iwl_mld *mld,
		      const char *alpha2,
		      enum iwl_mcc_source src_id,
		      bool *changed);

#endif /* __iwl_mld_mcc_h__ */

View File

@ -0,0 +1,707 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include <net/mac80211.h>
#include "fw/api/rx.h"
#include "fw/api/datapath.h"
#include "fw/api/commands.h"
#include "fw/api/offload.h"
#include "fw/api/coex.h"
#include "fw/dbg.h"
#include "fw/uefi.h"
#include "mld.h"
#include "mlo.h"
#include "mac80211.h"
#include "led.h"
#include "scan.h"
#include "tx.h"
#include "sta.h"
#include "regulatory.h"
#include "thermal.h"
#include "low_latency.h"
#include "hcmd.h"
#include "fw/api/location.h"
#define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IWLWIFI");
static const struct iwl_op_mode_ops iwl_mld_ops;
/* Module load: register this op_mode with the iwlwifi core */
static int __init iwl_mld_init(void)
{
	int ret;

	ret = iwl_opmode_register("iwlmld", &iwl_mld_ops);
	if (ret)
		pr_err("Unable to register MLD op_mode: %d\n", ret);

	return ret;
}
module_init(iwl_mld_init);
/* Module unload: remove this op_mode from the iwlwifi core registry */
static void __exit iwl_mld_exit(void)
{
	iwl_opmode_deregister("iwlmld");
}
module_exit(iwl_mld_exit);
/* Initialize the iwl_mld structure itself: plain pointer wiring plus the
 * locks, lists and works used by the async-notification and TX-queue paths.
 * No firmware interaction happens here. Exported for KUnit tests.
 */
VISIBLE_IF_IWLWIFI_KUNIT
void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
		       const struct iwl_cfg *cfg, const struct iwl_fw *fw,
		       struct ieee80211_hw *hw, struct dentry *dbgfs_dir)
{
	mld->dev = trans->dev;
	mld->trans = trans;
	mld->cfg = cfg;
	mld->fw = fw;
	mld->hw = hw;
	/* alias for hw->wiphy - used all over the driver */
	mld->wiphy = hw->wiphy;
	mld->debugfs_dir = dbgfs_dir;

	iwl_notification_wait_init(&mld->notif_wait);

	/* Setup async RX handling */
	spin_lock_init(&mld->async_handlers_lock);
	INIT_LIST_HEAD(&mld->async_handlers_list);
	wiphy_work_init(&mld->async_handlers_wk,
			iwl_mld_async_handlers_wk);

	/* Dynamic Queue Allocation */
	spin_lock_init(&mld->add_txqs_lock);
	INIT_LIST_HEAD(&mld->txqs_to_add);
	wiphy_work_init(&mld->add_txqs_wk, iwl_mld_add_txqs_wk);

	/* Setup RX queues sync wait queue */
	init_waitqueue_head(&mld->rxq_sync.waitq);
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_construct_mld);
/* fw-runtime callback: take the wiphy lock around a debug dump.
 * The __acquires() annotation keeps sparse's lock balance checking happy.
 */
static void __acquires(&mld->wiphy->mtx)
iwl_mld_fwrt_dump_start(void *ctx)
{
	struct iwl_mld *mld = ctx;

	wiphy_lock(mld->wiphy);
}
/* fw-runtime callback: release the wiphy lock taken in dump_start() */
static void __releases(&mld->wiphy->mtx)
iwl_mld_fwrt_dump_end(void *ctx)
{
	struct iwl_mld *mld = ctx;

	wiphy_unlock(mld->wiphy);
}
/* fw-runtime callback: whether D3 (suspend) debug collection is enabled.
 * Compile-time constant for this op_mode (see IWL_MLD_D3_DEBUG).
 */
static bool iwl_mld_d3_debug_enable(void *ctx)
{
	return IWL_MLD_D3_DEBUG;
}
/* fw-runtime callback: send a host command on behalf of the common fw
 * runtime code. Takes the wiphy lock, as iwl_mld_send_cmd() requires it.
 * (Dropped the redundant cast from void * — plain assignment is the C idiom
 * and matches the sibling dump_start/dump_end callbacks.)
 */
static int iwl_mld_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
	struct iwl_mld *mld = ctx;
	int ret;

	wiphy_lock(mld->wiphy);
	ret = iwl_mld_send_cmd(mld, host_cmd);
	wiphy_unlock(mld->wiphy);

	return ret;
}
/* Callbacks through which the common fw-runtime code calls back into us */
static const struct iwl_fw_runtime_ops iwl_mld_fwrt_ops = {
	.dump_start = iwl_mld_fwrt_dump_start,
	.dump_end = iwl_mld_fwrt_dump_end,
	.send_hcmd = iwl_mld_fwrt_send_hcmd,
	.d3_debug_enable = iwl_mld_d3_debug_enable,
};
/* Initialize the shared fw-runtime layer with our ops and select the
 * regular (operational) firmware image.
 */
static void
iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans,
			     const struct iwl_fw *fw,
			     struct dentry *debugfs_dir)
{
	iwl_fw_runtime_init(&mld->fwrt, trans, fw, &iwl_mld_fwrt_ops, mld,
			    NULL, NULL, debugfs_dir);

	iwl_fw_set_current_image(&mld->fwrt, IWL_UCODE_REGULAR);
}
/* Command/notification name tables, used by the transport for debug prints.
 * One table per command group; iwl_mld_groups below indexes them by group id.
 */

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
	HCMD_NAME(UCODE_ALIVE_NTFY),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(LEDS_CMD),
	HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(RSS_CONFIG_CMD),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(DEBUG_HOST_COMMAND),
	HCMD_NAME(LDBG_CONFIG_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_system_names[] = {
	HCMD_NAME(SHARED_MEM_CFG_CMD),
	HCMD_NAME(SOC_CONFIGURATION_CMD),
	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
	HCMD_NAME(FW_ERROR_RECOVERY_CMD),
	HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
	HCMD_NAME(SYSTEM_STATISTICS_CMD),
	HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_reg_and_nvm_names[] = {
	HCMD_NAME(LARI_CONFIG_CHANGE),
	HCMD_NAME(NVM_GET_INFO),
	HCMD_NAME(TAS_CONFIG),
	HCMD_NAME(SAR_OFFSET_MAPPING_TABLE_CMD),
	HCMD_NAME(MCC_ALLOWED_AP_TYPE_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_debug_names[] = {
	HCMD_NAME(HOST_EVENT_CFG),
	HCMD_NAME(DBGC_SUSPEND_RESUME),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_mac_conf_names[] = {
	HCMD_NAME(LOW_LATENCY_CMD),
	HCMD_NAME(SESSION_PROTECTION_CMD),
	HCMD_NAME(MAC_CONFIG_CMD),
	HCMD_NAME(LINK_CONFIG_CMD),
	HCMD_NAME(STA_CONFIG_CMD),
	HCMD_NAME(AUX_STA_CMD),
	HCMD_NAME(STA_REMOVE_CMD),
	HCMD_NAME(ROC_CMD),
	HCMD_NAME(MISSED_BEACONS_NOTIF),
	HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF),
	HCMD_NAME(ROC_NOTIF),
	HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
	HCMD_NAME(SESSION_PROTECTION_NOTIF),
	HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
	HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
	HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
	HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
	HCMD_NAME(TLC_MNG_CONFIG_CMD),
	HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
	HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
	HCMD_NAME(OMI_SEND_STATUS_NOTIF),
	HCMD_NAME(ESR_MODE_NOTIF),
	HCMD_NAME(MONITOR_NOTIF),
	HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_location_names[] = {
	HCMD_NAME(TOF_RANGE_REQ_CMD),
	HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(CTDP_CONFIG_CMD),
	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
	HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
	HCMD_NAME(CT_KILL_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_statistics_names[] = {
	HCMD_NAME(STATISTICS_OPER_NOTIF),
	HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = {
	HCMD_NAME(STORED_BEACON_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_coex_names[] = {
	HCMD_NAME(PROFILE_NOTIF),
};

/* Map each command group id to its name table. Exported for KUnit tests. */
VISIBLE_IF_IWLWIFI_KUNIT
const struct iwl_hcmd_arr iwl_mld_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
	[SYSTEM_GROUP] = HCMD_ARR(iwl_mld_system_names),
	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mld_mac_conf_names),
	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mld_data_path_names),
	[LOCATION_GROUP] = HCMD_ARR(iwl_mld_location_names),
	[REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mld_reg_and_nvm_names),
	[DEBUG_GROUP] = HCMD_ARR(iwl_mld_debug_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mld_phy_names),
	[STATISTICS_GROUP] = HCMD_ARR(iwl_mld_statistics_names),
	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mld_prot_offload_names),
	[BT_COEX_GROUP] = HCMD_ARR(iwl_mld_coex_names),
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_groups);

#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
/* NOTE: the "goups" typo is part of the exported symbol name referenced by
 * the KUnit tests; renaming it would have to be done in both places.
 */
const unsigned int global_iwl_mld_goups_size = ARRAY_SIZE(iwl_mld_groups);
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size);
#endif
/* Configure the transport layer with this op_mode's parameters: command
 * group tables for debug, RX buffer size, reclaim rules and the offset in
 * the skb CB the transport may use for its own data.
 */
static void
iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
{
	const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
	/* TX_CMD responses are handled from the TX path, not reclaimed */
	static const u8 no_reclaim_cmds[] = {TX_CMD};
	struct iwl_trans_config trans_cfg = {
		.op_mode = op_mode,
		/* Rx is not supported yet, but add it to avoid warnings */
		.rx_buf_size = iwl_amsdu_size_to_rxb_size(),
		.command_groups = iwl_mld_groups,
		.command_groups_size = ARRAY_SIZE(iwl_mld_groups),
		.fw_reset_handshake = true,
		.queue_alloc_cmd_ver =
			iwl_fw_lookup_cmd_ver(mld->fw,
					      WIDE_ID(DATA_PATH_GROUP,
						      SCD_QUEUE_CONFIG_CMD),
					      0),
		.no_reclaim_cmds = no_reclaim_cmds,
		.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
		.cb_data_offs = offsetof(struct ieee80211_tx_info,
					 driver_data[2]),
	};
	struct iwl_trans *trans = mld->trans;

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	/* image loader (IML) image, loaded by the transport at start */
	trans->iml = mld->fw->iml;
	trans->iml_len = mld->fw->iml_len;
	trans->wide_cmd_header = true;

	iwl_trans_configure(trans, &trans_cfg);
}
/*
 *****************************************************
 * op mode ops functions
 *****************************************************
 */

#define NUM_FW_LOAD_RETRIES 3

/* op_mode entry point: allocate the hardware/op_mode/mld objects, read BIOS
 * tables, configure the transport, bring the firmware up once (to read NVM
 * and capabilities), then register with mac80211.
 * Returns the new op_mode or an ERR_PTR.
 */
static struct iwl_op_mode *
iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mld *mld;
	u32 eckv_value;
	int ret;

	/* Allocate and initialize a new hardware device */
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mld),
				&iwl_mld_hw_ops);
	if (!hw)
		return ERR_PTR(-ENOMEM);

	/* the iwl_mld object lives in the op_mode's private area */
	op_mode = hw->priv;
	op_mode->ops = &iwl_mld_ops;

	mld = IWL_OP_MODE_GET_MLD(op_mode);

	iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir);

	iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir);

	/* pull platform (BIOS/UEFI) configuration tables */
	iwl_mld_get_bios_tables(mld);
	iwl_uefi_get_sgom_table(trans, &mld->fwrt);
	iwl_uefi_get_step_table(trans);
	if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
		IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
	else
		trans->ext_32khz_clock_valid = !!eckv_value;
	iwl_bios_setup_step(trans, &mld->fwrt);
	mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt);

	/* Configure transport layer with the opmode specific params */
	iwl_mld_configure_trans(op_mode);

	/* Needed for sending commands */
	wiphy_lock(mld->wiphy);

	/* ret holds the result of the last attempt once the loop ends */
	for (int i = 0; i < NUM_FW_LOAD_RETRIES; i++) {
		ret = iwl_mld_load_fw(mld);
		if (!ret)
			break;
	}

	if (ret) {
		wiphy_unlock(mld->wiphy);
		iwl_fw_flush_dumps(&mld->fwrt);
		goto free_hw;
	}

	/* the fw was only started to read NVM/capabilities; stop it until
	 * mac80211 actually starts the device
	 */
	iwl_mld_stop_fw(mld);

	wiphy_unlock(mld->wiphy);

	ret = iwl_mld_leds_init(mld);
	if (ret)
		goto free_nvm;

	ret = iwl_mld_alloc_scan_cmd(mld);
	if (ret)
		goto leds_exit;

	ret = iwl_mld_low_latency_init(mld);
	if (ret)
		goto free_scan_cmd;

	ret = iwl_mld_register_hw(mld);
	if (ret)
		goto low_latency_free;

	iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);

	iwl_mld_add_debugfs_files(mld, dbgfs_dir);
	iwl_mld_thermal_initialize(mld);

	iwl_mld_ptp_init(mld);

	return op_mode;

/* error unwind: each label undoes the step right above its goto */
low_latency_free:
	iwl_mld_low_latency_free(mld);
free_scan_cmd:
	kfree(mld->scan.cmd);
leds_exit:
	iwl_mld_leds_exit(mld);
free_nvm:
	kfree(mld->nvm_data);
free_hw:
	ieee80211_free_hw(mld->hw);
	return ERR_PTR(ret);
}
/* op_mode teardown: mirror of iwl_op_mode_mld_start(). Order matters:
 * sub-modules are stopped under the wiphy lock, mac80211 is unregistered
 * before the transport is left, and all owned buffers are freed last.
 */
static void
iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

	iwl_mld_ptp_remove(mld);
	iwl_mld_leds_exit(mld);

	wiphy_lock(mld->wiphy);
	iwl_mld_thermal_exit(mld);
	iwl_mld_low_latency_stop(mld);
	iwl_mld_deinit_time_sync(mld);
	wiphy_unlock(mld->wiphy);

	ieee80211_unregister_hw(mld->hw);

	iwl_fw_runtime_free(&mld->fwrt);
	iwl_mld_low_latency_free(mld);

	iwl_trans_op_mode_leave(mld->trans);

	kfree(mld->nvm_data);
	kfree(mld->scan.cmd);
	kfree(mld->error_recovery_buf);
	kfree(mld->mcast_filter_cmd);

	ieee80211_free_hw(mld->hw);
}
/* Transport reports a HW queue becoming full / non-full. Map the fw queue
 * id to its mac80211 txq under RCU and update its state; when it drains,
 * push pending frames. fw queues without a mac80211 txq are internal ones.
 */
static void iwl_mld_queue_state_change(struct iwl_op_mode *op_mode,
				       int hw_queue, bool queue_full)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
	struct ieee80211_txq *txq;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_txq *mld_txq;

	rcu_read_lock();

	txq = rcu_dereference(mld->fw_id_to_txq[hw_queue]);
	if (!txq) {
		rcu_read_unlock();

		if (queue_full) {
			/* An internal queue is not expected to become full */
			IWL_WARN(mld,
				 "Internal hw_queue %d is full! stopping all queues\n",
				 hw_queue);
			/* Stop all queues, as an internal queue is not
			 * mapped to a mac80211 one
			 */
			ieee80211_stop_queues(mld->hw);
		} else {
			ieee80211_wake_queues(mld->hw);
		}

		return;
	}

	mld_txq = iwl_mld_txq_from_mac80211(txq);
	mld_sta = txq->sta ? iwl_mld_sta_from_mac80211(txq->sta) : NULL;

	mld_txq->status.stop_full = queue_full;

	/* queue drained: resume TX for the owning station, if it still
	 * exists (softirq context for the TX path)
	 */
	if (!queue_full && mld_sta &&
	    mld_sta->sta_state != IEEE80211_STA_NOTEXIST) {
		local_bh_disable();
		iwl_mld_tx_from_txq(mld, txq);
		local_bh_enable();
	}

	rcu_read_unlock();
}
/* op_mode callback: a HW queue just became full */
static void
iwl_mld_queue_full(struct iwl_op_mode *op_mode, int hw_queue)
{
	iwl_mld_queue_state_change(op_mode, hw_queue, /* queue_full */ true);
}
/* op_mode callback: a HW queue has room again */
static void
iwl_mld_queue_not_full(struct iwl_op_mode *op_mode, int hw_queue)
{
	iwl_mld_queue_state_change(op_mode, hw_queue, /* queue_full */ false);
}
/* op_mode callback: the transport reports a HW RF-kill switch change.
 * Propagate it to cfg80211 via iwl_mld_set_hwkill().
 * NOTE(review): always returns false here — confirm the expected return
 * semantics against the iwl_op_mode_ops definition.
 */
static bool
iwl_mld_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

	iwl_mld_set_hwkill(mld, state);

	return false;
}
/* op_mode callback: free an skb the transport could not transmit, along
 * with the TX command the TX path stored in the skb CB (driver_data[1]).
 */
static void
iwl_mld_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
	ieee80211_free_txskb(mld->hw, skb);
}
/* Snapshot the firmware's error recovery blob from device memory into
 * mld->error_recovery_buf; it is sent back to the firmware during the
 * reconfiguration flow after a restart.
 */
static void iwl_mld_read_error_recovery_buffer(struct iwl_mld *mld)
{
	u32 src_size = mld->fw->ucode_capa.error_log_size;
	u32 src_addr = mld->fw->ucode_capa.error_log_addr;
	u8 *buf;
	int ret;

	/* no recovery buffer size defined in a TLV */
	if (!src_size)
		return;

	buf = kzalloc(src_size, GFP_ATOMIC);
	if (!buf)
		return;

	ret = iwl_trans_read_mem_bytes(mld->trans, src_addr, buf, src_size);
	if (ret) {
		IWL_ERR(mld, "Failed to read error recovery buffer (%d)\n",
			ret);
		kfree(buf);
		return;
	}

	mld->error_recovery_buf = buf;
}
/* Kick off a full restart: capture the recovery buffer first (the device
 * is about to be reset), then ask mac80211 to restart the hardware.
 */
static void iwl_mld_restart_nic(struct iwl_mld *mld)
{
	iwl_mld_read_error_recovery_buffer(mld);

	/* consumed: the restart we are initiating satisfies it */
	mld->fwrt.trans->dbg.restart_required = false;

	ieee80211_restart_hw(mld->hw);
}
/* op_mode callback: a NIC/firmware error occurred. Dump error logs (unless
 * suppressed or the transport is already dead), abort any ongoing scan and
 * mark that we are heading into the restart flow.
 */
static void
iwl_mld_nic_error(struct iwl_op_mode *op_mode,
		  enum iwl_fw_error_type type)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
	bool trans_dead = test_bit(STATUS_TRANS_DEAD, &mld->trans->status);

	if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL)
		IWL_ERR(mld, "Command queue full!\n");
	else if (!trans_dead && !mld->fw_status.do_not_dump_once)
		iwl_fwrt_dump_error_logs(&mld->fwrt);

	/* one-shot flag: re-arm dumping for the next error */
	mld->fw_status.do_not_dump_once = false;

	/* It is necessary to abort any os scan here because mac80211 requires
	 * having the scan cleared before restarting.
	 * We'll reset the scan_status to NONE in restart cleanup in
	 * the next drv_start() call from mac80211. If ieee80211_hw_restart
	 * isn't called scan status will stay busy.
	 */
	iwl_mld_report_scan_aborted(mld);

	/*
	 * This should be first thing before trying to collect any
	 * data to avoid endless loops if any HW error happens while
	 * collecting debug data.
	 * It might not actually be true that we'll restart, but the
	 * setting doesn't matter if we're going to be unbound either.
	 */
	if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
		mld->fw_status.in_hw_restart = true;
}
/* op_mode callback: collect a firmware error dump. The wiphy lock may or
 * may not already be held depending on who triggered the dump.
 */
static void iwl_mld_dump_error(struct iwl_op_mode *op_mode,
			       struct iwl_fw_error_dump_mode *mode)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

	/* if we come in from opmode we have the mutex held */
	if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
		lockdep_assert_wiphy(mld->wiphy);
		iwl_fw_error_collect(&mld->fwrt);
	} else {
		wiphy_lock(mld->wiphy);
		/* in ABORT context the collection itself is skipped */
		if (mode->context != IWL_ERR_CONTEXT_ABORT)
			iwl_fw_error_collect(&mld->fwrt);
		wiphy_unlock(mld->wiphy);
	}
}
/* op_mode callback: decide whether to restart after an error.
 * Returns true when a restart was initiated, false otherwise.
 */
static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode,
			     enum iwl_fw_error_type type)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

	/* Do a restart only if the following conditions are met:
	 * - we consider the FW as running
	 * - The trigger that brought us here is defined as one that requires
	 *   a restart (in the debug TLVs)
	 */
	if (!mld->fw_status.running || !mld->fwrt.trans->dbg.restart_required)
		return false;

	iwl_mld_restart_nic(mld);
	return true;
}
/* op_mode callback: forward a debug time-point event to the debug TLV
 * machinery, which may trigger data collection configured by TLVs.
 */
static void
iwl_mld_time_point(struct iwl_op_mode *op_mode,
		   enum iwl_fw_ini_time_point tp_id,
		   union iwl_dbg_tlv_tp_data *tp_data)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

	iwl_dbg_tlv_time_point(&mld->fwrt, tp_id, tp_data);
}
#ifdef CONFIG_PM_SLEEP
/* op_mode callback: the platform cut power to the device while suspended.
 * Disable platform PM handling and stop the (now stale) firmware; clear
 * the in_d3 flag since the normal resume path will not run.
 */
static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
{
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

	wiphy_lock(mld->wiphy);
	mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
	iwl_mld_stop_fw(mld);
	mld->fw_status.in_d3 = false;
	wiphy_unlock(mld->wiphy);
}
#else
/* stub: without CONFIG_PM_SLEEP there is nothing to do */
static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
{}
#endif
/* The op_mode vtable handed to the iwlwifi core at registration */
static const struct iwl_op_mode_ops iwl_mld_ops = {
	.start = iwl_op_mode_mld_start,
	.stop = iwl_op_mode_mld_stop,
	.rx = iwl_mld_rx,
	.rx_rss = iwl_mld_rx_rss,
	.queue_full = iwl_mld_queue_full,
	.queue_not_full = iwl_mld_queue_not_full,
	.hw_rf_kill = iwl_mld_set_hw_rfkill_state,
	.free_skb = iwl_mld_free_skb,
	.nic_error = iwl_mld_nic_error,
	.dump_error = iwl_mld_dump_error,
	.sw_reset = iwl_mld_sw_reset,
	.time_point = iwl_mld_time_point,
	.device_powered_off = pm_sleep_ptr(iwl_mld_device_powered_off),
};
/* Module parameters; balanced power save (BPS) is the default scheme */
struct iwl_mld_mod_params iwlmld_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
};

module_param_named(power_scheme, iwlmld_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, default: 2");

View File

@ -0,0 +1,584 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_h__
#define __iwl_mld_h__
#include <linux/leds.h>
#include <net/mac80211.h>
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/runtime.h"
#include "fw/notif-wait.h"
#include "fw/api/commands.h"
#include "fw/api/scan.h"
#include "fw/api/mac-cfg.h"
#include "fw/api/mac.h"
#include "fw/api/phy-ctxt.h"
#include "fw/api/datapath.h"
#include "fw/api/rx.h"
#include "fw/api/rs.h"
#include "fw/api/context.h"
#include "fw/api/coex.h"
#include "fw/api/location.h"
#include "fw/dbg.h"
#include "notif.h"
#include "scan.h"
#include "rx.h"
#include "thermal.h"
#include "low_latency.h"
#include "constants.h"
#include "ptp.h"
#include "time_sync.h"
/**
* DOC: Introduction
*
* iwlmld is an operation mode (a.k.a. op_mode) for Intel wireless devices.
* It is used for devices that ship after 2024 which typically support
* the WiFi-7 features. MLD stands for multi-link device. Note that there are
* devices that do not support WiFi-7 or even WiFi 6E and yet use iwlmld, but
* the firmware APIs used in this driver are WiFi-7 compatible.
*
* In the architecture of iwlwifi, an op_mode is a layer that translates
* mac80211's APIs into commands for the firmware and, of course, notifications
* from the firmware to mac80211's APIs. An op_mode must implement the
* interface defined in iwl-op-mode.h to interact with the transport layer
* which allows to send and receive data to the device, start the hardware,
* etc...
*/
/**
* DOC: Locking policy
*
* iwlmld has a very simple locking policy: it doesn't have any mutexes. It
* relies on cfg80211's wiphy->mtx and takes the lock when needed. All the
* control flows originating from mac80211 already acquired the lock, so that
* part is trivial, but also notifications that are received from the firmware
* and handled asynchronously are handled only after having taken the lock.
* This is described in notif.c.
* There are spin_locks needed to synchronize with the data path, around the
* allocation of the queues, for example.
*/
/**
* DOC: Debugfs
*
* iwlmld adds its share of debugfs hooks and its handlers are synchronized
* with the wiphy_lock using wiphy_locked_debugfs. This avoids races against
* resources deletion while the debugfs hook is being used.
*/
/**
* DOC: Main resources
*
* iwlmld is designed with the life cycle of the resource in mind. The
* resources are:
*
* - struct iwl_mld (matches mac80211's struct ieee80211_hw)
*
* - struct iwl_mld_vif (matches mac80211's struct ieee80211_vif)
* iwl_mld_vif contains an array of pointers to struct iwl_mld_link
* which describe the links for this vif.
*
* - struct iwl_mld_sta (matches mac80211's struct ieee80211_sta)
* iwl_mld_sta contains an array of pointers to struct iwl_mld_link_sta
* which describes the link stations for this station
*
* Each object has properties that can survive a firmware reset or not.
* Asynchronous firmware notifications can declare themselves as dependent on a
* certain instance of those resources and that means that the notifications
* will be cancelled once the instance is destroyed.
*/
#define IWL_MLD_MAX_ADDRESSES 5
/**
* struct iwl_mld - MLD op mode
*
* @fw_id_to_bss_conf: maps a fw id of a link to the corresponding
* ieee80211_bss_conf.
* @fw_id_to_vif: maps a fw id of a MAC context to the corresponding
* ieee80211_vif. Mapping is valid only when the MAC exists in the fw.
* @fw_id_to_txq: maps a fw id of a txq to the corresponding
* ieee80211_txq.
* @used_phy_ids: a bitmap of the phy IDs used. If a bit is set, it means
* that the index of this bit is already used as a PHY id.
* @num_igtks: the number of iGTKs that were sent to the FW.
* @monitor: monitor related data
* @monitor.on: does a monitor vif exist (singleton hence bool)
* @monitor.ampdu_ref: the id of the A-MPDU for sniffer
* @monitor.ampdu_toggle: the state of the previous packet to track A-MPDU
* @monitor.cur_aid: current association id tracked by the sniffer
* @monitor.cur_bssid: current bssid tracked by the sniffer
* @monitor.p80: primary channel position relative to the whole bandwidth, in
* steps of 80 MHz
* @fw_id_to_link_sta: maps a fw id of a sta to the corresponding
* ieee80211_link_sta. This is not cleaned up on restart since we want to
* preserve the fw sta ids during a restart (for SN/PN restoring).
* FW ids of internal stations will be mapped to ERR_PTR, and will be
* re-allocated during a restart, so make sure to free it in restart
* cleanup using iwl_mld_free_internal_sta
* @netdetect: indicates the FW is in suspend mode with netdetect configured
* @p2p_device_vif: points to the p2p device vif if exists
* @bt_is_active: indicates that BT is active
* @dev: pointer to device struct. For printing purposes
* @trans: pointer to the transport layer
* @cfg: pointer to the device configuration
* @fw: a pointer to the fw object
* @hw: pointer to the hw object.
* @wiphy: a pointer to the wiphy struct, for easier access to it.
* @nvm_data: pointer to the nvm_data that includes all our capabilities
* @fwrt: fw runtime data
* @debugfs_dir: debugfs directory
* @notif_wait: notification wait related data.
* @async_handlers_list: a list of all async RX handlers. When a notification
* with an async handler is received, it is added to this list.
* When &async_handlers_wk runs - it runs these handlers one by one.
* @async_handlers_lock: a lock for &async_handlers_list. Sync
* &async_handlers_wk and RX notification path.
* @async_handlers_wk: A work to run all async RX handlers from
* &async_handlers_list.
* @ct_kill_exit_wk: worker to exit thermal kill
* @fw_status: bitmap of fw status bits
* @running: true if the firmware is running
* @do_not_dump_once: true if firmware dump must be prevented once
* @in_d3: indicates FW is in suspend mode and should be resumed
* @in_hw_restart: indicates that we are currently in the restart flow;
* should be unset when the restart is done.
* @radio_kill: bitmap of radio kill status
* @radio_kill.hw: radio is killed by hw switch
* @radio_kill.ct: radio is killed because the device is too hot
* @addresses: device MAC addresses.
* @scan: instance of the scan object
* @wowlan: WoWLAN support data.
* @led: the led device
* @mcc_src: the source id of the MCC, comes from the firmware
* @bios_enable_puncturing: is puncturing enabled by bios
* @fw_id_to_ba: maps a fw (BA) id to a corresponding Block Ack session data.
* @num_rx_ba_sessions: tracks the number of active Rx Block Ack (BA) sessions.
* the driver ensures that new BA sessions are blocked once the maximum
* supported by the firmware is reached, preventing firmware asserts.
* @rxq_sync: manages RX queue sync state
* @txqs_to_add: a list of &ieee80211_txq's to allocate in &add_txqs_wk
* @add_txqs_wk: a worker to allocate txqs.
* @add_txqs_lock: to lock the &txqs_to_add list.
* @error_recovery_buf: pointer to the recovery buffer that will be read
* from firmware upon fw/hw error and sent back to the firmware in
* reconfig flow (after NIC reset).
* @mcast_filter_cmd: pointer to the multicast filter command.
* @mgmt_tx_ant: stores the last TX antenna index; used for setting
* TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure).
* @low_latency: low-latency manager.
* @tzone: thermal zone device's data
* @cooling_dev: cooling device's related data
* @ibss_manager: in IBSS mode (only one vif can be active), indicates what
* firmware indicated about having transmitted the last beacon, i.e.
* being IBSS manager for that time and needing to respond to probe
* requests
* @ptp_data: data of the PTP clock
* @time_sync: time sync data.
* @ftm_initiator: FTM initiator data
*/
struct iwl_mld {
	/* Add here fields that need clean up on restart */
	struct_group(zeroed_on_hw_restart,
		struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINK_ID + 1];
		struct ieee80211_vif __rcu *fw_id_to_vif[NUM_MAC_INDEX_DRIVER];
		struct ieee80211_txq __rcu *fw_id_to_txq[IWL_MAX_TVQM_QUEUES];
		/* bitmap sized by the number of PHY contexts the fw supports */
		u8 used_phy_ids: NUM_PHY_CTX;
		u8 num_igtks;
		struct {
			bool on;
			u32 ampdu_ref;
			bool ampdu_toggle;
			u8 p80;
#ifdef CONFIG_IWLWIFI_DEBUGFS
			__le16 cur_aid;
			u8 cur_bssid[ETH_ALEN];
#endif
		} monitor;
#ifdef CONFIG_PM_SLEEP
		bool netdetect;
#endif /* CONFIG_PM_SLEEP */
		struct ieee80211_vif *p2p_device_vif;
		bool bt_is_active;
	);
	/* deliberately outside zeroed_on_hw_restart: fw sta ids must survive
	 * a restart for SN/PN restoring (see kernel-doc above)
	 */
	struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
	/* And here fields that survive a fw restart */
	struct device *dev;
	struct iwl_trans *trans;
	const struct iwl_cfg *cfg;
	const struct iwl_fw *fw;
	struct ieee80211_hw *hw;
	struct wiphy *wiphy;
	struct iwl_nvm_data *nvm_data;
	struct iwl_fw_runtime fwrt;
	struct dentry *debugfs_dir;
	struct iwl_notif_wait_data notif_wait;
	struct list_head async_handlers_list;
	spinlock_t async_handlers_lock;
	struct wiphy_work async_handlers_wk;
	struct wiphy_delayed_work ct_kill_exit_wk;
	struct {
		u32 running:1,
		    do_not_dump_once:1,
#ifdef CONFIG_PM_SLEEP
		    in_d3:1,
#endif
		    in_hw_restart:1;
	} fw_status;
	struct {
		u32 hw:1,
		    ct:1;
	} radio_kill;
	struct mac_address addresses[IWL_MLD_MAX_ADDRESSES];
	struct iwl_mld_scan scan;
#ifdef CONFIG_PM_SLEEP
	struct wiphy_wowlan_support wowlan;
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_IWLWIFI_LEDS
	struct led_classdev led;
#endif
	enum iwl_mcc_source mcc_src;
	bool bios_enable_puncturing;
	struct iwl_mld_baid_data __rcu *fw_id_to_ba[IWL_MAX_BAID];
	u8 num_rx_ba_sessions;
	struct iwl_mld_rx_queues_sync rxq_sync;
	struct list_head txqs_to_add;
	struct wiphy_work add_txqs_wk;
	spinlock_t add_txqs_lock;
	u8 *error_recovery_buf;
	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
	u8 mgmt_tx_ant;
	struct iwl_mld_low_latency low_latency;
	bool ibss_manager;
#ifdef CONFIG_THERMAL
	struct thermal_zone_device *tzone;
	struct iwl_mld_cooling_device cooling_dev;
#endif
	struct ptp_data ptp_data;
	struct iwl_mld_time_sync_data __rcu *time_sync;
	struct {
		struct cfg80211_pmsr_request *req;
		struct wireless_dev *req_wdev;
		int responses[IWL_TOF_MAX_APS];
	} ftm_initiator;
};
/* memset the part of the struct that requires cleanup on restart.
 * Note: _ptr is evaluated twice; pass a plain lvalue, not an expression
 * with side effects.
 */
#define CLEANUP_STRUCT(_ptr) \
	memset((void *)&(_ptr)->zeroed_on_hw_restart, 0, \
	       sizeof((_ptr)->zeroed_on_hw_restart))
/* Cleanup function for struct iwl_mld, will be called in restart */
static inline void
iwl_cleanup_mld(struct iwl_mld *mld)
{
	CLEANUP_STRUCT(mld);
	CLEANUP_STRUCT(&mld->scan);

	/* NOTE(review): in_d3 is declared under CONFIG_PM_SLEEP in
	 * struct iwl_mld — confirm this builds with PM sleep disabled
	 */
	mld->fw_status.in_d3 = false;

	iwl_mld_low_latency_restart_cleanup(mld);

	/* Empty the list of async notification handlers so we won't process
	 * notifications from the dead fw after the reconfig flow.
	 */
	iwl_mld_purge_async_handlers_list(mld);
}
/* Power scheme, selected via the power_scheme module parameter.
 * NOTE(review): CAM presumably means Continuously Active Mode (no power
 * save) and BPS a balanced power-save mode -- confirm against power code.
 */
enum iwl_power_scheme {
	IWL_POWER_SCHEME_CAM = 1,
	IWL_POWER_SCHEME_BPS,
};

/**
 * struct iwl_mld_mod_params - module parameters for iwlmld
 * @power_scheme: one of &enum iwl_power_scheme
 */
struct iwl_mld_mod_params {
	int power_scheme;
};

/* Global module parameters, shared by every device driven by iwlmld */
extern struct iwl_mld_mod_params iwlmld_mod_params;
/* Extract MLD priv from op_mode */
#define IWL_OP_MODE_GET_MLD(_iwl_op_mode)		\
	((struct iwl_mld *)(_iwl_op_mode)->op_mode_specific)

/* Extract MLD priv from an ieee80211_hw; hw->priv holds the op_mode */
#define IWL_MAC80211_GET_MLD(_hw)			\
	IWL_OP_MODE_GET_MLD((struct iwl_op_mode *)((_hw)->priv))

#ifdef CONFIG_IWLWIFI_DEBUGFS
void
iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir);
#else
/* No-op stub when debugfs support is compiled out */
static inline void
iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
{}
#endif

/* Firmware lifecycle entry points */
int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld);
int iwl_mld_load_fw(struct iwl_mld *mld);
void iwl_mld_stop_fw(struct iwl_mld *mld);
int iwl_mld_start_fw(struct iwl_mld *mld);
void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags);
/* Update the CT-kill state and report the combined (HW or CT) rfkill
 * state to the wiphy layer.
 */
static inline void iwl_mld_set_ctkill(struct iwl_mld *mld, bool state)
{
	bool killed;

	mld->radio_kill.ct = state;
	killed = mld->radio_kill.hw || mld->radio_kill.ct;

	wiphy_rfkill_set_hw_state(mld->wiphy, killed);
}
/* Update the HW rfkill state and report the combined (HW or CT) rfkill
 * state to the wiphy layer.
 */
static inline void iwl_mld_set_hwkill(struct iwl_mld *mld, bool state)
{
	bool killed;

	mld->radio_kill.hw = state;
	killed = mld->radio_kill.hw || mld->radio_kill.ct;

	wiphy_rfkill_set_hw_state(mld->wiphy, killed);
}
static inline u8 iwl_mld_get_valid_tx_ant(const struct iwl_mld *mld)
{
u8 tx_ant = mld->fw->valid_tx_ant;
if (mld->nvm_data && mld->nvm_data->valid_tx_ant)
tx_ant &= mld->nvm_data->valid_tx_ant;
return tx_ant;
}
static inline u8 iwl_mld_get_valid_rx_ant(const struct iwl_mld *mld)
{
u8 rx_ant = mld->fw->valid_rx_ant;
if (mld->nvm_data && mld->nvm_data->valid_rx_ant)
rx_ant &= mld->nvm_data->valid_rx_ant;
return rx_ant;
}
/* Convert an nl80211 band to the fw PHY band constant. Unsupported bands
 * trigger a one-time warning and fall back to PHY_BAND_5.
 */
static inline u8 iwl_mld_nl80211_band_to_fw(enum nl80211_band band)
{
	switch (band) {
	case NL80211_BAND_2GHZ:
		return PHY_BAND_24;
	case NL80211_BAND_5GHZ:
		return PHY_BAND_5;
	case NL80211_BAND_6GHZ:
		return PHY_BAND_6;
	default:
		WARN_ONCE(1, "Unsupported band (%u)\n", band);
		return PHY_BAND_5;
	}
}
/* Convert a fw PHY band constant back to the nl80211 band. Unsupported
 * values trigger a one-time warning and fall back to 5 GHz.
 */
static inline u8 iwl_mld_phy_band_to_nl80211(u8 phy_band)
{
	switch (phy_band) {
	case PHY_BAND_24:
		return NL80211_BAND_2GHZ;
	case PHY_BAND_5:
		return NL80211_BAND_5GHZ;
	case PHY_BAND_6:
		return NL80211_BAND_6GHZ;
	default:
		WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
		return NL80211_BAND_5GHZ;
	}
}
/* Convert a fw legacy rate index to a mac80211 rate index.
 *
 * In 2.4 GHz the mac80211 rate table starts with the CCK rates, so OFDM
 * entries are shifted by IWL_FIRST_OFDM_RATE; in other bands the table
 * starts at the OFDM rates. Returns -1 for CCK outside 2.4 GHz.
 */
static inline int
iwl_mld_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
				      enum nl80211_band band)
{
	int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	bool is_lb = band == NL80211_BAND_2GHZ;

	if (format == RATE_MCS_LEGACY_OFDM_MSK)
		return is_lb ? rate + IWL_FIRST_OFDM_RATE : rate;

	/* CCK is not allowed in 5 GHz */
	return is_lb ? rate : -1;
}
extern const struct ieee80211_ops iwl_mld_hw_ops;
/**
 * enum iwl_rx_handler_context: context for Rx handler
 * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
 *	which can't acquire the wiphy->mutex.
 * @RX_HANDLER_ASYNC: If the handler needs to hold wiphy->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with wiphy->mutex held.
 */
enum iwl_rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC,
};

/**
 * struct iwl_rx_handler: handler for FW notification
 * @val_fn: input validation function.
 * @sizes: an array that maps a version to the expected size.
 * @fn: the function is called when notification is handled
 * @cmd_id: command id
 * @n_sizes: number of elements in &sizes.
 * @context: see &iwl_rx_handler_context
 * @obj_type: the type of the object that this handler is related to.
 *	See &iwl_mld_object_type. Use IWL_MLD_OBJECT_TYPE_NONE if not related.
 * @cancel: function to cancel the notification. valid only if obj_type is not
 *	IWL_MLD_OBJECT_TYPE_NONE.
 */
struct iwl_rx_handler {
	/* Payload validation: either a bespoke validator or a
	 * version-to-size table -- NOTE(review): confirm how the two union
	 * members are discriminated (presumably via n_sizes).
	 */
	union {
		bool (*val_fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
		const struct iwl_notif_struct_size *sizes;
	};
	void (*fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
	u16 cmd_id;
	u8 n_sizes;
	u8 context;	/* holds an enum iwl_rx_handler_context value */
	enum iwl_mld_object_type obj_type;
	bool (*cancel)(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
		       u32 obj_id);
};

/**
 * struct iwl_notif_struct_size: map a notif ver to the expected size
 *
 * @size: the size to expect
 * @ver: the version of the notification
 */
struct iwl_notif_struct_size {
	u32 size:24, ver:8;
};
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
/* Internals exposed only so KUnit tests can reach them.
 * (sic: "goups" is a typo baked into the exported symbol name; renaming
 * it would have to be done together with its definition and users.)
 */
extern const struct iwl_hcmd_arr iwl_mld_groups[];
extern const unsigned int global_iwl_mld_goups_size;
extern const struct iwl_rx_handler iwl_mld_rx_handlers[];
extern const unsigned int iwl_mld_rx_handlers_num;

bool
iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
	       struct ieee80211_hdr *hdr,
	       const struct iwl_rx_mpdu_desc *mpdu_desc,
	       struct ieee80211_rx_status *rx_status, int queue);

void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
		       const struct iwl_cfg *cfg, const struct iwl_fw *fw,
		       struct ieee80211_hw *hw, struct dentry *dbgfs_dir);
#endif

/* Marker for "no fw ID allocated" in u8 fw_id fields */
#define IWL_MLD_INVALID_FW_ID 0xff
/*
 * Define iwl_mld_allocate_<_type>_fw_id(): find a free slot in the
 * mld->fw_id_to_<_mac80211_type> array, publish the mac80211 pointer
 * there via RCU, and return the slot index through *fw_id.
 *
 * The search starts at a random offset (unless IWL_MLD_DIS_RANDOM_FW_ID)
 * so different fw IDs get exercised across associations. For link_sta
 * and bss_conf types the usable range is capped by the fw capabilities
 * (num_stations / num_links) rather than the array size.
 *
 * Returns 0 on success, -ENOSPC when every slot is taken.
 */
#define IWL_MLD_ALLOC_FN(_type, _mac80211_type)				\
static int								\
iwl_mld_allocate_##_type##_fw_id(struct iwl_mld *mld,			\
				 u8 *fw_id,				\
				 struct ieee80211_##_mac80211_type *mac80211_ptr) \
{									\
	u8 rand = IWL_MLD_DIS_RANDOM_FW_ID ? 0 : get_random_u8();	\
	u8 arr_sz = ARRAY_SIZE(mld->fw_id_to_##_mac80211_type);		\
	if (__builtin_types_compatible_p(typeof(*mac80211_ptr),		\
					 struct ieee80211_link_sta))	\
		arr_sz = mld->fw->ucode_capa.num_stations;		\
	if (__builtin_types_compatible_p(typeof(*mac80211_ptr),		\
					 struct ieee80211_bss_conf))	\
		arr_sz = mld->fw->ucode_capa.num_links;			\
	for (int i = 0; i < arr_sz; i++) {				\
		u8 idx = (i + rand) % arr_sz;				\
		if (rcu_access_pointer(mld->fw_id_to_##_mac80211_type[idx])) \
			continue;					\
		IWL_DEBUG_INFO(mld, "Allocated at index %d / %d\n", idx, arr_sz); \
		*fw_id = idx;						\
		rcu_assign_pointer(mld->fw_id_to_##_mac80211_type[idx], mac80211_ptr); \
		return 0;						\
	}								\
	return -ENOSPC;							\
}
/* Look up the bss_conf registered for a fw link ID; returns NULL (and
 * records a fw-check failure) if the ID is out of the fw's link range.
 * Requires the wiphy lock (wiphy_dereference).
 */
static inline struct ieee80211_bss_conf *
iwl_mld_fw_id_to_link_conf(struct iwl_mld *mld, u8 fw_link_id)
{
	if (IWL_FW_CHECK(mld, fw_link_id >= mld->fw->ucode_capa.num_links,
			 "Invalid fw_link_id: %d\n", fw_link_id))
		return NULL;

	return wiphy_dereference(mld->wiphy,
				 mld->fw_id_to_bss_conf[fw_link_id]);
}
/* Convert milliseconds to TUs (1 TU = 1024 usec) */
#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)

/* debugfs hooks invoked by mac80211 for per-vif/link/sta directories */
void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif);
void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_bss_conf *link_conf,
			      struct dentry *dir);
void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_link_sta *link_sta,
				  struct dentry *dir);
/* Utilities */

/* Map a mac80211 AC to the fw TX fifo. Only the first four (EDCA)
 * entries are reachable via enum ieee80211_ac_numbers; NOTE(review):
 * confirm the trailing TRIG entries are intentionally unreachable here.
 */
static inline u8 iwl_mld_mac80211_ac_to_fw_tx_fifo(enum ieee80211_ac_numbers ac)
{
	static const u8 mac80211_ac_to_fw_tx_fifo[] = {
		IWL_BZ_EDCA_TX_FIFO_VO,
		IWL_BZ_EDCA_TX_FIFO_VI,
		IWL_BZ_EDCA_TX_FIFO_BE,
		IWL_BZ_EDCA_TX_FIFO_BK,
		IWL_BZ_TRIG_TX_FIFO_VO,
		IWL_BZ_TRIG_TX_FIFO_VI,
		IWL_BZ_TRIG_TX_FIFO_BE,
		IWL_BZ_TRIG_TX_FIFO_BK,
	};

	return mac80211_ac_to_fw_tx_fifo[ac];
}
static inline u32
iwl_mld_get_lmac_id(struct iwl_mld *mld, enum nl80211_band band)
{
if (!fw_has_capa(&mld->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT) ||
band == NL80211_BAND_2GHZ)
return IWL_LMAC_24G_INDEX;
return IWL_LMAC_5G_INDEX;
}
/* Check if we had an error, but reconfig flow didn't start yet */
static inline bool iwl_mld_error_before_recovery(struct iwl_mld *mld)
{
	/* in_hw_restart is set on error; the fw only runs again once the
	 * reconfig flow has restarted it.
	 */
	return mld->fw_status.in_hw_restart &&
		!iwl_trans_fw_running(mld->trans);
}
int iwl_mld_tdls_sta_count(struct iwl_mld *mld);
#endif /* __iwl_mld_h__ */

/* ==== extraction note: end of mld.h, start of mlo.c (new file) ==== */
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "mlo.h"
/* Block reasons helper */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW) \
HOW(PREVENTION) \
HOW(WOWLAN) \
HOW(FW) \
HOW(ROC) \
HOW(NON_BSS) \
HOW(TMP_NON_BSS) \
HOW(TPT)
/* Return the name of a single block reason (not a mask) */
static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
{
	/* Using switch without "default" will warn about missing entries */
	switch (blocked) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
	HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}
/* Debug-print every block reason set in @mask, e.g. "[FW][TPT] (0x44)".
 * NAME_FMT adds one "%s" per reason to the format string; NAME_PR adds a
 * matching argument (the bracketed name, or "" when the bit is clear).
 */
static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}
/* Exit reasons helper: X-macro list of all EMLSR exit reasons.
 * Must stay in sync with enum iwl_mld_emlsr_exit.
 */
#define HANDLE_EMLSR_EXIT_REASONS(HOW)	\
	HOW(BLOCK)			\
	HOW(MISSED_BEACON)		\
	HOW(FAIL_ENTRY)			\
	HOW(CSA)			\
	HOW(EQUAL_BAND)			\
	HOW(BANDWIDTH)			\
	HOW(LOW_RSSI)			\
	HOW(LINK_USAGE)			\
	HOW(BT_COEX)			\
	HOW(CHAN_LOAD)			\
	HOW(RFI)
/* Return the name of a single exit reason (not a mask) */
static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
{
	/* Using switch without "default" will warn about missing entries */
	switch (exit) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
	HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}
/* Debug-print every exit reason set in @mask; same X-macro technique as
 * iwl_mld_print_emlsr_blocked().
 */
static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_EXIT_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}
/* Delayed worker: the EMLSR prevention period expired, drop the
 * PREVENTION block so link selection may re-enable EMLSR.
 */
void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.prevent_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	/* The work is only queued right after the PREVENTION block is set */
	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_PREVENTION);
}
/* Delayed worker: the temporary non-BSS activity ended, drop the
 * TMP_NON_BSS block so link selection may re-enable EMLSR.
 */
void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
				       struct wiphy_work *wk)
{
	/* This work item is queued on emlsr.tmp_non_bss_done_wk, so the
	 * container must be resolved from that member; resolving from
	 * prevent_done_wk (as before) yields a mis-offset, bogus
	 * iwl_mld_vif pointer.
	 */
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.tmp_non_bss_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	/* The work is only queued right after the TMP_NON_BSS block is set */
	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
}
#define IWL_MLD_TRIGGER_LINK_SEL_TIME	(HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
#define IWL_MLD_SCAN_EXPIRE_TIME	(HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)

/* Exit reasons that can cause longer EMLSR prevention */
#define IWL_MLD_PREVENT_EMLSR_REASONS	(IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
					 IWL_MLD_EMLSR_EXIT_LINK_USAGE)
/* Window within which repeated exits escalate the prevention duration */
#define IWL_MLD_PREVENT_EMLSR_TIMEOUT	(HZ * 400)

/* Prevention durations used for the 2nd and 3rd+ repeated exit */
#define IWL_MLD_EMLSR_PREVENT_SHORT	(HZ * 300)
#define IWL_MLD_EMLSR_PREVENT_LONG	(HZ * 600)
/* Account for an EMLSR exit and, unless the exit was caused by a block,
 * arm the PREVENTION block; repeated exits for the same "serious" reason
 * (IWL_MLD_PREVENT_EMLSR_REASONS) escalate the prevention time.
 */
static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
					   struct iwl_mld_vif *mld_vif,
					   enum iwl_mld_emlsr_exit reason)
{
	unsigned long delay;

	/*
	 * Reset the counter if more than 400 seconds have passed between one
	 * exit and the other, or if we exited due to a different reason.
	 * Will also reset the counter after the long prevention is done.
	 */
	if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
				IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
	    mld_vif->emlsr.last_exit_reason != reason)
		mld_vif->emlsr.exit_repeat_count = 0;

	mld_vif->emlsr.last_exit_reason = reason;
	mld_vif->emlsr.last_exit_ts = jiffies;
	mld_vif->emlsr.exit_repeat_count++;

	/*
	 * Do not add a prevention when the reason was a block. For a block,
	 * EMLSR will be enabled again on unblock.
	 */
	if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
		return;

	/* Set prevention for a minimum of 30 seconds */
	mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
	delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;

	/* Handle repeats for reasons that can cause long prevention */
	if (mld_vif->emlsr.exit_repeat_count > 1 &&
	    reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
		if (mld_vif->emlsr.exit_repeat_count == 2)
			delay = IWL_MLD_EMLSR_PREVENT_SHORT;
		else
			delay = IWL_MLD_EMLSR_PREVENT_LONG;

		/*
		 * The timeouts are chosen so that this will not happen, i.e.
		 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
		 */
		WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
	}

	IWL_DEBUG_INFO(mld,
		       "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
		       delay / HZ, mld_vif->emlsr.exit_repeat_count,
		       iwl_mld_get_emlsr_exit_string(reason), reason);

	wiphy_delayed_work_queue(mld->wiphy,
				 &mld_vif->emlsr.prevent_done_wk, delay);
}
/* Leave EMLSR by shrinking the active links to a single one.
 *
 * @exit: reason, used for logging and prevention bookkeeping
 * @link_to_keep: the link to keep active; falls back to the lowest
 *	currently-active link id if it is not active
 * @sync: use the synchronous mac80211 API and return its result,
 *	otherwise request the switch asynchronously and return 0
 */
static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
			       bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u16 new_active_links;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* On entry failure need to exit anyway, even if entered from debugfs */
	if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
		return 0;

	/* Ignore exit request if EMLSR is not active */
	if (!iwl_mld_emlsr_active(vif))
		return 0;

	if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
		return 0;

	if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
		link_to_keep = __ffs(vif->active_links);

	new_active_links = BIT(link_to_keep);
	IWL_DEBUG_INFO(mld,
		       "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
		       iwl_mld_get_emlsr_exit_string(exit), exit,
		       vif->active_links, new_active_links);

	if (sync)
		ret = ieee80211_set_active_links(vif, new_active_links);
	else
		ieee80211_set_active_links_async(vif, new_active_links);

	/* Update latest exit reason and check EMLSR prevention */
	iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);

	return ret;
}
/* Asynchronous (fire-and-forget) variant of _iwl_mld_exit_emlsr() */
void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
{
	_iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
}
/* Add @reason to the vif's EMLSR block mask and, if it was not already
 * set, exit EMLSR with the BLOCK reason (keeping @link_to_keep).
 * Returns 0, or the ieee80211_set_active_links() result when @sync.
 */
static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
				enum iwl_mld_emlsr_blocked reason,
				u8 link_to_keep, bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return 0;

	/* Already blocked for this reason - nothing to do */
	if (mld_vif->emlsr.blocked_reasons & reason)
		return 0;

	mld_vif->emlsr.blocked_reasons |= reason;

	IWL_DEBUG_INFO(mld,
		       "Blocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	/* While blocked for TPT the periodic check is pointless; it is
	 * re-queued on unblock.
	 */
	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);

	return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
				   link_to_keep, sync);
}
/* Asynchronous variant of _iwl_mld_emlsr_block() */
void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	_iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
}
/* Synchronous variant of _iwl_mld_emlsr_block(); returns the result of
 * the (synchronous) active-links change.
 */
int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}
static void _iwl_mld_select_links(struct iwl_mld *mld,
struct ieee80211_vif *vif);
/* Run link selection immediately if the last MLO scan results are still
 * fresh; otherwise start an internal MLO scan first (selection then runs
 * from the scan-complete path).
 */
void iwl_mld_trigger_link_selection(struct iwl_mld *mld,
				    struct ieee80211_vif *vif)
{
	bool last_scan_was_recent =
		time_before(jiffies, mld->scan.last_mlo_scan_jiffies +
				     IWL_MLD_SCAN_EXPIRE_TIME);

	if (last_scan_was_recent) {
		IWL_DEBUG_EHT(mld, "MLO scan was recent, skip.\n");
		_iwl_mld_select_links(mld, vif);
	} else {
		IWL_DEBUG_EHT(mld, "Doing link selection after MLO scan\n");
		iwl_mld_int_mlo_scan(mld, vif);
	}
}
/* Clear @reason from the vif's EMLSR block mask; when the mask becomes
 * empty, trigger link selection (which may re-enter EMLSR).
 */
void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			   enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return;

	/* Not blocked for this reason - nothing to do */
	if (!(mld_vif->emlsr.blocked_reasons & reason))
		return;

	mld_vif->emlsr.blocked_reasons &= ~reason;

	IWL_DEBUG_INFO(mld,
		       "Unblocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	/* Resume the periodic TPT check that was cancelled on TPT block */
	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_queue(mld_vif->mld->wiphy,
					 &mld_vif->emlsr.check_tpt_wk,
					 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));

	/* Other reasons are still blocking - don't re-enable yet */
	if (mld_vif->emlsr.blocked_reasons)
		return;

	IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
	iwl_mld_trigger_link_selection(mld, vif);
}
/* Interface iterator: apply the fw's EMLSR recommendation (enter/leave)
 * to an EMLSR-capable vif by toggling the FW block reason.
 */
static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_esr_mode_notif *notif = (void *)data;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	switch (le32_to_cpu(notif->action)) {
	case ESR_RECOMMEND_ENTER:
		iwl_mld_unblock_emlsr(mld_vif->mld, vif,
				      IWL_MLD_EMLSR_BLOCKED_FW);
		break;
	case ESR_RECOMMEND_LEAVE:
		iwl_mld_block_emlsr(mld_vif->mld, vif,
				    IWL_MLD_EMLSR_BLOCKED_FW,
				    iwl_mld_get_primary_link(vif));
		break;
	case ESR_FORCE_LEAVE:
	default:
		/* ESR_FORCE_LEAVE should not happen at this point */
		IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
			 le32_to_cpu(notif->action));
	}
}
/* Rx handler for the fw's EMLSR mode notification: fan the recommendation
 * out to all active interfaces.
 */
void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_mode_notif,
						pkt->data);
}
/* Interface iterator: force a connection loss on every EMLSR-capable vif
 * (used when the fw reports an EMLSR failure on an unknown link).
 */
static void
iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ieee80211_connection_loss(vif);
}
/* Rx handler for a failed EMLSR transition reported by the fw.
 * - unknown link: disconnect all EMLSR-capable vifs (state is unknown)
 * - failed deactivation: disconnect the affected vif
 * - failed activation: fall back to single-link on the reported link
 */
void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *bss_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
		       le32_to_cpu(notif->activation) ? "enter" : "exit",
		       bss_conf ? bss_conf->link_id : -1,
		       le32_to_cpu(notif->link_id),
		       le32_to_cpu(notif->err_code));

	if (IWL_FW_CHECK(mld, !bss_conf,
			 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
			 le32_to_cpu(notif->activation) ? "" : "de",
			 fw_link_id)) {
		ieee80211_iterate_active_interfaces_mtx(
			mld->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mld_vif_iter_disconnect_emlsr, NULL);
		return;
	}

	/* Disconnect if we failed to deactivate a link */
	if (!le32_to_cpu(notif->activation)) {
		ieee80211_connection_loss(bss_conf->vif);
		return;
	}

	/*
	 * We failed to activate the second link, go back to the link specified
	 * by the firmware as that is the one that is still valid now.
	 */
	iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
			   bss_conf->link_id);
}
/* Active non-station link tracking */

/* Interface iterator: accumulate into *_data the number of active links
 * on non-station vifs (station vifs are skipped).
 */
static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int *count = _data;

	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
		return;

	*count += iwl_mld_count_active_links(mld_vif->mld, vif);
}
/* Iterator context for (un)blocking the NON_BSS reason on station vifs */
struct iwl_mld_update_emlsr_block_data {
	bool block;	/* true to block, false to unblock */
	int result;	/* last non-zero result of the sync block call */
};
/* Interface iterator: block (synchronously) or unblock EMLSR with the
 * NON_BSS reason, recording any block failure in the shared data.
 */
static void
iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_mld_update_emlsr_block_data *data = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (data->block) {
		ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
					       IWL_MLD_EMLSR_BLOCKED_NON_BSS,
					       iwl_mld_get_primary_link(vif));
		if (ret)
			data->result = ret;
	} else {
		iwl_mld_unblock_emlsr(mld_vif->mld, vif,
				      IWL_MLD_EMLSR_BLOCKED_NON_BSS);
	}
}
/* Re-evaluate the NON_BSS EMLSR block across all interfaces.
 *
 * @pending_link_changes: delta of non-station active links that is about
 *	to happen but is not yet reflected in mac80211 state (>0 when a
 *	link is being activated, <=0 otherwise).
 *
 * Returns 0, or the first error from a synchronous block operation.
 */
int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
				      int pending_link_changes)
{
	/* An active link of a non-station vif blocks EMLSR. Upon activation
	 * block EMLSR on the bss vif. Upon deactivation, check if this link
	 * was the last non-station link active, and if so unblock the bss vif
	 */
	struct iwl_mld_update_emlsr_block_data block_data = {};
	int count = pending_link_changes;

	/* No need to count if we are activating a non-BSS link */
	if (count <= 0)
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_count_non_bss_links,
							&count);

	/*
	 * We could skip updating it if the block change did not change (and
	 * pending_link_changes is non-zero).
	 */
	block_data.block = !!count;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_update_emlsr_non_bss_block,
						&block_data);

	return block_data.result;
}
/* Minimum share (percent) of the traffic the secondary link must carry */
#define EMLSR_SEC_LINK_MIN_PERC 10
/* Minimum per-window MPDU counts for the TX/RX percentage checks */
#define EMLSR_MIN_TX 3000
#define EMLSR_MIN_RX 400

/* Periodic worker: verify MPDU throughput still justifies EMLSR.
 * Blocks EMLSR (TPT reason) when overall traffic is too low, and exits
 * EMLSR (LINK_USAGE reason) when the secondary link's share is too small.
 */
void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.check_tpt_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *sec_link;
	unsigned long total_tx = 0, total_rx = 0;
	unsigned long sec_link_tx = 0, sec_link_rx = 0;
	u8 sec_link_tx_perc, sec_link_rx_perc;
	s8 sec_link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in a MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	/* This wk should only run when the TPT blocker isn't set.
	 * When the blocker is set, the decision to remove it, as well as
	 * clearing the counters is done in DP (to avoid having a wk every
	 * 5 seconds when idle. When the blocker is unset, we are not idle anyway)
	 */
	if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		return;

	/*
	 * TPT is unblocked, need to check if the TPT criteria is still met.
	 *
	 * If EMLSR is active, then we also need to check the secondary link
	 * requirements.
	 */
	if (iwl_mld_emlsr_active(vif)) {
		sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
		sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
		if (WARN_ON_ONCE(!sec_link))
			return;
		/* We need the FW ID here */
		sec_link_id = sec_link->fw_id;
	} else {
		sec_link_id = -1;
	}

	/* Sum up RX and TX MPDUs from the different queues/links */
	for (int q = 0; q < mld->trans->num_rx_queues; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		/* Link IDs that don't exist will contain 0 */
		for (int link = 0;
		     link < ARRAY_SIZE(queue_counter->per_link);
		     link++) {
			total_tx += queue_counter->per_link[link].tx;
			total_rx += queue_counter->per_link[link].rx;
		}

		if (sec_link_id != -1) {
			sec_link_tx += queue_counter->per_link[sec_link_id].tx;
			sec_link_rx += queue_counter->per_link[sec_link_id].rx;
		}

		/* Counters are reset every window, so the figures below
		 * are per-window.
		 */
		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
		       total_tx, total_rx);

	/* If we don't have enough MPDUs - exit EMLSR */
	if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
	    total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
		iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
				    iwl_mld_get_primary_link(vif));
		return;
	}

	/* EMLSR is not active */
	if (sec_link_id == -1)
		return;

	IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
		       sec_link_id, sec_link_tx, sec_link_rx);

	/* Calculate the percentage of the secondary link TX/RX */
	sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
	sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;

	/*
	 * The TX/RX percentage is checked only if it exceeds the required
	 * minimum. In addition, RX is checked only if the TX check failed.
	 */
	if ((total_tx > EMLSR_MIN_TX &&
	     sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
	    (total_rx > EMLSR_MIN_RX &&
	     sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
				   iwl_mld_get_primary_link(vif));
		return;
	}

	/* Check again when the next window ends */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
}
/* Worker: drop the TPT block (queued from the data path when traffic
 * picks up again while EMLSR is TPT-blocked).
 */
void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.unblock_tpt_wk);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
}
/*
 * Link selection
 */

/* Per-candidate-link data collected for link selection */
struct iwl_mld_link_sel_data {
	u8 link_id;				/* mac80211 link id */
	const struct cfg80211_chan_def *chandef;	/* operating chandef */
	s32 signal;				/* BSS signal, dBm */
	u16 grade;				/* link grade, higher is better */
};
/* Return the RSSI threshold (dBm) used for EMLSR decisions for the given
 * channel width; @low selects the lower (exit) vs higher (entry)
 * threshold. Returns S8_MAX (never met) for unsupported bands/widths.
 */
s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
				 const struct cfg80211_chan_def *chandef,
				 bool low)
{
	if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
		    chandef->chan->band != NL80211_BAND_5GHZ &&
		    chandef->chan->band != NL80211_BAND_6GHZ))
		return S8_MAX;

#define RSSI_THRESHOLD(_low, _bw)			\
	(_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ	\
	       : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	/* 320 MHz has the same thresholds as 20 MHz */
	case NL80211_CHAN_WIDTH_320:
		return RSSI_THRESHOLD(low, 20);
	case NL80211_CHAN_WIDTH_40:
		return RSSI_THRESHOLD(low, 40);
	case NL80211_CHAN_WIDTH_80:
		return RSSI_THRESHOLD(low, 80);
	case NL80211_CHAN_WIDTH_160:
		return RSSI_THRESHOLD(low, 160);
	default:
		WARN_ON(1);
		return S8_MAX;
	}
#undef RSSI_THRESHOLD
}
/* Check whether @link may participate in EMLSR (as primary or secondary).
 *
 * Returns a mask of iwl_mld_emlsr_exit reasons that disallow the link,
 * i.e. 0 when the link is usable. The function returns u32, so use 0
 * (not a bool literal) on the early-exit path.
 */
static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_mld_link_sel_data *link,
				   bool primary)
{
	struct wiphy *wiphy = mld->wiphy;
	struct ieee80211_bss_conf *conf;
	enum iwl_mld_emlsr_exit ret = 0;

	conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
	if (WARN_ON_ONCE(!conf))
		return 0;

	/* 2.4 GHz is shared with BT - avoid EMLSR on it while BT is active */
	if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
		ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;

	if (link->signal <
	    iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
		ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;

	if (conf->csa_active)
		ret |= IWL_MLD_EMLSR_EXIT_CSA;

	if (ret) {
		IWL_DEBUG_INFO(mld,
			       "Link %d is not allowed for EMLSR as %s\n",
			       link->link_id,
			       primary ? "primary" : "secondary");
		iwl_mld_print_emlsr_exit(mld, ret);
	}

	return ret;
}
/* Fill @data with selection info for each usable link whose BSS entry is
 * fresh (seen within the last 5 seconds). Returns the number of entries
 * and stores the index of the highest-graded one in *best_link_idx.
 */
static u8
iwl_mld_set_link_sel_data(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct iwl_mld_link_sel_data *data,
			  unsigned long usable_links,
			  u8 *best_link_idx)
{
	u8 n_data = 0;
	u16 max_grade = 0;
	unsigned long link_id;

	/*
	 * TODO: don't select links that weren't discovered in the last scan
	 * This requires mac80211 (or cfg80211) changes to forward/track when
	 * a BSS was last updated. cfg80211 already tracks this information but
	 * it is not exposed within the kernel.
	 */
	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);

		if (WARN_ON_ONCE(!link_conf))
			continue;

		/* Ignore any BSS that was not seen in the last 5 seconds */
		if (ktime_before(link_conf->bss->ts_boottime,
				 ktime_sub_ns(ktime_get_boottime_ns(),
					      (u64)5 * NSEC_PER_SEC)))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].chandef = &link_conf->chanreq.oper;
		data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
		data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);

		if (n_data == 0 || data[n_data].grade > max_grade) {
			max_grade = data[n_data].grade;
			*best_link_idx = n_data;
		}
		n_data++;
	}

	return n_data;
}
/* Check whether links @a and @b form a valid EMLSR pair: each link must
 * individually be allowed, and the pair must be on different bands
 * with equal bandwidth (for now - see the TODO below).
 */
static bool
iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
			 struct iwl_mld_link_sel_data *a,
			 struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	enum iwl_mld_emlsr_exit ret = 0;

	/* Per-link considerations */
	if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) ||
	    iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false))
		return false;

	if (a->chandef->chan->band == b->chandef->chan->band) {
		ret |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
	} else if (a->chandef->width != b->chandef->width) {
		/* TODO: task=EMLSR task=statistics
		 * replace BANDWIDTH exit reason with channel load criteria
		 */
		ret |= IWL_MLD_EMLSR_EXIT_BANDWIDTH;
	}

	if (ret) {
		IWL_DEBUG_INFO(mld,
			       "Links %d and %d are not a valid pair for EMLSR\n",
			       a->link_id, b->link_id);
		IWL_DEBUG_INFO(mld,
			       "Links bandwidth are: %d and %d\n",
			       nl80211_chan_width_to_mhz(a->chandef->width),
			       nl80211_chan_width_to_mhz(b->chandef->width));
		iwl_mld_print_emlsr_exit(mld, ret);
		return false;
	}

	return true;
}
/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256

/*
 * Returns the combined grade of two given links.
 * Returns 0 if EMLSR is not allowed with these 2 links.
 * The better-graded link of the pair is chosen as primary and reported
 * through *primary_id.
 */
static
unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     struct iwl_mld_link_sel_data *a,
				     struct iwl_mld_link_sel_data *b,
				     u8 *primary_id)
{
	struct ieee80211_bss_conf *primary_conf;
	struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
	unsigned int primary_load;

	lockdep_assert_wiphy(wiphy);

	/* a is always primary, b is always secondary */
	if (b->grade > a->grade)
		swap(a, b);

	*primary_id = a->link_id;

	if (!iwl_mld_valid_emlsr_pair(vif, a, b))
		return 0;

	primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);

	if (WARN_ON_ONCE(!primary_conf))
		return 0;

	primary_load = iwl_mld_get_chan_load(mld, primary_conf);

	/* The more the primary link is loaded, the more worthwhile EMLSR becomes */
	return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
}
/* Core link selection: pick the best single link, or the best valid
 * EMLSR pair when EMLSR is possible and unblocked, then request the
 * active-links change from mac80211. Requires fresh MLO scan results.
 */
static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_link_sel_data *best_link;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int max_active_links = iwl_mld_max_active_links(mld, vif);
	u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
	u8 best_idx, new_primary, n_data;
	u16 max_grade;

	lockdep_assert_wiphy(mld->wiphy);

	/* Nothing to select with a single usable link or pre-authorization */
	if (!mld_vif->authorized || hweight16(usable_links) <= 1)
		return;

	if (WARN(time_before(mld->scan.last_mlo_scan_jiffies,
			     jiffies - IWL_MLD_SCAN_EXPIRE_TIME),
		 "Last MLO scan was too long ago, can't select links\n"))
		return;

	/* The logic below is simple and not suited for more than 2 links */
	WARN_ON_ONCE(max_active_links > 2);

	n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
					   &best_idx);

	if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
		return;

	/* Default to selecting the single best link */
	best_link = &data[best_idx];
	new_primary = best_link->link_id;
	new_active = BIT(best_link->link_id);
	max_grade = best_link->grade;

	/* If EMLSR is not possible, activate the best link */
	if (max_active_links == 1 || n_data == 1 ||
	    !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
	    mld_vif->emlsr.blocked_reasons)
		goto set_active;

	/* Try to find the best link combination */
	for (u8 a = 0; a < n_data; a++) {
		for (u8 b = a + 1; b < n_data; b++) {
			u8 best_in_pair;
			u16 emlsr_grade =
				iwl_mld_get_emlsr_grade(mld, vif,
							&data[a], &data[b],
							&best_in_pair);

			/*
			 * Prefer (new) EMLSR combination to prefer EMLSR over
			 * a single link.
			 */
			if (emlsr_grade < max_grade)
				continue;

			max_grade = emlsr_grade;
			new_primary = best_in_pair;
			new_active = BIT(data[a].link_id) |
				     BIT(data[b].link_id);
		}
	}

set_active:
	IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
		       new_active, new_primary);

	mld_vif->emlsr.selected_primary = new_primary;
	mld_vif->emlsr.selected_links = new_active;

	ieee80211_set_active_links_async(vif, new_active);
}
/* Interface iterator wrapper around _iwl_mld_select_links() */
static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	_iwl_mld_select_links(mld_vif->mld, vif);
}
/* Run link selection on every active interface */
void iwl_mld_select_links(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_select_links,
						NULL);
}
/*
 * Exit EMLSR if the two active links no longer use the same bandwidth.
 * Called when a link's channel request may have changed.
 */
void iwl_mld_emlsr_check_equal_bw(struct iwl_mld *mld,
				  struct ieee80211_vif *vif,
				  struct ieee80211_bss_conf *link)
{
	u8 other_link_id = iwl_mld_get_other_link(vif, link->link_id);
	struct ieee80211_bss_conf *other_link =
		link_conf_dereference_check(vif, other_link_id);

	/* Nothing to do unless this link is active, EMLSR is running
	 * and there is a valid second link to compare against.
	 */
	if (!ieee80211_vif_link_active(vif, link->link_id) ||
	    !iwl_mld_emlsr_active(vif) ||
	    WARN_ON(link->link_id == other_link_id || !other_link))
		return;

	/* Bandwidth mismatch between the two links ends EMLSR */
	if (link->chanreq.oper.width != other_link->chanreq.oper.width)
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BANDWIDTH,
				   iwl_mld_get_primary_link(vif));
}
/* Interface iterator: react to a BT on/off change for one interface */
static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	struct ieee80211_bss_conf *link_conf;
	unsigned int id;

	/* BT was turned off - EMLSR may be possible again */
	if (!mld->bt_is_active) {
		iwl_mld_retry_emlsr(mld, vif);
		return;
	}

	/* BT is turned ON but we are not in EMLSR, nothing to do */
	if (!iwl_mld_emlsr_active(vif))
		return;

	/* In EMLSR with BT on: leave EMLSR if any link is on 2.4 GHz */
	for_each_vif_active_link(vif, link_conf, id) {
		if (WARN_ON(!link_conf->chanreq.oper.chan))
			continue;

		if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
			continue;

		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
				   iwl_mld_get_primary_link(vif));
		return;
	}
}
/* React to a BT state change: exit or retry EMLSR on all interfaces */
void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_bt_iter,
						NULL);
}
/*
 * Interface iterator: leave EMLSR when the channel load caused by others
 * on the primary link is below the threshold that justifies two links.
 */
static void iwl_mld_emlsr_check_chan_load_iter(void *_data, u8 *mac,
					       struct ieee80211_vif *vif)
{
	/* no cast needed when assigning from void * in C */
	struct iwl_mld *mld = _data;
	struct ieee80211_bss_conf *prim_link;
	unsigned int prim_link_id;
	int chan_load;

	if (!iwl_mld_emlsr_active(vif))
		return;

	prim_link_id = iwl_mld_get_primary_link(vif);
	prim_link = link_conf_dereference_protected(vif, prim_link_id);

	if (WARN_ON(!prim_link))
		return;

	chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link, true);

	/* negative means the load is not available - keep EMLSR */
	if (chan_load < 0)
		return;

	/* chan_load is in range [0,255] */
	if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_CHAN_LOAD_THRESH))
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
				   prim_link_id);
}
/* Check channel load on all interfaces and exit EMLSR where too low */
void iwl_mld_emlsr_check_chan_load(struct iwl_mld *mld)
{
	/* pass mld directly: the (void *)(uintptr_t) round-trip of an
	 * actual pointer was pointless and only obscured the type
	 */
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_chan_load_iter,
						mld);
}
void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	/* Only retry when EMLSR is possible, not already active,
	 * and nothing currently blocks it.
	 */
	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	if (iwl_mld_emlsr_active(vif))
		return;

	if (mld_vif->emlsr.blocked_reasons)
		return;

	iwl_mld_trigger_link_selection(mld, vif);
}

View File

@ -0,0 +1,157 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_mlo_h__
#define __iwl_mld_mlo_h__
#include <linux/ieee80211.h>
#include <linux/types.h>
#include <net/mac80211.h>
#include "iwl-config.h"
#include "iwl-trans.h"
#include "iface.h"
struct iwl_mld;
void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk);
void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
struct wiphy_work *wk);
/* Whether EMLSR is currently active on this interface.
 * The flag is set on phy context activation, so it is a good proxy.
 */
static inline bool iwl_mld_emlsr_active(struct ieee80211_vif *vif)
{
	return vif->driver_flags & IEEE80211_VIF_EML_ACTIVE;
}
/* Whether this interface is capable of entering EMLSR at all */
static inline bool iwl_mld_vif_has_emlsr_cap(struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	/* We only track/permit EMLSR state once authorized */
	if (!mld_vif->authorized)
		return false;

	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
		return false;

	if (!ieee80211_vif_is_mld(vif))
		return false;

	if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
		return false;

	/* No EMLSR on dual radio devices */
	return !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->hw_rf_id);
}
/* Maximum number of simultaneously active links for this interface */
static inline int
iwl_mld_max_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	/* AP interfaces are limited by how many beacons the FW handles */
	if (vif->type == NL80211_IFTYPE_AP)
		return mld->fw->ucode_capa.num_beacons;

	/* For now, only (P2P) station interfaces get more than one link */
	return ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION ?
		IWL_FW_MAX_ACTIVE_LINKS_NUM : 1;
}
static inline int
iwl_mld_count_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
struct iwl_mld_link *mld_link;
int n_active = 0;
for_each_mld_vif_valid_link(mld_vif, mld_link) {
if (rcu_access_pointer(mld_link->chan_ctx))
n_active++;
}
return n_active;
}
/* Return the link ID of the primary link (0 for non-MLD interfaces) */
static inline u8 iwl_mld_get_primary_link(struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld_vif->mld->wiphy);

	if (!ieee80211_vif_is_mld(vif) || WARN_ON(!vif->active_links))
		return 0;

	/* In AP mode, there is no primary link */
	if (vif->type == NL80211_IFTYPE_AP)
		return __ffs(vif->active_links);

	/* In EMLSR, use the tracked primary link as long as it is still
	 * one of the active links.
	 */
	if (iwl_mld_emlsr_active(vif) &&
	    !WARN_ON(!(BIT(mld_vif->emlsr.primary) & vif->active_links)))
		return mld_vif->emlsr.primary;

	/* Otherwise the lowest active link ID is the primary one */
	return __ffs(vif->active_links);
}
/*
 * For non-MLO/single link, this will return the deflink/single active link,
 * respectively
 */
static inline u8 iwl_mld_get_other_link(struct ieee80211_vif *vif, u8 link_id)
{
	unsigned int n_active = hweight16(vif->active_links);

	if (!n_active)
		return 0;

	if (n_active == 2)
		return __ffs(vif->active_links & ~BIT(link_id));

	/* more than two active links is unexpected */
	WARN_ON(n_active > 2);

	return __ffs(vif->active_links);
}
s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
const struct cfg80211_chan_def *chandef,
bool low);
/* EMLSR block/unblock and exit */
void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
enum iwl_mld_emlsr_blocked reason, u8 link_to_keep);
int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
enum iwl_mld_emlsr_blocked reason, u8 link_to_keep);
void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
enum iwl_mld_emlsr_blocked reason);
void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
enum iwl_mld_emlsr_exit exit, u8 link_to_keep);
int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
int pending_link_changes);
void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk);
void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk);
void iwl_mld_select_links(struct iwl_mld *mld);
void iwl_mld_emlsr_check_equal_bw(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
void iwl_mld_emlsr_check_bt(struct iwl_mld *mld);
void iwl_mld_emlsr_check_chan_load(struct iwl_mld *mld);
void iwl_mld_trigger_link_selection(struct iwl_mld *mld,
struct ieee80211_vif *vif);
/**
* iwl_mld_retry_emlsr - Retry entering EMLSR
* @mld: MLD context
* @vif: VIF to retry EMLSR on
*
* Retry entering EMLSR on the given VIF.
* Use this if one of the parameters that can prevent EMLSR has changed.
*/
void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif);
#endif /* __iwl_mld_mlo_h__ */

View File

@ -0,0 +1,758 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "mld.h"
#include "notif.h"
#include "scan.h"
#include "iface.h"
#include "mlo.h"
#include "iwl-trans.h"
#include "fw/file.h"
#include "fw/dbg.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/mac-cfg.h"
#include "session-protect.h"
#include "fw/api/time-event.h"
#include "fw/api/tx.h"
#include "fw/api/rs.h"
#include "fw/api/offload.h"
#include "fw/api/stats.h"
#include "fw/api/rfi.h"
#include "fw/api/coex.h"
#include "mcc.h"
#include "link.h"
#include "tx.h"
#include "rx.h"
#include "tlc.h"
#include "agg.h"
#include "mac80211.h"
#include "thermal.h"
#include "roc.h"
#include "stats.h"
#include "coex.h"
#include "time_sync.h"
#include "ftm-initiator.h"
/* Please use this in an increasing order of the versions */
#define CMD_VER_ENTRY(_ver, _struct) \
{ .size = sizeof(struct _struct), .ver = _ver },
#define CMD_VERSIONS(name, ...) \
static const struct iwl_notif_struct_size \
iwl_notif_struct_sizes_##name[] = { __VA_ARGS__ };
#define RX_HANDLER_NO_OBJECT(_grp, _cmd, _name, _context) \
{.cmd_id = WIDE_ID(_grp, _cmd), \
.context = _context, \
.fn = iwl_mld_handle_##_name, \
.sizes = iwl_notif_struct_sizes_##_name, \
.n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name), \
},
/* Use this for Rx handlers that do not need notification validation */
#define RX_HANDLER_NO_VAL(_grp, _cmd, _name, _context) \
{.cmd_id = WIDE_ID(_grp, _cmd), \
.context = _context, \
.fn = iwl_mld_handle_##_name, \
},
#define RX_HANDLER_VAL_FN(_grp, _cmd, _name, _context) \
{ .cmd_id = WIDE_ID(_grp, _cmd), \
.context = _context, \
.fn = iwl_mld_handle_##_name, \
.val_fn = iwl_mld_validate_##_name, \
},
#define DEFINE_SIMPLE_CANCELLATION(name, notif_struct, id_member) \
static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld, \
struct iwl_rx_packet *pkt, \
u32 obj_id) \
{ \
const struct notif_struct *notif = (const void *)pkt->data; \
\
return obj_id == _Generic((notif)->id_member, \
__le32: le32_to_cpu((notif)->id_member), \
__le16: le16_to_cpu((notif)->id_member), \
u8: (notif)->id_member); \
}
/* Cancellation helper for notifications that must always be dropped when
 * their object is removed, regardless of the notification payload.
 */
static bool iwl_mld_always_cancel(struct iwl_mld *mld,
				  struct iwl_rx_packet *pkt,
				  u32 obj_id)
{
	return true;
}
/* Currently only defined for the RX_HANDLER_SIZES options. Use this for
* notifications that belong to a specific object, and that should be
* canceled when the object is removed
*/
#define RX_HANDLER_OF_OBJ(_grp, _cmd, _name, _obj_type) \
{.cmd_id = WIDE_ID(_grp, _cmd), \
/* Only async handlers can be canceled */ \
.context = RX_HANDLER_ASYNC, \
.fn = iwl_mld_handle_##_name, \
.sizes = iwl_notif_struct_sizes_##_name, \
.n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name), \
.obj_type = IWL_MLD_OBJECT_TYPE_##_obj_type, \
.cancel = iwl_mld_cancel_##_name, \
},
#define RX_HANDLER_OF_LINK(_grp, _cmd, _name) \
RX_HANDLER_OF_OBJ(_grp, _cmd, _name, LINK) \
#define RX_HANDLER_OF_VIF(_grp, _cmd, _name) \
RX_HANDLER_OF_OBJ(_grp, _cmd, _name, VIF) \
#define RX_HANDLER_OF_STA(_grp, _cmd, _name) \
RX_HANDLER_OF_OBJ(_grp, _cmd, _name, STA) \
#define RX_HANDLER_OF_ROC(_grp, _cmd, _name) \
RX_HANDLER_OF_OBJ(_grp, _cmd, _name, ROC)
#define RX_HANDLER_OF_SCAN(_grp, _cmd, _name) \
RX_HANDLER_OF_OBJ(_grp, _cmd, _name, SCAN)
#define RX_HANDLER_OF_FTM_REQ(_grp, _cmd, _name) \
RX_HANDLER_OF_OBJ(_grp, _cmd, _name, FTM_REQ)
/* Log the MFUART image versions and load status reported by the FW */
static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld,
					struct iwl_rx_packet *pkt)
{
	struct iwl_mfuart_load_notif *notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mld,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x\n",
		       le32_to_cpu(notif->installed_ver),
		       le32_to_cpu(notif->external_ver));
	IWL_DEBUG_INFO(mld,
		       "MFUART: status: 0x%08x, duration: 0x%08x image size: 0x%08x\n",
		       le32_to_cpu(notif->status),
		       le32_to_cpu(notif->duration),
		       le32_to_cpu(notif->image_size));
}
/* Interface iterator: apply a MU-MIMO group update to one interface */
static void iwl_mld_mu_mimo_iface_iterator(void *_data, u8 *mac,
					   struct ieee80211_vif *vif)
{
	const struct iwl_mu_group_mgmt_notif *notif = _data;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	unsigned int link_id = 0;

	if (WARN(hweight16(vif->active_links) > 1,
		 "no support for this notif while in EMLSR 0x%x\n",
		 vif->active_links))
		return;

	if (ieee80211_vif_is_mld(vif)) {
		link_id = __ffs(vif->active_links);
		bss_conf = link_conf_dereference_check(vif, link_id);
	}

	if (WARN_ON(!bss_conf) || !bss_conf->mu_mimo_owner)
		return;

	BUILD_BUG_ON(sizeof(notif->membership_status) !=
		     WLAN_MEMBERSHIP_LEN);
	BUILD_BUG_ON(sizeof(notif->user_position) !=
		     WLAN_USER_POSITION_LEN);

	/* MU-MIMO Group Id action frame is little endian. We treat
	 * the data received from firmware as if it came from the
	 * action frame, so no conversion is needed.
	 */
	ieee80211_update_mu_groups(vif, link_id,
				   (u8 *)&notif->membership_status,
				   (u8 *)&notif->user_position);
}
/* This handler is called in SYNC mode because it needs to be serialized with
 * Rx as specified in ieee80211_update_mu_groups()'s documentation.
 */
static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
					     struct iwl_rx_packet *pkt)
{
	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;

	/* update MU-MIMO group membership on every active interface */
	ieee80211_iterate_active_interfaces_atomic(mld->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mld_mu_mimo_iface_iterator,
						   notif);
}
/* Hand a beacon that the firmware stored up to mac80211, as if it had
 * been received over the air.
 */
static void
iwl_mld_handle_stored_beacon_notif(struct iwl_mld *mld,
				   struct iwl_rx_packet *pkt)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
	struct ieee80211_rx_status rx_status = {};
	struct sk_buff *skb;
	u32 size = le32_to_cpu(sb->common.byte_count);

	/* nothing to deliver for an empty beacon */
	if (size == 0)
		return;

	/* make sure the firmware actually sent all the declared bytes */
	if (pkt_len < struct_size(sb, data, size))
		return;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mld, "alloc_skb failed\n");
		return;
	}

	/* update rx_status according to the notification's metadata */
	rx_status.mactime = le64_to_cpu(sb->common.tsf);
	/* TSF as indicated by the firmware is at INA time */
	rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
	rx_status.device_timestamp = le32_to_cpu(sb->common.system_time);
	rx_status.band =
		iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band));
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel),
					       rx_status.band);

	/* copy the data */
	skb_put_data(skb, sb->data, size);
	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	/* pass it as regular rx to mac80211 */
	ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
}
/* Handle the firmware's indication that a channel switch has started */
static void
iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
	u32 link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *link_conf =
		iwl_mld_fw_id_to_link_conf(mld, link_id);
	struct ieee80211_vif *vif;

	if (WARN_ON(!link_conf))
		return;

	vif = link_conf->vif;

	IWL_DEBUG_INFO(mld,
		       "CSA Start Notification with vif type: %d, link_id: %d\n",
		       vif->type,
		       link_conf->link_id);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/* We don't support canceling a CSA as it was advertised
		 * by the AP itself
		 */
		if (!link_conf->csa_active)
			return;
		ieee80211_csa_finish(vif, link_conf->link_id);
		break;
	case NL80211_IFTYPE_STATION:
		if (!link_conf->csa_active) {
			/* Either unexpected cs notif or mac80211 chose to
			 * ignore, for example in channel switch to same channel
			 */
			struct iwl_cancel_channel_switch_cmd cmd = {
				.id = cpu_to_le32(link_id),
			};

			/* tell the firmware to abort the switch on its side */
			if (iwl_mld_send_cmd_pdu(mld,
						 WIDE_ID(MAC_CONF_GROUP,
							 CANCEL_CHANNEL_SWITCH_CMD),
						 &cmd))
				IWL_ERR(mld,
					"Failed to cancel the channel switch\n");
			return;
		}

		ieee80211_chswitch_done(vif, true, link_conf->link_id);
		break;
	default:
		WARN(1, "CSA on invalid vif type: %d", vif->type);
	}
}
/* Handle a channel switch error reported by the firmware */
static void
iwl_mld_handle_channel_switch_error_notif(struct iwl_mld *mld,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
	u32 link_id = le32_to_cpu(notif->link_id);
	u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
	struct ieee80211_bss_conf *link_conf =
		iwl_mld_fw_id_to_link_conf(mld, link_id);
	u32 disconnect_errs = CS_ERR_COUNT_ERROR |
			      CS_ERR_LONG_DELAY_AFTER_CS |
			      CS_ERR_TX_BLOCK_TIMER_EXPIRED;

	if (WARN_ON(!link_conf))
		return;

	IWL_DEBUG_INFO(mld, "FW reports CSA error: id=%u, csa_err_mask=%u\n",
		       link_id, csa_err_mask);

	/* unrecoverable errors end up in a disconnection */
	if (csa_err_mask & disconnect_errs)
		ieee80211_channel_switch_disconnect(link_conf->vif);
}
static void iwl_mld_handle_beacon_notification(struct iwl_mld *mld,
struct iwl_rx_packet *pkt)
{
struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
mld->ibss_manager = !!beacon->ibss_mgr_status;
}
/**
* DOC: Notification versioning
*
* The firmware's notifications change from time to time. In order to
* differentiate between different versions of the same notification, the
* firmware advertises the version of each notification.
* Here are listed all the notifications that are supported. Several versions
* of the same notification can be allowed at the same time:
*
* CMD_VERSION(my_multi_version_notif,
* CMD_VER_ENTRY(1, iwl_my_multi_version_notif_ver1)
* CMD_VER_ENTRY(2, iwl_my_multi_version_notif_ver2)
*
* etc...
*
* The driver will enforce that the notification coming from the firmware
* has its version listed here and it'll also enforce that the firmware sent
* at least enough bytes to cover the structure listed in the CMD_VER_ENTRY.
*/
CMD_VERSIONS(scan_complete_notif,
CMD_VER_ENTRY(1, iwl_umac_scan_complete))
CMD_VERSIONS(scan_iter_complete_notif,
CMD_VER_ENTRY(2, iwl_umac_scan_iter_complete_notif))
CMD_VERSIONS(mfuart_notif,
CMD_VER_ENTRY(2, iwl_mfuart_load_notif))
CMD_VERSIONS(update_mcc,
CMD_VER_ENTRY(1, iwl_mcc_chub_notif))
CMD_VERSIONS(session_prot_notif,
CMD_VER_ENTRY(3, iwl_session_prot_notif))
CMD_VERSIONS(missed_beacon_notif,
CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
CMD_VERSIONS(tx_resp_notif,
CMD_VER_ENTRY(8, iwl_tx_resp))
CMD_VERSIONS(compressed_ba_notif,
CMD_VER_ENTRY(5, iwl_compressed_ba_notif))
CMD_VERSIONS(tlc_notif,
CMD_VER_ENTRY(3, iwl_tlc_update_notif))
CMD_VERSIONS(mu_mimo_grp_notif,
CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
CMD_VERSIONS(channel_switch_start_notif,
CMD_VER_ENTRY(3, iwl_channel_switch_start_notif))
CMD_VERSIONS(channel_switch_error_notif,
CMD_VER_ENTRY(2, iwl_channel_switch_error_notif))
CMD_VERSIONS(ct_kill_notif,
CMD_VER_ENTRY(2, ct_kill_notif))
CMD_VERSIONS(temp_notif,
CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
CMD_VERSIONS(stored_beacon_notif,
CMD_VER_ENTRY(4, iwl_stored_beacon_notif))
CMD_VERSIONS(roc_notif,
CMD_VER_ENTRY(1, iwl_roc_notif))
CMD_VERSIONS(probe_resp_data_notif,
CMD_VER_ENTRY(1, iwl_probe_resp_data_notif))
CMD_VERSIONS(datapath_monitor_notif,
CMD_VER_ENTRY(1, iwl_datapath_monitor_notif))
CMD_VERSIONS(stats_oper_notif,
CMD_VER_ENTRY(3, iwl_system_statistics_notif_oper))
CMD_VERSIONS(stats_oper_part1_notif,
CMD_VER_ENTRY(4, iwl_system_statistics_part1_notif_oper))
CMD_VERSIONS(bt_coex_notif,
CMD_VER_ENTRY(1, iwl_bt_coex_profile_notif))
CMD_VERSIONS(beacon_notification,
CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
CMD_VERSIONS(emlsr_mode_notif,
CMD_VER_ENTRY(1, iwl_esr_mode_notif))
CMD_VERSIONS(emlsr_trans_fail_notif,
CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
CMD_VERSIONS(uapsd_misbehaving_ap_notif,
CMD_VER_ENTRY(1, iwl_uapsd_misbehaving_ap_notif))
CMD_VERSIONS(time_msmt_notif,
CMD_VER_ENTRY(1, iwl_time_msmt_notify))
CMD_VERSIONS(time_sync_confirm_notif,
CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify))
CMD_VERSIONS(omi_status_notif,
CMD_VER_ENTRY(1, iwl_omi_send_status_notif))
CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(9, iwl_tof_range_rsp_ntfy))
DEFINE_SIMPLE_CANCELLATION(session_prot, iwl_session_prot_notif, mac_link_id)
DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
DEFINE_SIMPLE_CANCELLATION(channel_switch_start,
iwl_channel_switch_start_notif, link_id)
DEFINE_SIMPLE_CANCELLATION(channel_switch_error,
iwl_channel_switch_error_notif, link_id)
DEFINE_SIMPLE_CANCELLATION(datapath_monitor, iwl_datapath_monitor_notif,
link_id)
DEFINE_SIMPLE_CANCELLATION(roc, iwl_roc_notif, activity)
DEFINE_SIMPLE_CANCELLATION(scan_complete, iwl_umac_scan_complete, uid)
DEFINE_SIMPLE_CANCELLATION(probe_resp_data, iwl_probe_resp_data_notif,
mac_id)
DEFINE_SIMPLE_CANCELLATION(uapsd_misbehaving_ap, iwl_uapsd_misbehaving_ap_notif,
mac_id)
#define iwl_mld_cancel_omi_status_notif iwl_mld_always_cancel
DEFINE_SIMPLE_CANCELLATION(ftm_resp, iwl_tof_range_rsp_ntfy, request_id)
/**
* DOC: Handlers for fw notifications
*
* Here are listed the notifications IDs (including the group ID), the handler
* of the notification and how it should be called:
*
* - RX_HANDLER_SYNC: will be called as part of the Rx path
* - RX_HANDLER_ASYNC: will be handled in a working with the wiphy_lock held
*
* This means that if the firmware sends two notifications A and B in that
* order and notification A is RX_HANDLER_ASYNC and notification is
* RX_HANDLER_SYNC, the handler of B will likely be called before the handler
* of A.
*
* This list should be in order of frequency for performance purposes.
* The handler can be one from two contexts, see &iwl_rx_handler_context
*
* A handler can declare that it relies on a specific object in which case it
* can be cancelled in case the object is deleted. In order to use this
* mechanism, a cancellation function is needed. The cancellation function must
* receive an object id (the index of that object in the firmware) and a
* notification payload. It'll return true if that specific notification should
* be cancelled upon the obliteration of the specific instance of the object.
*
* DEFINE_SIMPLE_CANCELLATION allows to easily create a cancellation function
* that wills simply return true if a given object id matches the object id in
* the firmware notification.
*/
/* Firmware notification dispatch table; see the DOC section above for
 * handler contexts, ordering and cancellation semantics.
 */
VISIBLE_IF_IWLWIFI_KUNIT
const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, TX_CMD, tx_resp_notif,
			     RX_HANDLER_SYNC)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BA_NOTIF, compressed_ba_notif,
			     RX_HANDLER_SYNC)
	RX_HANDLER_OF_SCAN(LEGACY_GROUP, SCAN_COMPLETE_UMAC,
			   scan_complete_notif)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, SCAN_ITERATION_COMPLETE_UMAC,
			     scan_iter_complete_notif,
			     RX_HANDLER_SYNC)
	RX_HANDLER_NO_VAL(LEGACY_GROUP, MATCH_FOUND_NOTIFICATION,
			  match_found_notif, RX_HANDLER_SYNC)
	RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
			     stats_oper_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
			     stats_oper_part1_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MFUART_LOAD_NOTIFICATION,
			     mfuart_notif, RX_HANDLER_SYNC)
	RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
			     temp_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_OF_LINK(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
			   session_prot_notif)
	RX_HANDLER_OF_LINK(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF,
			   missed_beacon_notif)
	RX_HANDLER_OF_STA(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, tlc_notif)
	RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
			   channel_switch_start_notif)
	RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
			   channel_switch_error_notif)
	RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif)
	RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
			     mu_mimo_grp_notif, RX_HANDLER_SYNC)
	RX_HANDLER_NO_OBJECT(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
			     stored_beacon_notif, RX_HANDLER_SYNC)
	RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
			  probe_resp_data_notif)
	RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
			     ct_kill_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_OF_LINK(DATA_PATH_GROUP, MONITOR_NOTIF,
			   datapath_monitor_notif)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MCC_CHUB_UPDATE_CMD, update_mcc,
			     RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(BT_COEX_GROUP, PROFILE_NOTIF,
			     bt_coex_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BEACON_NOTIFICATION,
			     beacon_notification, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, ESR_MODE_NOTIF,
			     emlsr_mode_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF,
			     emlsr_trans_fail_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_OF_VIF(LEGACY_GROUP, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
			  uapsd_misbehaving_ap_notif)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
			     WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION,
			     time_msmt_notif, RX_HANDLER_SYNC)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
			     WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
			     time_sync_confirm_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_OF_LINK(DATA_PATH_GROUP, OMI_SEND_STATUS_NOTIF,
			   omi_status_notif)
	RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
			      ftm_resp_notif)
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers);
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
const unsigned int iwl_mld_rx_handlers_num = ARRAY_SIZE(iwl_mld_rx_handlers);
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers_num);
#endif
/*
 * Validate a firmware notification's payload size against the expected
 * size for its advertised version.
 *
 * Returns true if the notification may be handled, false if it must be
 * dropped.
 */
static bool
iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
		       const struct iwl_rx_handler *handler)
{
	unsigned int size = iwl_rx_packet_payload_len(pkt);
	size_t notif_ver;

	/* If n_sizes == 0, it indicates that a validation function may be used
	 * or that no validation is required.
	 */
	if (!handler->n_sizes) {
		if (handler->val_fn)
			return handler->val_fn(mld, pkt);
		return true;
	}

	notif_ver = iwl_fw_lookup_notif_ver(mld->fw,
					    iwl_cmd_groupid(handler->cmd_id),
					    iwl_cmd_opcode(handler->cmd_id),
					    IWL_FW_CMD_VER_UNKNOWN);

	for (int i = 0; i < handler->n_sizes; i++) {
		if (handler->sizes[i].ver != notif_ver)
			continue;

		if (IWL_FW_CHECK(mld, size < handler->sizes[i].size,
				 "unexpected notification 0x%04x size %d, need %d\n",
				 handler->cmd_id, size, handler->sizes[i].size))
			return false;
		return true;
	}

	IWL_FW_CHECK_FAILED(mld,
			    "notif 0x%04x ver %zu missing expected size, use version %u size\n",
			    handler->cmd_id, notif_ver,
			    handler->sizes[handler->n_sizes - 1].ver);

	/* Unknown version: fall back to the newest listed entry. The
	 * notification is valid only if it is at least that big - the
	 * previous "size <" comparison was inverted, accepting truncated
	 * payloads while rejecting valid ones.
	 */
	return size >= handler->sizes[handler->n_sizes - 1].size;
}
/* A firmware notification queued for asynchronous handling */
struct iwl_async_handler_entry {
	/* entry in mld->async_handlers_list */
	struct list_head list;
	/* owns the stolen Rx page until the notification is handled */
	struct iwl_rx_cmd_buffer rxb;
	/* the handler to invoke for this notification */
	const struct iwl_rx_handler *rx_h;
};
/* Debug-log an operation (Queued/Handle/Delete/...) on an async handler */
static void
iwl_mld_log_async_handler_op(struct iwl_mld *mld, const char *op,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_DEBUG_HC(mld,
		     "%s async handler for notif %s (%.2x.%2x, seq 0x%x)\n",
		     op, iwl_get_cmd_string(mld->trans,
		     WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
		     pkt->hdr.group_id, pkt->hdr.cmd,
		     le16_to_cpu(pkt->hdr.sequence));
}
/* Dispatch a firmware notification to its registered handler. SYNC
 * handlers run right here in the Rx path; ASYNC handlers are queued to
 * run later from the async handlers work.
 */
static void iwl_mld_rx_notif(struct iwl_mld *mld,
			     struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_rx_packet *pkt)
{
	for (int i = 0; i < ARRAY_SIZE(iwl_mld_rx_handlers); i++) {
		const struct iwl_rx_handler *rx_h = &iwl_mld_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		/* drop notifications that fail size/version validation */
		if (!iwl_mld_notif_is_valid(mld, pkt, rx_h))
			return;

		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mld, pkt);
			break;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		/* Set the async handler entry */
		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;

		entry->rx_h = rx_h;

		/* Add it to the list and queue the work */
		spin_lock(&mld->async_handlers_lock);
		list_add_tail(&entry->list, &mld->async_handlers_list);
		spin_unlock(&mld->async_handlers_lock);

		wiphy_work_queue(mld->hw->wiphy,
				 &mld->async_handlers_wk);

		iwl_mld_log_async_handler_op(mld, "Queued", rxb);
		break;
	}

	/* wake anyone synchronously waiting for this notification */
	iwl_notification_wait_notify(&mld->notif_wait, pkt);
}
/* Rx entry point for the default queue: the data-path notifications are
 * dispatched directly, everything else goes through the handler table.
 */
void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
	u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mld_rx_mpdu(mld, napi, rxb, 0);
	else if (cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0);
	else if (cmd_id == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
		iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0);
	else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
					    RX_QUEUES_NOTIFICATION)))
		iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0);
	else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
		iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0);
	else
		iwl_mld_rx_notif(mld, rxb, pkt);
}
/* Rx entry point for RSS queues (queue > 0); only the notifications that
 * are handled per-queue are expected here.
 */
void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		    struct iwl_rx_cmd_buffer *rxb, unsigned int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
	u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	/* drop notifications for out-of-range queues */
	if (unlikely(queue >= mld->trans->num_rx_queues))
		return;

	if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mld_rx_mpdu(mld, napi, rxb, queue);
	else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
					    RX_QUEUES_NOTIFICATION)))
		iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue);
	else if (unlikely(cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
		iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue);
}
/* Remove queued async notifications matching any of the given command IDs */
void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mld->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
		int i;

		for (i = 0; i < n_cmds; i++)
			if (entry->rx_h->cmd_id == cmds[i])
				break;

		/* none of the commands matched this queued notification */
		if (i == n_cmds)
			continue;

		iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mld->async_handlers_lock);
}
void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
struct iwl_mld *mld =
container_of(wk, struct iwl_mld, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
LIST_HEAD(local_list);
/* Sync with Rx path with a lock. Remove all the entries from this
* list, add them to a local one (lock free), and then handle them.
*/
spin_lock_bh(&mld->async_handlers_lock);
list_splice_init(&mld->async_handlers_list, &local_list);
spin_unlock_bh(&mld->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb);
entry->rx_h->fn(mld, rxb_addr(&entry->rxb));
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
kfree(entry);
}
}
/* Drop all queued async notifications without running their handlers */
void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mld->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
		iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mld->async_handlers_lock);
}
/* Cancel queued async notifications that belong to a specific object
 * instance (link/STA/VIF/...) that is being removed, so no handler ever
 * runs against the removed object.
 */
void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
					    enum iwl_mld_object_type obj_type,
					    u32 obj_id)
{
	struct iwl_async_handler_entry *entry, *tmp;
	LIST_HEAD(cancel_list);

	lockdep_assert_wiphy(mld->wiphy);

	if (WARN_ON(obj_type == IWL_MLD_OBJECT_TYPE_NONE))
		return;

	/* Sync with RX path and remove matching entries from the async list */
	spin_lock_bh(&mld->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
		const struct iwl_rx_handler *rx_h = entry->rx_h;

		if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel))
			continue;

		if (rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id)) {
			iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb);
			list_del(&entry->list);
			list_add_tail(&entry->list, &cancel_list);
		}
	}

	spin_unlock_bh(&mld->async_handlers_lock);

	/* Free the matching entries outside of the spinlock */
	list_for_each_entry_safe(entry, tmp, &cancel_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
}

View File

@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_notif_h__
#define __iwl_mld_notif_h__
struct iwl_mld;
void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk);
void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld);
enum iwl_mld_object_type {
IWL_MLD_OBJECT_TYPE_NONE,
IWL_MLD_OBJECT_TYPE_LINK,
IWL_MLD_OBJECT_TYPE_STA,
IWL_MLD_OBJECT_TYPE_VIF,
IWL_MLD_OBJECT_TYPE_ROC,
IWL_MLD_OBJECT_TYPE_SCAN,
IWL_MLD_OBJECT_TYPE_FTM_REQ,
};
void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
enum iwl_mld_object_type obj_type,
u32 obj_id);
void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds);
#endif /* __iwl_mld_notif_h__ */

View File

@ -0,0 +1,116 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include <net/mac80211.h>
#include "phy.h"
#include "hcmd.h"
#include "fw/api/phy-ctxt.h"
/* Allocate the first free firmware PHY context id.
 *
 * Marks the id as used in mld->used_phy_ids and returns it, or -ENOSPC
 * when all NUM_PHY_CTX ids are already taken.
 */
int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld)
{
	unsigned long in_use = mld->used_phy_ids;
	int fw_id;

	for_each_clear_bit(fw_id, &in_use, NUM_PHY_CTX) {
		/* claim the first free id and hand it out */
		mld->used_phy_ids |= BIT(fw_id);
		return fw_id;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_allocate_fw_phy_id);
/* Choose the chandef to use for a channel context.
 *
 * Use the full chandef for a 6 GHz PSC channel, or for a 6 GHz channel
 * that is at least 80 MHz wide; otherwise use the minimal chandef.
 */
struct cfg80211_chan_def *
iwl_mld_get_chandef_from_chanctx(struct ieee80211_chanctx_conf *ctx)
{
	if (cfg80211_channel_is_psc(ctx->def.chan))
		return &ctx->def;

	if (ctx->def.chan->band == NL80211_BAND_6GHZ &&
	    ctx->def.width >= NL80211_CHAN_WIDTH_80)
		return &ctx->def;

	return &ctx->min_def;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_get_chandef_from_chanctx);
/* Map an nl80211 channel width onto the firmware channel-mode value.
 * Unknown widths trigger a WARN and fall back to 20 MHz.
 */
static u8
iwl_mld_nl80211_width_to_fw(enum nl80211_chan_width width)
{
	switch (width) {
	case NL80211_CHAN_WIDTH_320:
		return IWL_PHY_CHANNEL_MODE320;
	case NL80211_CHAN_WIDTH_160:
		return IWL_PHY_CHANNEL_MODE160;
	case NL80211_CHAN_WIDTH_80:
		return IWL_PHY_CHANNEL_MODE80;
	case NL80211_CHAN_WIDTH_40:
		return IWL_PHY_CHANNEL_MODE40;
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
		return IWL_PHY_CHANNEL_MODE20;
	default:
		break;
	}

	WARN(1, "Invalid channel width=%u", width);
	return IWL_PHY_CHANNEL_MODE20;
}
/* Maps the driver specific control channel position (relative to the center
 * freq) definitions to the fw values
 */
u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef)
{
	/* offset of the control channel from the center frequency, in MHz;
	 * for a valid chandef this is one of +-10, +-30, +-50, ... +-150
	 */
	int offs = chandef->chan->center_freq - chandef->center_freq1;
	int abs_offs = abs(offs);
	u8 ret;

	if (offs == 0) {
		/* The FW is expected to check the control channel position only
		 * when in HT/VHT and the channel width is not 20MHz. Return
		 * this value as the default one.
		 */
		return 0;
	}

	/* this results in a value 0-7, i.e. fitting into 0b0111 */
	ret = (abs_offs - 10) / 20;

	/* But we need the value to be in 0b1011 because 0b0100 is
	 * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in
	 * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000)
	 */
	ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) |
	      ((ret & BIT(2)) << 1);

	/* and add the above bit */
	ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE;

	return ret;
}
/* Send a PHY_CONTEXT_CMD to the firmware for the given channel context.
 *
 * @action: one of the firmware add/modify/remove actions, sent verbatim.
 * The channel info is built from the chandef cached in struct iwl_mld_phy.
 * Returns the command status (0 on success).
 */
int iwl_mld_phy_fw_action(struct iwl_mld *mld,
			  struct ieee80211_chanctx_conf *ctx, u32 action)
{
	struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
	struct cfg80211_chan_def *chandef = &phy->chandef;
	struct iwl_phy_context_cmd cmd = {
		.id_and_color = cpu_to_le32(phy->fw_id),
		.action = cpu_to_le32(action),
		.puncture_mask = cpu_to_le16(chandef->punctured),
		/* Channel info */
		.ci.channel = cpu_to_le32(chandef->chan->hw_value),
		.ci.band = iwl_mld_nl80211_band_to_fw(chandef->chan->band),
		.ci.width = iwl_mld_nl80211_width_to_fw(chandef->width),
		.ci.ctrl_pos = iwl_mld_get_fw_ctrl_pos(chandef),
	};
	int ret;

	/* ctx->ap is only filled when an AP operates on this context;
	 * pass its (possibly narrower) bandwidth/control position too
	 */
	if (ctx->ap.chan) {
		cmd.sbb_bandwidth =
			iwl_mld_nl80211_width_to_fw(ctx->ap.width);
		cmd.sbb_ctrl_channel_loc = iwl_mld_get_fw_ctrl_pos(&ctx->ap);
	}

	ret = iwl_mld_send_cmd_pdu(mld, PHY_CONTEXT_CMD, &cmd);
	if (ret)
		IWL_ERR(mld, "Failed to send PHY_CONTEXT_CMD ret = %d\n", ret);

	return ret;
}

View File

@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_phy_h__
#define __iwl_mld_phy_h__

#include "mld.h"

/**
 * struct iwl_mld_phy - PHY configuration parameters
 *
 * @fw_id: fw id of the phy.
 * @chandef: the last chandef that mac80211 configured the driver
 *	with. Used to detect a no-op when the chanctx changes.
 * @channel_load_by_us: channel load on this channel caused by
 *	the NIC itself, as indicated by firmware
 */
struct iwl_mld_phy {
	/* Add here fields that need clean up on hw restart */
	struct_group(zeroed_on_hw_restart,
		u8 fw_id;
		struct cfg80211_chan_def chandef;
	);
	/* And here fields that survive a hw restart */
	u32 channel_load_by_us;
};

/* Driver-private data embedded in mac80211's channel context */
static inline struct iwl_mld_phy *
iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *channel)
{
	return (void *)channel->drv_priv;
}

/* Cleanup function for struct iwl_mld_phy, will be called in restart */
static inline void
iwl_mld_cleanup_phy(struct iwl_mld *mld, struct iwl_mld_phy *phy)
{
	/* zeroes only the zeroed_on_hw_restart group */
	CLEANUP_STRUCT(phy);
}

int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld);
int iwl_mld_phy_fw_action(struct iwl_mld *mld,
			  struct ieee80211_chanctx_conf *ctx, u32 action);
struct cfg80211_chan_def *
iwl_mld_get_chandef_from_chanctx(struct ieee80211_chanctx_conf *ctx);
u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef);

#endif /* __iwl_mld_phy_h__ */

View File

@ -0,0 +1,395 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include <net/mac80211.h>
#include "mld.h"
#include "hcmd.h"
#include "power.h"
#include "iface.h"
#include "link.h"
#include "constants.h"
/* Interface iterator: AND the PS state of every station vif into *data,
 * so the result is false if any station interface has PS disabled.
 */
static void iwl_mld_vif_ps_iterator(void *data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	bool *ps_enable = data;

	if (vif->type == NL80211_IFTYPE_STATION)
		*ps_enable &= vif->cfg.ps;
}
/* Send the device-wide power command (POWER_TABLE_CMD).
 *
 * Power save is enabled unless the module parameter forces CAM, or any
 * station interface has power save turned off. @d3 additionally sets the
 * no-sleep-until-D3 flag for the suspend flow.
 *
 * Fix vs. original: the debug print passed the raw __le16 cmd.flags to a
 * printf-style macro; convert with le16_to_cpu() so the logged value is in
 * CPU byte order (and sparse doesn't complain about a __le16 in varargs).
 */
int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3)
{
	struct iwl_device_power_cmd cmd = {};
	bool enable_ps = false;

	if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) {
		enable_ps = true;

		/* Disable power save if any STA interface has
		 * power save turned off
		 */
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_vif_ps_iterator,
							&enable_ps);
	}

	if (enable_ps)
		cmd.flags |=
			cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);

	if (d3)
		cmd.flags |=
			cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK);

	IWL_DEBUG_POWER(mld,
			"Sending device power command with flags = 0x%X\n",
			le16_to_cpu(cmd.flags));

	return iwl_mld_send_cmd_pdu(mld, POWER_TABLE_CMD, &cmd);
}
/* Enable firmware beacon filtering (and beacon abort) for a station link.
 *
 * No-op (returns 0) for non-station interfaces, or when beacon filtering
 * is disabled through debugfs. When a CQM RSSI threshold is configured,
 * program the firmware thresholds from it so the firmware can generate
 * RSSI events. @d3 uses the longer D3 beacon-abort escape timer.
 */
int iwl_mld_enable_beacon_filter(struct iwl_mld *mld,
				 const struct ieee80211_bss_conf *link_conf,
				 bool d3)
{
	struct iwl_beacon_filter_cmd cmd = {
		IWL_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = cpu_to_le32(1),
		.ba_enable_beacon_abort = cpu_to_le32(1),
	};

	if (ieee80211_vif_type_p2p(link_conf->vif) != NL80211_IFTYPE_STATION)
		return 0;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (iwl_mld_vif_from_mac80211(link_conf->vif)->disable_bf)
		return 0;
#endif

	if (link_conf->cqm_rssi_thold) {
		cmd.bf_energy_delta =
			cpu_to_le32(link_conf->cqm_rssi_hyst);
		/* fw uses an absolute value for this */
		cmd.bf_roaming_state =
			cpu_to_le32(-link_conf->cqm_rssi_thold);
	}

	if (d3)
		cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);

	return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD,
				    &cmd);
}
/* Disable firmware beacon filtering by sending an all-zero filtering
 * command. No-op (returns 0) for non-station interfaces.
 */
int iwl_mld_disable_beacon_filter(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_beacon_filter_cmd cmd = {};

	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
		return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD,
					    &cmd);

	return 0;
}
/* Return true when the link's channel context sits on a radar channel */
static bool iwl_mld_power_is_radar(struct iwl_mld *mld,
				   const struct ieee80211_bss_conf *link_conf)
{
	const struct ieee80211_chanctx_conf *chanctx_conf =
		wiphy_dereference(mld->wiphy, link_conf->chanctx_conf);

	if (WARN_ON(!chanctx_conf))
		return false;

	return !!(chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR);
}
/* Fill the U-APSD related part of the MAC power command.
 *
 * With @ps_poll, only advanced PM is set (no uAPSD ACs) to use ps-poll.
 * Otherwise, for each AC with uAPSD enabled, set the AC flag plus advanced
 * PM; pick the QNDP TID from the highest-priority AC without admission
 * control; and if all four ACs are uAPSD, also enable snooze mode.
 */
static void iwl_mld_power_configure_uapsd(struct iwl_mld *mld,
					  struct iwl_mld_link *link,
					  struct iwl_mac_power_cmd *cmd,
					  bool ps_poll)
{
	bool tid_found = false;

	cmd->rx_data_timeout_uapsd =
		cpu_to_le32(IWL_MLD_UAPSD_RX_DATA_TIMEOUT);
	cmd->tx_data_timeout_uapsd =
		cpu_to_le32(IWL_MLD_UAPSD_TX_DATA_TIMEOUT);

	/* set advanced pm flag with no uapsd ACs to enable ps-poll */
	if (ps_poll) {
		cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
		return;
	}

	/* ACs are ordered VO (0) .. BK (3), i.e. by descending priority */
	for (enum ieee80211_ac_numbers ac = IEEE80211_AC_VO;
	     ac <= IEEE80211_AC_BK;
	     ac++) {
		if (!link->queue_params[ac].uapsd)
			continue;

		cmd->flags |=
			cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK |
				    POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);

		cmd->uapsd_ac_flags |= BIT(ac);

		/* QNDP TID - the highest TID with no admission control */
		if (!tid_found && !link->queue_params[ac].acm) {
			tid_found = true;
			switch (ac) {
			case IEEE80211_AC_VO:
				cmd->qndp_tid = 6;
				break;
			case IEEE80211_AC_VI:
				cmd->qndp_tid = 5;
				break;
			case IEEE80211_AC_BE:
				cmd->qndp_tid = 0;
				break;
			case IEEE80211_AC_BK:
				cmd->qndp_tid = 1;
				break;
			}
		}
	}

	/* snooze is only used when every AC is uAPSD-enabled */
	if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
				    BIT(IEEE80211_AC_VI) |
				    BIT(IEEE80211_AC_BE) |
				    BIT(IEEE80211_AC_BK))) {
		cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
		cmd->snooze_interval = cpu_to_le16(IWL_MLD_PS_SNOOZE_INTERVAL);
		cmd->snooze_window = cpu_to_le16(IWL_MLD_PS_SNOOZE_WINDOW);
	}

	cmd->uapsd_max_sp = mld->hw->uapsd_max_sp_len;
}
/* Configure DTIM skipping for D3.
 *
 * Skipping is only allowed for DTIM periods shorter than 10 beacons and on
 * non-radar channels; the skip count is chosen so that at most ~900 TU pass
 * between DTIMs the device actually wakes for.
 */
static void
iwl_mld_power_config_skip_dtim(struct iwl_mld *mld,
			       const struct ieee80211_bss_conf *link_conf,
			       struct iwl_mac_power_cmd *cmd)
{
	unsigned int dtimper_tu;
	unsigned int dtimper;
	unsigned int skip;

	/* treat an unset DTIM period as 1 beacon interval */
	dtimper = link_conf->dtim_period ?: 1;
	dtimper_tu = dtimper * link_conf->beacon_int;

	if (dtimper >= 10 || iwl_mld_power_is_radar(mld, link_conf))
		return;

	if (WARN_ON(!dtimper_tu))
		return;

	/* configure skip over dtim up to 900 TU DTIM interval */
	skip = max_t(int, 1, 900 / dtimper_tu);

	cmd->skip_dtim_periods = skip;
	cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
/* Minimum keep-alive period in seconds, also lower-bounds 3*DTIM */
#define POWER_KEEP_ALIVE_PERIOD_SEC 25

/* Build the per-MAC power command.
 *
 * For an MLD connection the firmware takes a single configuration for the
 * whole vif, so the lowest active link's config is used. Fills keep-alive,
 * power-management flags, LPRX, RX/TX data timeouts (D3 / low-latency-p2p /
 * default variants) and the uAPSD configuration.
 */
static void iwl_mld_power_build_cmd(struct iwl_mld *mld,
				    struct ieee80211_vif *vif,
				    struct iwl_mac_power_cmd *cmd,
				    bool d3)
{
	int dtimper, bi;
	int keep_alive;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct ieee80211_bss_conf *link_conf = &vif->bss_conf;
	struct iwl_mld_link *link = &mld_vif->deflink;
	bool ps_poll = false;

	cmd->id_and_color = cpu_to_le32(mld_vif->fw_id);

	if (ieee80211_vif_is_mld(vif)) {
		int link_id;

		if (WARN_ON(!vif->active_links))
			return;

		/* The firmware consumes one single configuration for the vif
		 * and can't differentiate between links, just pick the lowest
		 * link_id's configuration and use that.
		 */
		link_id = __ffs(vif->active_links);
		link_conf = link_conf_dereference_check(vif, link_id);
		link = iwl_mld_link_dereference_check(mld_vif, link_id);

		if (WARN_ON(!link_conf || !link))
			return;
	}
	dtimper = link_conf->dtim_period;
	bi = link_conf->beacon_int;

	/* Regardless of power management state the driver must set
	 * keep alive period. FW will use it for sending keep alive NDPs
	 * immediately after association. Check that keep alive period
	 * is at least 3 * DTIM
	 */
	keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
				  USEC_PER_SEC);
	keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
	cmd->keep_alive_seconds = cpu_to_le16(keep_alive);

	if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);

	/* no power management while mac80211 PS is off or TDLS is active */
	if (!vif->cfg.ps || iwl_mld_tdls_sta_count(mld) > 0)
		return;

	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);

	/* firmware supports LPRX for beacons at rate 1 Mbps or 6 Mbps only */
	if (link_conf->beacon_rate &&
	    (link_conf->beacon_rate->bitrate == 10 ||
	     link_conf->beacon_rate->bitrate == 60)) {
		cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
		cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
	}

	if (d3) {
		iwl_mld_power_config_skip_dtim(mld, link_conf, cmd);
		cmd->rx_data_timeout =
			cpu_to_le32(IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT);
		cmd->tx_data_timeout =
			cpu_to_le32(IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT);
	} else if (iwl_mld_vif_low_latency(mld_vif) && vif->p2p) {
		cmd->tx_data_timeout =
			cpu_to_le32(IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT);
		cmd->rx_data_timeout =
			cpu_to_le32(IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT);
	} else {
		cmd->rx_data_timeout =
			cpu_to_le32(IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT);
		cmd->tx_data_timeout =
			cpu_to_le32(IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT);
	}

	/* uAPSD is only enabled for specific certifications. For those cases,
	 * mac80211 will allow uAPSD. Always call iwl_mld_power_configure_uapsd
	 * which will look at what mac80211 is saying.
	 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
	ps_poll = mld_vif->use_ps_poll;
#endif
	iwl_mld_power_configure_uapsd(mld, link, cmd, ps_poll);
}
/* Build and send the per-MAC power command (MAC_PM_POWER_TABLE) */
int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     bool d3)
{
	struct iwl_mac_power_cmd mac_pm_cmd = {};

	iwl_mld_power_build_cmd(mld, vif, &mac_pm_cmd, d3);

	return iwl_mld_send_cmd_pdu(mld, MAC_PM_POWER_TABLE, &mac_pm_cmd);
}
/* Fill the TPE (transmit power envelope) limits of a station command.
 *
 * NOTE: index 0 below is IEEE80211_TPE_CAT_6GHZ_DEFAULT; the
 * IEEE80211_TPE_CAT_6GHZ_SUBORDINATE category is deliberately ignored.
 * Each entry takes the minimum of the local AP limit and the regulatory
 * client limit; mac80211 fills invalid entries with the max value.
 */
static void
iwl_mld_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
			 const struct ieee80211_bss_conf *link)
{
	u8 idx;

	/* the command arrays must mirror the mac80211 TPE arrays 1:1 */
	BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) !=
		     ARRAY_SIZE(link->tpe.psd_local[0].power));
	BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) !=
		     ARRAY_SIZE(link->tpe.max_local[0].power));

	for (idx = 0; idx < ARRAY_SIZE(cmd->psd_pwr); idx++)
		cmd->psd_pwr[idx] =
			min(link->tpe.psd_local[0].power[idx],
			    link->tpe.psd_reg_client[0].power[idx]);

	for (idx = 0; idx < ARRAY_SIZE(cmd->eirp_pwr); idx++)
		cmd->eirp_pwr[idx] =
			min(link->tpe.max_local[0].power[idx],
			    link->tpe.max_reg_client[0].power[idx]);
}
/* Send AP_TX_POWER_CONSTRAINTS_CMD for a 6 GHz link.
 *
 * Only applies to active links on the 6 GHz band. For AP interfaces the
 * VLP AP type is used; for clients the AP power type from the BSS is used
 * (skipped while it is still unset) together with the TPE limits.
 */
void
iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *link)
{
	struct iwl_txpower_constraints_cmd cmd = {};
	struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	if (!mld_link->active)
		return;

	if (link->chanreq.oper.chan->band != NL80211_BAND_6GHZ)
		return;

	cmd.link_id = cpu_to_le16(mld_link->fw_id);
	/* default every entry to the maximum power before refining below */
	memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
	memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));

	if (vif->type == NL80211_IFTYPE_AP) {
		cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
	} else if (link->power_type == IEEE80211_REG_UNSET_AP) {
		return;
	} else {
		/* power_type is offset by one relative to the fw enum */
		cmd.ap_type = cpu_to_le16(link->power_type - 1);
		iwl_mld_tpe_sta_cmd_data(&cmd, link);
	}

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(PHY_OPS_GROUP,
					   AP_TX_POWER_CONSTRAINTS_CMD),
				   &cmd);
	if (ret)
		IWL_ERR(mld,
			"failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n",
			ret);
}
/* Set the TX power limit of a link via REDUCE_TX_POWER_CMD.
 *
 * @tx_power: limit in dBm, or IWL_DEFAULT_MAX_TX_POWER for no restriction.
 * The firmware takes the restriction in 1/8 dBm units. The command length
 * depends on the REDUCE_TX_POWER_CMD version the firmware advertises
 * (v9/v10 share the common header layout used here).
 */
int iwl_mld_set_tx_power(struct iwl_mld *mld,
			 struct ieee80211_bss_conf *link_conf,
			 s16 tx_power)
{
	u32 cmd_id = REDUCE_TX_POWER_CMD;
	struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
	u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ?
		IWL_DEV_MAX_TX_POWER : 8 * tx_power;
	struct iwl_dev_tx_power_cmd cmd = {
		/* Those fields sit on the same place for v9 and v10 */
		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
		.common.pwr_restriction = cpu_to_le16(u_tx_power),
	};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
					   IWL_FW_CMD_VER_UNKNOWN);
	int len = sizeof(cmd.common);

	if (WARN_ON(!mld_link))
		return -ENODEV;

	cmd.common.link_id = cpu_to_le32(mld_link->fw_id);

	if (cmd_ver == 10)
		len += sizeof(cmd.v10);
	else if (cmd_ver == 9)
		len += sizeof(cmd.v9);

	return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
}

View File

@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_power_h__
#define __iwl_mld_power_h__

#include <net/mac80211.h>

#include "mld.h"

/* Device-wide power command; @d3 is set on the suspend path */
int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3);

/* Beacon filtering control (station interfaces only) */
int iwl_mld_enable_beacon_filter(struct iwl_mld *mld,
				 const struct ieee80211_bss_conf *link_conf,
				 bool d3);
int iwl_mld_disable_beacon_filter(struct iwl_mld *mld,
				  struct ieee80211_vif *vif);

/* Per-MAC power command */
int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     bool d3);

/* 6 GHz TX power envelope constraints */
void
iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *link);

/* Per-link TX power limit (dBm) */
int iwl_mld_set_tx_power(struct iwl_mld *mld,
			 struct ieee80211_bss_conf *link_conf,
			 s16 tx_power);

#endif /* __iwl_mld_power_h__ */

View File

@ -0,0 +1,321 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2025 Intel Corporation
*/
#include "mld.h"
#include "iwl-debug.h"
#include "hcmd.h"
#include "ptp.h"
#include <linux/timekeeping.h>
/* The scaled_ppm parameter is ppm (parts per million) with a 16-bit fractional
 * part, which means that a value of 1 in one of those fields actually means
 * 2^-16 ppm, and 2^16=65536 is 1 ppm.
 */
#define PTP_SCALE_FACTOR	65536000000ULL

/* GP2 is a 32-bit microsecond counter, so it wraps every 2^32 usec */
#define IWL_PTP_GP2_WRAP	0x100000000ULL
/* poll period (jiffies) of the wraparound-detection worker: one hour */
#define IWL_PTP_WRAP_TIME	(3600 * HZ)
/* a backwards jump smaller than this (usec) is treated as a stale read,
 * not a wraparound
 */
#define IWL_PTP_WRAP_THRESHOLD_USEC	(5000)
/* Read the GP2 (device time, usec) register into *gp2.
 * Returns -EINVAL when the read yields 0x5a5a5a5a, which indicates the
 * register could not be read.
 */
static int iwl_mld_get_systime(struct iwl_mld *mld, u32 *gp2)
{
	*gp2 = iwl_read_prph(mld->trans, mld->trans->cfg->gp2_reg_addr);

	return *gp2 == 0x5a5a5a5a ? -EINVAL : 0;
}
/* Account for a fresh GP2 reading: detect 32-bit wraparounds.
 *
 * A reading lower than the last one by less than the threshold is assumed
 * to be an old (stale) read and ignored; a bigger backwards jump counts as
 * a wraparound. Also re-arms the periodic worker that guarantees GP2 is
 * sampled at least once per wrap period.
 */
static void iwl_mld_ptp_update_new_read(struct iwl_mld *mld, u32 gp2)
{
	IWL_DEBUG_PTP(mld, "PTP: last_gp2=%u, new gp2 read=%u\n",
		      mld->ptp_data.last_gp2, gp2);

	/* If the difference is above the threshold, assume it's a wraparound.
	 * Otherwise assume it's an old read and ignore it.
	 */
	if (gp2 < mld->ptp_data.last_gp2) {
		if (mld->ptp_data.last_gp2 - gp2 <
		    IWL_PTP_WRAP_THRESHOLD_USEC) {
			IWL_DEBUG_PTP(mld,
				      "PTP: ignore old read (gp2=%u, last_gp2=%u)\n",
				      gp2, mld->ptp_data.last_gp2);
			return;
		}

		mld->ptp_data.wrap_counter++;
		IWL_DEBUG_PTP(mld,
			      "PTP: wraparound detected (new counter=%u)\n",
			      mld->ptp_data.wrap_counter);
	}

	mld->ptp_data.last_gp2 = gp2;
	schedule_delayed_work(&mld->ptp_data.dwork, IWL_PTP_WRAP_TIME);
}
/* Convert a GP2-based timestamp (ns) to adjusted PTP time (ns).
 *
 * Applies the accumulated wraparound offset, then scales the distance from
 * the last frequency-scale update point by the current scaled frequency,
 * and finally adds the base adjusted time and the adjtime() delta.
 * Caller must hold ptp_data.lock.
 */
u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns)
{
	struct ptp_data *data = &mld->ptp_data;
	u64 scale_time_gp2_ns = mld->ptp_data.scale_update_gp2 * NSEC_PER_USEC;
	u64 res;
	u64 diff;
	s64 scaled_diff;

	lockdep_assert_held(&data->lock);

	iwl_mld_ptp_update_new_read(mld,
				    div64_u64(base_time_ns, NSEC_PER_USEC));

	base_time_ns = base_time_ns +
		(data->wrap_counter * IWL_PTP_GP2_WRAP * NSEC_PER_USEC);

	/* It is possible that a GP2 timestamp was received from fw before the
	 * last scale update.
	 */
	if (base_time_ns < scale_time_gp2_ns) {
		diff = scale_time_gp2_ns - base_time_ns;
		scaled_diff = -mul_u64_u64_div_u64(diff,
						   data->scaled_freq,
						   PTP_SCALE_FACTOR);
	} else {
		diff = base_time_ns - scale_time_gp2_ns;
		scaled_diff = mul_u64_u64_div_u64(diff,
						  data->scaled_freq,
						  PTP_SCALE_FACTOR);
	}

	IWL_DEBUG_PTP(mld, "base_time=%llu diff ns=%llu scaled_diff_ns=%lld\n",
		      (unsigned long long)base_time_ns,
		      (unsigned long long)diff, (long long)scaled_diff);

	res = data->scale_update_adj_time_ns + data->delta + scaled_diff;

	IWL_DEBUG_PTP(mld, "scale_update_ns=%llu delta=%lld adj=%llu\n",
		      (unsigned long long)data->scale_update_adj_time_ns,
		      (long long)data->delta, (unsigned long long)res);
	return res;
}
/* ptp_clock_info::gettime64 - read GP2 and report the adjusted PTP time.
 * Returns -EIO when the GP2 register cannot be read.
 */
static int iwl_mld_ptp_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
					   ptp_data.ptp_clock_info);
	struct ptp_data *data = &mld->ptp_data;
	u32 gp2;
	u64 ns;

	if (iwl_mld_get_systime(mld, &gp2)) {
		IWL_DEBUG_PTP(mld, "PTP: gettime: failed to read systime\n");
		return -EIO;
	}

	spin_lock_bh(&data->lock);
	ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
	spin_unlock_bh(&data->lock);

	*ts = ns_to_timespec64(ns);
	return 0;
}
/* ptp_clock_info::adjtime - accumulate a signed phase offset (ns) */
static int iwl_mld_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
					   ptp_data.ptp_clock_info);
	struct ptp_data *data = &mld->ptp_data;

	spin_lock_bh(&data->lock);
	data->delta += delta;
	IWL_DEBUG_PTP(mld, "delta=%lld, new delta=%lld\n", (long long)delta,
		      (long long)data->delta);
	spin_unlock_bh(&data->lock);
	return 0;
}
static int iwl_mld_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
ptp_data.ptp_clock_info);
struct ptp_data *data = &mld->ptp_data;
u32 gp2;
/* Must call iwl_mld_ptp_get_adj_time() before updating
* data->scale_update_gp2 or data->scaled_freq since
* scale_update_adj_time_ns should reflect the previous scaled_freq.
*/
if (iwl_mld_get_systime(mld, &gp2)) {
IWL_DEBUG_PTP(mld, "adjfine: failed to read systime\n");
return -EBUSY;
}
spin_lock_bh(&data->lock);
data->scale_update_adj_time_ns =
iwl_mld_ptp_get_adj_time(mld, gp2 * NSEC_PER_USEC);
data->scale_update_gp2 = gp2;
/* scale_update_adj_time_ns now relects the configured delta, the
* wrap_counter and the previous scaled frequency. Thus delta and
* wrap_counter should be reset, and the scale frequency is updated
* to the new frequency.
*/
data->delta = 0;
data->wrap_counter = 0;
data->scaled_freq = PTP_SCALE_FACTOR + scaled_ppm;
IWL_DEBUG_PTP(mld, "adjfine: scaled_ppm=%ld new=%llu\n",
scaled_ppm, (unsigned long long)data->scaled_freq);
spin_unlock_bh(&data->lock);
return 0;
}
/* Periodic worker: sample GP2 so that wraparounds are never missed
 * (re-armed by iwl_mld_ptp_update_new_read()).
 */
static void iwl_mld_ptp_work(struct work_struct *wk)
{
	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
					   ptp_data.dwork.work);
	struct ptp_data *data = &mld->ptp_data;
	u32 gp2;

	spin_lock_bh(&data->lock);
	if (iwl_mld_get_systime(mld, &gp2))
		IWL_DEBUG_PTP(mld, "PTP work: failed to read GP2\n");
	else
		iwl_mld_ptp_update_new_read(mld, gp2);
	spin_unlock_bh(&data->lock);
}
/* Ask the firmware for a synchronized (GP2, platform-time) pair.
 *
 * Sends WNM_PLATFORM_PTM_REQUEST_CMD and parses the response; the GP2 value
 * comes back in 10 ns units and is converted to usec here. Returns 0 on
 * success, -EIO on a malformed response, or the command-send error.
 */
static int
iwl_mld_get_crosstimestamp_fw(struct iwl_mld *mld, u32 *gp2, u64 *sys_time)
{
	struct iwl_synced_time_cmd synced_time_cmd = {
		.operation = cpu_to_le32(IWL_SYNCED_TIME_OPERATION_READ_BOTH)
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, WNM_PLATFORM_PTM_REQUEST_CMD),
		.flags = CMD_WANT_SKB,
		.data[0] = &synced_time_cmd,
		.len[0] = sizeof(synced_time_cmd),
	};
	struct iwl_synced_time_rsp *resp;
	struct iwl_rx_packet *pkt;
	int ret;
	u64 gp2_10ns;

	/* command sending requires holding the wiphy lock */
	wiphy_lock(mld->wiphy);
	ret = iwl_mld_send_cmd(mld, &cmd);
	wiphy_unlock(mld->wiphy);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;

	if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp)) {
		IWL_DEBUG_PTP(mld, "PTP: Invalid PTM command response\n");
		iwl_free_resp(&cmd);
		return -EIO;
	}

	resp = (void *)pkt->data;

	gp2_10ns = (u64)le32_to_cpu(resp->gp2_timestamp_hi) << 32 |
		le32_to_cpu(resp->gp2_timestamp_lo);
	/* firmware reports GP2 in 10 ns units; convert to usec */
	*gp2 = div_u64(gp2_10ns, 100);

	*sys_time = (u64)le32_to_cpu(resp->platform_timestamp_hi) << 32 |
		le32_to_cpu(resp->platform_timestamp_lo);

	iwl_free_resp(&cmd);
	return ret;
}
/* ptp_clock_info::getcrosststamp - cross-timestamp device vs. system time.
 *
 * Reads a synchronized (GP2, platform) time pair from the firmware and
 * reports the adjusted device time together with the platform wall time.
 */
static int
iwl_mld_phc_get_crosstimestamp(struct ptp_clock_info *ptp,
			       struct system_device_crosststamp *xtstamp)
{
	struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
					   ptp_data.ptp_clock_info);
	struct ptp_data *data = &mld->ptp_data;
	int ret = 0;
	/* Raw value read from GP2 register in usec */
	u32 gp2;
	/* GP2 value in ns*/
	s64 gp2_ns;
	/* System (wall) time */
	ktime_t sys_time;

	memset(xtstamp, 0, sizeof(struct system_device_crosststamp));

	/* NOTE(review): sys_time is ktime_t (s64) but the helper takes a
	 * u64 * — confirm this doesn't trip -Wincompatible-pointer-types.
	 */
	ret = iwl_mld_get_crosstimestamp_fw(mld, &gp2, &sys_time);
	if (ret) {
		IWL_DEBUG_PTP(mld,
			      "PTP: fw get_crosstimestamp failed (ret=%d)\n",
			      ret);
		return ret;
	}

	spin_lock_bh(&data->lock);
	gp2_ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
	spin_unlock_bh(&data->lock);

	IWL_DEBUG_PTP(mld,
		      "Got Sync Time: GP2:%u, last_GP2: %u, GP2_ns: %lld, sys_time: %lld\n",
		      gp2, mld->ptp_data.last_gp2, gp2_ns, (s64)sys_time);

	/* System monotonic raw time is not used */
	xtstamp->device = ns_to_ktime(gp2_ns);
	xtstamp->sys_realtime = sys_time;

	return ret;
}
/* Register the PTP hardware clock (PHC) for this device.
 *
 * Registration failure is not fatal: ptp_clock is left NULL and an error
 * is logged (ptp_clock_register() may also return NULL when PHC support
 * is disabled in the kernel).
 */
void iwl_mld_ptp_init(struct iwl_mld *mld)
{
	if (WARN_ON(mld->ptp_data.ptp_clock))
		return;

	spin_lock_init(&mld->ptp_data.lock);
	INIT_DELAYED_WORK(&mld->ptp_data.dwork, iwl_mld_ptp_work);

	mld->ptp_data.ptp_clock_info.owner = THIS_MODULE;
	mld->ptp_data.ptp_clock_info.gettime64 = iwl_mld_ptp_gettime;
	mld->ptp_data.ptp_clock_info.max_adj = 0x7fffffff;
	mld->ptp_data.ptp_clock_info.adjtime = iwl_mld_ptp_adjtime;
	mld->ptp_data.ptp_clock_info.adjfine = iwl_mld_ptp_adjfine;
	/* start at a 1:1 frequency scale */
	mld->ptp_data.scaled_freq = PTP_SCALE_FACTOR;
	mld->ptp_data.ptp_clock_info.getcrosststamp =
		iwl_mld_phc_get_crosstimestamp;

	/* Give a short 'friendly name' to identify the PHC clock */
	snprintf(mld->ptp_data.ptp_clock_info.name,
		 sizeof(mld->ptp_data.ptp_clock_info.name),
		 "%s", "iwlwifi-PTP");

	mld->ptp_data.ptp_clock =
		ptp_clock_register(&mld->ptp_data.ptp_clock_info, mld->dev);

	if (IS_ERR_OR_NULL(mld->ptp_data.ptp_clock)) {
		IWL_ERR(mld, "Failed to register PHC clock (%ld)\n",
			PTR_ERR(mld->ptp_data.ptp_clock));
		mld->ptp_data.ptp_clock = NULL;
	} else {
		IWL_INFO(mld, "Registered PHC clock: %s, with index: %d\n",
			 mld->ptp_data.ptp_clock_info.name,
			 ptp_clock_index(mld->ptp_data.ptp_clock));
	}
}
void iwl_mld_ptp_remove(struct iwl_mld *mld)
{
if (mld->ptp_data.ptp_clock) {
IWL_INFO(mld, "Unregistering PHC clock: %s, with index: %d\n",
mld->ptp_data.ptp_clock_info.name,
ptp_clock_index(mld->ptp_data.ptp_clock));
ptp_clock_unregister(mld->ptp_data.ptp_clock);
mld->ptp_data.ptp_clock = NULL;
mld->ptp_data.last_gp2 = 0;
mld->ptp_data.wrap_counter = 0;
cancel_delayed_work_sync(&mld->ptp_data.dwork);
}
}

View File

@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2025 Intel Corporation
*/
#ifndef __iwl_mld_ptp_h__
#define __iwl_mld_ptp_h__

#include <linux/ptp_clock_kernel.h>

/**
 * struct ptp_data - PTP hardware clock data
 *
 * @ptp_clock: struct ptp_clock pointer returned by the ptp_clock_register()
 *	function.
 * @ptp_clock_info: struct ptp_clock_info that describes a PTP hardware clock
 * @lock: protects the time adjustments data
 * @delta: delta between hardware clock and ptp clock in nanoseconds
 * @scale_update_gp2: GP2 time when the scale was last updated
 * @scale_update_adj_time_ns: adjusted time when the scale was last updated,
 *	in nanoseconds
 * @scaled_freq: clock frequency offset, scaled to 65536000000
 * @last_gp2: the last GP2 reading from the hardware, used for tracking GP2
 *	wraparounds
 * @wrap_counter: number of wraparounds since scale_update_adj_time_ns
 * @dwork: worker scheduled every 1 hour to detect wraparounds
 */
struct ptp_data {
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_clock_info;

	spinlock_t lock;
	s64 delta;
	u32 scale_update_gp2;
	u64 scale_update_adj_time_ns;
	u64 scaled_freq;
	u32 last_gp2;
	u32 wrap_counter;
	struct delayed_work dwork;
};

void iwl_mld_ptp_init(struct iwl_mld *mld);
void iwl_mld_ptp_remove(struct iwl_mld *mld);
u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns);

#endif /* __iwl_mld_ptp_h__ */

View File

@ -0,0 +1,393 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include <linux/dmi.h>
#include "fw/regulatory.h"
#include "fw/acpi.h"
#include "fw/uefi.h"
#include "regulatory.h"
#include "mld.h"
#include "hcmd.h"
/* Read all BIOS/ACPI/UEFI regulatory tables into the fw runtime.
 *
 * Missing tables are not fatal and only logged; the single hard error is
 * WGDS (geo SAR) being present without WRDS (basic SAR), since geo offsets
 * cannot be applied without a SAR baseline.
 */
void iwl_mld_get_bios_tables(struct iwl_mld *mld)
{
	int ret;

	iwl_acpi_get_guid_lock_status(&mld->fwrt);

	ret = iwl_bios_get_ppag_table(&mld->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mld,
				"PPAG BIOS table invalid or unavailable. (%d)\n",
				ret);
	}

	ret = iwl_bios_get_wrds_table(&mld->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mld,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);

		/* If not available, don't fail and don't bother with EWRD and
		 * WGDS
		 */

		if (!iwl_bios_get_wgds_table(&mld->fwrt)) {
			/* If basic SAR is not available, we check for WGDS,
			 * which should *not* be available either. If it is
			 * available, issue an error, because we can't use SAR
			 * Geo without basic SAR.
			 */
			IWL_ERR(mld, "BIOS contains WGDS but no WRDS\n");
		}
	} else {
		ret = iwl_bios_get_ewrd_table(&mld->fwrt);
		/* If EWRD is not available, we can still use
		 * WRDS, so don't fail.
		 */
		if (ret < 0)
			IWL_DEBUG_RADIO(mld,
					"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
					ret);

		ret = iwl_bios_get_wgds_table(&mld->fwrt);
		if (ret < 0)
			IWL_DEBUG_RADIO(mld,
					"Geo SAR BIOS table invalid or unavailable. (%d)\n",
					ret);
		/* we don't fail if the table is not available */
	}

	ret = iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
	if (ret)
		IWL_DEBUG_RADIO(mld, "failed to read UATS table (%d)\n", ret);
}
/* Send the geographic SAR offsets (PER_CHAIN_LIMIT_OFFSET_CMD).
 *
 * Supports command versions 4 and 5 (-EOPNOTSUPP otherwise). Returns 0
 * without sending when there is no geo/WGDS data to program, which is a
 * valid configuration.
 */
static int iwl_mld_geo_sar_init(struct iwl_mld *mld)
{
	u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
	union iwl_geo_tx_power_profiles_cmd cmd;
	u16 len;
	u32 n_bands;
	__le32 sk = cpu_to_le32(0);
	int ret;
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
					   IWL_FW_CMD_VER_UNKNOWN);

	BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));

	cmd.v4.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);

	/* Only set to South Korea if the table revision is 1 */
	if (mld->fwrt.geo_rev == 1)
		sk = cpu_to_le32(1);

	if (cmd_ver == 5) {
		len = sizeof(cmd.v5);
		n_bands = ARRAY_SIZE(cmd.v5.table[0]);
		cmd.v5.table_revision = sk;
	} else if (cmd_ver == 4) {
		len = sizeof(cmd.v4);
		n_bands = ARRAY_SIZE(cmd.v4.table[0]);
		cmd.v4.table_revision = sk;
	} else {
		return -EOPNOTSUPP;
	}

	BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
		     offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
	/* the table is at the same position for all versions, so set use v4 */
	ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v4.table[0][0],
				     n_bands, BIOS_GEO_MAX_PROFILE_NUM);

	/* It is a valid scenario to not support SAR, or miss wgds table,
	 * but in that case there is no need to send the command.
	 */
	if (ret)
		return 0;

	return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
}
/* Program the SAR profiles for chains A/B via REDUCE_TX_POWER_CMD.
 *
 * Supports command versions 9 and 10 (-EOPNOTSUPP otherwise). Returns a
 * negative error, a positive value when the profile is disabled (command
 * not sent), or the send status.
 */
int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b)
{
	u32 cmd_id = REDUCE_TX_POWER_CMD;
	struct iwl_dev_tx_power_cmd cmd = {
		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	__le16 *per_chain;
	int ret;
	u16 len = sizeof(cmd.common);
	u32 n_subbands;
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
					   IWL_FW_CMD_VER_UNKNOWN);

	if (cmd_ver == 10) {
		len += sizeof(cmd.v10);
		n_subbands = IWL_NUM_SUB_BANDS_V2;
		per_chain = &cmd.v10.per_chain[0][0][0];
		cmd.v10.flags =
			cpu_to_le32(mld->fwrt.reduced_power_flags);
	} else if (cmd_ver == 9) {
		len += sizeof(cmd.v9);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = &cmd.v9.per_chain[0][0];
	} else {
		return -EOPNOTSUPP;
	}

	/* TODO: CDB - support IWL_NUM_CHAIN_TABLES_V2 */
	ret = iwl_sar_fill_profile(&mld->fwrt, per_chain,
				   IWL_NUM_CHAIN_TABLES,
				   n_subbands, prof_a, prof_b);

	/* return on error or if the profile is disabled (positive number) */
	if (ret)
		return ret;

	return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
}
/* Initialize SAR: program the chain profiles, then the geo offsets.
 *
 * When the user has not selected profiles, profile 1 (WRDS) is used for
 * both chains. A positive return from the profile configuration means SAR
 * is disabled, in which case geo SAR is skipped and 0 is returned.
 */
int iwl_mld_init_sar(struct iwl_mld *mld)
{
	int prof_a = mld->fwrt.sar_chain_a_profile;
	int prof_b = mld->fwrt.sar_chain_b_profile;
	int ret;

	/* fall back to profile 1 (WRDS) unless both chains were chosen */
	if (!prof_a || !prof_b) {
		prof_a = 1;
		prof_b = 1;
	}

	ret = iwl_mld_config_sar_profile(mld, prof_a, prof_b);
	if (ret < 0)
		return ret;

	if (ret)
		return 0;

	return iwl_mld_geo_sar_init(mld);
}
/* Send the SGOM (SAR geographic offset mapping) table to the firmware.
 * Returns 0 immediately when the table is disabled.
 */
int iwl_mld_init_sgom(struct iwl_mld *mld)
{
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
			      SAR_OFFSET_MAPPING_TABLE_CMD),
		.data[0] = &mld->fwrt.sgom_table,
		.len[0] = sizeof(mld->fwrt.sgom_table),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};
	int ret;

	if (!mld->fwrt.sgom_enabled) {
		IWL_DEBUG_RADIO(mld, "SGOM table is disabled\n");
		return 0;
	}

	ret = iwl_mld_send_cmd(mld, &cmd);
	if (ret)
		IWL_ERR(mld,
			"failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);

	return ret;
}
/* Send the PPAG (per-platform antenna gain) table to the firmware */
static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
{
	union iwl_ppag_table_cmd cmd = {};
	int len, ret;

	/* Not supporting PPAG table is a valid scenario */
	if (iwl_fill_ppag_table(&mld->fwrt, &cmd, &len) < 0)
		return 0;

	IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
						PER_PLATFORM_ANT_GAIN_CMD),
				   &cmd, len);
	if (ret < 0)
		IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
			ret);

	return ret;
}
/* Program PPAG if the platform is approved for it.
 * The table itself was already read during the INIT stage.
 */
int iwl_mld_init_ppag(struct iwl_mld *mld)
{
	if (!iwl_is_ppag_approved(&mld->fwrt))
		return 0;

	return iwl_mld_ppag_send_cmd(mld);
}
/* Send the LARI_CONFIG_CHANGE command to the firmware.
 *
 * Collects regulatory override bitmaps from the BIOS (DSM functions and
 * WBEM); each bitmap is filled only if the corresponding BIOS entry
 * exists. If no bitmap ended up set, the command is not sent at all.
 */
void iwl_mld_configure_lari(struct iwl_mld *mld)
{
	struct iwl_fw_runtime *fwrt = &mld->fwrt;
	struct iwl_lari_config_change_cmd cmd = {
		.config_bitmap = iwl_get_lari_config_bitmap(fwrt),
	};
	int ret;
	u32 value;

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
	if (!ret)
		cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
	if (!ret)
		/* Mask with a plain AND - don't hide an assignment
		 * (value &= ...) inside the macro argument.
		 */
		cmd.oem_unii4_allow_bitmap =
			cpu_to_le32(value & DSM_UNII4_ALLOW_BITMAP);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
	if (!ret)
		cmd.chan_state_active_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
	if (!ret)
		cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
	if (!ret)
		cmd.force_disable_channels_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
			       &value);
	if (!ret)
		cmd.edt_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_wbem(fwrt, &value);
	if (!ret)
		cmd.oem_320mhz_allow_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
	if (!ret)
		cmd.oem_11be_allow_bitmap = cpu_to_le32(value);

	/* Nothing to override - don't bother the firmware */
	if (!cmd.config_bitmap &&
	    !cmd.oem_uhb_allow_bitmap &&
	    !cmd.oem_11ax_allow_bitmap &&
	    !cmd.oem_unii4_allow_bitmap &&
	    !cmd.chan_state_active_bitmap &&
	    !cmd.force_disable_channels_bitmap &&
	    !cmd.edt_bitmap &&
	    !cmd.oem_320mhz_allow_bitmap &&
	    !cmd.oem_11be_allow_bitmap)
		return;

	IWL_DEBUG_RADIO(mld,
			"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
			le32_to_cpu(cmd.config_bitmap),
			le32_to_cpu(cmd.oem_11ax_allow_bitmap));
	IWL_DEBUG_RADIO(mld,
			"sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x\n",
			le32_to_cpu(cmd.oem_unii4_allow_bitmap),
			le32_to_cpu(cmd.chan_state_active_bitmap));
	IWL_DEBUG_RADIO(mld,
			"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
			le32_to_cpu(cmd.oem_uhb_allow_bitmap),
			le32_to_cpu(cmd.force_disable_channels_bitmap));
	IWL_DEBUG_RADIO(mld,
			"sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
			le32_to_cpu(cmd.edt_bitmap),
			le32_to_cpu(cmd.oem_320mhz_allow_bitmap));
	IWL_DEBUG_RADIO(mld,
			"sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
			le32_to_cpu(cmd.oem_11be_allow_bitmap));

	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						LARI_CONFIG_CHANGE), &cmd);
	if (ret)
		IWL_DEBUG_RADIO(mld,
				"Failed to send LARI_CONFIG_CHANGE (%d)\n",
				ret);
}
/* Send the UATS table to the firmware via MCC_ALLOWED_AP_TYPE_CMD.
 *
 * Nothing is sent unless a valid table was read from the BIOS earlier
 * (mld->fwrt.uats_valid).
 */
void iwl_mld_init_uats(struct iwl_mld *mld)
{
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
			      MCC_ALLOWED_AP_TYPE_CMD),
		.data[0] = &mld->fwrt.uats_table,
		.len[0] = sizeof(mld->fwrt.uats_table),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};
	int ret;

	if (!mld->fwrt.uats_valid)
		return;

	ret = iwl_mld_send_cmd(mld, &cmd);
	if (ret)
		IWL_ERR(mld, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
			ret);
}
/* Configure TAS (Time Averaged SAR) in the firmware.
 *
 * Reads the TAS table from the BIOS and sends it via TAS_CONFIG. If the
 * system vendor is not on the approved list, TAS is force-disabled in
 * the US and Canada by adding those MCCs to the block list.
 */
void iwl_mld_init_tas(struct iwl_mld *mld)
{
	int ret;
	struct iwl_tas_data data = {};
	struct iwl_tas_config_cmd cmd = {};
	u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);

	/* The driver-side and FW-side block lists must match in size */
	BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
		     IWL_WTAS_BLACK_LIST_MAX);
	BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) !=
		     IWL_WTAS_BLACK_LIST_MAX);

	if (!fw_has_capa(&mld->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
		IWL_DEBUG_RADIO(mld, "TAS not enabled in FW\n");
		return;
	}

	ret = iwl_bios_get_tas_table(&mld->fwrt, &data);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mld,
				"TAS table invalid or unavailable. (%d)\n",
				ret);
		return;
	}

	if (!iwl_is_tas_approved()) {
		IWL_DEBUG_RADIO(mld,
				"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
				dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
		if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array,
						    &data.block_list_size,
						    IWL_MCC_US)) ||
		    (!iwl_add_mcc_to_tas_block_list(data.block_list_array,
						    &data.block_list_size,
						    IWL_MCC_CANADA))) {
			IWL_DEBUG_RADIO(mld,
					"Unable to add US/Canada to TAS block list, disabling TAS\n");
			return;
		}
	} else {
		IWL_DEBUG_RADIO(mld,
				"System vendor '%s' is in the approved list.\n",
				dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
	}

	/* Copy the block list into the FW command (with le16 conversion) */
	cmd.block_list_size = cpu_to_le16(data.block_list_size);
	for (u8 i = 0; i < data.block_list_size; i++)
		cmd.block_list_array[i] =
			cpu_to_le16(data.block_list_array[i]);

	cmd.tas_config_info.table_source = data.table_source;
	cmd.tas_config_info.table_revision = data.table_revision;
	cmd.tas_config_info.value = cpu_to_le32(data.tas_selection);

	ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
	if (ret)
		IWL_DEBUG_RADIO(mld, "failed to send TAS_CONFIG (%d)\n", ret);
}

View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#ifndef __iwl_mld_regulatory_h__
#define __iwl_mld_regulatory_h__

#include "mld.h"

/* Read the regulatory/power tables (SAR, PPAG, TAS, ...) from the BIOS */
void iwl_mld_get_bios_tables(struct iwl_mld *mld);

/* Send the LARI regulatory overrides to the firmware (if any exist) */
void iwl_mld_configure_lari(struct iwl_mld *mld);

/* Send the UATS table to the firmware (no-op if the table is invalid) */
void iwl_mld_init_uats(struct iwl_mld *mld);

/* Configure TAS (Time Averaged SAR) in the firmware */
void iwl_mld_init_tas(struct iwl_mld *mld);

/* Send the PPAG table to the firmware, if approved for this platform */
int iwl_mld_init_ppag(struct iwl_mld *mld);

/* Send the SGOM table to the firmware, if enabled */
int iwl_mld_init_sgom(struct iwl_mld *mld);

/* Configure the SAR profiles (and geographic SAR) at init time */
int iwl_mld_init_sar(struct iwl_mld *mld);

/* Configure specific SAR profiles for chain A and chain B */
int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b);
#endif /* __iwl_mld_regulatory_h__ */

View File

@ -0,0 +1,224 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024 - 2025 Intel Corporation
*/
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "mld.h"
#include "roc.h"
#include "hcmd.h"
#include "iface.h"
#include "sta.h"
#include "mlo.h"
#include "fw/api/context.h"
#include "fw/api/time-event.h"
#define AUX_ROC_MAX_DELAY MSEC_TO_TU(200)
/* Interface iterator: block EMLSR on a vif for the duration of the ROC.
 *
 * The first failure encountered is reported back through @data (an int *),
 * later successes do not overwrite it.
 */
static void
iwl_mld_vif_iter_emlsr_block_roc(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int *result = data;
	int ret;

	ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
				       IWL_MLD_EMLSR_BLOCKED_ROC,
				       iwl_mld_get_primary_link(vif));
	if (ret)
		*result = ret;
}
/* mac80211 remain-on-channel (ROC) start handler.
 *
 * Blocks EMLSR on all active interfaces, adds an AUX station, and sends
 * ROC_CMD to the firmware. Currently only supported on P2P device
 * interfaces; ROC on a BSS vif is pending (task=Hotspot 2.0).
 *
 * NOTE(review): on the error paths below, EMLSR stays blocked and the
 * AUX STA is not removed - confirm this is handled/acceptable elsewhere.
 *
 * Returns: 0 on success, a negative error code otherwise.
 */
int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      struct ieee80211_channel *channel, int duration,
		      enum ieee80211_roc_type type)
{
	struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_int_sta *aux_sta;
	struct iwl_roc_req cmd = {
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
	};
	/* ROC_CMD size depends on the firmware's API version */
	u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
				       WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
	u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
	enum iwl_roc_activity activity;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* ROC conflicts with EMLSR: block it on every active interface */
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_block_roc,
						&ret);
	if (ret)
		return ret;

	/* TODO: task=Hotspot 2.0 */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
		IWL_ERR(mld, "NOT SUPPORTED: ROC on vif->type %d\n",
			vif->type);
		return -EOPNOTSUPP;
	}

	/* Map the mac80211 ROC type to the FW activity type */
	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		activity = ROC_ACTIVITY_P2P_DISC;
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		activity = ROC_ACTIVITY_P2P_NEG;
		break;
	default:
		WARN_ONCE(1, "Got an invalid P2P ROC type\n");
		return -EINVAL;
	}

	/* Only one ROC activity at a time per vif */
	if (WARN_ON(mld_vif->roc_activity != ROC_NUM_ACTIVITIES))
		return -EBUSY;

	/* No MLO on P2P device */
	aux_sta = &mld_vif->deflink.aux_sta;

	ret = iwl_mld_add_aux_sta(mld, aux_sta);
	if (ret)
		return ret;

	cmd.activity = cpu_to_le32(activity);
	cmd.sta_id = cpu_to_le32(aux_sta->sta_id);
	cmd.channel_info.channel = cpu_to_le32(channel->hw_value);
	cmd.channel_info.band = iwl_mld_nl80211_band_to_fw(channel->band);
	cmd.channel_info.width = IWL_PHY_CHANNEL_MODE20;
	/* TODO: task=Hotspot 2.0, revisit those parameters when we add an ROC
	 * on the BSS vif
	 */
	cmd.max_delay = cpu_to_le32(AUX_ROC_MAX_DELAY);
	cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));

	memcpy(cmd.node_addr, vif->addr, ETH_ALEN);

	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
				   &cmd, cmd_len);
	if (ret) {
		IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
		return ret;
	}

	/* Record the active activity; checked by the notif/cancel paths */
	mld_vif->roc_activity = activity;

	return 0;
}
/* Interface iterator: lift the ROC-related EMLSR block from a vif */
static void
iwl_mld_vif_iter_emlsr_unblock_roc(void *data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_ROC);
}
/* Tear down the local ROC state after the firmware is done with it.
 *
 * Resets roc_activity, unblocks EMLSR on all interfaces, then flushes
 * the AUX station's queues and removes it.
 */
static void iwl_mld_destroy_roc(struct iwl_mld *mld,
				struct ieee80211_vif *vif,
				struct iwl_mld_vif *mld_vif)
{
	mld_vif->roc_activity = ROC_NUM_ACTIVITIES;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_unblock_roc,
						NULL);

	/* wait until every tx has seen that roc_activity has been reset */
	synchronize_net();

	/* from here, no new tx will be added
	 * we can flush the Tx on the queues
	 */
	iwl_mld_flush_link_sta_txqs(mld, mld_vif->deflink.aux_sta.sta_id);

	iwl_mld_remove_aux_sta(mld, vif, &vif->bss_conf);
}
/* mac80211 remain-on-channel cancel handler.
 *
 * Sends a REMOVE ROC_CMD to the firmware (if an activity is running),
 * drops any stale ROC notification still queued, and destroys the local
 * ROC state.
 *
 * Returns: 0 (also when nothing was running), a negative error otherwise.
 */
int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif)
{
	struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_roc_req cmd = {
		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
	};
	/* ROC_CMD size depends on the firmware's API version */
	u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
				       WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
	u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* TODO: task=Hotspot 2.0 */
	if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
		return -EOPNOTSUPP;

	/* No roc activity running it's probably already done */
	if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES)
		return 0;

	cmd.activity = cpu_to_le32(mld_vif->roc_activity);

	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
				   &cmd, cmd_len);
	if (ret)
		IWL_ERR(mld, "Couldn't send the command to cancel the ROC\n");

	/* We may have raced with the firmware expiring the ROC instance at
	 * this very moment. In that case, we can have a notification in the
	 * async processing queue. However, none can arrive _after_ this as
	 * ROC_CMD was sent synchronously, i.e. we waited for a response and
	 * the firmware cannot refer to this ROC after the response. Thus,
	 * if we just cancel the notification (if there's one) we'll be at a
	 * clean state for any possible next ROC.
	 */
	iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_ROC,
					       mld_vif->roc_activity);

	iwl_mld_destroy_roc(mld, vif, mld_vif);

	return 0;
}
/* Handle a ROC notification from the firmware.
 *
 * On a successful start, tells mac80211 we are ready on channel. On a
 * failed or ended ROC, removes/destroys it and notifies mac80211 that
 * the remain-on-channel period expired.
 */
void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
			      struct iwl_rx_packet *pkt)
{
	const struct iwl_roc_notif *notif = (void *)pkt->data;
	u32 activity = le32_to_cpu(notif->activity);
	/* TODO: task=Hotspot 2.0 - roc can run on BSS */
	struct ieee80211_vif *vif = mld->p2p_device_vif;
	struct iwl_mld_vif *mld_vif;

	if (WARN_ON(!vif))
		return;

	mld_vif = iwl_mld_vif_from_mac80211(vif);
	/* It is possible that the ROC was canceled
	 * but the notification was already fired.
	 */
	if (mld_vif->roc_activity != activity)
		return;

	if (le32_to_cpu(notif->success) &&
	    le32_to_cpu(notif->started)) {
		/* We had a successful start */
		ieee80211_ready_on_channel(mld->hw);
	} else {
		/* ROC was not successful, tell the firmware to remove it */
		if (le32_to_cpu(notif->started))
			iwl_mld_cancel_roc(mld->hw, vif);
		else
			iwl_mld_destroy_roc(mld, vif, mld_vif);
		/* we need to let know mac80211 about end OR
		 * an unsuccessful start
		 */
		ieee80211_remain_on_channel_expired(mld->hw);
	}
}

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2024 Intel Corporation
 */
#ifndef __iwl_mld_roc_h__
#define __iwl_mld_roc_h__

#include <net/mac80211.h>

/* mac80211 remain-on-channel start handler (P2P device only for now) */
int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      struct ieee80211_channel *channel, int duration,
		      enum ieee80211_roc_type type);

/* mac80211 remain-on-channel cancel handler */
int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif);

/* Handler for the firmware's ROC notification */
void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
			      struct iwl_rx_packet *pkt);
#endif /* __iwl_mld_roc_h__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#ifndef __iwl_mld_rx_h__
#define __iwl_mld_rx_h__

#include "mld.h"

/**
 * enum iwl_mld_internal_rxq_notif_type - RX queue sync notif types
 *
 * @IWL_MLD_RXQ_EMPTY: empty sync notification
 * @IWL_MLD_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
 */
enum iwl_mld_internal_rxq_notif_type {
	IWL_MLD_RXQ_EMPTY,
	IWL_MLD_RXQ_NOTIF_DEL_BA,
};

/**
 * struct iwl_mld_internal_rxq_notif - @iwl_rxq_sync_cmd internal data.
 * This data is echoed by the firmware to all RSS queues and should be DWORD
 * aligned. FW is agnostic to the data, so there are no endianness requirements
 *
 * @type: one of &iwl_mld_internal_rxq_notif_type
 * @cookie: unique internal cookie to identify old notifications
 * @reserved: reserved for alignment
 * @payload: data to send to RX queues based on the type (may be empty)
 */
struct iwl_mld_internal_rxq_notif {
	u8 type;
	u8 reserved[3];
	u32 cookie;
	u8 payload[];
} __packed;

/**
 * struct iwl_mld_rx_queues_sync - RX queues sync data
 *
 * @waitq: wait queue for RX queues sync completion
 * @cookie: unique id to correlate sync requests with responses
 * @state: bitmask representing the sync state of RX queues
 *	all RX queues bits are set before sending the command, and the
 *	corresponding queue bit cleared upon handling the notification
 */
struct iwl_mld_rx_queues_sync {
	wait_queue_head_t waitq;
	u32 cookie;
	unsigned long state;
};

/* Handle a received MPDU on the given RSS queue */
void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
		     struct iwl_rx_cmd_buffer *rxb, int queue);

/* Synchronize all RX queues (waits for each queue to ack the notif) */
void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
			    enum iwl_mld_internal_rxq_notif_type type,
			    const void *notif_payload, u32 notif_payload_size);

void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
					 struct napi_struct *napi,
					 struct iwl_rx_packet *pkt, int queue);

void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
				     struct napi_struct *napi,
				     struct sk_buff *skb, int queue,
				     struct ieee80211_sta *sta);

void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
				struct iwl_rx_packet *pkt, int queue);

/* note: comment fixed - this file's guard is __iwl_mld_rx_h__, not agg */
#endif /* __iwl_mld_rx_h__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,135 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#ifndef __iwl_mld_scan_h__
#define __iwl_mld_scan_h__

/* Allocate mld->scan.cmd once at op mode start */
int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld);

/* Start a regular (mac80211-requested) scan */
int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       struct cfg80211_scan_request *req,
			       struct ieee80211_scan_ies *ies);

/* Start an internal MLO scan on @vif */
void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif);

void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld,
					     struct iwl_rx_packet *pkt);

/* Stop an ongoing scan of the given type, optionally notifying mac80211 */
int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify);

/* Start a scheduled scan (or net-detect, depending on @type) */
int iwl_mld_sched_scan_start(struct iwl_mld *mld,
			     struct ieee80211_vif *vif,
			     struct cfg80211_sched_scan_request *req,
			     struct ieee80211_scan_ies *ies,
			     int type);

void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
				      struct iwl_rx_packet *pkt);

void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
					struct iwl_rx_packet *pkt);

#define WFA_TPC_IE_LEN 9

/* Maximum probe-request template size the driver can accept: the FW
 * buffer size minus the IEs/header the driver builds by itself.
 */
static inline int iwl_mld_scan_max_template_size(void)
{
#define MAC_HDR_LEN 24
#define DS_IE_LEN 3
#define SSID_IE_LEN 2

/* driver create the 802.11 header, WFA TPC IE, DS parameter and SSID IE */
#define DRIVER_TOTAL_IES_LEN \
	(MAC_HDR_LEN + WFA_TPC_IE_LEN + DS_IE_LEN + SSID_IE_LEN)

	BUILD_BUG_ON(SCAN_OFFLOAD_PROBE_REQ_SIZE < DRIVER_TOTAL_IES_LEN);

	return SCAN_OFFLOAD_PROBE_REQ_SIZE - DRIVER_TOTAL_IES_LEN;
}

void iwl_mld_report_scan_aborted(struct iwl_mld *mld);

/* Per-UID scan status bits; a free UID has status IWL_MLD_SCAN_NONE */
enum iwl_mld_scan_status {
	IWL_MLD_SCAN_NONE = 0,
	IWL_MLD_SCAN_REGULAR = BIT(0),
	IWL_MLD_SCAN_SCHED = BIT(1),
	IWL_MLD_SCAN_NETDETECT = BIT(2),
	IWL_MLD_SCAN_INT_MLO = BIT(3),
};

/**
 * enum iwl_mld_pass_all_sched_results_states - Defines the states for
 * handling/passing scheduled scan results to mac80211
 * @SCHED_SCAN_PASS_ALL_STATE_DISABLED: Don't pass all scan results, only when
 *	a match found.
 * @SCHED_SCAN_PASS_ALL_STATE_ENABLED: Pass all scan results is enabled
 *	(no filtering).
 * @SCHED_SCAN_PASS_ALL_STATE_FOUND: A scan result is found, pass it on the
 *	next scan iteration complete notification.
 */
enum iwl_mld_pass_all_sched_results_states {
	SCHED_SCAN_PASS_ALL_STATE_DISABLED,
	SCHED_SCAN_PASS_ALL_STATE_ENABLED,
	SCHED_SCAN_PASS_ALL_STATE_FOUND,
};

/**
 * enum iwl_mld_traffic_load - Levels of traffic load
 *
 * @IWL_MLD_TRAFFIC_LOW: low traffic load
 * @IWL_MLD_TRAFFIC_MEDIUM: medium traffic load
 * @IWL_MLD_TRAFFIC_HIGH: high traffic load
 */
enum iwl_mld_traffic_load {
	IWL_MLD_TRAFFIC_LOW,
	IWL_MLD_TRAFFIC_MEDIUM,
	IWL_MLD_TRAFFIC_HIGH,
};

/**
 * struct iwl_mld_scan - Scan data
 * @status: scan status, a combination of %enum iwl_mld_scan_status,
 *	reflects the %scan.uid_status array.
 * @uid_status: array to track the scan status per uid.
 * @start_tsf: start time of last scan in TSF of the link that requested
 *	the scan.
 * @last_ebs_failed: true if the last EBS (Energy Based Scan) failed.
 * @pass_all_sched_res: see %enum iwl_mld_pass_all_sched_results_states.
 * @fw_link_id: the current (regular) scan fw link id, used by scan
 *	complete notif.
 * @traffic_load: traffic load related data
 * @traffic_load.last_stats_ts_usec: The timestamp of the last statistics
 *	notification, used to calculate the elapsed time between two
 *	notifications and determine the traffic load
 * @traffic_load.status: The current traffic load status, see
 *	&enum iwl_mld_traffic_load
 * @cmd_size: size of %cmd.
 * @cmd: pointer to scan cmd buffer (allocated once in op mode start).
 * @last_6ghz_passive_jiffies: stores the last 6GHz passive scan time
 *	in jiffies.
 * @last_start_time_jiffies: stores the last start time in jiffies
 *	(interface up/reset/resume).
 * @last_mlo_scan_jiffies: end time of the last MLO scan in jiffies.
 */
struct iwl_mld_scan {
	/* Add here fields that need clean up on restart */
	struct_group(zeroed_on_hw_restart,
		unsigned int status;
		u32 uid_status[IWL_MAX_UMAC_SCANS];
		u64 start_tsf;
		bool last_ebs_failed;
		enum iwl_mld_pass_all_sched_results_states pass_all_sched_res;
		u8 fw_link_id;
		struct {
			u32 last_stats_ts_usec;
			enum iwl_mld_traffic_load status;
		} traffic_load;
	);
	/* And here fields that survive a fw restart */
	size_t cmd_size;
	void *cmd;
	unsigned long last_6ghz_passive_jiffies;
	unsigned long last_start_time_jiffies;
	unsigned long last_mlo_scan_jiffies;
};
#endif /* __iwl_mld_scan_h__ */

View File

@ -0,0 +1,222 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024 Intel Corporation
*/
#include "session-protect.h"
#include "fw/api/time-event.h"
#include "fw/api/context.h"
#include "iface.h"
#include <net/mac80211.h>
/* Handle SESSION_PROTECTION_NOTIF from the firmware.
 *
 * Updates the vif's session-protection state: clears it when the
 * protection failed or ended, or records the expected end time when
 * the protection started.
 */
void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
				       struct iwl_rx_packet *pkt)
{
	struct iwl_session_prot_notif *notif = (void *)pkt->data;
	int fw_link_id = le32_to_cpu(notif->mac_link_id);
	struct ieee80211_bss_conf *link_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
	struct ieee80211_vif *vif;
	struct iwl_mld_vif *mld_vif;
	struct iwl_mld_session_protect *session_protect;

	if (WARN_ON(!link_conf))
		return;

	vif = link_conf->vif;
	mld_vif = iwl_mld_vif_from_mac80211(vif);
	session_protect = &mld_vif->session_protect;

	if (!le32_to_cpu(notif->status)) {
		/* Protection failed or was removed - clear the state */
		memset(session_protect, 0, sizeof(*session_protect));
	} else if (le32_to_cpu(notif->start)) {
		/* End_jiffies indicates an active session */
		session_protect->session_requested = false;
		session_protect->end_jiffies =
			TU_TO_EXP_TIME(session_protect->duration);
		/* !session_protect->end_jiffies means inactive session */
		if (!session_protect->end_jiffies)
			session_protect->end_jiffies = 1;
	} else {
		/* Protection ended - clear the state */
		memset(session_protect, 0, sizeof(*session_protect));
	}
}
/* Schedule a session protection for @link_id on @vif.
 *
 * If a currently-active session already covers at least @min_duration
 * from now, nothing is sent and -EALREADY is returned.
 *
 * Returns: 0 on success, -EALREADY if an adequate session is already
 * active, or another negative error code.
 */
static int _iwl_mld_schedule_session_protection(struct iwl_mld *mld,
						struct ieee80211_vif *vif,
						u32 duration, u32 min_duration,
						int link_id)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link =
		iwl_mld_link_dereference_check(mld_vif, link_id);
	struct iwl_mld_session_protect *session_protect =
		&mld_vif->session_protect;
	struct iwl_session_prot_cmd cmd = {
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* Fix: validate the link before dereferencing it (the cancel path
	 * already guards with WARN_ON(!link); previously link->fw_id was
	 * read in the initializer with no check).
	 */
	if (WARN_ON(!link))
		return -EINVAL;

	cmd.id_and_color = cpu_to_le32(link->fw_id);

	WARN(hweight16(vif->active_links) > 1,
	     "Session protection isn't allowed with more than one active link");

	if (session_protect->end_jiffies &&
	    time_after(session_protect->end_jiffies,
		       TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mld, "We have ample in the current session: %u\n",
			     jiffies_to_msecs(session_protect->end_jiffies -
					      jiffies));
		return -EALREADY;
	}

	IWL_DEBUG_TE(mld, "Add a new session protection, duration %d TU\n",
		     le32_to_cpu(cmd.duration_tu));

	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP,
						SESSION_PROTECTION_CMD), &cmd);
	if (ret)
		return ret;

	/* end_jiffies will be updated when handling session_prot_notif */
	session_protect->end_jiffies = 0;
	session_protect->duration = duration;
	session_protect->session_requested = true;

	return 0;
}
/* Fire-and-forget wrapper around _iwl_mld_schedule_session_protection():
 * logs an error on failure (-EALREADY means an adequate session is
 * already active and is not an error).
 */
void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
					 struct ieee80211_vif *vif,
					 u32 duration, u32 min_duration,
					 int link_id)
{
	int ret;

	ret = _iwl_mld_schedule_session_protection(mld, vif, duration,
						   min_duration, link_id);
	if (ret && ret != -EALREADY)
		IWL_ERR(mld,
			"Couldn't send the SESSION_PROTECTION_CMD (%d)\n",
			ret);
}
/* Context passed to the notif_wait callback below */
struct iwl_mld_session_start_data {
	struct iwl_mld *mld;
	struct ieee80211_bss_conf *link_conf;
	bool success;	/* set true once the FW reports a successful start */
};

/* notif_wait callback for SESSION_PROTECTION_NOTIF.
 *
 * Returns true to stop waiting once the notification for our link
 * conclusively indicates success or failure; false keeps waiting.
 */
static bool iwl_mld_session_start_fn(struct iwl_notif_wait_data *notif_wait,
				     struct iwl_rx_packet *pkt, void *_data)
{
	struct iwl_session_prot_notif *notif = (void *)pkt->data;
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mld_session_start_data *data = _data;
	struct ieee80211_bss_conf *link_conf;
	struct iwl_mld *mld = data->mld;
	int fw_link_id;

	/* Validate the notification size before touching its fields */
	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*notif),
			 "short session prot notif (%d)\n",
			 pkt_len))
		return false;

	fw_link_id = le32_to_cpu(notif->mac_link_id);
	link_conf = iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	/* Notification for a different link - keep waiting */
	if (link_conf != data->link_conf)
		return false;

	/* FW reported failure - stop waiting, data->success stays false */
	if (!le32_to_cpu(notif->status))
		return true;

	if (notif->start) {
		data->success = true;
		return true;
	}

	return false;
}
/* Schedule a session protection and synchronously wait (up to @timeout)
 * for the firmware to report that it actually started.
 *
 * Returns: 0 on success (including when an adequate session already
 * exists), -EIO if the firmware reported a failed start, or another
 * negative error code.
 */
int iwl_mld_start_session_protection(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     u32 duration, u32 min_duration,
				     int link_id, unsigned long timeout)
{
	static const u16 start_notif[] = { SESSION_PROTECTION_NOTIF };
	struct iwl_notification_wait start_wait;
	struct iwl_mld_session_start_data data = {
		.mld = mld,
		.link_conf = wiphy_dereference(mld->wiphy,
					       vif->link_conf[link_id]),
	};
	int ret;

	if (WARN_ON(!data.link_conf))
		return -EINVAL;

	/* Register for the notification before sending the command so the
	 * response cannot be missed
	 */
	iwl_init_notification_wait(&mld->notif_wait, &start_wait,
				   start_notif, ARRAY_SIZE(start_notif),
				   iwl_mld_session_start_fn, &data);

	ret = _iwl_mld_schedule_session_protection(mld, vif, duration,
						   min_duration, link_id);
	if (ret) {
		iwl_remove_notification(&mld->notif_wait, &start_wait);
		/* -EALREADY: an adequate session is already active */
		return ret == -EALREADY ? 0 : ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &start_wait, timeout);
	if (ret)
		return ret;

	return data.success ? 0 : -EIO;
}
/* Cancel the session protection on @link_id, if one is active or was
 * requested. See the kernel-doc in session-protect.h for the rationale.
 *
 * Returns: 0 on success (including when nothing needed canceling), a
 * negative error code otherwise.
 */
int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
				      struct ieee80211_vif *vif,
				      int link_id)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link =
		iwl_mld_link_dereference_check(mld_vif, link_id);
	struct iwl_mld_session_protect *session_protect =
		&mld_vif->session_protect;
	struct iwl_session_prot_cmd cmd = {
		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* If there isn't an active session or a requested one for this
	 * link do nothing
	 */
	if (!session_protect->session_requested &&
	    !session_protect->end_jiffies)
		return 0;

	if (WARN_ON(!link))
		return -EINVAL;

	cmd.id_and_color = cpu_to_le32(link->fw_id);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(MAC_CONF_GROUP,
					   SESSION_PROTECTION_CMD), &cmd);
	if (ret) {
		IWL_ERR(mld,
			"Couldn't send the SESSION_PROTECTION_CMD\n");
		return ret;
	}

	/* Command accepted - clear the local session-protection state */
	memset(session_protect, 0, sizeof(*session_protect));

	return 0;
}

View File

@ -0,0 +1,102 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#ifndef __session_protect_h__
#define __session_protect_h__

#include "mld.h"
#include "hcmd.h"
#include <net/mac80211.h>
#include "fw/api/mac-cfg.h"

/**
 * DOC: session protection
 *
 * Session protection is an API from the firmware that allows the driver to
 * request time on medium. This is needed before the association when we need
 * to be on medium for the association frame exchange. Once we configure the
 * firmware as 'associated', the firmware will allocate time on medium without
 * needing a session protection.
 *
 * TDLS discovery uses this API as well even after association to ensure that
 * other activities internal to the firmware will not interrupt our presence
 * on medium.
 */

/**
 * struct iwl_mld_session_protect - session protection parameters
 * @end_jiffies: expected end_jiffies of current session protection.
 *	0 if not active
 * @duration: the duration in tu of current session
 * @session_requested: A session protection command was sent and wasn't yet
 *	answered
 */
struct iwl_mld_session_protect {
	unsigned long end_jiffies;
	u32 duration;
	bool session_requested;
};

/* Default protection duration for association (converted to TU via
 * MSEC_TO_TU by the scheduling code)
 */
#define IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS 900
/* Minimum acceptable remaining time before a new protection is requested */
#define IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS 400

/**
 * iwl_mld_handle_session_prot_notif - handles %SESSION_PROTECTION_NOTIF
 * @mld: the mld component
 * @pkt: the RX packet containing the notification
 */
void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
				       struct iwl_rx_packet *pkt);

/**
 * iwl_mld_schedule_session_protection - schedule a session protection
 * @mld: the mld component
 * @vif: the virtual interface for which the protection issued
 * @duration: the requested duration of the protection
 * @min_duration: the minimum duration of the protection
 * @link_id: The link to schedule a session protection for
 */
void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
					 struct ieee80211_vif *vif,
					 u32 duration, u32 min_duration,
					 int link_id);

/**
 * iwl_mld_start_session_protection - start a session protection
 * @mld: the mld component
 * @vif: the virtual interface for which the protection issued
 * @duration: the requested duration of the protection
 * @min_duration: the minimum duration of the protection
 * @link_id: The link to schedule a session protection for
 * @timeout: timeout for waiting
 *
 * This schedules the session protection, and waits for it to start
 * (with timeout)
 *
 * Returns: 0 if successful, error code otherwise
 */
int iwl_mld_start_session_protection(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     u32 duration, u32 min_duration,
				     int link_id, unsigned long timeout);

/**
 * iwl_mld_cancel_session_protection - cancel the session protection.
 * @mld: the mld component
 * @vif: the virtual interface for which the session is issued
 * @link_id: cancel the session protection for given link
 *
 * This function cancels the session protection which is an act of good
 * citizenship. If it is not needed any more it should be canceled because
 * the other mac contexts wait for the medium during that time.
 *
 * Returns: 0 if successful, error code otherwise
 *
 */
int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
				      struct ieee80211_vif *vif,
				      int link_id);

#endif /* __session_protect_h__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,266 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_sta_h__
#define __iwl_mld_sta_h__
#include <net/mac80211.h>
#include "mld.h"
#include "tx.h"
/**
 * struct iwl_mld_rxq_dup_data - Duplication detection data, per STA & Rx queue
 * @last_seq: last sequence per tid.
 * @last_sub_frame_idx: the index of the last subframe in an A-MSDU. This value
 *	will be zero if the packet is not part of an A-MSDU.
 */
struct iwl_mld_rxq_dup_data {
	__le16 last_seq[IWL_MAX_TID_COUNT + 1];
	u8 last_sub_frame_idx[IWL_MAX_TID_COUNT + 1];
} ____cacheline_aligned_in_smp; /* presumably to avoid false sharing between
				 * Rx queues - TODO confirm
				 */
/**
 * struct iwl_mld_link_sta - link-level station
 *
 * This represents the link-level sta - the driver level equivalent to the
 * ieee80211_link_sta
 *
 * @last_rate_n_flags: rate_n_flags from the last &iwl_tlc_update_notif
 * @signal_avg: the signal average coming from the firmware
 * @in_fw: whether the link STA is uploaded to the FW (false during restart)
 * @rcu_head: RCU head for freeing this object
 * @fw_id: the FW id of this link sta.
 */
struct iwl_mld_link_sta {
	/* Add here fields that need clean up on restart
	 * (zeroed via CLEANUP_STRUCT in iwl_mld_cleanup_sta())
	 */
	struct_group(zeroed_on_hw_restart,
		u32 last_rate_n_flags;
		bool in_fw;
		s8 signal_avg;
	);
	/* And here fields that survive a fw restart */
	struct rcu_head rcu_head;
	u32 fw_id;
};
#define iwl_mld_link_sta_dereference_check(mld_sta, link_id) \
rcu_dereference_check((mld_sta)->link[link_id], \
lockdep_is_held(&mld_sta->mld->wiphy->mtx))
#define for_each_mld_link_sta(mld_sta, link_sta, link_id) \
for (link_id = 0; link_id < ARRAY_SIZE((mld_sta)->link); \
link_id++) \
if ((link_sta = \
iwl_mld_link_sta_dereference_check(mld_sta, link_id)))
#define IWL_NUM_DEFAULT_KEYS 4
/**
 * struct iwl_mld_ptk_pn - Holds Packet Number (PN) per TID.
 * @rcu_head: RCU head for freeing this data.
 * @q: per-Rx-queue entries; each stores a PN for every TID.
 */
struct iwl_mld_ptk_pn {
	struct rcu_head rcu_head;
	struct {
		u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
	} ____cacheline_aligned_in_smp q[];
};
/**
 * struct iwl_mld_per_link_mpdu_counter - per-link TX/RX MPDU counters
 *
 * @tx: Number of TX MPDUs.
 * @rx: Number of RX MPDUs.
 */
struct iwl_mld_per_link_mpdu_counter {
	u32 tx;
	u32 rx;
};

/**
 * struct iwl_mld_per_q_mpdu_counter - per-queue MPDU counter
 *
 * @lock: Needed to protect the counters when modified from statistics.
 * @per_link: per-link counters.
 * @window_start_time: timestamp of the counting-window start
 */
struct iwl_mld_per_q_mpdu_counter {
	spinlock_t lock;
	struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINK_ID + 1];
	unsigned long window_start_time;
} ____cacheline_aligned_in_smp; /* presumably aligned to keep each queue's
				 * counter on its own cacheline - TODO confirm
				 */
/**
 * struct iwl_mld_sta - representation of a station in the driver.
 *
 * This represent the MLD-level sta, and will not be added to the FW.
 * Embedded in ieee80211_sta.
 *
 * @vif: pointer the vif object.
 * @sta_state: station state according to enum %ieee80211_sta_state
 * @sta_type: type of this station. See &enum iwl_fw_sta_type
 * @mld: a pointer to the iwl_mld object
 * @dup_data: per queue duplicate packet detection data
 * @data_tx_ant: stores the last TX antenna index; used for setting
 *	TX rate_n_flags for injected data frames (toggles on every TX failure).
 * @tid_to_baid: a simple map of TID to Block-Ack fw id
 * @deflink: This holds the default link STA information, for non MLO STA all
 *	link specific STA information is accessed through @deflink or through
 *	link[0] which points to address of @deflink. For MLO Link STA
 *	the first added link STA will point to deflink.
 * @link: reference to Link Sta entries. For Non MLO STA, except 1st link,
 *	i.e link[0] all links would be assigned to NULL by default and
 *	would access link information via @deflink or link[0]. For MLO
 *	STA, first link STA being added will point its link pointer to
 *	@deflink address and remaining would be allocated and the address
 *	would be assigned to link[link_id] where link_id is the id assigned
 *	by the AP.
 * @ptk_pn: Array of pointers to PTK PN data, used to track the Packet Number
 *	per key index and per queue (TID).
 * @mpdu_counters: RX/TX MPDUs counters for each queue.
 */
struct iwl_mld_sta {
	/* Add here fields that need clean up on restart
	 * (zeroed via CLEANUP_STRUCT in iwl_mld_cleanup_sta())
	 */
	struct_group(zeroed_on_hw_restart,
		enum ieee80211_sta_state sta_state;
		enum iwl_fw_sta_type sta_type;
	);
	/* And here fields that survive a fw restart */
	struct iwl_mld *mld;
	struct ieee80211_vif *vif;
	struct iwl_mld_rxq_dup_data *dup_data;
	u8 tid_to_baid[IWL_MAX_TID_COUNT];
	u8 data_tx_ant;

	struct iwl_mld_link_sta deflink;
	struct iwl_mld_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_ptk_pn __rcu *ptk_pn[IWL_NUM_DEFAULT_KEYS];
	struct iwl_mld_per_q_mpdu_counter *mpdu_counters;
};
/* Retrieve the driver-private iwl_mld_sta embedded in an ieee80211_sta */
static inline struct iwl_mld_sta *
iwl_mld_sta_from_mac80211(struct ieee80211_sta *sta)
{
	return (void *)sta->drv_priv;
}
/* Reset per-station driver state on firmware restart.
 *
 * Station-iterator callback (note the unused @data): resets the TXQs,
 * every link STA and the station itself to their "zeroed on restart"
 * state, and drops link STAs that mac80211 no longer considers active.
 */
static inline void
iwl_mld_cleanup_sta(void *data, struct ieee80211_sta *sta)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u8 link_id;

	/* Reset the driver state of all this station's TXQs */
	for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
		CLEANUP_STRUCT(iwl_mld_txq_from_mac80211(sta->txq[i]));

	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id) {
		CLEANUP_STRUCT(mld_link_sta);

		if (!ieee80211_vif_is_mld(mld_sta->vif)) {
			/* not an MLD STA; only has the deflink with ID zero */
			WARN_ON(link_id);
			continue;
		}

		/* Keep link STAs that mac80211 still considers active */
		if (mld_sta->vif->active_links & BIT(link_id))
			continue;

		/* Should not happen as link removal should always succeed */
		WARN_ON(1);
		RCU_INIT_POINTER(mld_sta->link[link_id], NULL);
		RCU_INIT_POINTER(mld_sta->mld->fw_id_to_link_sta[mld_link_sta->fw_id],
				 NULL);
		/* deflink is embedded in mld_sta; only free allocated links */
		if (mld_link_sta != &mld_sta->deflink)
			kfree_rcu(mld_link_sta, rcu_head);
	}

	CLEANUP_STRUCT(mld_sta);
}
/* Map a mac80211 link STA to its driver counterpart (NULL if not set) */
static inline struct iwl_mld_link_sta *
iwl_mld_link_sta_from_mac80211(struct ieee80211_link_sta *link_sta)
{
	struct iwl_mld_sta *parent = iwl_mld_sta_from_mac80211(link_sta->sta);

	return iwl_mld_link_sta_dereference_check(parent, link_sta->link_id);
}
int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, enum iwl_fw_sta_type type);
void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta);
int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
struct ieee80211_link_sta *link_sta);
u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta);
int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
struct ieee80211_sta *sta);
void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta);
void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld,
struct ieee80211_sta *sta);
void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
u32 count);
void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count);
/**
 * struct iwl_mld_int_sta - representation of an internal station
 * (a station that exists in FW and in the driver, but not in mac80211)
 *
 * @sta_id: the index of the station in the fw
 * @queue_id: the id of the queue used by the station
 * @sta_type: station type. One of &iwl_fw_sta_type
 */
struct iwl_mld_int_sta {
	u8 sta_id;
	u32 queue_id;
	enum iwl_fw_sta_type sta_type;
};
/* Mark an internal station as unused: no FW station id, no queue */
static inline void
iwl_mld_init_internal_sta(struct iwl_mld_int_sta *internal_sta)
{
	internal_sta->queue_id = IWL_MLD_INVALID_QUEUE;
	internal_sta->sta_id = IWL_INVALID_STA;
}
/* Release an internal station: drop its fw-id mapping and reset it to
 * the "unused" state. Warns (and bails out) if it was never added.
 */
static inline void
iwl_mld_free_internal_sta(struct iwl_mld *mld,
			  struct iwl_mld_int_sta *internal_sta)
{
	if (WARN_ON(internal_sta->sta_id == IWL_INVALID_STA))
		return;

	RCU_INIT_POINTER(mld->fw_id_to_link_sta[internal_sta->sta_id], NULL);
	iwl_mld_init_internal_sta(internal_sta);
}
int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
int iwl_mld_add_aux_sta(struct iwl_mld *mld,
struct iwl_mld_int_sta *internal_sta);
void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
int iwl_mld_update_link_stas(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links);
#endif /* __iwl_mld_sta_h__ */

View File

@ -0,0 +1,509 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include "mld.h"
#include "stats.h"
#include "sta.h"
#include "mlo.h"
#include "hcmd.h"
#include "iface.h"
#include "scan.h"
#include "phy.h"
#include "fw/api/stats.h"
/* Build and send a SYSTEM_STATISTICS_CMD with the given configuration */
static int iwl_mld_send_fw_stats_cmd(struct iwl_mld *mld, u32 cfg_mask,
				     u32 cfg_time, u32 type_mask)
{
	struct iwl_system_statistics_cmd stats_cmd = {
		.cfg_mask = cpu_to_le32(cfg_mask),
		.config_time_sec = cpu_to_le32(cfg_time),
		.type_id_mask = cpu_to_le32(type_mask),
	};

	return iwl_mld_send_cmd_pdu(mld,
				    WIDE_ID(SYSTEM_GROUP,
					    SYSTEM_STATISTICS_CMD),
				    &stats_cmd);
}
/* Ask the FW to clear its operational statistics DB (on-demand mode) */
int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld)
{
	return iwl_mld_send_fw_stats_cmd(mld,
					 IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK,
					 0,
					 IWL_STATS_NTFY_TYPE_ID_OPER |
					 IWL_STATS_NTFY_TYPE_ID_OPER_PART1);
}
/* Extract the per-station average signal from an operational statistics
 * notification, cache it in the link STA and report it via @sinfo.
 */
static void
iwl_mld_fill_stats_from_oper_notif(struct iwl_mld *mld,
				   struct iwl_rx_packet *pkt,
				   u8 fw_sta_id, struct station_info *sinfo)
{
	const struct iwl_system_statistics_notif_oper *notif =
		(void *)&pkt->data;
	const struct iwl_stats_ntfy_per_sta *per_sta =
		&notif->per_sta[fw_sta_id];
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_link_sta *mld_link_sta;

	/* 0 isn't a valid value, but FW might send 0.
	 * In that case, set the latest non-zero value we stored
	 */
	rcu_read_lock();

	link_sta = rcu_dereference(mld->fw_id_to_link_sta[fw_sta_id]);
	if (IS_ERR_OR_NULL(link_sta))
		goto unlock;

	mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
	if (WARN_ON(!mld_link_sta))
		goto unlock;

	/* FW reports energy as a positive number; signal is its negation */
	if (per_sta->average_energy)
		mld_link_sta->signal_avg =
			-(s8)le32_to_cpu(per_sta->average_energy);

	sinfo->signal_avg = mld_link_sta->signal_avg;
	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);

unlock:
	rcu_read_unlock();
}
/* Context passed to the statistics notification-wait handler */
struct iwl_mld_stats_data {
	/* FW id of the station whose statistics were requested */
	u8 fw_sta_id;
	/* cfg80211 station info to fill from the notification */
	struct station_info *sinfo;
	struct iwl_mld *mld;
};
/* Notification-wait callback for the on-demand statistics request.
 * Fills @data->sinfo from the operational notification; PART1 is
 * currently ignored. Only the END notification completes the wait.
 */
static bool iwl_mld_wait_stats_handler(struct iwl_notif_wait_data *notif_data,
				       struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mld_stats_data *stats_data = data;
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (cmd == WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF))
		iwl_mld_fill_stats_from_oper_notif(stats_data->mld, pkt,
						   stats_data->fw_sta_id,
						   stats_data->sinfo);

	return cmd == WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF);
}
/* Synchronously request on-demand statistics from the FW and fill
 * @sinfo from the resulting operational notification.
 *
 * Returns 0 on success, or an error if sending the command or waiting
 * for the END notification failed.
 */
static int
iwl_mld_fw_stats_to_mac80211(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta,
			     struct station_info *sinfo)
{
	u32 cfg_mask = IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK |
		       IWL_STATS_CFG_FLG_RESET_MSK;
	u32 type_mask = IWL_STATS_NTFY_TYPE_ID_OPER |
			IWL_STATS_NTFY_TYPE_ID_OPER_PART1;
	static const u16 notifications[] = {
		WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF),
		WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF),
		WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
	};
	struct iwl_mld_stats_data wait_stats_data = {
		/* We don't support drv_sta_statistics in EMLSR */
		.fw_sta_id = mld_sta->deflink.fw_id,
		.sinfo = sinfo,
		.mld = mld,
	};
	struct iwl_notification_wait stats_wait;
	int ret;

	/* Register the waiter before sending so no notification is lost */
	iwl_init_notification_wait(&mld->notif_wait, &stats_wait,
				   notifications, ARRAY_SIZE(notifications),
				   iwl_mld_wait_stats_handler,
				   &wait_stats_data);

	ret = iwl_mld_send_fw_stats_cmd(mld, cfg_mask, 0, type_mask);
	if (ret) {
		iwl_remove_notification(&mld->notif_wait, &stats_wait);
		return ret;
	}

	/* Wait 500ms for OPERATIONAL, PART1, and END notifications,
	 * which should be sufficient for the firmware to gather data
	 * from all LMACs and send notifications to the host.
	 */
	ret = iwl_wait_notification(&mld->notif_wait, &stats_wait, HZ / 2);
	if (ret)
		return ret;

	/* When periodic statistics are sent, FW will clear its statistics DB.
	 * If the statistics request here happens shortly afterwards,
	 * the response will contain data collected over a short time
	 * interval. The response we got here shouldn't be processed by
	 * the general statistics processing because it's incomplete.
	 * So, we delete it from the list so it won't be processed.
	 */
	iwl_mld_delete_handlers(mld, notifications, ARRAY_SIZE(notifications));

	return 0;
}
/* Interval, in seconds, of the periodic statistics notifications */
#define PERIODIC_STATS_SECONDS 5

/* Enable or disable periodic operational statistics from the FW */
int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable)
{
	u32 cfg_mask, cfg_time, type_mask;

	if (enable) {
		cfg_mask = 0;
		cfg_time = PERIODIC_STATS_SECONDS;
		type_mask = IWL_STATS_NTFY_TYPE_ID_OPER |
			    IWL_STATS_NTFY_TYPE_ID_OPER_PART1;
	} else {
		cfg_mask = IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
		cfg_time = 0;
		type_mask = 0;
	}

	return iwl_mld_send_fw_stats_cmd(mld, cfg_mask, cfg_time, type_mask);
}
/* Translate the last TX rate_n_flags of the default link STA into a
 * cfg80211 &struct rate_info in @sinfo->txrate.
 */
static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
					  struct station_info *sinfo)
{
	struct rate_info *rinfo = &sinfo->txrate;
	u32 rate_n_flags = mld_sta->deflink.last_rate_n_flags;
	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
	u32 gi_ltf;

	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);

	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		rinfo->bw = RATE_INFO_BW_20;
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rinfo->bw = RATE_INFO_BW_40;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rinfo->bw = RATE_INFO_BW_80;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rinfo->bw = RATE_INFO_BW_160;
		break;
	case RATE_MCS_CHAN_WIDTH_320:
		rinfo->bw = RATE_INFO_BW_320;
		break;
	}

	/* CCK / legacy OFDM: rate_info::legacy is in 100 kbit/s units */
	if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) {
		int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);

		/* add the offset needed to get to the legacy ofdm indices */
		if (format == RATE_MCS_LEGACY_OFDM_MSK)
			rate += IWL_FIRST_OFDM_RATE;

		switch (rate) {
		case IWL_RATE_1M_INDEX:
			rinfo->legacy = 10;
			break;
		case IWL_RATE_2M_INDEX:
			rinfo->legacy = 20;
			break;
		case IWL_RATE_5M_INDEX:
			rinfo->legacy = 55;
			break;
		case IWL_RATE_11M_INDEX:
			rinfo->legacy = 110;
			break;
		case IWL_RATE_6M_INDEX:
			rinfo->legacy = 60;
			break;
		case IWL_RATE_9M_INDEX:
			rinfo->legacy = 90;
			break;
		case IWL_RATE_12M_INDEX:
			rinfo->legacy = 120;
			break;
		case IWL_RATE_18M_INDEX:
			rinfo->legacy = 180;
			break;
		case IWL_RATE_24M_INDEX:
			rinfo->legacy = 240;
			break;
		case IWL_RATE_36M_INDEX:
			rinfo->legacy = 360;
			break;
		case IWL_RATE_48M_INDEX:
			rinfo->legacy = 480;
			break;
		case IWL_RATE_54M_INDEX:
			rinfo->legacy = 540;
			/* last case - no break needed */
		}
		return;
	}

	/* HT/VHT/HE/EHT: fill NSS, MCS index and GI flags */
	rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;

	if (format == RATE_MCS_HT_MSK)
		rinfo->mcs = RATE_HT_MCS_INDEX(rate_n_flags);
	else
		rinfo->mcs = u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);

	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;

	switch (format) {
	case RATE_MCS_EHT_MSK:
		rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
		break;
	case RATE_MCS_HE_MSK:
		gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);

		rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;

		if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
			rinfo->bw = RATE_INFO_BW_HE_RU;
			rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
		}

		/* Map the FW GI/LTF encoding to nl80211 HE GI values;
		 * the mapping depends on the HE PPDU type.
		 */
		switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
		case RATE_MCS_HE_TYPE_SU:
		case RATE_MCS_HE_TYPE_EXT_SU:
			if (gi_ltf == 0 || gi_ltf == 1)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
			else if (gi_ltf == 2)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
			else if (gi_ltf == 3)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
			else
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
			break;
		case RATE_MCS_HE_TYPE_MU:
			if (gi_ltf == 0 || gi_ltf == 1)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
			else if (gi_ltf == 2)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
			else
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
			break;
		case RATE_MCS_HE_TYPE_TRIG:
			if (gi_ltf == 0 || gi_ltf == 1)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
			else
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
			break;
		}

		if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
			rinfo->he_dcm = 1;
		break;
	case RATE_MCS_HT_MSK:
		rinfo->flags |= RATE_INFO_FLAGS_MCS;
		break;
	case RATE_MCS_VHT_MSK:
		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
		break;
	}
}
/* mac80211 sta_statistics() callback: query the FW on demand and fill
 * @sinfo (signal average and last TX rate).
 */
void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_sta *sta,
				     struct station_info *sinfo)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	int ret;

	/* This API is not EMLSR ready, so we cannot provide complete
	 * information if EMLSR is active
	 */
	if (hweight16(vif->active_links) > 1)
		return;

	ret = iwl_mld_fw_stats_to_mac80211(mld_sta->mld, mld_sta, sinfo);
	if (ret)
		return;

	iwl_mld_sta_stats_fill_txrate(mld_sta, sinfo);

	/* TODO: NL80211_STA_INFO_BEACON_RX */
	/* TODO: NL80211_STA_INFO_BEACON_SIGNAL_AVG */
}
#define IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH	10 /* percentage */
#define IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH	50 /* percentage */
#define IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC	(500 * 1000)

/* Airtime used, as a percentage of the elapsed window; returns 0 when
 * the window is too short to be meaningful.
 * NOTE(review): 100 * airtime is computed in u32, which would wrap for
 * airtime above ~42.9 seconds - confirm windows are always shorter.
 */
static u8 iwl_mld_stats_load_percentage(u32 last_ts_usec, u32 curr_ts_usec,
					u32 total_airtime_usec)
{
	u32 window_usec = curr_ts_usec - last_ts_usec;

	if (window_usec < IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC)
		return 0;

	return 100 * total_airtime_usec / window_usec;
}
/* Classify the traffic load (low/medium/high) from the airtime used
 * since the previous statistics notification, then remember the current
 * timestamp for the next round.
 */
static void iwl_mld_stats_recalc_traffic_load(struct iwl_mld *mld,
					      u32 total_airtime_usec,
					      u32 curr_ts_usec)
{
	u32 prev_ts_usec = mld->scan.traffic_load.last_stats_ts_usec;

	/* No previous timestamp yet - skip the calculation on the very
	 * first notification
	 */
	if (prev_ts_usec) {
		u8 load = iwl_mld_stats_load_percentage(prev_ts_usec,
							curr_ts_usec,
							total_airtime_usec);

		if (load > IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH)
			mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_HIGH;
		else if (load > IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH)
			mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_MEDIUM;
		else
			mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_LOW;
	}

	mld->scan.traffic_load.last_stats_ts_usec = curr_ts_usec;
}
/* React to an updated beacon signal level on a link.
 *
 * When EMLSR is possible but inactive, a low RSSI triggers link
 * (re)selection; when EMLSR is active, dropping below the exit
 * threshold forces leaving EMLSR in favor of the other link.
 */
static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig,
				    struct ieee80211_bss_conf *bss_conf)
{
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	int exit_emlsr_thresh;

	/* 0 means "no measurement", not an actual signal of 0 dBm */
	if (sig == 0) {
		IWL_DEBUG_RX(mld, "RSSI is 0 - skip signal based decision\n");
		return;
	}

	/* TODO: task=statistics handle CQM notifications */

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	/* Handle inactive EMLSR, check whether to switch links */
	if (!iwl_mld_emlsr_active(vif)) {
		if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
			iwl_mld_trigger_link_selection(mld, vif);
		return;
	}

	/* We are in EMLSR, check if we need to exit */
	exit_emlsr_thresh =
		iwl_mld_get_emlsr_rssi_thresh(mld, &bss_conf->chanreq.oper,
					      true);

	if (sig < exit_emlsr_thresh)
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LOW_RSSI,
				   iwl_mld_get_other_link(vif,
							  bss_conf->link_id));
}
/* Walk all station-mode links known to the FW: feed each link's beacon
 * signal into the EMLSR logic and accumulate airtime for the traffic
 * load estimation.
 */
static void
iwl_mld_process_per_link_stats(struct iwl_mld *mld,
			       const struct iwl_stats_ntfy_per_link *per_link,
			       u32 curr_ts_usec)
{
	u32 total_airtime_usec = 0;

	for (u32 fw_id = 0;
	     fw_id < ARRAY_SIZE(mld->fw_id_to_bss_conf);
	     fw_id++) {
		const struct iwl_stats_ntfy_per_link *link_stats;
		struct ieee80211_bss_conf *bss_conf;
		int sig;

		bss_conf = wiphy_dereference(mld->wiphy,
					     mld->fw_id_to_bss_conf[fw_id]);
		/* Only station-mode links are of interest here */
		if (!bss_conf || bss_conf->vif->type != NL80211_IFTYPE_STATION)
			continue;

		link_stats = &per_link[fw_id];

		total_airtime_usec += le32_to_cpu(link_stats->air_time);

		/* FW energy is a positive number; signal is its negation */
		sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
		iwl_mld_update_link_sig(bss_conf->vif, sig, bss_conf);

		/* TODO: parse more fields here (task=statistics)*/
	}

	iwl_mld_stats_recalc_traffic_load(mld, total_airtime_usec,
					  curr_ts_usec);
}
/* Cache the average signal of every FW-known station in its link STA.
 * A zero average energy means "no new data" from the FW and is skipped,
 * keeping the previously stored value.
 */
static void
iwl_mld_process_per_sta_stats(struct iwl_mld *mld,
			      const struct iwl_stats_ntfy_per_sta *per_sta)
{
	for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
		struct ieee80211_link_sta *link_sta =
			wiphy_dereference(mld->wiphy,
					  mld->fw_id_to_link_sta[i]);
		struct iwl_mld_link_sta *mld_link_sta;
		s8 avg_energy =
			-(s8)le32_to_cpu(per_sta[i].average_energy);

		if (IS_ERR_OR_NULL(link_sta) || !avg_energy)
			continue;

		mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
		if (WARN_ON(!mld_link_sta))
			continue;

		mld_link_sta->signal_avg = avg_energy;
	}
}
/* ieee80211_iter_chan_contexts_mtx() callback: copy the FW-reported
 * channel load of our own traffic into the matching driver PHY context.
 */
static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw,
				       struct ieee80211_chanctx_conf *ctx,
				       void *data)
{
	const struct iwl_stats_ntfy_per_phy *per_phy = data;
	struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);

	if (WARN_ON(phy->fw_id >= IWL_STATS_MAX_PHY_OPERATIONAL))
		return;

	phy->channel_load_by_us =
		le32_to_cpu(per_phy[phy->fw_id].channel_load_by_us);

	/* TODO: channel load not by us (task=statistics) */
}
static void
iwl_mld_process_per_phy_stats(struct iwl_mld *mld,
const struct iwl_stats_ntfy_per_phy *per_phy)
{
ieee80211_iter_chan_contexts_mtx(mld->hw,
iwl_mld_fill_chanctx_stats,
(void *)(uintptr_t)per_phy);
/* channel_load_by_us may have been updated, so recheck */
iwl_mld_emlsr_check_chan_load(mld);
}
/* Handler for the operational statistics notification: dispatches the
 * per-link, per-station and per-PHY parts and re-checks OMI bandwidth
 * reduction with the fresh data.
 */
void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	const struct iwl_system_statistics_notif_oper *stats =
		(void *)&pkt->data;
	u32 curr_ts_usec = le32_to_cpu(stats->time_stamp);

	/* The FW arrays must cover all driver-side ids */
	BUILD_BUG_ON(ARRAY_SIZE(stats->per_sta) != IWL_STATION_COUNT_MAX);
	BUILD_BUG_ON(ARRAY_SIZE(stats->per_link) <
		     ARRAY_SIZE(mld->fw_id_to_bss_conf));

	iwl_mld_process_per_link_stats(mld, stats->per_link, curr_ts_usec);
	iwl_mld_process_per_sta_stats(mld, stats->per_sta);
	iwl_mld_process_per_phy_stats(mld, stats->per_phy);

	iwl_mld_check_omi_bw_reduction(mld);
}
/* Handler for STATISTICS_OPER_PART1_NOTIF; parsing is not implemented
 * yet, the notification is accepted and ignored.
 */
void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	/* TODO */
}

View File

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_stats_h__
#define __iwl_mld_stats_h__
int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable);
void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo);
void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld);
#endif /* __iwl_mld_stats_h__ */

View File

@ -0,0 +1,5 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
# KUnit test objects that make up the iwlmld-tests module
iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o

# Let the tests include iwlmld headers ("mld.h" etc.) directly
ccflags-y += -I$(src)/../

obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmld-tests.o

View File

@ -0,0 +1,663 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* KUnit tests for the RX reorder buffer
*
* Copyright (C) 2024 Intel Corporation
*/
#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <kunit/skbuff.h>
#include "utils.h"
#include "mld.h"
#include "sta.h"
#include "agg.h"
#include "rx.h"
#define FC_QOS_DATA (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)
#define BA_WINDOW_SIZE 64
#define QUEUE 0
static const struct reorder_buffer_case {
const char *desc;
struct {
/* ieee80211_hdr fields */
u16 fc;
u8 tid;
bool multicast;
/* iwl_rx_mpdu_desc fields */
u16 nssn;
/* used also for setting hdr::seq_ctrl */
u16 sn;
u8 baid;
bool amsdu;
bool last_subframe;
bool old_sn;
bool dup;
} rx_pkt;
struct {
bool valid;
u16 head_sn;
u8 baid;
u16 num_entries;
/* The test prepares the reorder buffer with fake skbs based
* on the sequence numbers provided in @entries array.
*/
struct {
u16 sn;
/* Set add_subframes > 0 to simulate an A-MSDU by
* queueing additional @add_subframes skbs in the
* appropriate reorder buffer entry (based on the @sn)
*/
u8 add_subframes;
} entries[BA_WINDOW_SIZE];
} reorder_buf_state;
struct {
enum iwl_mld_reorder_result reorder_res;
u16 head_sn;
u16 num_stored;
u16 skb_release_order[BA_WINDOW_SIZE];
u16 skb_release_order_count;
} expected;
} reorder_buffer_cases[] = {
{
.desc = "RX packet with invalid BAID",
.rx_pkt = {
.fc = FC_QOS_DATA,
.baid = IWL_RX_REORDER_DATA_INVALID_BAID,
},
.reorder_buf_state = {
.valid = true,
},
.expected = {
/* Invalid BAID should not be buffered. The frame is
* passed to the network stack immediately.
*/
.reorder_res = IWL_MLD_PASS_SKB,
.num_stored = 0,
},
},
{
.desc = "RX Multicast packet",
.rx_pkt = {
.fc = FC_QOS_DATA,
.multicast = true,
},
.reorder_buf_state = {
.valid = true,
},
.expected = {
/* Multicast packets are not buffered. The packet is
* passed to the network stack immediately.
*/
.reorder_res = IWL_MLD_PASS_SKB,
.num_stored = 0,
},
},
{
.desc = "RX non-QoS data",
.rx_pkt = {
.fc = IEEE80211_FTYPE_DATA,
},
.reorder_buf_state = {
.valid = true,
},
.expected = {
/* non-QoS data frames do not require reordering.
* The packet is passed to the network stack
* immediately.
*/
.reorder_res = IWL_MLD_PASS_SKB,
},
},
{
.desc = "RX old SN packet, reorder buffer is not yet valid",
.rx_pkt = {
.fc = FC_QOS_DATA,
.old_sn = true,
},
.reorder_buf_state = {
.valid = false,
},
.expected = {
/* The buffer is invalid and the RX packet has an old
* SN. The packet is passed to the network stack
* immediately.
*/
.reorder_res = IWL_MLD_PASS_SKB,
},
},
{
.desc = "RX old SN packet, reorder buffer valid",
.rx_pkt = {
.fc = FC_QOS_DATA,
.old_sn = true,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
},
.expected = {
/* The buffer is valid and the RX packet has an old SN.
* The packet should be dropped.
*/
.reorder_res = IWL_MLD_DROP_SKB,
.num_stored = 0,
.head_sn = 100,
},
},
{
.desc = "RX duplicate packet",
.rx_pkt = {
.fc = FC_QOS_DATA,
.dup = true,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
},
.expected = {
/* Duplicate packets should be dropped */
.reorder_res = IWL_MLD_DROP_SKB,
.num_stored = 0,
.head_sn = 100,
},
},
{
.desc = "RX In-order packet, sn < nssn",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 100,
.nssn = 101,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
},
.expected = {
/* 1. Reorder buffer is empty.
* 2. RX packet SN is in order and less than NSSN.
* Packet is released to the network stack immediately
* and buffer->head_sn is updated to NSSN.
*/
.reorder_res = IWL_MLD_PASS_SKB,
.num_stored = 0,
.head_sn = 101,
},
},
{
.desc = "RX In-order packet, sn == head_sn",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 101,
.nssn = 100,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 101,
},
.expected = {
/* 1. Reorder buffer is empty.
* 2. RX packet SN is equal to buffer->head_sn.
* Packet is released to the network stack immediately
* and buffer->head_sn is incremented.
*/
.reorder_res = IWL_MLD_PASS_SKB,
.num_stored = 0,
.head_sn = 102,
},
},
{
.desc = "RX In-order packet, IEEE80211_MAX_SN wrap around",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = IEEE80211_MAX_SN,
.nssn = IEEE80211_MAX_SN - 1,
},
.reorder_buf_state = {
.valid = true,
.head_sn = IEEE80211_MAX_SN,
},
.expected = {
/* 1. Reorder buffer is empty.
* 2. RX SN == buffer->head_sn == IEEE80211_MAX_SN
* Packet is released to the network stack immediately
* and buffer->head_sn is incremented correctly (wraps
* around to 0).
*/
.reorder_res = IWL_MLD_PASS_SKB,
.num_stored = 0,
.head_sn = 0,
},
},
{
.desc = "RX Out-of-order packet, pending packet in buffer",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 100,
.nssn = 102,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
.num_entries = 1,
.entries[0].sn = 101,
},
.expected = {
/* 1. Reorder buffer contains one packet with SN=101.
* 2. RX packet SN = buffer->head_sn.
* Both packets are released (in order) to the network
* stack as there are no gaps.
*/
.reorder_res = IWL_MLD_BUFFERED_SKB,
.num_stored = 0,
.head_sn = 102,
.skb_release_order = {100, 101},
.skb_release_order_count = 2,
},
},
{
.desc = "RX Out-of-order packet, pending packet in buffer (wrap around)",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 0,
.nssn = 1,
},
.reorder_buf_state = {
.valid = true,
.head_sn = IEEE80211_MAX_SN - 1,
.num_entries = 1,
.entries[0].sn = IEEE80211_MAX_SN,
},
.expected = {
/* 1. Reorder buffer contains one packet with
* SN=IEEE80211_MAX_SN.
* 2. RX Packet SN = 0 (after wrap around)
* Both packets are released (in order) to the network
* stack as there are no gaps.
*/
.reorder_res = IWL_MLD_BUFFERED_SKB,
.num_stored = 0,
.head_sn = 1,
.skb_release_order = { 4095, 0 },
.skb_release_order_count = 2,
},
},
{
.desc = "RX Out-of-order packet, filling 1/2 holes in buffer, release RX packet",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 100,
.nssn = 101,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
.num_entries = 1,
.entries[0].sn = 102,
},
.expected = {
/* 1. Reorder buffer contains one packet with SN=102.
* 2. There are 2 holes at SN={100, 101}.
* Only the RX packet (SN=100) is released, there is
* still a hole at 101.
*/
.reorder_res = IWL_MLD_BUFFERED_SKB,
.num_stored = 1,
.head_sn = 101,
.skb_release_order = {100},
.skb_release_order_count = 1,
},
},
{
.desc = "RX Out-of-order packet, filling 1/2 holes, release 2 packets",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 102,
.nssn = 103,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
.num_entries = 3,
.entries[0].sn = 101,
.entries[1].sn = 104,
.entries[2].sn = 105,
},
.expected = {
/* 1. Reorder buffer contains three packets.
* 2. RX packet fills one of two holes (at SN=102).
* Two packets are released (until the next hole at
* SN=103).
*/
.reorder_res = IWL_MLD_BUFFERED_SKB,
.num_stored = 2,
.head_sn = 103,
.skb_release_order = {101, 102},
.skb_release_order_count = 2,
},
},
{
.desc = "RX Out-of-order packet, filling 1/1 holes, no packets released",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 102,
.nssn = 100,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
.num_entries = 3,
.entries[0].sn = 101,
.entries[1].sn = 103,
.entries[2].sn = 104,
},
.expected = {
/* 1. Reorder buffer contains three packets:
* SN={101, 103, 104}.
* 2. RX packet fills a hole (SN=102), but NSSN is
* smaller than buffered frames.
* No packets can be released yet and buffer->head_sn
* is not updated.
*/
.reorder_res = IWL_MLD_BUFFERED_SKB,
.num_stored = 4,
.head_sn = 100,
},
},
{
.desc = "RX In-order A-MSDU, last subframe",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 100,
.nssn = 101,
.amsdu = true,
.last_subframe = true,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
.num_entries = 1,
.entries[0] = {
.sn = 100,
.add_subframes = 1,
},
},
.expected = {
/* 1. Reorder buffer contains a 2-sub frames A-MSDU
* at SN=100.
* 2. RX packet is the last SN=100 A-MSDU subframe
* All packets are released in order (3 x SN=100).
*/
.reorder_res = IWL_MLD_BUFFERED_SKB,
.num_stored = 0,
.head_sn = 101,
.skb_release_order = {100, 100, 100},
.skb_release_order_count = 3,
},
},
{
.desc = "RX In-order A-MSDU, not the last subframe",
.rx_pkt = {
.fc = FC_QOS_DATA,
.sn = 100,
.nssn = 101,
.amsdu = true,
.last_subframe = false,
},
.reorder_buf_state = {
.valid = true,
.head_sn = 100,
.num_entries = 1,
.entries[0] = {
.sn = 100,
.add_subframes = 1,
},
},
.expected = {
/* 1. Reorder buffer contains a 2-sub frames A-MSDU
* at SN=100.
* 2. RX packet additional SN=100 A-MSDU subframe,
* but not the last one
* No packets are released and head_sn is not updated.
*/
.reorder_res = IWL_MLD_BUFFERED_SKB,
.num_stored = 3,
.head_sn = 100,
},
},
};
KUNIT_ARRAY_PARAM_DESC(test_reorder_buffer, reorder_buffer_cases, desc);
static struct sk_buff_head g_released_skbs;
static u16 g_num_released_skbs;
/* Add released skbs from reorder buffer to a global list; This allows us
 * to verify the correct release order of packets after they pass through the
 * simulated reorder logic.
 */
static void
fake_iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
				     struct napi_struct *napi,
				     struct sk_buff *skb, int queue,
				     struct ieee80211_sta *sta)
{
	/* Stub for iwl_mld_pass_packet_to_mac80211(): queue instead of RX */
	__skb_queue_tail(&g_released_skbs, skb);
	g_num_released_skbs++;
}
/* Stub for iwl_mld_fw_sta_id_mask(): build the station's FW-id mask
 * directly from the driver's link STA entries, avoiding the parts of
 * the real implementation that need a full mac80211 setup.
 */
static u32
fake_iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u8 link_id;
	u32 sta_mask = 0;

	/* This is the expectation in the real function */
	lockdep_assert_wiphy(mld->wiphy);

	/* We can't use for_each_sta_active_link */
	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
		sta_mask |= BIT(mld_link_sta->fw_id);
	return sta_mask;
}
/* Build an RX MPDU descriptor matching the current test case: BAID, SN,
 * NSSN and the old-SN / duplicate / A-MSDU indications.
 */
static struct iwl_rx_mpdu_desc *setup_mpdu_desc(void)
{
	struct kunit *test = kunit_get_current_test();
	const struct reorder_buffer_case *param =
		(const void *)(test->param_value);
	struct iwl_rx_mpdu_desc *mpdu_desc;

	/* NOTE(review): the |= below assume the allocation is zeroed -
	 * confirm KUNIT_ALLOC_AND_ASSERT wraps kunit_kzalloc()
	 */
	KUNIT_ALLOC_AND_ASSERT(test, mpdu_desc);

	mpdu_desc->reorder_data |=
		cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_BAID_MASK,
				       param->rx_pkt.baid));
	mpdu_desc->reorder_data |=
		cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_SN_MASK,
				       param->rx_pkt.sn));
	mpdu_desc->reorder_data |=
		cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_NSSN_MASK,
				       param->rx_pkt.nssn));
	if (param->rx_pkt.old_sn)
		mpdu_desc->reorder_data |=
			cpu_to_le32(IWL_RX_MPDU_REORDER_BA_OLD_SN);

	if (param->rx_pkt.dup)
		mpdu_desc->status |= cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE);

	if (param->rx_pkt.amsdu) {
		mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
		if (param->rx_pkt.last_subframe)
			mpdu_desc->amsdu_info |=
				IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
	}

	return mpdu_desc;
}
/* Allocate a test skb carrying an 802.11 header built from @fc,
 * @seq_ctrl and (for QoS data) @tid; optionally mark it multicast.
 */
static struct sk_buff *alloc_and_setup_skb(u16 fc, u16 seq_ctrl, u8 tid,
					   bool mcast)
{
	struct kunit *test = kunit_get_current_test();
	struct ieee80211_hdr hdr = {
		.frame_control = cpu_to_le16(fc),
		.seq_ctrl = cpu_to_le16(seq_ctrl),
	};
	struct sk_buff *skb;

	skb = kunit_zalloc_skb(test, 128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, skb);

	if (ieee80211_is_data_qos(hdr.frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(&hdr);

		qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK;
	}

	/* any group-addressed addr1 marks the frame as multicast */
	if (mcast)
		hdr.addr1[0] = 0x1;

	/* mac header starts where the 802.11 header is copied below */
	skb_set_mac_header(skb, skb->len);
	skb_put_data(skb, &hdr, ieee80211_hdrlen(hdr.frame_control));

	return skb;
}
/* Initialize the reorder buffer (and its per-SN entry queues) per the
 * current test case, pre-queueing fake skbs where the case calls for
 * already-buffered frames.
 */
static struct iwl_mld_reorder_buffer *
setup_reorder_buffer(struct iwl_mld_baid_data *baid_data)
{
	struct kunit *test = kunit_get_current_test();
	const struct reorder_buffer_case *param =
		(const void *)(test->param_value);
	struct iwl_mld_reorder_buffer *buffer = baid_data->reorder_buf;
	struct iwl_mld_reorder_buf_entry *entries = baid_data->entries;
	struct sk_buff *fake_skb;

	buffer->valid = param->reorder_buf_state.valid;
	buffer->head_sn = param->reorder_buf_state.head_sn;
	buffer->queue = QUEUE;

	for (int i = 0; i < baid_data->buf_size; i++)
		__skb_queue_head_init(&entries[i].frames);

	for (int i = 0; i < param->reorder_buf_state.num_entries; i++) {
		u16 sn = param->reorder_buf_state.entries[i].sn;
		/* entry slot is the SN modulo the BA window size */
		int index = sn % baid_data->buf_size;
		u8 add_subframes =
			param->reorder_buf_state.entries[i].add_subframes;
		/* create 1 skb per entry + additional skbs per num of
		 * requested subframes
		 */
		u8 num_skbs = 1 + add_subframes;

		for (int j = 0; j < num_skbs; j++) {
			fake_skb = alloc_and_setup_skb(FC_QOS_DATA, sn, 0,
						       false);
			__skb_queue_tail(&entries[index].frames, fake_skb);
			buffer->num_stored++;
		}
	}

	return buffer;
}
/* Allocate and register Block-Ack (BAID) data for @sta with a single
 * RX queue, and return its reorder buffer set up per the test case.
 */
static struct iwl_mld_reorder_buffer *setup_ba_data(struct ieee80211_sta *sta)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	const struct reorder_buffer_case *param =
		(const void *)(test->param_value);
	struct iwl_mld_baid_data *baid_data = NULL;
	struct iwl_mld_reorder_buffer *buffer;
	u32 reorder_buf_size = BA_WINDOW_SIZE * sizeof(baid_data->entries[0]);
	u8 baid = param->reorder_buf_state.baid;

	/* Assuming only 1 RXQ */
	KUNIT_ALLOC_AND_ASSERT_SIZE(test, baid_data,
				    sizeof(*baid_data) + reorder_buf_size);

	baid_data->baid = baid;
	baid_data->tid = param->rx_pkt.tid;
	baid_data->buf_size = BA_WINDOW_SIZE;

	/* iwl_mld_fw_sta_id_mask() asserts the wiphy lock is held */
	wiphy_lock(mld->wiphy);
	baid_data->sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
	wiphy_unlock(mld->wiphy);

	baid_data->entries_per_queue = BA_WINDOW_SIZE;

	buffer = setup_reorder_buffer(baid_data);

	/* The BAID slot must be free before we register ours */
	KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld->fw_id_to_ba[baid]));
	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);

	return buffer;
}
/* Parameterized test body: run one reorder_buffer_cases entry through
 * iwl_mld_reorder() and verify the reorder result, the final buffer
 * state, and the exact release order of the skbs.
 */
static void test_reorder_buffer(struct kunit *test)
{
	struct iwl_mld *mld = test->priv;
	const struct reorder_buffer_case *param =
		(const void *)(test->param_value);
	struct iwl_rx_mpdu_desc *mpdu_desc;
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	struct sk_buff *skb;
	struct iwl_mld_reorder_buffer *buffer;
	enum iwl_mld_reorder_result reorder_res;
	u16 skb_release_order_count = param->expected.skb_release_order_count;
	u16 skb_idx = 0;

	/* Init globals and activate stubs */
	__skb_queue_head_init(&g_released_skbs);
	g_num_released_skbs = 0;
	kunit_activate_static_stub(test, iwl_mld_fw_sta_id_mask,
				   fake_iwl_mld_fw_sta_id_mask);
	kunit_activate_static_stub(test, iwl_mld_pass_packet_to_mac80211,
				   fake_iwl_mld_pass_packet_to_mac80211);

	vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
	sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);

	/* Prepare skb, mpdu_desc, BA data and the reorder buffer */
	skb = alloc_and_setup_skb(param->rx_pkt.fc, param->rx_pkt.sn,
				  param->rx_pkt.tid, param->rx_pkt.multicast);
	buffer = setup_ba_data(sta);
	mpdu_desc = setup_mpdu_desc();

	rcu_read_lock();
	reorder_res = iwl_mld_reorder(mld, NULL, QUEUE, sta, skb, mpdu_desc);
	rcu_read_unlock();

	KUNIT_ASSERT_EQ(test, reorder_res, param->expected.reorder_res);
	KUNIT_ASSERT_EQ(test, buffer->num_stored, param->expected.num_stored);
	KUNIT_ASSERT_EQ(test, buffer->head_sn, param->expected.head_sn);

	/* Verify skbs release order */
	KUNIT_ASSERT_EQ(test, skb_release_order_count, g_num_released_skbs);
	while ((skb = __skb_dequeue(&g_released_skbs))) {
		struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);

		KUNIT_ASSERT_EQ(test, le16_to_cpu(hdr->seq_ctrl),
				param->expected.skb_release_order[skb_idx]);
		skb_idx++;
	}

	/* all expected releases were actually observed */
	KUNIT_ASSERT_EQ(test, skb_idx, skb_release_order_count);
}
/* Case table and suite registration for the reorder buffer tests */
static struct kunit_case reorder_buffer_test_cases[] = {
	KUNIT_CASE_PARAM(test_reorder_buffer, test_reorder_buffer_gen_params),
	{},
};

static struct kunit_suite reorder_buffer = {
	.name = "iwlmld-reorder-buffer",
	.test_cases = reorder_buffer_test_cases,
	.init = iwlmld_kunit_test_init,
};

kunit_test_suite(reorder_buffer);

View File

@ -0,0 +1,62 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* KUnit tests for the host command (hcmd) name tables
*
* Copyright (C) 2024 Intel Corporation
*/
#include <kunit/test.h>
#include <iwl-trans.h>
#include "mld.h"
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
/* Every host-command group table must be sorted by command id; the
 * command-name lookup relies on that ordering.
 */
static void test_hcmd_names_sorted(struct kunit *test)
{
	for (int grp = 0; grp < global_iwl_mld_goups_size; grp++) {
		const struct iwl_hcmd_arr *cmds = &iwl_mld_groups[grp];

		/* Some groups have no command table at all */
		if (!cmds->arr)
			continue;

		for (int idx = 1; idx < cmds->size; idx++)
			KUNIT_EXPECT_LE(test, cmds->arr[idx - 1].cmd_id,
					cmds->arr[idx].cmd_id);
	}
}
/* Every registered Rx handler's command id must resolve to a known
 * command name (i.e. not "UNKNOWN") via the group tables.
 */
static void test_hcmd_names_for_rx(struct kunit *test)
{
	static struct iwl_trans t = {
		.command_groups = iwl_mld_groups,
	};

	t.command_groups_size = global_iwl_mld_goups_size;

	for (unsigned int i = 0; i < iwl_mld_rx_handlers_num; i++) {
		const struct iwl_rx_handler *rxh = &iwl_mld_rx_handlers[i];
		const char *name = iwl_get_cmd_string(&t, rxh->cmd_id);

		KUNIT_EXPECT_NOT_NULL(test, name);
		KUNIT_EXPECT_NE_MSG(test, strcmp(name, "UNKNOWN"), 0,
				    "ID 0x%04x is UNKNOWN", rxh->cmd_id);
	}
}
/* Case table and suite registration for the hcmd-names tests.
 * Note: no .init here — these tests don't need an iwl_mld instance.
 */
static struct kunit_case hcmd_names_cases[] = {
	KUNIT_CASE(test_hcmd_names_sorted),
	KUNIT_CASE(test_hcmd_names_for_rx),
	{},
};

static struct kunit_suite hcmd_names = {
	.name = "iwlmld-hcmd-names",
	.test_cases = hcmd_names_cases,
};

kunit_test_suite(hcmd_names);

View File

@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* KUnit tests for link selection functions
*
* Copyright (C) 2025 Intel Corporation
*/
#include <kunit/static_stub.h>
#include "utils.h"
#include "mld.h"
#include "link.h"
#include "iface.h"
#include "phy.h"
/* Link-grading test cases: one candidate link (channel, activity,
 * signal, BSS load) plus the grade iwl_mld_get_link_grade() is
 * expected to compute for it.
 *
 * Fix: the chan_load_by_us member had a stray ';' after its trailing
 * comment, creating an empty struct-declaration (invalid strict ISO C).
 */
static const struct link_grading_test_case {
	const char *desc;
	struct {
		struct {
			u8 link_id;
			const struct cfg80211_chan_def *chandef;
			bool active;
			s32 signal;
			bool has_chan_util_elem;
			u8 chan_util; /* 0-255 , used only if has_chan_util_elem is true */
			u8 chan_load_by_us; /* 0-100, used only if active is true */
		} link;
	} input;
	unsigned int expected_grade;
} link_grading_cases[] = {
	{
		.desc = "channel util of 128 (50%)",
		.input.link = {
			.link_id = 0,
			.chandef = &chandef_2ghz,
			.active = false,
			.has_chan_util_elem = true,
			.chan_util = 128,
		},
		.expected_grade = 86,
	},
	{
		.desc = "channel util of 180 (70%)",
		.input.link = {
			.link_id = 0,
			.chandef = &chandef_2ghz,
			.active = false,
			.has_chan_util_elem = true,
			.chan_util = 180,
		},
		.expected_grade = 51,
	},
	{
		.desc = "channel util of 180 (70%), channel load by us of 10%",
		.input.link = {
			.link_id = 0,
			.chandef = &chandef_2ghz,
			.has_chan_util_elem = true,
			.chan_util = 180,
			.active = true,
			.chan_load_by_us = 10,
		},
		.expected_grade = 67,
	},
	{
		.desc = "no channel util element",
		.input.link = {
			.link_id = 0,
			.chandef = &chandef_2ghz,
			.active = true,
		},
		.expected_grade = 120,
	},
};

KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc);
/* Populate a mac80211 link for the current test case: fake cfg80211 BSS
 * (signal, optional QBSS load element) and, for an active link, the
 * channel load accounted to us in the PHY.
 */
static void setup_link(struct ieee80211_bss_conf *link)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	const struct link_grading_test_case *test_param =
		(const void *)(test->param_value);

	KUNIT_ALLOC_AND_ASSERT(test, link->bss);
	link->bss->signal = DBM_TO_MBM(test_param->input.link.signal);

	link->chanreq.oper = *test_param->input.link.chandef;

	if (test_param->input.link.has_chan_util_elem) {
		struct cfg80211_bss_ies *ies;
		struct ieee80211_bss_load_elem bss_load = {
			.channel_util = test_param->input.link.chan_util,
		};
		struct element *elem =
			iwlmld_kunit_gen_element(WLAN_EID_QBSS_LOAD,
						 &bss_load,
						 sizeof(bss_load));
		unsigned int elem_len = sizeof(*elem) + sizeof(bss_load);

		/* Expose the QBSS load element via both beacon_ies and ies,
		 * as the grading code may look at either.
		 */
		KUNIT_ALLOC_AND_ASSERT_SIZE(test, ies, sizeof(*ies) + elem_len);
		memcpy(ies->data, elem, elem_len);
		ies->len = elem_len;
		rcu_assign_pointer(link->bss->beacon_ies, ies);
		rcu_assign_pointer(link->bss->ies, ies);
	}

	if (test_param->input.link.active) {
		struct ieee80211_chanctx_conf *chan_ctx =
			wiphy_dereference(mld->wiphy, link->chanctx_conf);
		struct iwl_mld_phy *phy;

		KUNIT_ASSERT_NOT_NULL(test, chan_ctx);

		phy = iwl_mld_phy_from_mac80211(chan_ctx);
		phy->channel_load_by_us = test_param->input.link.chan_load_by_us;
	}
}
/* Compute the grade of one link (per the test case) and compare it with
 * the expected value.
 */
static void test_link_grading(struct kunit *test)
{
	struct iwl_mld *mld = test->priv;
	const struct link_grading_test_case *test_param =
		(const void *)(test->param_value);
	struct ieee80211_vif *vif;
	struct ieee80211_bss_conf *link;
	unsigned int actual_grade;
	u8 assoc_link_id;
	/* Extract test case parameters */
	u8 link_id = test_param->input.link.link_id;
	enum nl80211_band band = test_param->input.link.chandef->chan->band;
	bool active = test_param->input.link.active;
	u16 valid_links;

	/* If the link is not active, use a different link as the assoc link */
	if (active) {
		assoc_link_id = link_id;
		valid_links = BIT(link_id);
	} else {
		/* ffz() of the tested link's bit is the lowest link id that
		 * differs from link_id. (Was BIT(ffz(...)), which yields a
		 * bit mask where a link id is expected.)
		 */
		assoc_link_id = ffz(BIT(link_id));
		valid_links = BIT(assoc_link_id) | BIT(link_id);
	}

	vif = iwlmld_kunit_setup_mlo_assoc(valid_links, assoc_link_id, band);

	wiphy_lock(mld->wiphy);
	link = wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);
	KUNIT_ASSERT_NOT_NULL(test, link);

	setup_link(link);

	actual_grade = iwl_mld_get_link_grade(mld, link);
	wiphy_unlock(mld->wiphy);

	/* Assert that the returned grade matches the expected grade */
	KUNIT_EXPECT_EQ(test, actual_grade, test_param->expected_grade);
}
/* Case table and suite registration for the link-selection tests */
static struct kunit_case link_selection_cases[] = {
	KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
	{},
};

static struct kunit_suite link_selection = {
	.name = "iwlmld-link-selection-tests",
	.test_cases = link_selection_cases,
	.init = iwlmld_kunit_test_init,
};

kunit_test_suite(link_selection);

View File

@ -0,0 +1,103 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* KUnit tests for link-related firmware notification handling
*
* Copyright (C) 2024 Intel Corporation
*/
#include <kunit/static_stub.h>
#include "utils.h"
#include "mld.h"
#include "link.h"
#include "iface.h"
#include "fw/api/mac-cfg.h"
/* Missed-beacon test cases: the incoming fw notification (plus whether
 * the vif is in EMLSR), and the expected outcome (disconnect and/or
 * EMLSR exit).
 */
static const struct missed_beacon_test_case {
	const char *desc;
	struct {
		struct iwl_missed_beacons_notif notif;
		bool emlsr;
	} input;
	struct {
		bool disconnected;
		bool emlsr;
	} output;
} missed_beacon_cases[] = {
	{
		.desc = "no EMLSR, no disconnect",
		.input.notif = {
			.consec_missed_beacons = cpu_to_le32(4),
		},
	},
	{
		.desc = "no EMLSR, no beacon loss since Rx, no disconnect",
		.input.notif = {
			.consec_missed_beacons = cpu_to_le32(20),
		},
	},
	{
		.desc = "no EMLSR, beacon loss since Rx, disconnect",
		.input.notif = {
			.consec_missed_beacons = cpu_to_le32(20),
			.consec_missed_beacons_since_last_rx =
				cpu_to_le32(10),
		},
		.output.disconnected = true,
	},
};

KUNIT_ARRAY_PARAM_DESC(test_missed_beacon, missed_beacon_cases, desc);
/* Stub for ieee80211_connection_loss(): record the disconnect by
 * clearing the association flag so the test can observe it.
 */
static void fake_ieee80211_connection_loss(struct ieee80211_vif *vif)
{
	vif->cfg.assoc = false;
}
/* Feed a missed-beacons notification into the driver and verify whether
 * it triggers a disconnect (via the stubbed ieee80211_connection_loss()).
 */
static void test_missed_beacon(struct kunit *test)
{
	struct iwl_mld *mld = test->priv;
	struct iwl_missed_beacons_notif *notif;
	const struct missed_beacon_test_case *test_param =
		(const void *)(test->param_value);
	struct ieee80211_vif *vif;
	struct iwl_rx_packet *pkt;

	/* Capture disconnects instead of going through mac80211 */
	kunit_activate_static_stub(test, ieee80211_connection_loss,
				   fake_ieee80211_connection_loss);

	pkt = iwl_mld_kunit_create_pkt(test_param->input.notif);
	notif = (void *)pkt->data;

	if (test_param->input.emlsr) {
		vif = iwlmld_kunit_assoc_emlsr(0x3, NL80211_BAND_5GHZ,
					       NL80211_BAND_6GHZ);
	} else {
		struct iwl_mld_vif *mld_vif;

		vif = iwlmld_kunit_setup_non_mlo_assoc(NL80211_BAND_6GHZ);
		mld_vif = iwl_mld_vif_from_mac80211(vif);
		/* Point the notification at the vif's (only) link */
		notif->link_id = cpu_to_le32(mld_vif->deflink.fw_id);
	}

	wiphy_lock(mld->wiphy);

	iwl_mld_handle_missed_beacon_notif(mld, pkt);

	wiphy_unlock(mld->wiphy);

	KUNIT_ASSERT_NE(test, vif->cfg.assoc, test_param->output.disconnected);

	/* TODO: add test cases for esr and check */
}
/* Case table and suite registration for the link tests */
static struct kunit_case link_cases[] = {
	KUNIT_CASE_PARAM(test_missed_beacon, test_missed_beacon_gen_params),
	{},
};

static struct kunit_suite link = {
	.name = "iwlmld-link",
	.test_cases = link_cases,
	.init = iwlmld_kunit_test_init,
};

kunit_test_suite(link);

View File

@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* This is just module boilerplate for the iwlmld kunit module.
*
* Copyright (C) 2024 Intel Corporation
*/
#include <linux/module.h>
MODULE_IMPORT_NS("IWLWIFI");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("kunit tests for iwlmld");

View File

@ -0,0 +1,353 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* KUnit tests for duplicate Rx frame detection
*
* Copyright (C) 2024 Intel Corporation
*/
#include <kunit/test.h>
#include "utils.h"
#include "iwl-trans.h"
#include "mld.h"
#include "sta.h"
/* Duplicate-detection test cases for iwl_mld_is_dup().
 *
 * Each case describes one incoming frame (802.11 header fields plus fw
 * mpdu-descriptor fields), the per-queue duplicate-tracking state it is
 * checked against, and the expected verdict plus rx_status flags.
 */
static const struct is_dup_case {
	const char *desc;
	struct {
		/* ieee80211_hdr fields */
		__le16 fc;
		__le16 seq;
		u8 tid;
		bool multicast;
		/* iwl_rx_mpdu_desc fields */
		bool is_amsdu;
		u8 sub_frame_idx;
	} rx_pkt;
	struct {
		__le16 last_seq;
		u8 last_sub_frame_idx;
		u8 tid;
	} dup_data_state;
	struct {
		bool is_dup;
		u32 rx_status_flag;
	} result;
} is_dup_cases[] = {
	/* Frame types that are never subject to duplicate detection */
	{
		.desc = "Control frame",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_CTL),
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = 0,
		}
	},
	{
		.desc = "Null func frame",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_NULLFUNC),
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = 0,
		}
	},
	{
		.desc = "Multicast data",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
			.multicast = true,
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = 0,
		}
	},
	{
		.desc = "QoS null func frame",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_NULLFUNC),
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = 0,
		}
	},
	/* QoS data, tracked per tid */
	{
		.desc = "QoS data new sequence",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_DATA),
			.seq = __cpu_to_le16(0x101),
		},
		.dup_data_state = {
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = RX_FLAG_DUP_VALIDATED,
		},
	},
	{
		.desc = "QoS data same sequence, no retry",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_DATA),
			.seq = __cpu_to_le16(0x100),
		},
		.dup_data_state = {
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = RX_FLAG_DUP_VALIDATED,
		},
	},
	{
		.desc = "QoS data same sequence, has retry",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_DATA |
					    IEEE80211_FCTL_RETRY),
			.seq = __cpu_to_le16(0x100),
		},
		.dup_data_state = {
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = true,
			.rx_status_flag = 0,
		},
	},
	{
		.desc = "QoS data invalid tid",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_DATA),
			.seq = __cpu_to_le16(0x100),
			.tid = IWL_MAX_TID_COUNT + 1,
		},
		.result = {
			.is_dup = true,
			.rx_status_flag = 0,
		},
	},
	/* non-QoS data, tracked under tid == IWL_MAX_TID_COUNT */
	{
		.desc = "non-QoS data, same sequence, same tid, no retry",
		.rx_pkt = {
			/* Driver will use tid = IWL_MAX_TID_COUNT */
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
			.seq = __cpu_to_le16(0x100),
		},
		.dup_data_state = {
			.tid = IWL_MAX_TID_COUNT,
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = RX_FLAG_DUP_VALIDATED,
		},
	},
	{
		.desc = "non-QoS data, same sequence, same tid, has retry",
		.rx_pkt = {
			/* Driver will use tid = IWL_MAX_TID_COUNT */
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_FCTL_RETRY),
			.seq = __cpu_to_le16(0x100),
		},
		.dup_data_state = {
			.tid = IWL_MAX_TID_COUNT,
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = true,
			.rx_status_flag = 0,
		},
	},
	{
		.desc = "non-QoS data, same sequence on different tid's",
		.rx_pkt = {
			/* Driver will use tid = IWL_MAX_TID_COUNT */
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
			.seq = __cpu_to_le16(0x100),
		},
		.dup_data_state = {
			.tid = 7,
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = RX_FLAG_DUP_VALIDATED,
		},
	},
	/* A-MSDU subframe handling */
	{
		.desc = "A-MSDU new subframe, allow same PN",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_DATA),
			.seq = __cpu_to_le16(0x100),
			.is_amsdu = true,
			.sub_frame_idx = 1,
		},
		.dup_data_state = {
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = RX_FLAG_ALLOW_SAME_PN |
					  RX_FLAG_DUP_VALIDATED,
		},
	},
	{
		.desc = "A-MSDU subframe with smaller idx, disallow same PN",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_DATA),
			.seq = __cpu_to_le16(0x100),
			.is_amsdu = true,
			.sub_frame_idx = 1,
		},
		.dup_data_state = {
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 2,
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = RX_FLAG_DUP_VALIDATED,
		},
	},
	{
		.desc = "A-MSDU same subframe, no retry, disallow same PN",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_DATA),
			.seq = __cpu_to_le16(0x100),
			.is_amsdu = true,
			.sub_frame_idx = 0,
		},
		.dup_data_state = {
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = false,
			.rx_status_flag = RX_FLAG_DUP_VALIDATED,
		},
	},
	{
		.desc = "A-MSDU same subframe, has retry",
		.rx_pkt = {
			.fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
					    IEEE80211_STYPE_QOS_DATA |
					    IEEE80211_FCTL_RETRY),
			.seq = __cpu_to_le16(0x100),
			.is_amsdu = true,
			.sub_frame_idx = 0,
		},
		.dup_data_state = {
			.last_seq = __cpu_to_le16(0x100),
			.last_sub_frame_idx = 0,
		},
		.result = {
			.is_dup = true,
			.rx_status_flag = 0,
		},
	},
};

KUNIT_ARRAY_PARAM_DESC(test_is_dup, is_dup_cases, desc);
/* Seed the sta's per-queue duplicate-tracking state from the test case */
static void
setup_dup_data_state(struct ieee80211_sta *sta)
{
	struct kunit *test = kunit_get_current_test();
	const struct is_dup_case *param = (const void *)(test->param_value);
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	u8 tid = param->dup_data_state.tid;
	struct iwl_mld_rxq_dup_data *dup_data;

	/* Allocate dup_data only for 1 queue */
	KUNIT_ALLOC_AND_ASSERT(test, dup_data);

	/* Initialize dup data, see iwl_mld_alloc_dup_data */
	memset(dup_data->last_seq, 0xff, sizeof(dup_data->last_seq));

	dup_data->last_seq[tid] = param->dup_data_state.last_seq;
	dup_data->last_sub_frame_idx[tid] =
		param->dup_data_state.last_sub_frame_idx;

	mld_sta->dup_data = dup_data;
}
/* Build the 802.11 header and the fw mpdu descriptor for the incoming
 * frame described by @param.
 */
static void setup_rx_pkt(const struct is_dup_case *param,
			 struct ieee80211_hdr *hdr,
			 struct iwl_rx_mpdu_desc *mpdu_desc)
{
	/* 802.11 header of the "new rx packet" */
	hdr->frame_control = param->rx_pkt.fc;
	hdr->seq_ctrl = param->rx_pkt.seq;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qos_ctl = ieee80211_get_qos_ctl(hdr);

		qos_ctl[0] = param->rx_pkt.tid & IEEE80211_QOS_CTL_TID_MASK;
	}

	if (param->rx_pkt.multicast)
		hdr->addr1[0] = 0x1;

	/* fw metadata: A-MSDU subframe index and flag */
	mpdu_desc->amsdu_info = param->rx_pkt.sub_frame_idx &
				IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
	if (param->rx_pkt.is_amsdu)
		mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
}
/* Run iwl_mld_is_dup() on a single frame and check the verdict and the
 * rx_status flags against the test case expectations.
 */
static void test_is_dup(struct kunit *test)
{
	const struct is_dup_case *param = (const void *)(test->param_value);
	struct iwl_mld *mld = test->priv;
	struct iwl_rx_mpdu_desc mpdu_desc = { };
	struct ieee80211_rx_status rx_status = { };
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	struct ieee80211_hdr hdr;

	vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
	sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);

	/* Prepare test case state */
	setup_dup_data_state(sta);
	setup_rx_pkt(param, &hdr, &mpdu_desc);

	KUNIT_EXPECT_EQ(test,
			iwl_mld_is_dup(mld, sta, &hdr, &mpdu_desc, &rx_status,
				       0), /* assuming only 1 queue */
			param->result.is_dup);
	KUNIT_EXPECT_EQ(test, rx_status.flag, param->result.rx_status_flag);
}
/* Case table and suite registration for the rx-is-dup tests */
static struct kunit_case is_dup_test_cases[] = {
	KUNIT_CASE_PARAM(test_is_dup, test_is_dup_gen_params),
	{},
};

static struct kunit_suite is_dup = {
	.name = "iwlmld-rx-is-dup",
	.test_cases = is_dup_test_cases,
	.init = iwlmld_kunit_test_init,
};

kunit_test_suite(is_dup);

View File

@ -0,0 +1,475 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Shared utilities for the iwlmld KUnit tests
*
* Copyright (C) 2024-2025 Intel Corporation
*/
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include "utils.h"
#include <linux/device.h>
#include "fw/api/scan.h"
#include "fw/api/mac-cfg.h"
#include "iwl-trans.h"
#include "mld.h"
#include "iface.h"
#include "link.h"
#include "phy.h"
#include "sta.h"
/* Common suite init: build a minimal, "fw alive" iwl_mld instance (with
 * fake trans/cfg/fw/hw) and stash it in test->priv for the test cases.
 * All allocations are kunit-managed and freed at the end of the test.
 */
int iwlmld_kunit_test_init(struct kunit *test)
{
	struct iwl_mld *mld;
	struct iwl_trans *trans;
	const struct iwl_cfg *cfg;
	struct iwl_fw *fw;
	struct ieee80211_hw *hw;

	KUNIT_ALLOC_AND_ASSERT(test, trans);
	KUNIT_ALLOC_AND_ASSERT(test, trans->dev);
	KUNIT_ALLOC_AND_ASSERT(test, cfg);
	KUNIT_ALLOC_AND_ASSERT(test, fw);
	KUNIT_ALLOC_AND_ASSERT(test, hw);
	KUNIT_ALLOC_AND_ASSERT(test, hw->wiphy);

	/* Needed so wiphy_lock()/wiphy_dereference() work in the tests */
	mutex_init(&hw->wiphy->mtx);

	/* Allocate and initialize the mld structure */
	KUNIT_ALLOC_AND_ASSERT(test, mld);
	iwl_construct_mld(mld, trans, cfg, fw, hw, NULL);

	fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX;
	fw->ucode_capa.num_links = IWL_FW_MAX_LINK_ID + 1;

	mld->fwrt.trans = trans;
	mld->fwrt.fw = fw;
	mld->fwrt.dev = trans->dev;

	/* TODO: add priv_size to hw allocation and setup hw->priv to enable
	 * testing mac80211 callbacks
	 */

	KUNIT_ALLOC_AND_ASSERT(test, mld->nvm_data);
	KUNIT_ALLOC_AND_ASSERT_SIZE(test, mld->scan.cmd,
				    sizeof(struct iwl_scan_req_umac_v17));
	mld->scan.cmd_size = sizeof(struct iwl_scan_req_umac_v17);

	/* This is not the state at the end of the regular opmode_start,
	 * but it is more common to need it. Explicitly undo this if needed.
	 */
	mld->trans->state = IWL_TRANS_FW_ALIVE;
	mld->fw_status.running = true;

	/* Avoid passing mld struct around */
	test->priv = mld;
	return 0;
}
IWL_MLD_ALLOC_FN(link, bss_conf)
/* Attach a mac80211 link and its iwl_mld_link to @vif at @link_id and
 * give the link a fw id.
 *
 * Fix: vif->link_conf[link_id] was rcu-assigned twice (once here at the
 * top and again at the bottom) — the redundant second assignment was
 * removed.
 */
static void iwlmld_kunit_init_link(struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *link,
				   struct iwl_mld_link *mld_link, int link_id)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	/* setup mac80211 link */
	rcu_assign_pointer(vif->link_conf[link_id], link);
	link->link_id = link_id;
	link->vif = vif;
	link->beacon_int = 100;
	link->dtim_period = 3;
	link->qos = true;

	/* and mld_link */
	ret = iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
	KUNIT_ASSERT_EQ(test, ret, 0);
	rcu_assign_pointer(mld_vif->link[link_id], mld_link);
}
IWL_MLD_ALLOC_FN(vif, vif)
/* Helper function to add and initialize a VIF for KUnit tests.
 * For a non-MLO vif the default link is initialized as well; an MLO vif
 * is returned without links (add them with iwlmld_kunit_add_link()).
 */
struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	struct ieee80211_vif *vif;
	struct iwl_mld_vif *mld_vif;
	int ret;

	/* TODO: support more types */
	KUNIT_ASSERT_EQ(test, type, NL80211_IFTYPE_STATION);

	/* The driver's private data (iwl_mld_vif) lives right after the
	 * vif, as in a real mac80211 allocation.
	 */
	KUNIT_ALLOC_AND_ASSERT_SIZE(test, vif,
				    sizeof(*vif) + sizeof(*mld_vif));

	vif->type = type;
	mld_vif = iwl_mld_vif_from_mac80211(vif);
	mld_vif->mld = mld;

	ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* TODO: revisit (task=EHT) */
	if (mlo)
		return vif;

	/* Initialize the default link */
	iwlmld_kunit_init_link(vif, &vif->bss_conf, &mld_vif->deflink, 0);

	return vif;
}
/* Use only for MLO vif: allocate and attach one link at @link_id and
 * mark it valid on the vif.
 */
struct ieee80211_bss_conf *
iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id)
{
	struct kunit *test = kunit_get_current_test();
	struct ieee80211_bss_conf *link;
	struct iwl_mld_link *mld_link;

	KUNIT_ALLOC_AND_ASSERT(test, link);
	KUNIT_ALLOC_AND_ASSERT(test, mld_link);

	iwlmld_kunit_init_link(vif, link, mld_link, link_id);
	vif->valid_links |= BIT(link_id);

	return link;
}
/* Allocate a chanctx (with the embedded iwl_mld_phy) for @def and
 * assign it a fw phy id.
 */
struct ieee80211_chanctx_conf *
iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	struct ieee80211_chanctx_conf *ctx;
	struct iwl_mld_phy *phy;
	int fw_id;

	KUNIT_ALLOC_AND_ASSERT_SIZE(test, ctx, sizeof(*ctx) + sizeof(*phy));

	/* Setup the chanctx conf */
	ctx->def = *def;
	ctx->min_def = *def;
	ctx->ap = *def;

	/* and the iwl_mld_phy */
	phy = iwl_mld_phy_from_mac80211(ctx);

	fw_id = iwl_mld_allocate_fw_phy_id(mld);
	KUNIT_ASSERT_GE(test, fw_id, 0);
	phy->fw_id = fw_id;
	phy->chandef = *iwl_mld_get_chandef_from_chanctx(ctx);

	return ctx;
}
/* Bind @ctx to @link on both the mac80211 and the mld side, mark the
 * link active, and (for MLD vifs) add it to active_links.
 * Must be called with the wiphy lock held.
 */
void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *link,
					 struct ieee80211_chanctx_conf *ctx)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	struct iwl_mld_link *mld_link;

	/* The link must not have a chanctx assigned yet */
	KUNIT_EXPECT_NULL(test, rcu_access_pointer(link->chanctx_conf));
	rcu_assign_pointer(link->chanctx_conf, ctx);

	lockdep_assert_wiphy(mld->wiphy);

	mld_link = iwl_mld_link_from_mac80211(link);

	KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld_link->chan_ctx));
	KUNIT_EXPECT_FALSE(test, mld_link->active);

	rcu_assign_pointer(mld_link->chan_ctx, ctx);
	mld_link->active = true;

	if (ieee80211_vif_is_mld(vif))
		vif->active_links |= BIT(link->link_id);
}
IWL_MLD_ALLOC_FN(link_sta, link_sta)
/* Attach a link_sta (and its iwl_mld_link_sta) to @sta at @link_id and
 * give it a fw station id.
 */
static void iwlmld_kunit_add_link_sta(struct ieee80211_sta *sta,
				      struct ieee80211_link_sta *link_sta,
				      struct iwl_mld_link_sta *mld_link_sta,
				      u8 link_id)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld *mld = test->priv;
	u8 fw_id;
	int ret;

	/* initialize mac80211's link_sta */
	link_sta->link_id = link_id;
	rcu_assign_pointer(sta->link[link_id], link_sta);
	link_sta->sta = sta;

	/* and the iwl_mld_link_sta */
	ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
	KUNIT_ASSERT_EQ(test, ret, 0);
	mld_link_sta->fw_id = fw_id;

	rcu_assign_pointer(mld_sta->link[link_id], mld_link_sta);
}
/* Allocate an additional link_sta for an MLO sta at @link_id */
static struct ieee80211_link_sta *
iwlmld_kunit_alloc_link_sta(struct ieee80211_sta *sta, int link_id)
{
	struct kunit *test = kunit_get_current_test();
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_link_sta *mld_link_sta;

	/* Only valid for MLO */
	KUNIT_ASSERT_TRUE(test, sta->valid_links);

	KUNIT_ALLOC_AND_ASSERT(test, link_sta);
	KUNIT_ALLOC_AND_ASSERT(test, mld_link_sta);

	iwlmld_kunit_add_link_sta(sta, link_sta, mld_link_sta, link_id);

	sta->valid_links |= BIT(link_id);

	return link_sta;
}
/* Allocate and initialize a STA with the first link_sta.
 * A negative @link_id means a non-MLO sta using the deflink.
 */
static struct ieee80211_sta *
iwlmld_kunit_add_sta(struct ieee80211_vif *vif, int link_id)
{
	struct kunit *test = kunit_get_current_test();
	struct ieee80211_sta *sta;
	struct iwl_mld_sta *mld_sta;

	/* Allocate memory for ieee80211_sta with embedded iwl_mld_sta */
	KUNIT_ALLOC_AND_ASSERT_SIZE(test, sta, sizeof(*sta) + sizeof(*mld_sta));

	/* TODO: allocate and initialize the TXQs ? */

	mld_sta = iwl_mld_sta_from_mac80211(sta);
	mld_sta->vif = vif;
	mld_sta->mld = test->priv;

	/* TODO: adjust for internal stations */
	mld_sta->sta_type = STATION_TYPE_PEER;

	if (link_id >= 0) {
		iwlmld_kunit_add_link_sta(sta, &sta->deflink,
					  &mld_sta->deflink, link_id);
		sta->valid_links = BIT(link_id);
	} else {
		/* non-MLO: use link id 0 but leave valid_links clear */
		iwlmld_kunit_add_link_sta(sta, &sta->deflink,
					  &mld_sta->deflink, 0);
	}
	return sta;
}
/* Move a STA to the given state (a single transition, no iteration) */
static void iwlmld_kunit_move_sta_state(struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					enum ieee80211_sta_state state)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_vif *mld_vif;

	/* Removal (NOTEXIST) is not supported here; the sta is freed
	 * automatically at the end of the test.
	 */
	KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);

	mld_sta = iwl_mld_sta_from_mac80211(sta);
	mld_sta->sta_state = state;

	mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
	mld_vif->authorized = state == IEEE80211_STA_AUTHORIZED;

	/* Client vif tracks its (non-TDLS) AP station */
	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
		mld_vif->ap_sta = sta;
}
/* Allocate a sta, initialize it, and walk it through every sta state up
 * to (and including) @state.
 *
 * Fix: the loop previously passed the final 'state' on every iteration
 * instead of '_state', so the intermediate NONE/AUTH/ASSOC transitions
 * were never applied.
 */
struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
					     enum ieee80211_sta_state state,
					     int link_id)
{
	struct kunit *test = kunit_get_current_test();
	struct ieee80211_sta *sta;

	/* The sta will be removed automatically at the end of the test */
	KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);

	/* First - allocate and init the STA */
	sta = iwlmld_kunit_add_sta(vif, link_id);

	/* Now move it all the way to the wanted state, one state at a time */
	for (enum ieee80211_sta_state _state = IEEE80211_STA_NONE;
	     _state <= state; _state++)
		iwlmld_kunit_move_sta_state(vif, sta, _state);

	return sta;
}
/* Mark the vif as associated (minimal; no chanreq/capabilities yet) */
static void iwlmld_kunit_set_vif_associated(struct ieee80211_vif *vif)
{
	/* TODO: setup chanreq */
	/* TODO setup capabilities */

	vif->cfg.assoc = 1;
}
/* Build an associated client vif: link at @link_id, a chanctx on @band
 * assigned to it, an authorized AP sta, and the assoc flag set.
 */
static struct ieee80211_vif *
iwlmld_kunit_setup_assoc(bool mlo, u8 link_id, enum nl80211_band band)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	struct ieee80211_vif *vif;
	struct ieee80211_bss_conf *link;
	struct ieee80211_chanctx_conf *chan_ctx;

	/* A non-MLO vif only has link id 0 */
	KUNIT_ASSERT_TRUE(test, mlo || link_id == 0);

	vif = iwlmld_kunit_add_vif(mlo, NL80211_IFTYPE_STATION);

	if (mlo)
		link = iwlmld_kunit_add_link(vif, link_id);
	else
		link = &vif->bss_conf;

	chan_ctx = iwlmld_kunit_add_chanctx(band);

	wiphy_lock(mld->wiphy);
	iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
	wiphy_unlock(mld->wiphy);

	/* The AP sta will now be pointed to by mld_vif->ap_sta */
	iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, link_id);

	iwlmld_kunit_set_vif_associated(vif);

	return vif;
}
/* Build an associated MLO vif with 1 or 2 valid links; only the assoc
 * link gets a chanctx, the second link (if any) is merely added.
 */
struct ieee80211_vif *iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
						   u8 assoc_link_id,
						   enum nl80211_band band)
{
	struct kunit *test = kunit_get_current_test();
	struct ieee80211_vif *vif;

	KUNIT_ASSERT_TRUE(test,
			  hweight16(valid_links) == 1 ||
			  hweight16(valid_links) == 2);
	KUNIT_ASSERT_TRUE(test, valid_links & BIT(assoc_link_id));

	vif = iwlmld_kunit_setup_assoc(true, assoc_link_id, band);

	/* Add the other link, if applicable */
	if (hweight16(valid_links) > 1) {
		u8 other_link_id = ffs(valid_links & ~BIT(assoc_link_id)) - 1;

		iwlmld_kunit_add_link(vif, other_link_id);
	}

	return vif;
}
/* Convenience wrapper: associated non-MLO vif on @band (link id 0) */
struct ieee80211_vif *iwlmld_kunit_setup_non_mlo_assoc(enum nl80211_band band)
{
	return iwlmld_kunit_setup_assoc(false, 0, band);
}
/* Build an iwl_rx_packet wrapping @notif (a payload of @notif_sz bytes).
 *
 * Fix: the allocation used sizeof(pkt) — the size of the pointer — where
 * sizeof(*pkt) (the packet header preceding the flexible data[] array)
 * was meant. The two happen to match on 64-bit, but the pointer size is
 * wrong on 32-bit and under-allocates the buffer.
 */
struct iwl_rx_packet *
_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_rx_packet *pkt;

	KUNIT_ALLOC_AND_ASSERT_SIZE(test, pkt, sizeof(*pkt) + notif_sz);

	memcpy(pkt->data, notif, notif_sz);
	pkt->len_n_flags = cpu_to_le32(sizeof(pkt->hdr) + notif_sz);

	return pkt;
}
/* Build an associated MLO vif in EMLSR: both links active, each with its
 * own chanctx (on @band1 and @band2), and a link_sta on both links.
 */
struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(u16 valid_links,
					       enum nl80211_band band1,
					       enum nl80211_band band2)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	struct ieee80211_vif *vif;
	struct ieee80211_bss_conf *link;
	struct ieee80211_chanctx_conf *chan_ctx;
	struct ieee80211_sta *sta;
	struct iwl_mld_vif *mld_vif;
	u8 assoc_link_id, other_link_id;

	/* EMLSR requires exactly two links */
	KUNIT_ASSERT_TRUE(test, hweight16(valid_links) == 2);

	assoc_link_id = ffs(valid_links) - 1;
	other_link_id = ffs(valid_links & ~BIT(assoc_link_id)) - 1;

	vif = iwlmld_kunit_setup_mlo_assoc(valid_links, assoc_link_id, band1);
	mld_vif = iwl_mld_vif_from_mac80211(vif);

	/* Activate second link */
	wiphy_lock(mld->wiphy);

	link = wiphy_dereference(mld->wiphy, vif->link_conf[other_link_id]);
	KUNIT_EXPECT_NOT_NULL(test, link);

	chan_ctx = iwlmld_kunit_add_chanctx(band2);
	iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);

	wiphy_unlock(mld->wiphy);

	/* And other link sta */
	sta = mld_vif->ap_sta;
	KUNIT_EXPECT_NOT_NULL(test, sta);

	iwlmld_kunit_alloc_link_sta(sta, other_link_id);

	return vif;
}
/* Allocate an information element with the given @id and @len bytes of
 * @data (kunit-managed memory).
 */
struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len)
{
	struct kunit *test = kunit_get_current_test();
	struct element *elem;

	KUNIT_ALLOC_AND_ASSERT_SIZE(test, elem, sizeof(*elem) + len);

	elem->id = id;
	elem->datalen = len;
	memcpy(elem->data, data, len);

	return elem;
}
/* Return the iwl_mld_phy behind @link_id's chanctx; the link and its
 * chanctx must exist (the test fails otherwise).
 */
struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
						 u8 link_id)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	struct ieee80211_chanctx_conf *chanctx;
	struct ieee80211_bss_conf *link =
		wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);

	KUNIT_EXPECT_NOT_NULL(test, link);

	chanctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
	KUNIT_EXPECT_NOT_NULL(test, chanctx);

	return iwl_mld_phy_from_mac80211(chanctx);
}

View File

@ -0,0 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_mld_kunit_utils_h__
#define __iwl_mld_kunit_utils_h__
#include <net/mac80211.h>
#include <kunit/test-bug.h>
struct iwl_mld;
int iwlmld_kunit_test_init(struct kunit *test);
enum nl80211_iftype;
struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type);
struct ieee80211_bss_conf *
iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id);
/* Allocate @size zeroed bytes with kunit_kzalloc() and assert success */
#define KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, size)		\
do {								\
	(ptr) = kunit_kzalloc((test), (size), GFP_KERNEL);	\
	KUNIT_ASSERT_NOT_NULL((test), (ptr));			\
} while (0)

/* Allocate sizeof(*(ptr)) zeroed bytes and assert success */
#define KUNIT_ALLOC_AND_ASSERT(test, ptr)			\
	KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, sizeof(*(ptr)))
/* Define a static ieee80211_channel for the tests */
#define CHANNEL(_name, _band, _freq)			\
static struct ieee80211_channel _name = {		\
	.band = (_band),				\
	.center_freq = (_freq),				\
	.hw_value = (_freq),				\
}

/* Define a static cfg80211_chan_def on a previously defined channel */
#define CHANDEF(_name, _channel, _freq1, _width)		\
__maybe_unused static struct cfg80211_chan_def _name = {	\
	.chan = &(_channel),					\
	.center_freq1 = (_freq1),				\
	.width = (_width),					\
}

CHANNEL(chan_2ghz, NL80211_BAND_2GHZ, 2412);
CHANNEL(chan_5ghz, NL80211_BAND_5GHZ, 5200);
CHANNEL(chan_6ghz, NL80211_BAND_6GHZ, 6115);
/* Feel free to add more */

CHANDEF(chandef_2ghz, chan_2ghz, 2412, NL80211_CHAN_WIDTH_20);
CHANDEF(chandef_5ghz, chan_5ghz, 5200, NL80211_CHAN_WIDTH_40);
CHANDEF(chandef_6ghz, chan_6ghz, 6115, NL80211_CHAN_WIDTH_160);
/* Feel free to add more */
struct ieee80211_chanctx_conf *
iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def);

/* Add a chanctx using the canned chandef for @band (defaults to 6 GHz) */
static inline struct ieee80211_chanctx_conf *
iwlmld_kunit_add_chanctx(enum nl80211_band band)
{
	struct cfg80211_chan_def *chandef;

	switch (band) {
	case NL80211_BAND_2GHZ:
		chandef = &chandef_2ghz;
		break;
	case NL80211_BAND_5GHZ:
		chandef = &chandef_5ghz;
		break;
	default:
	case NL80211_BAND_6GHZ:
		chandef = &chandef_6ghz;
		break;
	}

	return iwlmld_kunit_add_chanctx_from_def(chandef);
}
void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link,
struct ieee80211_chanctx_conf *ctx);
/* Allocate a sta, initialize it and move it to the wanted state */
struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
enum ieee80211_sta_state state,
int link_id);
struct ieee80211_vif *iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
u8 assoc_link_id,
enum nl80211_band band);
struct ieee80211_vif *iwlmld_kunit_setup_non_mlo_assoc(enum nl80211_band band);
struct iwl_rx_packet *
_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz);
#define iwl_mld_kunit_create_pkt(_notif) \
_iwl_mld_kunit_create_pkt(&(_notif), sizeof(_notif))
struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(u16 valid_links,
enum nl80211_band band1,
enum nl80211_band band2);
struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len);
/**
* iwlmld_kunit_get_phy_of_link - Get the phy of a link
*
* @vif: The vif to get the phy from.
* @link_id: The id of the link to get the phy for.
*
* given a vif and link id, return the phy pointer of that link.
* This assumes that the link exists, and that it had a chanctx
* assigned.
* If this is not the case, the test will fail.
*
* Return: phy pointer.
*/
struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
u8 link_id);
#endif /* __iwl_mld_kunit_utils_h__ */

View File

@ -0,0 +1,438 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifdef CONFIG_THERMAL
#include <linux/sort.h>
#include <linux/thermal.h>
#endif
#include "fw/api/phy.h"
#include "thermal.h"
#include "mld.h"
#include "hcmd.h"
#define IWL_MLD_CT_KILL_DURATION (5 * HZ)
/* Handle the CT-kill notification from the FW: the chip is critically hot,
 * so enter CT-kill state (all activity stopped) and schedule an exit
 * attempt after a back-off period.
 */
void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
				  struct iwl_rx_packet *pkt)
{
	const struct ct_kill_notif *notif = (const void *)pkt->data;

	IWL_ERR(mld,
		"CT Kill notification: temp = %d, DTS = 0x%x, Scheme 0x%x - Enter CT Kill\n",
		le16_to_cpu(notif->temperature), notif->dts,
		notif->scheme);

	iwl_mld_set_ctkill(mld, true);

	/* try to leave CT-kill after IWL_MLD_CT_KILL_DURATION */
	wiphy_delayed_work_queue(mld->wiphy, &mld->ct_kill_exit_wk,
				 round_jiffies_relative(IWL_MLD_CT_KILL_DURATION));
}
/* Delayed work: leave CT-kill state once the back-off period expired. */
static void iwl_mld_exit_ctkill(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
					   ct_kill_exit_wk.work);

	IWL_ERR(mld, "Exit CT Kill\n");
	iwl_mld_set_ctkill(mld, false);
}
/* Handle a DTS temperature measurement notification from the FW.
 * If the notification indicates that a configured threshold was crossed,
 * kick the thermal core to re-evaluate the zone.
 */
void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
{
	const struct iwl_dts_measurement_notif *notif =
		(const void *)pkt->data;
	int temp;
	u32 ths_crossed;

	temp = le32_to_cpu(notif->temp);

	/* shouldn't be negative, but since it's s32, make sure it isn't */
	if (IWL_FW_CHECK(mld, temp < 0, "negative temperature %d\n", temp))
		return;

	ths_crossed = le32_to_cpu(notif->threshold_idx);

	/* 0xFF in ths_crossed means the notification is not related
	 * to a trip, so we can ignore it here.
	 */
	if (ths_crossed == 0xFF)
		return;

	IWL_DEBUG_TEMP(mld, "Temp = %d Threshold crossed = %d\n",
		       temp, ths_crossed);

	/* index must be valid before it is used to pick a trip */
	if (IWL_FW_CHECK(mld, ths_crossed >= IWL_MAX_DTS_TRIPS,
			 "bad threshold: %d\n", ths_crossed))
		return;

#ifdef CONFIG_THERMAL
	if (mld->tzone)
		thermal_zone_device_update(mld->tzone, THERMAL_TRIP_VIOLATED);
#endif /* CONFIG_THERMAL */
}
#ifdef CONFIG_THERMAL
/* Read the current chip temperature from the FW.
 * Sends CMD_DTS_MEASUREMENT_TRIGGER_WIDE synchronously and parses the
 * response. Returns 0 and fills @temp on success, negative errno otherwise.
 */
static int iwl_mld_get_temp(struct iwl_mld *mld, s32 *temp)
{
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
		.flags = CMD_WANT_SKB,
	};
	const struct iwl_dts_measurement_resp *resp;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_send_cmd(mld, &cmd);
	if (ret) {
		IWL_ERR(mld,
			"Failed to send the temperature measurement command (err=%d)\n",
			ret);
		return ret;
	}

	/* validate the response size before touching its payload */
	if (iwl_rx_packet_payload_len(cmd.resp_pkt) < sizeof(*resp)) {
		IWL_ERR(mld,
			"Failed to get a valid response to DTS measurement\n");
		ret = -EIO;
		goto free_resp;
	}

	resp = (const void *)cmd.resp_pkt->data;
	*temp = le32_to_cpu(resp->temp);

	IWL_DEBUG_TEMP(mld,
		       "Got temperature measurement response: temp=%d\n",
		       *temp);

free_resp:
	/* CMD_WANT_SKB responses must always be freed by the caller */
	iwl_free_resp(&cmd);
	return ret;
}
/* sort() comparator: ascending order of the signed Celsius thresholds.
 * Note: the arguments point to __le16 values; const-correct casts so no
 * const qualifier is discarded from sort()'s "const void *" arguments.
 */
static int compare_temps(const void *a, const void *b)
{
	return ((s16)le16_to_cpu(*(const __le16 *)a) -
		(s16)le16_to_cpu(*(const __le16 *)b));
}
/* struct iwl_trip_walk_data - accumulator for for_each_thermal_trip()
 * @thresholds: output array of FW-format (__le16, Celsius) thresholds
 * @count: number of valid entries written to @thresholds
 */
struct iwl_trip_walk_data {
	__le16 *thresholds;
	int count;
};
/* for_each_thermal_trip() callback: copy each valid trip temperature into
 * the walk data, converting from milli-Celsius to the FW's __le16 Celsius
 * format. Uninitialized trips (THERMAL_TEMP_INVALID) are skipped.
 */
static int iwl_trip_temp_iter(struct thermal_trip *trip, void *arg)
{
	struct iwl_trip_walk_data *twd = arg;

	if (trip->temperature == THERMAL_TEMP_INVALID)
		return 0;

	twd->thresholds[twd->count++] =
		cpu_to_le16((s16)(trip->temperature / 1000));
	return 0;
}
#endif
/* Send TEMP_REPORTING_THRESHOLDS_CMD: the temperatures at which the FW
 * should send a temperature notification. With CONFIG_THERMAL the
 * thresholds come from the thermal zone trips (compressed and sorted as
 * the FW expects); without it, an empty threshold list is sent.
 */
int iwl_mld_config_temp_report_ths(struct iwl_mld *mld)
{
	struct temp_report_ths_cmd cmd = {0};
	int ret;
#ifdef CONFIG_THERMAL
	struct iwl_trip_walk_data twd = {
		.thresholds = cmd.thresholds,
		.count = 0
	};

	if (!mld->tzone)
		goto send;

	/* The thermal core holds an array of temperature trips that are
	 * unsorted and uncompressed, the FW should get it compressed and
	 * sorted.
	 */

	/* compress trips to cmd array, remove uninitialized values*/
	for_each_thermal_trip(mld->tzone, iwl_trip_temp_iter, &twd);

	cmd.num_temps = cpu_to_le32(twd.count);
	if (twd.count)
		sort(cmd.thresholds, twd.count, sizeof(s16),
		     compare_temps, NULL);

send:
#endif
	lockdep_assert_wiphy(mld->wiphy);
	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
						TEMP_REPORTING_THRESHOLDS_CMD),
				   &cmd);
	if (ret)
		IWL_ERR(mld, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
			ret);

	return ret;
}
#ifdef CONFIG_THERMAL
/* thermal zone .get_temp callback: read the current temperature from the
 * FW. Reports THERMAL_TEMP_INVALID (without an error) while the FW is not
 * running.
 */
static int iwl_mld_tzone_get_temp(struct thermal_zone_device *device,
				  int *temperature)
{
	struct iwl_mld *mld = thermal_zone_device_priv(device);
	int temp;
	int ret = 0;

	wiphy_lock(mld->wiphy);

	if (!mld->fw_status.running) {
		/* Tell the core that there is no valid temperature value to
		 * return, but it need not worry about this.
		 */
		*temperature = THERMAL_TEMP_INVALID;
		goto unlock;
	}

	ret = iwl_mld_get_temp(mld, &temp);
	if (ret)
		goto unlock;

	/* FW reports Celsius, the thermal core expects milli-Celsius */
	*temperature = temp * 1000;
unlock:
	wiphy_unlock(mld->wiphy);
	return ret;
}
/* thermal zone .set_trip_temp callback: re-send the full threshold list
 * to the FW (presumably the core already updated @trip before calling us,
 * so only range-checking @temp is needed here — confirm against the
 * thermal core contract).
 */
static int iwl_mld_tzone_set_trip_temp(struct thermal_zone_device *device,
				       const struct thermal_trip *trip,
				       int temp)
{
	struct iwl_mld *mld = thermal_zone_device_priv(device);
	int ret;

	wiphy_lock(mld->wiphy);

	if (!mld->fw_status.running) {
		ret = -EIO;
		goto unlock;
	}

	/* the FW threshold format is a s16 in Celsius */
	if ((temp / 1000) > S16_MAX) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = iwl_mld_config_temp_report_ths(mld);
unlock:
	wiphy_unlock(mld->wiphy);
	return ret;
}
/* thermal zone device callbacks */
static struct thermal_zone_device_ops tzone_ops = {
	.get_temp = iwl_mld_tzone_get_temp,
	.set_trip_temp = iwl_mld_tzone_set_trip_temp,
};
/* Register the per-device thermal zone with all trips initially invalid
 * (writable by userspace). Failure is not fatal: the driver simply runs
 * without a thermal zone, and mld->tzone stays NULL.
 */
static void iwl_mld_thermal_zone_register(struct iwl_mld *mld)
{
	int ret;
	char name[16];
	static atomic_t counter = ATOMIC_INIT(0);
	struct thermal_trip trips[IWL_MAX_DTS_TRIPS] = {
		[0 ... IWL_MAX_DTS_TRIPS - 1] = {
			.temperature = THERMAL_TEMP_INVALID,
			.type = THERMAL_TRIP_PASSIVE,
			.flags = THERMAL_TRIP_FLAG_RW_TEMP,
		},
	};

	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);

	/* unique name per device instance */
	sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
	mld->tzone =
		thermal_zone_device_register_with_trips(name, trips,
							IWL_MAX_DTS_TRIPS,
							mld, &tzone_ops,
							NULL, 0, 0);
	if (IS_ERR(mld->tzone)) {
		IWL_DEBUG_TEMP(mld,
			       "Failed to register to thermal zone (err = %ld)\n",
			       PTR_ERR(mld->tzone));
		mld->tzone = NULL;
		return;
	}

	ret = thermal_zone_device_enable(mld->tzone);
	if (ret) {
		IWL_DEBUG_TEMP(mld, "Failed to enable thermal zone\n");
		thermal_zone_device_unregister(mld->tzone);
		/* clear the stale pointer so later users (e.g. the temp
		 * notification handler and unregister path) don't touch a
		 * freed zone
		 */
		mld->tzone = NULL;
	}
}
/* budget in mWatt, indexed by cooling state (higher state = lower budget) */
static const u32 iwl_mld_cdev_budgets[] = {
	2400,	/* cooling state 0 */
	2000,	/* cooling state 1 */
	1800,	/* cooling state 2 */
	1600,	/* cooling state 3 */
	1400,	/* cooling state 4 */
	1200,	/* cooling state 5 */
	1000,	/* cooling state 6 */
	900,	/* cooling state 7 */
	800,	/* cooling state 8 */
	700,	/* cooling state 9 */
	650,	/* cooling state 10 */
	600,	/* cooling state 11 */
	550,	/* cooling state 12 */
	500,	/* cooling state 13 */
	450,	/* cooling state 14 */
	400,	/* cooling state 15 */
	350,	/* cooling state 16 */
	300,	/* cooling state 17 */
	250,	/* cooling state 18 */
	200,	/* cooling state 19 */
	150,	/* cooling state 20 */
};
/* Send the cTDP (thermal power budget) command to the FW.
 * @state indexes iwl_mld_cdev_budgets[]; on a successful START operation
 * the cached cooling state is updated to @state.
 */
int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
			enum iwl_ctdp_cmd_operation op)
{
	struct iwl_ctdp_cmd cmd = {
		.operation = cpu_to_le32(op),
		.budget = cpu_to_le32(iwl_mld_cdev_budgets[state]),
		.window_size = 0,
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD),
				   &cmd);

	if (ret) {
		IWL_ERR(mld, "cTDP command failed (err=%d)\n", ret);
		return ret;
	}

	if (op == CTDP_CMD_OPERATION_START)
		mld->cooling_dev.cur_state = state;

	return 0;
}
/* cooling device .get_max_state: highest budget-table index */
static int iwl_mld_tcool_get_max_state(struct thermal_cooling_device *cdev,
				       unsigned long *state)
{
	*state = ARRAY_SIZE(iwl_mld_cdev_budgets) - 1;

	return 0;
}
static int iwl_mld_tcool_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct iwl_mld *mld = (struct iwl_mld *)(cdev->devdata);
*state = mld->cooling_dev.cur_state;
return 0;
}
/* cooling device .set_cur_state: apply the power budget of @new_state
 * via the cTDP command. Fails with -EIO while the FW is not running.
 */
static int iwl_mld_tcool_set_cur_state(struct thermal_cooling_device *cdev,
				       unsigned long new_state)
{
	struct iwl_mld *mld = (struct iwl_mld *)(cdev->devdata);
	int ret;

	wiphy_lock(mld->wiphy);

	if (!mld->fw_status.running) {
		ret = -EIO;
		goto unlock;
	}

	if (new_state >= ARRAY_SIZE(iwl_mld_cdev_budgets)) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = iwl_mld_config_ctdp(mld, new_state, CTDP_CMD_OPERATION_START);

unlock:
	wiphy_unlock(mld->wiphy);
	return ret;
}
/* cooling device callbacks */
static const struct thermal_cooling_device_ops tcooling_ops = {
	.get_max_state = iwl_mld_tcool_get_max_state,
	.get_cur_state = iwl_mld_tcool_get_cur_state,
	.set_cur_state = iwl_mld_tcool_set_cur_state,
};
/* Register the "iwlwifi" cooling device (cTDP budget control).
 * Registration failure is not fatal: the driver simply runs without a
 * cooling device, and mld->cooling_dev.cdev stays NULL.
 */
static void iwl_mld_cooling_device_register(struct iwl_mld *mld)
{
	char name[] = "iwlwifi";

	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);

	mld->cooling_dev.cdev =
		thermal_cooling_device_register(name,
						mld,
						&tcooling_ops);

	if (IS_ERR(mld->cooling_dev.cdev)) {
		IWL_DEBUG_TEMP(mld,
			       "Failed to register to cooling device (err = %ld)\n",
			       PTR_ERR(mld->cooling_dev.cdev));
		mld->cooling_dev.cdev = NULL;
	}
}
/* Unregister the thermal zone (no-op when never registered) and clear the
 * pointer so it can't be used afterwards.
 */
static void iwl_mld_thermal_zone_unregister(struct iwl_mld *mld)
{
	if (!mld->tzone)
		return;

	IWL_DEBUG_TEMP(mld, "Thermal zone device unregister\n");
	/* the early return above already guarantees mld->tzone != NULL,
	 * so no second check is needed
	 */
	thermal_zone_device_unregister(mld->tzone);
	mld->tzone = NULL;
}
/* Unregister the cooling device (no-op when never registered) and clear
 * the pointer so it can't be used afterwards.
 */
static void iwl_mld_cooling_device_unregister(struct iwl_mld *mld)
{
	if (!mld->cooling_dev.cdev)
		return;

	IWL_DEBUG_TEMP(mld, "Cooling device unregister\n");
	/* the early return above already guarantees cdev != NULL,
	 * so no second check is needed
	 */
	thermal_cooling_device_unregister(mld->cooling_dev.cdev);
	mld->cooling_dev.cdev = NULL;
}
#endif /* CONFIG_THERMAL */
/* Initialize thermal support: the CT-kill exit work and, with
 * CONFIG_THERMAL, the cooling device and thermal zone.
 */
void iwl_mld_thermal_initialize(struct iwl_mld *mld)
{
	wiphy_delayed_work_init(&mld->ct_kill_exit_wk, iwl_mld_exit_ctkill);

#ifdef CONFIG_THERMAL
	iwl_mld_cooling_device_register(mld);
	iwl_mld_thermal_zone_register(mld);
#endif
}
/* Tear down thermal support: cancel the CT-kill exit work and, with
 * CONFIG_THERMAL, unregister the cooling device and thermal zone.
 */
void iwl_mld_thermal_exit(struct iwl_mld *mld)
{
	wiphy_delayed_work_cancel(mld->wiphy, &mld->ct_kill_exit_wk);

#ifdef CONFIG_THERMAL
	iwl_mld_cooling_device_unregister(mld);
	iwl_mld_thermal_zone_unregister(mld);
#endif
}

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_mld_thermal_h__
#define __iwl_mld_thermal_h__
#include "iwl-trans.h"
struct iwl_mld;
#ifdef CONFIG_THERMAL
#include <linux/thermal.h>
/**
 * struct iwl_mld_cooling_device - cooling (cTDP) device state
 * @cur_state: current cooling state (index into the budget table)
 * @cdev: the registered thermal cooling device, or NULL
 */
struct iwl_mld_cooling_device {
	u32 cur_state;
	struct thermal_cooling_device *cdev;
};
int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
enum iwl_ctdp_cmd_operation op);
#endif
void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
int iwl_mld_config_temp_report_ths(struct iwl_mld *mld);
void iwl_mld_thermal_initialize(struct iwl_mld *mld);
void iwl_mld_thermal_exit(struct iwl_mld *mld);
#endif /* __iwl_mld_thermal_h__ */

View File

@ -0,0 +1,240 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2025 Intel Corporation
*/
#include "mld.h"
#include "hcmd.h"
#include "ptp.h"
#include "time_sync.h"
#include <linux/ieee80211.h>
/* Allocate and publish (via RCU) the time-sync state for the given peer.
 * Returns 0 on success or -ENOMEM.
 */
static int iwl_mld_init_time_sync(struct iwl_mld *mld, u32 protocols,
				  const u8 *addr)
{
	struct iwl_mld_time_sync_data *time_sync = kzalloc(sizeof(*time_sync),
							   GFP_KERNEL);

	if (!time_sync)
		return -ENOMEM;

	time_sync->active_protocols = protocols;
	ether_addr_copy(time_sync->peer_addr, addr);
	skb_queue_head_init(&time_sync->frame_list);
	/* publish only after the struct is fully initialized */
	rcu_assign_pointer(mld->time_sync, time_sync);

	return 0;
}
/* Push the current time-sync peer/protocol configuration to the FW.
 * Returns -EINVAL if no time-sync state was configured.
 */
int iwl_mld_time_sync_fw_config(struct iwl_mld *mld)
{
	struct iwl_time_sync_cfg_cmd cmd = {};
	struct iwl_mld_time_sync_data *time_sync;
	int err;

	time_sync = wiphy_dereference(mld->wiphy, mld->time_sync);
	if (!time_sync)
		return -EINVAL;

	cmd.protocols = cpu_to_le32(time_sync->active_protocols);
	ether_addr_copy(cmd.peer_addr, time_sync->peer_addr);

	err = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(DATA_PATH_GROUP,
					   WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
				   &cmd);
	if (err)
		IWL_ERR(mld, "Failed to send time sync cfg cmd: %d\n", err);

	return err;
}
/* Configure time sync (TM/FTM timestamping) for a single peer.
 * @addr: MAC address of the peer
 * @protocols: bitmap of IWL_TIME_SYNC_PROTOCOL_* to enable
 * Returns 0 on success, -ENOBUFS when a different peer is already
 * configured, -EINVAL for unknown protocol bits, or an errno from
 * allocation / FW configuration.
 */
int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr, u32 protocols)
{
	struct iwl_mld_time_sync_data *time_sync;
	int err;

	time_sync = wiphy_dereference(mld->wiphy, mld->time_sync);

	/* The fw only supports one peer. We do allow reconfiguration of the
	 * same peer for cases of fw reset etc.
	 */
	if (time_sync && time_sync->active_protocols &&
	    !ether_addr_equal(addr, time_sync->peer_addr)) {
		IWL_DEBUG_INFO(mld, "Time sync: reject config for peer: %pM\n",
			       addr);
		return -ENOBUFS;
	}

	if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM |
			  IWL_TIME_SYNC_PROTOCOL_FTM))
		return -EINVAL;

	IWL_DEBUG_INFO(mld, "Time sync: set peer addr=%pM\n", addr);

	/* replace any previous state before re-initializing */
	iwl_mld_deinit_time_sync(mld);
	err = iwl_mld_init_time_sync(mld, protocols, addr);
	if (err)
		return err;

	return iwl_mld_time_sync_fw_config(mld);
}
/* Tear down the time-sync state: unpublish the RCU pointer, drop queued
 * frames and free the state after an RCU grace period.
 */
void iwl_mld_deinit_time_sync(struct iwl_mld *mld)
{
	struct iwl_mld_time_sync_data *time_sync =
		wiphy_dereference(mld->wiphy, mld->time_sync);

	if (!time_sync)
		return;

	/* unpublish first so no new readers can find the state */
	RCU_INIT_POINTER(mld->time_sync, NULL);
	skb_queue_purge(&time_sync->frame_list);
	kfree_rcu(time_sync, rcu_head);
}
/* Intercept a TM/FTM frame from the configured time-sync peer.
 * Returns true when the skb was queued (ownership taken) to wait for the
 * FW timestamp notification, false when the caller should process it.
 */
bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb, u8 *addr)
{
	struct iwl_mld_time_sync_data *time_sync;

	rcu_read_lock();
	time_sync = rcu_dereference(mld->time_sync);
	if (time_sync && ether_addr_equal(time_sync->peer_addr, addr) &&
	    (ieee80211_is_timing_measurement(skb) || ieee80211_is_ftm(skb))) {
		skb_queue_tail(&time_sync->frame_list, skb);
		rcu_read_unlock();
		return true;
	}
	rcu_read_unlock();

	return false;
}
/* Check whether a queued frame matches a FW timestamp notification:
 * same dialog token and @addr appearing as source or destination.
 */
static bool iwl_mld_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	u8 token;

	/* WNM timing measurement and FTM frames carry the dialog token in
	 * different spots of the action field
	 */
	token = ieee80211_is_timing_measurement(skb) ?
		mgmt->u.action.u.wnm_timing_msr.dialog_token :
		mgmt->u.action.u.ftm.dialog_token;

	if (token != dialog_token)
		return false;

	return ether_addr_equal(mgmt->sa, addr) ||
	       ether_addr_equal(mgmt->da, addr);
}
/* Find (and dequeue) the pending frame matching a FW timestamp
 * notification. Non-matching frames ahead of it in the queue are dropped,
 * since notifications arrive in frame order. Returns the matching skb
 * (ownership passed to the caller) or NULL.
 */
static struct sk_buff *iwl_mld_time_sync_find_skb(struct iwl_mld *mld, u8 *addr,
						  u8 dialog_token)
{
	struct iwl_mld_time_sync_data *time_sync;
	struct sk_buff *skb;

	rcu_read_lock();
	time_sync = rcu_dereference(mld->time_sync);
	if (IWL_FW_CHECK(mld, !time_sync,
			 "Time sync notification but time sync is not initialized\n")) {
		rcu_read_unlock();
		return NULL;
	}

	/* The notifications are expected to arrive in the same order of the
	 * frames. If the incoming notification doesn't match the first SKB
	 * in the queue, it means there was no time sync notification for this
	 * SKB and it can be dropped.
	 */
	while ((skb = skb_dequeue(&time_sync->frame_list))) {
		if (iwl_mld_is_skb_match(skb, addr, dialog_token))
			break;

		kfree_skb(skb);
		skb = NULL;
		IWL_DEBUG_DROP(mld,
			       "Time sync: drop SKB without matching notification\n");
	}
	rcu_read_unlock();

	return skb;
}
/* Combine two little-endian 32-bit halves into one host-order u64 */
static u64 iwl_mld_get_64_bit(__le32 high, __le32 low)
{
	u64 hi = le32_to_cpu(high);
	u64 lo = le32_to_cpu(low);

	return (hi << 32) | lo;
}
/* Handle the RX time measurement notification: attach the FW t2/t3
 * timestamps (converted from 10 ns units to PTP-adjusted ns) to the
 * matching queued frame and hand it to mac80211.
 */
void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld,
				    struct iwl_rx_packet *pkt)
{
	struct ptp_data *data = &mld->ptp_data;
	struct iwl_time_msmt_notify *notif = (void *)pkt->data;
	struct ieee80211_rx_status *rx_status;
	struct skb_shared_hwtstamps *shwt;
	u64 ts_10ns;
	struct sk_buff *skb =
		iwl_mld_time_sync_find_skb(mld, notif->peer_addr,
					   le32_to_cpu(notif->dialog_token));
	u64 adj_time;

	if (IWL_FW_CHECK(mld, !skb, "Time sync event but no pending skb\n"))
		return;

	/* both timestamps are adjusted under the same PTP lock */
	spin_lock_bh(&data->lock);
	ts_10ns = iwl_mld_get_64_bit(notif->t2_hi, notif->t2_lo);
	adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
	shwt = skb_hwtstamps(skb);
	shwt->hwtstamp = ktime_set(0, adj_time);

	ts_10ns = iwl_mld_get_64_bit(notif->t3_hi, notif->t3_lo);
	adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
	rx_status = IEEE80211_SKB_RXCB(skb);
	rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time);
	spin_unlock_bh(&data->lock);

	IWL_DEBUG_INFO(mld,
		       "Time sync: RX event - report frame t2=%llu t3=%llu\n",
		       ktime_to_ns(shwt->hwtstamp),
		       ktime_to_ns(rx_status->ack_tx_hwtstamp));
	ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
}
/* Handle the TX time measurement confirm notification: attach the FW
 * t1/t4 timestamps (converted from 10 ns units to PTP-adjusted ns) to the
 * matching queued frame and report TX status to mac80211.
 */
void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld,
					    struct iwl_rx_packet *pkt)
{
	struct ptp_data *data = &mld->ptp_data;
	struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data;
	struct ieee80211_tx_status status = {};
	struct skb_shared_hwtstamps *shwt;
	u64 ts_10ns, adj_time;

	status.skb =
		iwl_mld_time_sync_find_skb(mld, notif->peer_addr,
					   le32_to_cpu(notif->dialog_token));

	if (IWL_FW_CHECK(mld, !status.skb,
			 "Time sync confirm but no pending skb\n"))
		return;

	/* both timestamps are adjusted under the same PTP lock */
	spin_lock_bh(&data->lock);
	ts_10ns = iwl_mld_get_64_bit(notif->t1_hi, notif->t1_lo);
	adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
	shwt = skb_hwtstamps(status.skb);
	shwt->hwtstamp = ktime_set(0, adj_time);

	ts_10ns = iwl_mld_get_64_bit(notif->t4_hi, notif->t4_lo);
	adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
	status.info = IEEE80211_SKB_CB(status.skb);
	status.ack_hwtstamp = ktime_set(0, adj_time);
	spin_unlock_bh(&data->lock);

	IWL_DEBUG_INFO(mld,
		       "Time sync: TX event - report frame t1=%llu t4=%llu\n",
		       ktime_to_ns(shwt->hwtstamp),
		       ktime_to_ns(status.ack_hwtstamp));
	ieee80211_tx_status_ext(mld->hw, &status);
}

View File

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2025 Intel Corporation
*/
#ifndef __iwl_mld_time_sync_h__
#define __iwl_mld_time_sync_h__
/**
 * struct iwl_mld_time_sync_data - state of the single time-sync peer
 * @rcu_head: RCU head for kfree_rcu()
 * @peer_addr: MAC address of the configured peer
 * @active_protocols: bitmap of enabled IWL_TIME_SYNC_PROTOCOL_* values
 * @frame_list: TM/FTM frames waiting for their FW timestamp notification
 */
struct iwl_mld_time_sync_data {
	struct rcu_head rcu_head;
	u8 peer_addr[ETH_ALEN];
	u32 active_protocols;
	struct sk_buff_head frame_list;
};
int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr,
u32 protocols);
int iwl_mld_time_sync_fw_config(struct iwl_mld *mld);
void iwl_mld_deinit_time_sync(struct iwl_mld *mld);
void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb,
u8 *addr);
void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
#endif /* __iwl_mld_time_sync_h__ */

View File

@ -0,0 +1,717 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024 Intel Corporation
*/
#include <net/mac80211.h>
#include "tlc.h"
#include "hcmd.h"
#include "sta.h"
#include "fw/api/rs.h"
#include "fw/api/context.h"
#include "fw/api/dhc.h"
/* Translate a mac80211 RX bandwidth into the FW TLC channel-width enum.
 * Anything unrecognized (including 20 MHz) maps to 20 MHz.
 */
static u8 iwl_mld_fw_bw_from_sta_bw(const struct ieee80211_link_sta *link_sta)
{
	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_320:
		return IWL_TLC_MNG_CH_WIDTH_320MHZ;
	case IEEE80211_STA_RX_BW_160:
		return IWL_TLC_MNG_CH_WIDTH_160MHZ;
	case IEEE80211_STA_RX_BW_80:
		return IWL_TLC_MNG_CH_WIDTH_80MHZ;
	case IEEE80211_STA_RX_BW_40:
		return IWL_TLC_MNG_CH_WIDTH_40MHZ;
	default:
		return IWL_TLC_MNG_CH_WIDTH_20MHZ;
	}
}
/* Build the TLC config flags (STBC/LDPC/DCM/extra-EHT-LTF) from the
 * intersection of our own capabilities and the peer's.
 */
static __le16
iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct ieee80211_link_sta *link_sta,
			  const struct ieee80211_sta_he_cap *own_he_cap,
			  const struct ieee80211_sta_eht_cap *own_eht_cap)
{
	struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
	struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
	bool has_vht = vht_cap->vht_supported;
	u16 flags = 0;

	/* STBC flags: only if we can TX on more than one antenna and the
	 * peer advertises RX STBC for the highest mode it supports
	 */
	if (mld->cfg->ht_params->stbc &&
	    (hweight8(iwl_mld_get_valid_tx_ant(mld)) > 1)) {
		if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
				      IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
		else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
		else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
	}

	/* LDPC */
	if (mld->cfg->ht_params->ldpc &&
	    ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
	     (has_vht && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
		flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;

	if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
			       IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
		flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;

	/* if we ourselves can't do LDPC in the HE payload, clear it again */
	if (own_he_cap &&
	    !(own_he_cap->he_cap_elem.phy_cap_info[1] &
	      IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
		flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;

	/* DCM: needs peer RX support and our own TX support */
	if (he_cap->has_he &&
	    (he_cap->he_cap_elem.phy_cap_info[3] &
	     IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
	     own_he_cap &&
	     own_he_cap->he_cap_elem.phy_cap_info[3] &
	     IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
		flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;

	/* Extra EHT LTF: both sides must support it */
	if (own_eht_cap &&
	    own_eht_cap->eht_cap_elem.phy_cap_info[5] &
	    IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF &&
	    link_sta->eht_cap.has_eht &&
	    link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
	    IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) {
		flags |= IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK;
	}

	return cpu_to_le16(flags);
}
/* Translate the driver's valid TX antenna bitmap to the FW TLC chain mask */
static u8 iwl_mld_get_fw_chains(struct iwl_mld *mld)
{
	u8 valid_ants = iwl_mld_get_valid_tx_ant(mld);
	u8 fw_mask = 0;

	if (valid_ants & ANT_A)
		fw_mask |= IWL_TLC_MNG_CHAIN_A_MSK;
	if (valid_ants & ANT_B)
		fw_mask |= IWL_TLC_MNG_CHAIN_B_MSK;

	return fw_mask;
}
/* Build the bitmap of channel widths for which the peer supports short
 * GI. Only meaningful for HT/VHT; returns 0 for HE peers.
 */
static u8 iwl_mld_get_fw_sgi(struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
	struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
	u8 sgi_chwidths = 0;

	/* If the association supports HE, HT/VHT rates will never be used for
	 * Tx and therefor there's no need to set the
	 * sgi-per-channel-width-support bits
	 */
	if (he_cap->has_he)
		return 0;

	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
		sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
		sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
		sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
		sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);

	return sgi_chwidths;
}
/* Return the highest FW HT-rate MCS the peer supports at the given NSS,
 * decoded from the 2-bit-per-NSS VHT RX MCS map. 0 on a malformed map.
 */
static int
iwl_mld_get_highest_fw_mcs(const struct ieee80211_sta_vht_cap *vht_cap,
			   int nss)
{
	int shift = 2 * (nss - 1);
	u16 rx_mcs = (le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> shift) & 0x3;

	switch (rx_mcs) {
	case IEEE80211_VHT_MCS_SUPPORT_0_7:
		return IWL_TLC_MNG_HT_RATE_MCS7;
	case IEEE80211_VHT_MCS_SUPPORT_0_8:
		return IWL_TLC_MNG_HT_RATE_MCS8;
	case IEEE80211_VHT_MCS_SUPPORT_0_9:
		return IWL_TLC_MNG_HT_RATE_MCS9;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
/* Fill the per-NSS VHT rate bitmaps of the TLC config command from the
 * peer's VHT MCS map, capped by the negotiated bandwidth and NSS.
 */
static void
iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
		       const struct ieee80211_sta_vht_cap *vht_cap,
		       struct iwl_tlc_config_cmd_v4 *cmd)
{
	u16 supp;
	int i, highest_mcs;
	u8 max_nss = link_sta->rx_nss;
	struct ieee80211_vht_cap ieee_vht_cap = {
		.vht_cap_info = cpu_to_le32(vht_cap->cap),
		.supp_mcs = vht_cap->vht_mcs,
	};

	/* the station support only a single receive chain */
	if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
		max_nss = 1;

	for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
		int nss = i + 1;

		highest_mcs = iwl_mld_get_highest_fw_mcs(vht_cap, nss);
		if (!highest_mcs)
			continue;

		/* all MCS up to and including the highest supported one */
		supp = BIT(highest_mcs + 1) - 1;
		/* MCS 9 is masked out on 20 MHz channels */
		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
			supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);

		cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
		/* Check if VHT extended NSS indicates that the bandwidth/NSS
		 * configuration is supported - only for MCS 0 since we already
		 * decoded the MCS bits anyway ourselves.
		 */
		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
		    ieee80211_get_vht_max_nss(&ieee_vht_cap,
					      IEEE80211_VHT_CHANWIDTH_160MHZ,
					      0, true, nss) >= nss)
			cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
				cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
	}
}
/* Expand a mac80211 "highest supported HE MCS" code into the FW bitmap of
 * all MCS up to and including that one. 0 when unsupported or invalid.
 */
static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
{
	u8 highest;

	switch (mcs) {
	case IEEE80211_HE_MCS_SUPPORT_0_7:
		highest = IWL_TLC_MNG_HT_RATE_MCS7;
		break;
	case IEEE80211_HE_MCS_SUPPORT_0_9:
		highest = IWL_TLC_MNG_HT_RATE_MCS9;
		break;
	case IEEE80211_HE_MCS_SUPPORT_0_11:
		highest = IWL_TLC_MNG_HT_RATE_MCS11;
		break;
	case IEEE80211_HE_MCS_NOT_SUPPORTED:
		return 0;
	default:
		WARN(1, "invalid HE MCS %d\n", mcs);
		return 0;
	}

	return BIT(highest + 1) - 1;
}
/* Fill the per-NSS HE rate bitmaps of the TLC config command from the
 * intersection of the peer's RX MCS maps and our own TX MCS maps,
 * separately for 80 MHz and 160 MHz.
 */
static void
iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
		      const struct ieee80211_sta_he_cap *own_he_cap,
		      struct iwl_tlc_config_cmd_v4 *cmd)
{
	const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
	u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
	u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
	u16 tx_mcs_80 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_80);
	u16 tx_mcs_160 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_160);
	int i;
	u8 nss = link_sta->rx_nss;

	/* the station support only a single receive chain */
	if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
		nss = 1;

	for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
		/* each NSS occupies 2 bits in the HE MCS maps */
		u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
		u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
		u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
		u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;

		/* If one side doesn't support - mark both as not supporting */
		if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
		    _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
			_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
			_tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
		}
		if (_mcs_80 > _tx_mcs_80)
			_mcs_80 = _tx_mcs_80;
		cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
			cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));

		/* If one side doesn't support - mark both as not supporting */
		if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
		    _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
			_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
			_tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
		}
		if (_mcs_160 > _tx_mcs_160)
			_mcs_160 = _tx_mcs_160;
		cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
			cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
	}
}
/* OR @mcs_msk into the rate bitmap of every NSS up to @max_nss for @bw */
static void iwl_mld_set_eht_mcs(__le16 ht_rates[][3],
				enum IWL_TLC_MCS_PER_BW bw,
				u8 max_nss, u16 mcs_msk)
{
	if (max_nss >= 1)
		ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);

	if (max_nss >= 2)
		ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
}
/* Select the per-bandwidth EHT MCS/NSS capability entry; NULL for a
 * bandwidth index with no dedicated entry.
 */
static const
struct ieee80211_eht_mcs_nss_supp_bw *
iwl_mld_get_eht_mcs_of_bw(enum IWL_TLC_MCS_PER_BW bw,
			  const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
{
	if (bw == IWL_TLC_MCS_PER_BW_80)
		return &eht_mcs->bw._80;
	if (bw == IWL_TLC_MCS_PER_BW_160)
		return &eht_mcs->bw._160;
	if (bw == IWL_TLC_MCS_PER_BW_320)
		return &eht_mcs->bw._320;

	return NULL;
}
static u8 iwl_mld_get_eht_max_nss(u8 rx_nss, u8 tx_nss)
{
	u8 our_tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
	u8 peer_rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);

	/* the max nss that can be used,
	 * is the min with our tx capa and the peer rx capa.
	 */
	return min(our_tx, peer_rx);
}
/* min of our TX NSS and the peer's RX NSS for the given MCS range */
#define MAX_NSS_MCS(mcs_num, rx, tx) \
	iwl_mld_get_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss,	\
				(tx)->rx_tx_mcs ##mcs_num## _max_nss)
/* Fill the per-NSS EHT rate bitmaps of the TLC config command from the
 * intersection of the peer's RX and our TX EHT MCS/NSS capabilities,
 * per bandwidth.
 */
static void
iwl_mld_fill_eht_rates(struct ieee80211_vif *vif,
		       const struct ieee80211_link_sta *link_sta,
		       const struct ieee80211_sta_he_cap *own_he_cap,
		       const struct ieee80211_sta_eht_cap *own_eht_cap,
		       struct iwl_tlc_config_cmd_v4 *cmd)
{
	/* peer RX mcs capa */
	const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
		&link_sta->eht_cap.eht_mcs_nss_supp;
	/* our TX mcs capa */
	const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
		&own_eht_cap->eht_mcs_nss_supp;

	enum IWL_TLC_MCS_PER_BW bw;
	struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
	struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;

	/* peer is 20 MHz only */
	if (vif->type == NL80211_IFTYPE_AP &&
	    !(link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
	      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
		mcs_rx_20 = eht_rx_mcs->only_20mhz;
	} else {
		/* the ≥80 MHz struct has no separate MCS 0-7 entry:
		 * MCS 0-9 share the rx_tx_mcs9 NSS field
		 */
		mcs_rx_20.rx_tx_mcs7_max_nss =
			eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
		mcs_rx_20.rx_tx_mcs9_max_nss =
			eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
		mcs_rx_20.rx_tx_mcs11_max_nss =
			eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
		mcs_rx_20.rx_tx_mcs13_max_nss =
			eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
	}

	/* NIC is capable of 20 MHz only */
	if (!(own_he_cap->he_cap_elem.phy_cap_info[0] &
	      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
		mcs_tx_20 = eht_tx_mcs->only_20mhz;
	} else {
		mcs_tx_20.rx_tx_mcs7_max_nss =
			eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
		mcs_tx_20.rx_tx_mcs9_max_nss =
			eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
		mcs_tx_20.rx_tx_mcs11_max_nss =
			eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
		mcs_tx_20.rx_tx_mcs13_max_nss =
			eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
	}

	/* rates for 20/40/80 MHz */
	bw = IWL_TLC_MCS_PER_BW_80;
	iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
			    MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20),
			    GENMASK(7, 0));
	iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
			    MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20),
			    GENMASK(9, 8));
	iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
			    MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20),
			    GENMASK(11, 10));
	iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
			    MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20),
			    GENMASK(13, 12));

	/* rates for 160/320 MHz */
	for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
		const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
			iwl_mld_get_eht_mcs_of_bw(bw, eht_rx_mcs);
		const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
			iwl_mld_get_eht_mcs_of_bw(bw, eht_tx_mcs);

		/* got unsupported index for bw */
		if (!mcs_rx || !mcs_tx)
			continue;

		/* break out if we don't support the bandwidth */
		if (cmd->max_ch_width < (bw + IWL_TLC_MNG_CH_WIDTH_80MHZ))
			break;

		iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
				    MAX_NSS_MCS(9, mcs_rx, mcs_tx),
				    GENMASK(9, 0));
		iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
				    MAX_NSS_MCS(11, mcs_rx, mcs_tx),
				    GENMASK(11, 10));
		iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
				    MAX_NSS_MCS(13, mcs_rx, mcs_tx),
				    GENMASK(13, 12));
	}

	/* the station support only a single receive chain */
	if (link_sta->smps_mode == IEEE80211_SMPS_STATIC ||
	    link_sta->rx_nss < 2)
		memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
		       sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
}
/* Fill the rate bitmaps of the TLC config command and select the rate
 * mode (non-HT/HT/VHT/HE/EHT) based on the highest capability both the
 * peer and our own NIC support.
 */
static void
iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
			struct ieee80211_link_sta *link_sta,
			struct ieee80211_supported_band *sband,
			const struct ieee80211_sta_he_cap *own_he_cap,
			const struct ieee80211_sta_eht_cap *own_eht_cap,
			struct iwl_tlc_config_cmd_v4 *cmd)
{
	int i;
	u16 non_ht_rates = 0;
	unsigned long rates_bitmap;
	const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
	const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
	const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;

	/* non HT rates */
	rates_bitmap = link_sta->supp_rates[sband->band];
	for_each_set_bit(i, &rates_bitmap, BITS_PER_LONG)
		non_ht_rates |= BIT(sband->bitrates[i].hw_value);

	cmd->non_ht_rates = cpu_to_le16(non_ht_rates);
	cmd->mode = IWL_TLC_MNG_MODE_NON_HT;

	/* pick the highest mode supported by both sides */
	if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) {
		cmd->mode = IWL_TLC_MNG_MODE_EHT;
		iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap,
				       own_eht_cap, cmd);
	} else if (he_cap->has_he && own_he_cap) {
		cmd->mode = IWL_TLC_MNG_MODE_HE;
		iwl_mld_fill_he_rates(link_sta, own_he_cap, cmd);
	} else if (vht_cap->vht_supported) {
		cmd->mode = IWL_TLC_MNG_MODE_VHT;
		iwl_mld_fill_vht_rates(link_sta, vht_cap, cmd);
	} else if (ht_cap->ht_supported) {
		cmd->mode = IWL_TLC_MNG_MODE_HT;
		cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
			cpu_to_le16(ht_cap->mcs.rx_mask[0]);

		/* the station support only a single receive chain */
		if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
			cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
				0;
		else
			cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
				cpu_to_le16(ht_cap->mcs.rx_mask[1]);
	}
}
/* Build and send the TLC (rate scaling) configuration for @link_sta on
 * @band. Sent async since this can run inside an RCU read section.
 */
static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
				 struct ieee80211_vif *vif,
				 struct ieee80211_link_sta *link_sta,
				 enum nl80211_band band)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	struct ieee80211_supported_band *sband = mld->hw->wiphy->bands[band];
	const struct ieee80211_sta_he_cap *own_he_cap =
		ieee80211_get_he_iftype_cap_vif(sband, vif);
	const struct ieee80211_sta_eht_cap *own_eht_cap =
		ieee80211_get_eht_iftype_cap_vif(sband, vif);
	struct iwl_tlc_config_cmd_v4 cmd = {
		/* For AP mode, use 20 MHz until the STA is authorized */
		.max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
			iwl_mld_fw_bw_from_sta_bw(link_sta) :
			IWL_TLC_MNG_CH_WIDTH_20MHZ,
		.flags = iwl_mld_get_tlc_cmd_flags(mld, vif, link_sta,
						   own_he_cap, own_eht_cap),
		.chains = iwl_mld_get_fw_chains(mld),
		.sgi_ch_width_supp = iwl_mld_get_fw_sgi(link_sta),
		.max_mpdu_len = cpu_to_le16(link_sta->agg.max_rc_amsdu_len),
	};
	int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
	int ret;

	/* no FW station to configure */
	if (fw_sta_id < 0)
		return;

	cmd.sta_id = fw_sta_id;

	iwl_mld_fill_supp_rates(mld, vif, link_sta, sband,
				own_he_cap, own_eht_cap,
				&cmd);

	IWL_DEBUG_RATE(mld,
		       "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
		       cmd.sta_id, cmd.max_ch_width, cmd.mode);

	/* Send async since this can be called within a RCU-read section */
	ret = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(DATA_PATH_GROUP,
							   TLC_MNG_CONFIG_CMD),
					      CMD_ASYNC, &cmd);
	if (ret)
		IWL_ERR(mld, "Failed to send TLC cmd (%d)\n", ret);
}
/* Recalculate the A-MSDU length limit for @link_sta and let mac80211
 * refresh its aggregate limits.
 */
static void iwl_mld_recalc_amsdu_len(struct iwl_mld *mld,
				     struct ieee80211_link_sta *link_sta)
{
	const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;

	/* For EHT, HE and VHT - we can use the value as it was calculated by
	 * mac80211.
	 */
	if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he ||
	    link_sta->vht_cap.vht_supported)
		goto recalc;

	/* But for HT, mac80211 doesn't enforce to 4095, so force it here */
	if (ht_cap->ht_supported && ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
		/* Agg is offloaded, so we need to assume that agg are enabled
		 * and max mpdu in ampdu is 4095 (spec 802.11-2016 9.3.2.1)
		 */
		link_sta->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;

recalc:
	link_sta->agg.max_rc_amsdu_len = link_sta->agg.max_amsdu_len;
	ieee80211_sta_recalc_aggregates(link_sta->sta);
}
/* Send a TLC debug host command (DHC) to the firmware.
 * Returns 0 on success or a negative error code from the command layer.
 */
int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data)
{
	struct {
		struct iwl_dhc_cmd dhc;
		struct iwl_dhc_tlc_cmd tlc;
	} __packed cmd = {};
	int ret;

	/* DHC length is expressed in dwords */
	cmd.dhc.length = cpu_to_le32(sizeof(cmd.tlc) >> 2);
	cmd.dhc.index_and_mask =
		cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
			    DHC_INTEGRATION_TLC_DEBUG_CONFIG);
	cmd.tlc.sta_id = sta_id;
	cmd.tlc.type = cpu_to_le32(type);
	cmd.tlc.data[0] = cpu_to_le32(data);

	ret = iwl_mld_send_cmd_with_flags_pdu(mld,
					      WIDE_ID(IWL_ALWAYS_LONG_GROUP,
						      DEBUG_HOST_COMMAND),
					      CMD_ASYNC, &cmd);

	IWL_DEBUG_RATE(mld, "sta_id %d, type: 0x%X, value: 0x%X, ret%d\n",
		       sta_id, type, data, ret);

	return ret;
}
void iwl_mld_config_tlc_link(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_link_sta *link_sta)
{
enum nl80211_band band;
if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
return;
band = link_conf->chanreq.oper.chan->band;
iwl_mld_recalc_amsdu_len(mld, link_sta);
iwl_mld_send_tlc_cmd(mld, vif, link_sta, band);
}
/* Configure rate scaling on every active link of @sta under @vif.
 * Links without a corresponding link-sta are skipped.
 */
void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta)
{
	struct ieee80211_bss_conf *link_conf;
	int link_id;

	lockdep_assert_wiphy(mld->wiphy);

	for_each_vif_active_link(vif, link_conf, link_id) {
		struct ieee80211_link_sta *link_sta =
			link_sta_dereference_check(sta, link_id);

		/* Skip links that are not fully set up on both sides */
		if (!link_conf || !link_sta)
			continue;

		iwl_mld_config_tlc_link(mld, vif, link_conf, link_sta);
	}
}
/* Compute the maximum A-MSDU size usable for @tid on @link_sta.
 *
 * Starts from the rate-control limit (agg.max_rc_amsdu_len) and clamps it
 * so an A-MSDU never exceeds the TX FIFO that serves this TID's AC, leaving
 * a 256-byte margin for the TX command + headers (and room for the start of
 * the next packet, to allow bursts).
 *
 * Returns 0 for an out-of-range TID.
 */
static u16
iwl_mld_get_amsdu_size_of_tid(struct iwl_mld *mld,
			      struct ieee80211_link_sta *link_sta,
			      unsigned int tid)
{
	struct ieee80211_sta *sta = link_sta->sta;
	struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
	/* static const: build the TID -> AC map once instead of
	 * re-initializing it on the stack on every call
	 */
	static const u8 tid_to_mac80211_ac[] = {
		IEEE80211_AC_BE,
		IEEE80211_AC_BK,
		IEEE80211_AC_BK,
		IEEE80211_AC_BE,
		IEEE80211_AC_VI,
		IEEE80211_AC_VI,
		IEEE80211_AC_VO,
		IEEE80211_AC_VO,
	};
	unsigned int result = link_sta->agg.max_rc_amsdu_len;
	u8 ac, txf, lmac;

	lockdep_assert_wiphy(mld->wiphy);

	/* Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	if (WARN_ON(tid >= ARRAY_SIZE(tid_to_mac80211_ac)))
		return 0;

	ac = tid_to_mac80211_ac[tid];

	/* For HE redirect to trigger based fifos */
	if (link_sta->he_cap.has_he)
		ac += 4;

	txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(ac);

	/* Only one link: take the lmac according to the band */
	if (hweight16(sta->valid_links) <= 1) {
		enum nl80211_band band;
		struct ieee80211_bss_conf *link =
			wiphy_dereference(mld->wiphy,
					  vif->link_conf[link_sta->link_id]);

		if (WARN_ON(!link || !link->chanreq.oper.chan))
			band = NL80211_BAND_2GHZ;
		else
			band = link->chanreq.oper.chan->band;
		lmac = iwl_mld_get_lmac_id(mld, band);

	/* More than one link but with 2 lmacs: take the minimum over both */
	} else if (fw_has_capa(&mld->fw->ucode_capa,
			       IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
		lmac = IWL_LMAC_5G_INDEX;
		result = min_t(unsigned int, result,
			       mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
		lmac = IWL_LMAC_24G_INDEX;

	/* More than one link but only one lmac */
	} else {
		lmac = IWL_LMAC_24G_INDEX;
	}

	return min_t(unsigned int, result,
		     mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}
/* Handle a TLC update notification from the firmware.
 *
 * Two (possibly combined) updates are carried by the notification:
 *  - IWL_TLC_NOTIF_FLAG_RATE: the new TX rate chosen by the firmware,
 *    cached in the link-sta for debugfs/statistics.
 *  - IWL_TLC_NOTIF_FLAG_AMSDU: a new A-MSDU size/TID bitmap, validated and
 *    propagated to mac80211's per-TID aggregate limits.
 */
void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
			      struct iwl_rx_packet *pkt)
{
	struct iwl_tlc_update_notif *notif = (void *)pkt->data;
	struct ieee80211_link_sta *link_sta;
	u32 flags = le32_to_cpu(notif->flags);
	u32 enabled;
	/* Keep the full 32-bit firmware value here: truncating to u16
	 * before the IWL_FW_CHECK below could let an invalid size wrap
	 * into the valid range and silently pass validation.
	 */
	u32 size;

	if (IWL_FW_CHECK(mld, notif->sta_id >= mld->fw->ucode_capa.num_stations,
			 "Invalid sta id (%d) in TLC notification\n",
			 notif->sta_id))
		return;

	link_sta = wiphy_dereference(mld->wiphy,
				     mld->fw_id_to_link_sta[notif->sta_id]);

	if (WARN(IS_ERR_OR_NULL(link_sta),
		 "link_sta of sta id (%d) doesn't exist\n", notif->sta_id))
		return;

	if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
		struct iwl_mld_link_sta *mld_link_sta =
			iwl_mld_link_sta_from_mac80211(link_sta);
		char pretty_rate[100];

		if (WARN_ON(!mld_link_sta))
			return;

		mld_link_sta->last_rate_n_flags = le32_to_cpu(notif->rate);

		rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
				     mld_link_sta->last_rate_n_flags);
		IWL_DEBUG_RATE(mld, "TLC notif: new rate = %s\n", pretty_rate);
	}

	/* We are done processing the notif */
	if (!(flags & IWL_TLC_NOTIF_FLAG_AMSDU))
		return;

	enabled = le32_to_cpu(notif->amsdu_enabled);
	size = le32_to_cpu(notif->amsdu_size);

	/* A-MSDU is not worthwhile below this size; disable it entirely */
	if (size < 2000) {
		size = 0;
		enabled = 0;
	}

	if (IWL_FW_CHECK(mld, size > link_sta->agg.max_amsdu_len,
			 "Invalid AMSDU len in TLC notif: %d (Max AMSDU len: %d)\n",
			 size, link_sta->agg.max_amsdu_len))
		return;

	/* Validated above: size fits in the u16 max_rc_amsdu_len */
	link_sta->agg.max_rc_amsdu_len = size;

	for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
		if (enabled & BIT(i))
			link_sta->agg.max_tid_amsdu_len[i] =
				iwl_mld_get_amsdu_size_of_tid(mld, link_sta, i);
		else
			link_sta->agg.max_tid_amsdu_len[i] = 0;
	}

	ieee80211_sta_recalc_aggregates(link_sta->sta);

	IWL_DEBUG_RATE(mld,
		       "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
		       le32_to_cpu(notif->amsdu_size), size, enabled);
}

View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2024 Intel Corporation
 */
#ifndef __iwl_mld_tlc_h__
#define __iwl_mld_tlc_h__
#include "mld.h"
/* Configure rate scaling (TLC) in the firmware for one link of a station */
void iwl_mld_config_tlc_link(struct iwl_mld *mld,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *link_conf,
			     struct ieee80211_link_sta *link_sta);
/* Configure rate scaling on all active links of @sta under @vif */
void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta);
/* Handle a TLC update notification (rate / A-MSDU) from the firmware */
void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
			      struct iwl_rx_packet *pkt);
/* Send a TLC debug host command; returns 0 or a negative error */
int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data);
#endif /* __iwl_mld_tlc_h__ */

File diff suppressed because it is too large. (Load Diff)

View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2024 Intel Corporation
 */
#ifndef __iwl_mld_tx_h__
#define __iwl_mld_tx_h__
#include "mld.h"
/* Marker for a txq that has no firmware queue allocated */
#define IWL_MLD_INVALID_QUEUE 0xFFFF
/* Marker used while TX on the queue is being dropped/flushed */
#define IWL_MLD_INVALID_DROP_TX 0xFFFE
/**
 * struct iwl_mld_txq - TX Queue data
 *
 * @fw_id: the fw id of this txq. Only valid when &status.allocated is true.
 * @status: bitmap of the txq status
 * @status.allocated: Indicates that the queue was allocated.
 * @status.stop_full: Indicates that the queue is full and should stop TXing.
 * @list: list pointer, for &mld::txqs_to_add
 * @tx_request: makes sure that if there are multiple threads that want to tx
 *	from this txq, only one of them will do all the TXing.
 *	This is needed to avoid spinning the trans txq lock, which is expensive
 */
struct iwl_mld_txq {
	/* Add here fields that need clean up on restart */
	struct_group(zeroed_on_hw_restart,
		u16 fw_id;
		struct {
			u8 allocated:1;
			u8 stop_full:1;
		} status;
	);
	struct list_head list;
	atomic_t tx_request;
	/* And here fields that survive a fw restart */
};
/* Initialize the driver-private txq state (list linkage and tx_request) */
static inline void iwl_mld_init_txq(struct iwl_mld_txq *mld_txq)
{
	INIT_LIST_HEAD(&mld_txq->list);
	atomic_set(&mld_txq->tx_request, 0);
}
/* Get the driver-private txq data embedded in a mac80211 txq */
static inline struct iwl_mld_txq *
iwl_mld_txq_from_mac80211(struct ieee80211_txq *txq)
{
	return (void *)txq->drv_priv;
}
void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk);
void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
void iwl_mld_add_txq_list(struct iwl_mld *mld);
void
iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id);
void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
				  struct iwl_rx_packet *pkt);
int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id);
int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq);
int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
			    struct ieee80211_sta *sta,
			    u32 old_sta_mask, u32 new_sta_mask);
void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
					struct iwl_rx_packet *pkt);
void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant);
u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_vif *vif);
void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
		    struct ieee80211_txq *txq);
#endif /* __iwl_mld_tx_h__ */

View File

@ -498,7 +498,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
/* Bz devices */
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
@ -543,7 +544,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
/* Dr devices */
{IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
#endif /* CONFIG_IWLMVM */
#endif /* CONFIG_IWLMLD */
{0}
};
@ -1109,6 +1110,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
/* Bz */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
@ -1230,7 +1233,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_br, iwl_br_name),
#endif /* CONFIG_IWLMVM */
#endif /* CONFIG_IWLMLD */
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);