Merge tag 'wireless-next-2024-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next

Johannes Berg says:

====================
Highlights this time are:

 - cfg80211/nl80211:
    * improvements for 6 GHz regulatory flexibility

 - mac80211:
    * use generic netdev stats
    * multi-link improvements/fixes

 - brcmfmac:
    * MFP support (to enable WPA3)

 - wilc1000:
    * suspend/resume improvements

 - iwlwifi:
    * remove support for older FW for new devices
    * fast resume (keeping the device configured)

 - wl18xx:
    * support newer firmware versions

* tag 'wireless-next-2024-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next: (100 commits)
  wifi: brcmfmac: of: Support interrupts-extended
  wifi: brcmsmac: advertise MFP_CAPABLE to enable WPA3
  net: rfkill: Correct return value in invalid parameter case
  wifi: mac80211: fix NULL dereference at band check in starting tx ba session
  wifi: iwlwifi: mvm: fix rs.h kernel-doc
  wifi: iwlwifi: fw: api: datapath: fix kernel-doc
  wifi: iwlwifi: fix remaining mistagged kernel-doc comments
  wifi: iwlwifi: fix prototype mismatch kernel-doc warnings
  wifi: iwlwifi: fix kernel-doc in iwl-fh.h
  wifi: iwlwifi: fix kernel-doc in iwl-trans.h
  wifi: iwlwifi: pcie: fix kernel-doc
  wifi: iwlwifi: dvm: fix kernel-doc warnings
  wifi: iwlwifi: mvm: don't log error for failed UATS table read
  wifi: iwlwifi: trans: make bad state warnings
  wifi: iwlwifi: fw: api: fix some kernel-doc
  wifi: iwlwifi: mvm: remove init_dbg module parameter
  wifi: iwlwifi: update the BA notification API
  wifi: iwlwifi: mvm: always unblock EMLSR on ROC end
  wifi: iwlwifi: mvm: use IWL_FW_CHECK for link ID check
  wifi: iwlwifi: mvm: don't flush BSSes on restart with MLD API
  ...
====================

Link: https://patch.msgid.link/20240627114135.28507-3-johannes@sipsolutions.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2024-06-27 13:53:43 -07:00
commit 56bf02c26a
183 changed files with 4705 additions and 4312 deletions

@ -1550,7 +1550,7 @@ fail:
return retval;
}
static void adm8211_stop(struct ieee80211_hw *dev)
static void adm8211_stop(struct ieee80211_hw *dev, bool suspend)
{
struct adm8211_priv *priv = dev->priv;

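This hunk, and the many like it through the rest of the diff, adapt drivers to a mac80211 change in this series: the ieee80211_ops stop() callback gained a bool suspend argument so a driver can tell a plain interface stop from a stop on the suspend path (what the iwlwifi "fast resume" highlight builds on). A minimal sketch of a driver using the new signature; all mydrv names are hypothetical:

#include <net/mac80211.h>

struct mydrv_priv {
	bool up;
	bool configured;
};

static void mydrv_halt(struct mydrv_priv *priv, bool keep_config)
{
	priv->up = false;
	if (!keep_config)
		priv->configured = false;	/* full teardown */
}

static void mydrv_stop(struct ieee80211_hw *hw, bool suspend)
{
	struct mydrv_priv *priv = hw->priv;

	/* suspend is true when mac80211 stops the device on the system
	 * suspend path; a driver supporting fast resume may keep its
	 * configuration there for a quicker restart. */
	mydrv_halt(priv, suspend);
}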

@ -1061,7 +1061,7 @@ err:
return error;
}
static void ar5523_stop(struct ieee80211_hw *hw)
static void ar5523_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ar5523 *ar = hw->priv;


@ -5363,7 +5363,7 @@ err:
return ret;
}
static void ath10k_stop(struct ieee80211_hw *hw)
static void ath10k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath10k *ar = hw->priv;
u32 opt;


@ -6278,7 +6278,7 @@ err:
return ret;
}
static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;


@ -6112,7 +6112,7 @@ static void ath12k_mac_stop(struct ath12k *ar)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;


@ -2847,7 +2847,7 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
* if another thread does a system call and the thread doing the
* stop is preempted).
*/
void ath5k_stop(struct ieee80211_hw *hw)
void ath5k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath5k_hw *ah = hw->priv;
int ret;


@ -92,7 +92,7 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
int ath5k_start(struct ieee80211_hw *hw);
void ath5k_stop(struct ieee80211_hw *hw);
void ath5k_stop(struct ieee80211_hw *hw, bool suspend);
void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);


@ -973,7 +973,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
return ret;
}
static void ath9k_htc_stop(struct ieee80211_hw *hw)
static void ath9k_htc_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;


@ -895,7 +895,7 @@ static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
ath_key_delete(common, keyix);
}
static void ath9k_stop(struct ieee80211_hw *hw)
static void ath9k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;


@ -439,7 +439,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar)
cancel_work_sync(&ar->ampdu_work);
}
static void carl9170_op_stop(struct ieee80211_hw *hw)
static void carl9170_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ar9170 *ar = hw->priv;


@ -278,7 +278,7 @@ out_err:
return ret;
}
static void wcn36xx_stop(struct ieee80211_hw *hw)
static void wcn36xx_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wcn36xx *wcn = hw->priv;


@ -332,7 +332,7 @@ static int at76_dfu_get_status(struct usb_device *udev,
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATUS,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
0, 0, status, sizeof(struct dfu_status),
0, 0, status, sizeof(*status),
USB_CTRL_GET_TIMEOUT);
return ret;
}
@ -366,7 +366,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
u32 dfu_timeout = 0;
int bsize = 0;
int blockno = 0;
struct dfu_status *dfu_stat_buf = NULL;
struct dfu_status *dfu_stat_buf;
u8 *dfu_state = NULL;
u8 *block = NULL;
@ -378,7 +378,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
return -EINVAL;
}
dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL);
dfu_stat_buf = kmalloc(sizeof(*dfu_stat_buf), GFP_KERNEL);
if (!dfu_stat_buf) {
ret = -ENOMEM;
goto exit;
@ -721,9 +721,11 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
int buf_size)
{
int ret;
struct at76_command *cmd_buf = kmalloc(sizeof(struct at76_command) +
buf_size, GFP_KERNEL);
size_t total_size;
struct at76_command *cmd_buf;
total_size = struct_size(cmd_buf, data, buf_size);
cmd_buf = kmalloc(total_size, GFP_KERNEL);
if (!cmd_buf)
return -ENOMEM;
@ -732,15 +734,13 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
cmd_buf->size = cpu_to_le16(buf_size);
memcpy(cmd_buf->data, buf, buf_size);
at76_dbg_dump(DBG_CMD, cmd_buf, sizeof(struct at76_command) + buf_size,
at76_dbg_dump(DBG_CMD, cmd_buf, total_size,
"issuing command %s (0x%02x)",
at76_get_cmd_string(cmd), cmd);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
0, 0, cmd_buf,
sizeof(struct at76_command) + buf_size,
USB_CTRL_GET_TIMEOUT);
0, 0, cmd_buf, total_size, USB_CTRL_GET_TIMEOUT);
kfree(cmd_buf);
return ret;
}
@ -931,14 +931,12 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
{
int i;
int ret;
struct mib_mac_addr *m = kmalloc(sizeof(struct mib_mac_addr),
GFP_KERNEL);
struct mib_mac_addr *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m,
sizeof(struct mib_mac_addr));
ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_ADDR) failed: %d\n", ret);
@ -961,13 +959,12 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
int i;
int ret;
int key_len;
struct mib_mac_wep *m = kmalloc(sizeof(struct mib_mac_wep), GFP_KERNEL);
struct mib_mac_wep *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m,
sizeof(struct mib_mac_wep));
ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_WEP) failed: %d\n", ret);
@ -997,14 +994,12 @@ exit:
static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
{
int ret;
struct mib_mac_mgmt *m = kmalloc(sizeof(struct mib_mac_mgmt),
GFP_KERNEL);
struct mib_mac_mgmt *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m,
sizeof(struct mib_mac_mgmt));
ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_MGMT) failed: %d\n", ret);
@ -1035,12 +1030,12 @@ exit:
static void at76_dump_mib_mac(struct at76_priv *priv)
{
int ret;
struct mib_mac *m = kmalloc(sizeof(struct mib_mac), GFP_KERNEL);
struct mib_mac *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC) failed: %d\n", ret);
@ -1072,12 +1067,12 @@ exit:
static void at76_dump_mib_phy(struct at76_priv *priv)
{
int ret;
struct mib_phy *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL);
struct mib_phy *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (PHY) failed: %d\n", ret);
@ -1130,13 +1125,12 @@ exit:
static void at76_dump_mib_mdomain(struct at76_priv *priv)
{
int ret;
struct mib_mdomain *m = kmalloc(sizeof(struct mib_mdomain), GFP_KERNEL);
struct mib_mdomain *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m,
sizeof(struct mib_mdomain));
ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MDOMAIN) failed: %d\n", ret);
@ -1375,7 +1369,7 @@ static int at76_startup_device(struct at76_priv *priv)
priv->scan_min_time, priv->scan_max_time,
priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive");
memset(ccfg, 0, sizeof(struct at76_card_config));
memset(ccfg, 0, sizeof(*ccfg));
ccfg->promiscuous_mode = 0;
ccfg->short_retry_limit = priv->short_retry_limit;
@ -1411,7 +1405,7 @@ static int at76_startup_device(struct at76_priv *priv)
ccfg->beacon_period = cpu_to_le16(priv->beacon_period);
ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config,
sizeof(struct at76_card_config));
sizeof(*ccfg));
if (ret < 0) {
wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n",
ret);
@ -1856,7 +1850,7 @@ error:
return 0;
}
static void at76_mac80211_stop(struct ieee80211_hw *hw)
static void at76_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
{
struct at76_priv *priv = hw->priv;
@ -2443,7 +2437,7 @@ static int at76_probe(struct usb_interface *interface,
struct usb_device *udev;
int op_mode;
int need_ext_fw = 0;
struct mib_fw_version *fwv = NULL;
struct mib_fw_version *fwv;
int board_type = (int)id->driver_info;
udev = usb_get_dev(interface_to_usbdev(interface));
@ -2531,7 +2525,7 @@ static int at76_probe(struct usb_interface *interface,
usb_set_intfdata(interface, priv);
memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version));
memcpy(&priv->fw_version, fwv, sizeof(*fwv));
priv->board_type = board_type;
ret = at76_init_new_device(priv, interface);


@ -151,7 +151,7 @@ struct at76_command {
u8 cmd;
u8 reserved;
__le16 size;
u8 data[];
u8 data[] __counted_by_le(size);
} __packed;
/* Length of Atmel-specific Rx header before 802.11 frame */

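The at76 hunks above replace open-coded kmalloc(sizeof(struct ...) + len) allocations with struct_size(), and the header now annotates the flexible array with __counted_by_le(size). A minimal sketch of the idiom, using a hypothetical demo_cmd that mirrors at76_command:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_cmd {
	u8 cmd;
	u8 reserved;
	__le16 size;
	u8 data[] __counted_by_le(size);	/* bounds tracked by @size */
} __packed;

static struct demo_cmd *demo_cmd_alloc(size_t buf_size)
{
	/* struct_size() computes sizeof(*c) + buf_size with overflow
	 * checking; on overflow it saturates to SIZE_MAX so kmalloc()
	 * fails instead of returning an undersized buffer. */
	struct demo_cmd *c = kmalloc(struct_size(c, data, buf_size),
				     GFP_KERNEL);

	if (c)
		c->size = cpu_to_le16(buf_size);
	return c;
}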

@ -5078,7 +5078,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
return err;
}
static void b43_op_stop(struct ieee80211_hw *hw)
static void b43_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;


@ -3485,7 +3485,7 @@ out_mutex_unlock:
return err;
}
static void b43legacy_op_stop(struct ieee80211_hw *hw)
static void b43legacy_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;


@ -4071,7 +4071,7 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
struct cfg80211_wowlan_wakeup *wakeup;
u32 wakeind;
s32 err;
int timeout;
long time_left;
err = brcmf_fil_iovar_data_get(ifp, "wowl_wakeind", &wake_ind_le,
sizeof(wake_ind_le));
@ -4113,10 +4113,10 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
}
if (wakeind & BRCMF_WOWL_PFN_FOUND) {
brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_PFN_FOUND\n");
timeout = wait_event_timeout(cfg->wowl.nd_data_wait,
cfg->wowl.nd_data_completed,
BRCMF_ND_INFO_TIMEOUT);
if (!timeout)
time_left = wait_event_timeout(cfg->wowl.nd_data_wait,
cfg->wowl.nd_data_completed,
BRCMF_ND_INFO_TIMEOUT);
if (!time_left)
bphy_err(drvr, "No result for wowl net detect\n");
else
wakeup_data.net_detect = cfg->wowl.nd_info;

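The int timeout to long time_left conversion above matches the actual return type of wait_event_timeout(), which yields 0 on timeout and the remaining jiffies (at least 1) otherwise; several drivers in this series get the same treatment. A small illustration with hypothetical names:

#include <linux/wait.h>
#include <linux/jiffies.h>

static bool demo_wait_for_results(wait_queue_head_t *wq, const bool *done)
{
	long time_left = wait_event_timeout(*wq, *done,
					    msecs_to_jiffies(2000));

	return time_left > 0;	/* 0 means the 2 s window expired */
}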

@ -70,6 +70,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *root, *np = dev->of_node;
struct of_phandle_args oirq;
const char *prop;
int irq;
int err;
@ -129,10 +130,10 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
sdio->drive_strength = val;
/* make sure there are interrupts defined in the node */
if (!of_property_present(np, "interrupts"))
if (of_irq_parse_one(np, 0, &oirq))
return;
irq = irq_of_parse_and_map(np, 0);
irq = irq_create_of_mapping(&oirq);
if (!irq) {
brcmf_err("interrupt could not be mapped\n");
return;

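The brcmf_of hunk above drops the explicit of_property_present("interrupts") check in favor of of_irq_parse_one() plus irq_create_of_mapping(), since of_irq_parse_one() also resolves the interrupts-extended property — the point of the "Support interrupts-extended" commit in the list above. The shape of the pattern, as a hedged sketch:

#include <linux/of_irq.h>

/* Returns a mapped Linux IRQ number, or 0 if the node describes no
 * interrupt via either "interrupts" or "interrupts-extended". */
static unsigned int demo_map_first_irq(struct device_node *np)
{
	struct of_phandle_args oirq;

	if (of_irq_parse_one(np, 0, &oirq))
		return 0;

	return irq_create_of_mapping(&oirq);
}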

@ -457,7 +457,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
return err;
}
static void brcms_ops_stop(struct ieee80211_hw *hw)
static void brcms_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct brcms_info *wl = hw->priv;
int status;
@ -1090,6 +1090,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, MFP_CAPABLE);
hw->extra_tx_headroom = brcms_c_get_header_len();
hw->queues = N_TX_QUEUES;

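The one-line MFP_CAPABLE addition above is what the "advertise MFP_CAPABLE to enable WPA3" commit title refers to: WPA3-SAE requires protected management frames (802.11w), so a mac80211 driver has to advertise the capability before user space will negotiate it. Sketched in isolation:

#include <net/mac80211.h>

static void demo_hw_caps(struct ieee80211_hw *hw)
{
	/* Advertise 802.11w management frame protection; WPA3-SAE
	 * requires PMF, so without this flag supplicants fall back
	 * to WPA2-only configurations. */
	ieee80211_hw_set(hw, MFP_CAPABLE);
}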

@ -2813,7 +2813,7 @@ out_release_irq:
}
static void
il3945_mac_stop(struct ieee80211_hw *hw)
il3945_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct il_priv *il = hw->priv;


@ -5820,7 +5820,7 @@ out:
}
void
il4965_mac_stop(struct ieee80211_hw *hw)
il4965_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct il_priv *il = hw->priv;


@ -151,7 +151,7 @@ void il4965_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb);
int il4965_mac_start(struct ieee80211_hw *hw);
void il4965_mac_stop(struct ieee80211_hw *hw);
void il4965_mac_stop(struct ieee80211_hw *hw, bool suspend);
void il4965_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);


@ -14,7 +14,6 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2023 Intel Corporation
* Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@ -13,7 +13,7 @@
#define IWL_22000_UCODE_API_MAX 77
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 50
#define IWL_22000_UCODE_API_MIN 77
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d


@ -13,7 +13,7 @@
#define IWL_AX210_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_AX210_UCODE_API_MIN 59
#define IWL_AX210_UCODE_API_MIN 77
/* NVM versions */
#define IWL_AX210_NVM_VERSION 0x0a1d


@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
#define IWL_BZ_UCODE_API_MAX 91
#define IWL_BZ_UCODE_API_MAX 92
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80
#define IWL_BZ_UCODE_API_MIN 90
/* NVM versions */
#define IWL_BZ_NVM_VERSION 0x0a1d


@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
#define IWL_SC_UCODE_API_MAX 91
#define IWL_SC_UCODE_API_MAX 92
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82
#define IWL_SC_UCODE_API_MIN 90
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2023 Intel Corporation
* Copyright (C) 2005-2014, 2023-2024 Intel Corporation
*/
/*
* Please use this file (commands.h) only for uCode API definitions.
@ -177,7 +177,7 @@ enum {
*
*****************************************************************************/
/**
/*
* iwlagn rate_n_flags bit fields
*
* rate_n_flags format is used in following iwlagn commands:
@ -251,7 +251,7 @@ enum {
#define RATE_MCS_SGI_POS 13
#define RATE_MCS_SGI_MSK 0x2000
/**
/*
* rate_n_flags Tx antenna masks
* bit14:16
*/
@ -2767,7 +2767,7 @@ struct iwl_missed_beacon_notif {
*
*****************************************************************************/
/**
/*
* SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
*
* This command sets up the Rx signal detector for a sensitivity level that

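Several hunks here and below demote comment openers from /** to /*: scripts/kernel-doc treats every /** comment as documentation for the following symbol and warns when the comment doesn't follow the expected grammar. Roughly, the convention the fixes restore:

/**
 * struct demo_item - short description (kernel-doc needs the dash)
 * @id: element identifier
 * @len: payload length in bytes
 */
struct demo_item {
	u32 id;
	u32 len;
};

/*
 * Free-form notes that document no specific symbol open with a plain
 * comment instead, which is all these conversions change.
 */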

@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(C) 2018 - 2019, 2022 - 2023 Intel Corporation
* Copyright(C) 2018 - 2019, 2022 - 2024 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@ -145,8 +145,6 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec &&
priv->trans->ops->d3_suspend &&
priv->trans->ops->d3_resume &&
device_can_wakeup(priv->trans->dev)) {
priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@ -302,7 +300,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
return ret;
}
static void iwlagn_mac_stop(struct ieee80211_hw *hw)
static void iwlagn_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@ -730,8 +728,6 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
break;
case IEEE80211_AMPDU_TX_START:
if (!priv->trans->ops->txq_enable)
break;
if (!iwl_enable_tx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Tx\n");


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*
* Copyright(c) 2007 - 2014, 2023 Intel Corporation. All rights reserved.
* Copyright(c) 2007 - 2014, 2023-2024 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@ -64,7 +64,7 @@ struct iwl_tt_trans {
};
/**
* struct iwl_tt_mgnt - Thermal Throttling Management structure
* struct iwl_tt_mgmt - Thermal Throttling Management structure
* @advanced_tt: advanced thermal throttle required
* @state: current Thermal Throttling state
* @tt_power_mode: Thermal Throttling power mode index


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -113,7 +113,7 @@ struct iwl_alive_ntf_v6 {
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
/**
* enum iwl_extended_cfg_flag - commands driver may send before
* enum iwl_extended_cfg_flags - commands driver may send before
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
@ -126,7 +126,7 @@ enum iwl_extended_cfg_flags {
};
/**
* struct iwl_extended_cfg_cmd - mark what commands ucode should wait for
* struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for
* before finishing init flows
* @init_flags: values from iwl_extended_cfg_flags
*/


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2020, 2022 Intel Corporation
* Copyright (C) 2012-2014, 2020, 2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -77,7 +77,7 @@ struct iwl_time_quota_data_v1 {
} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
/**
* struct iwl_time_quota_cmd - configuration of time quota between bindings
* struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings
* ( TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
* Note: on non-CDB the fourth one is the auxilary mac and is


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2023 Intel Corporation
* Copyright (C) 2023-2024 Intel Corporation
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
@ -77,73 +77,6 @@ struct iwl_bt_coex_ci_cmd {
__le32 secondary_ch_phy_id;
} __packed; /* BT_CI_MSG_API_S_VER_2 */
#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
enum iwl_bt_mxbox_dw0 {
BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
BT_MBOX(0, LE_PROF1, 3, 1),
BT_MBOX(0, LE_PROF2, 4, 1),
BT_MBOX(0, LE_PROF_OTHER, 5, 1),
BT_MBOX(0, CHL_SEQ_N, 8, 4),
BT_MBOX(0, INBAND_S, 13, 1),
BT_MBOX(0, LE_MIN_RSSI, 16, 4),
BT_MBOX(0, LE_SCAN, 20, 1),
BT_MBOX(0, LE_ADV, 21, 1),
BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
BT_MBOX(0, OPEN_CON_1, 28, 2),
};
enum iwl_bt_mxbox_dw1 {
BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
BT_MBOX(1, IP_SR, 4, 1),
BT_MBOX(1, LE_MSTR, 5, 1),
BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
BT_MBOX(1, MSG_TYPE, 16, 3),
BT_MBOX(1, SSN, 19, 2),
};
enum iwl_bt_mxbox_dw2 {
BT_MBOX(2, SNIFF_ACT, 0, 3),
BT_MBOX(2, PAG, 3, 1),
BT_MBOX(2, INQUIRY, 4, 1),
BT_MBOX(2, CONN, 5, 1),
BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
BT_MBOX(2, DISC, 13, 1),
BT_MBOX(2, SCO_TX_ACT, 16, 2),
BT_MBOX(2, SCO_RX_ACT, 18, 2),
BT_MBOX(2, ESCO_RE_TX, 20, 2),
BT_MBOX(2, SCO_DURATION, 24, 6),
};
enum iwl_bt_mxbox_dw3 {
BT_MBOX(3, SCO_STATE, 0, 1),
BT_MBOX(3, SNIFF_STATE, 1, 1),
BT_MBOX(3, A2DP_STATE, 2, 1),
BT_MBOX(3, ACL_STATE, 3, 1),
BT_MBOX(3, MSTR_STATE, 4, 1),
BT_MBOX(3, OBX_STATE, 5, 1),
BT_MBOX(3, A2DP_SRC, 6, 1),
BT_MBOX(3, OPEN_CON_2, 8, 2),
BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
BT_MBOX(3, INBAND_P, 13, 1),
BT_MBOX(3, MSG_TYPE_2, 16, 3),
BT_MBOX(3, SSN_2, 19, 2),
BT_MBOX(3, UPDATE_REQUEST, 21, 1),
};
#define BT_MBOX_MSG(_notif, _num, _field) \
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
#define BT_MBOX_PRINT(_num, _field, _end) \
pos += scnprintf(buf + pos, bufsz - pos, \
"\t%s: %d%s", \
#_field, \
BT_MBOX_MSG(notif, _num, _field), \
true ? "\n" : ", ")
enum iwl_bt_activity_grading {
BT_OFF = 0,
BT_ON_NO_CONNECTION = 1,


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2019, 2023 Intel Corporation
* Copyright (C) 2012-2014, 2018-2019, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -76,7 +76,7 @@ struct iwl_phy_specific_cfg {
} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/
/**
* struct iwl_phy_cfg_cmd - Phy configuration command
* struct iwl_phy_cfg_cmd_v1 - Phy configuration command
*
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data


@ -42,7 +42,7 @@ struct iwl_d3_manager_config {
/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
/**
* enum iwl_d3_proto_offloads - enabled protocol offloads
* enum iwl_proto_offloads - enabled protocol offloads
* @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
* @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
* @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid
@ -195,7 +195,7 @@ struct iwl_wowlan_pattern_v1 {
#define IWL_WOWLAN_MAX_PATTERNS 20
/**
* struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
* struct iwl_wowlan_patterns_cmd_v1 - WoWLAN wakeup patterns
*/
struct iwl_wowlan_patterns_cmd_v1 {
/**


@ -231,28 +231,33 @@ struct iwl_synced_time_rsp {
#define PTP_CTX_MAX_DATA_SIZE 128
/**
* struct iwl_time_msmt_ptp_ctx - Vendor specific information element
* struct iwl_time_msmt_ptp_ctx - Vendor specific element
* to allow a space for flexibility for the userspace App
*
* @element_id: element id of vendor specific ie
* @length: length of vendor specific ie
* @reserved: for alignment
* @data: vendor specific data blob
* @ftm: FTM specific vendor element
* @ftm.element_id: element id of vendor specific ie
* @ftm.length: length of vendor specific ie
* @ftm.reserved: for alignment
* @ftm.data: vendor specific data blob
* @tm: TM specific vendor element
* @tm.element_id: element id of vendor specific ie
* @tm.length: length of vendor specific ie
* @tm.data: vendor specific data blob
*/
struct iwl_time_msmt_ptp_ctx {
/* Differentiate between FTM and TM specific Vendor IEs */
/* Differentiate between FTM and TM specific Vendor elements */
union {
struct {
u8 element_id;
u8 length;
__le16 reserved;
u8 data[PTP_CTX_MAX_DATA_SIZE];
} ftm; /* FTM specific vendor IE */
} ftm;
struct {
u8 element_id;
u8 length;
u8 data[PTP_CTX_MAX_DATA_SIZE];
} tm; /* TM specific vendor IE */
} tm;
};
} __packed /* PTP_CTX_VER_1 */;
@ -531,6 +536,10 @@ struct iwl_rx_baid_cfg_cmd_remove {
/**
* struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
* @action: the action, from &enum iwl_rx_baid_action
* @alloc: allocation data
* @modify: modify data
* @remove_v1: remove data (version 1)
* @remove: remove data
*/
struct iwl_rx_baid_cfg_cmd {
__le32 action;
@ -565,6 +574,7 @@ enum iwl_scd_queue_cfg_operation {
/**
* struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
* @operation: the operation, see &enum iwl_scd_queue_cfg_operation
* @u: union depending on command usage
* @u.add.sta_mask: station mask
* @u.add.tid: TID
* @u.add.reserved: reserved
@ -634,6 +644,7 @@ enum iwl_sec_key_flags {
/**
* struct iwl_sec_key_cmd - security key command
* @action: action from &enum iwl_ctxt_action
* @u: union depending on command type
* @u.add.sta_mask: station mask for the new key
* @u.add.key_id: key ID (0-7) for the new key
* @u.add.key_flags: key flags per &enum iwl_sec_key_flags

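The @u, @alloc and @u.add.sta_mask style lines added above use kernel-doc's dotted syntax for members of embedded structs and unions; each reachable member has to be listed or kernel-doc warns. A small hypothetical example of the same shape:

/**
 * struct demo_key_cmd - key command with per-action payload
 * @action: requested action
 * @u: union depending on @action
 * @u.add.key_id: key slot for the add action
 * @u.remove.key_id: key slot for the remove action
 */
struct demo_key_cmd {
	u32 action;
	union {
		struct {
			u32 key_id;
		} add;
		struct {
			u32 key_id;
		} remove;
	} u;
};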

@ -291,7 +291,7 @@ struct iwl_fw_ini_addr_val {
} __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */
/**
* struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory.
* struct iwl_fw_ini_conf_set_tlv - configuration TLV to set register/memory.
*
* @hdr: debug header
* @time_point: time point to apply config. One of &enum iwl_fw_ini_time_point


@ -1,11 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2023 Intel Corporation
* Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_debug_h__
#define __iwl_fw_api_debug_h__
#include "dbg-tlv.h"
/**
* enum iwl_debug_cmds - debug commands


@ -446,7 +446,7 @@ struct iwl_tof_responder_config_cmd {
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
/**
* struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
* struct iwl_tof_responder_dyn_config_cmd_v2 - Dynamic responder settings
* @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer
* @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer
* @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if


@ -144,7 +144,7 @@ struct iwl_missed_vap_notif {
} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
/**
* struct iwl_channel_switch_start_notif - Channel switch start notification
* struct iwl_channel_switch_start_notif_v1 - Channel switch start notification
*
* @id_and_color: ID and color of the MAC
*/


@ -120,7 +120,7 @@ struct iwl_nvm_access_cmd {
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
/**
* struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
* struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
* @offset: offset in bytes into the section
* @length: in bytes, either how much was written or read
* @type: NVM_SECTION_TYPE_*
@ -212,7 +212,7 @@ struct iwl_nvm_get_info_phy {
#define IWL_NUM_CHANNELS 110
/**
* struct iwl_nvm_get_info_regulatory - regulatory information
* struct iwl_nvm_get_info_regulatory_v1 - regulatory information
* @lar_enabled: is LAR enabled
* @channel_profile: regulatory data of this channel
* @reserved: reserved


@ -60,7 +60,7 @@ struct iwl_stored_beacon_notif_common {
} __packed;
/**
* struct iwl_stored_beacon_notif - Stored beacon notification
* struct iwl_stored_beacon_notif_v2 - Stored beacon notification
*
* @common: fields common for all versions
* @data: beacon data, length in @byte_count


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018, 2020-2023 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2020-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -113,7 +113,7 @@ struct iwl_phy_context_cmd_tail {
} __packed;
/**
* struct iwl_phy_context_cmd - config of the PHY context
* struct iwl_phy_context_cmd_v1 - config of the PHY context
* ( PHY_CONTEXT_CMD = 0x8 )
* @id_and_color: ID and color of the relevant Binding
* @action: action to perform, see &enum iwl_ctxt_action


@ -195,7 +195,7 @@ struct ct_kill_notif {
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
/**
* enum ctdp_cmd_operation - CTDP command operations
* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
* @CTDP_CMD_OPERATION_REPORT: get the average budget


@ -462,7 +462,7 @@ struct iwl_per_chain_offset {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
/**
* struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* struct iwl_geo_tx_power_profiles_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
*/
@ -472,7 +472,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */
/**
* struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* struct iwl_geo_tx_power_profiles_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@ -484,7 +484,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */
/**
* struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* struct iwl_geo_tx_power_profiles_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@ -496,7 +496,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */
/**
* struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* struct iwl_geo_tx_power_profiles_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@ -508,7 +508,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */
/**
* struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* struct iwl_geo_tx_power_profiles_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@ -9,7 +9,7 @@
#include "mac.h"
/**
* enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
* enum iwl_tlc_mng_cfg_flags - options for TLC config flags
* @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
* bandwidths <= 80MHz
* @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2023 Intel Corporation
* Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -976,7 +976,7 @@ struct iwl_ba_window_status_notif {
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
/**
* struct iwl_rfh_queue_config - RX queue configuration
* struct iwl_rfh_queue_data - RX queue configuration
* @q_num: Q num
* @enable: enable queue
* @reserved: alignment


@ -149,7 +149,7 @@ struct iwl_scan_offload_profile_cfg_data {
} __packed;
/**
* struct iwl_scan_offload_profile_cfg
* struct iwl_scan_offload_profile_cfg_v1 - scan offload profile config
* @profiles: profiles to search for match
* @data: the rest of the data for profile_cfg
*/
@ -423,7 +423,7 @@ struct iwl_lmac_scan_complete_notif {
} __packed;
/**
* struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
* struct iwl_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: &enum iwl_scan_offload_complete_status
@ -443,10 +443,10 @@ struct iwl_periodic_scan_complete {
/* UMAC Scan API */
/* The maximum of either of these cannot exceed 8, because we use an
* 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
* 8-bit mask (see enum iwl_scan_status).
*/
#define IWL_MVM_MAX_UMAC_SCANS 4
#define IWL_MVM_MAX_LMAC_SCANS 1
#define IWL_MAX_UMAC_SCANS 4
#define IWL_MAX_LMAC_SCANS 1
enum scan_config_flags {
SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
@ -789,7 +789,7 @@ struct iwl_scan_req_umac_tail_v1 {
} __packed;
/**
* struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
* struct iwl_scan_req_umac_tail_v2 - the rest of the UMAC scan request command
* parameters following channels configuration array.
* @schedule: two scheduling plans.
* @delay: delay in TUs before starting the first scan iteration
@ -1085,7 +1085,7 @@ struct iwl_scan_req_params_v12 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
* struct iwl_scan_req_params_v16
* struct iwl_scan_req_params_v17 - scan request parameters (v17)
* @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v7
* @periodic_params: &struct iwl_scan_periodic_parms_v1
@ -1111,7 +1111,7 @@ struct iwl_scan_req_umac_v12 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
* struct iwl_scan_req_umac_v16
* struct iwl_scan_req_umac_v17 - scan request command (v17)
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020, 2022-2023 Intel Corporation
* Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -340,11 +340,13 @@ struct iwl_hs20_roc_res {
* @ROC_ACTIVITY_HOTSPOT: ROC for hs20 activity
* @ROC_ACTIVITY_P2P_DISC: ROC for p2p discoverability activity
* @ROC_ACTIVITY_P2P_TXRX: ROC for p2p action frames activity
* @ROC_ACTIVITY_P2P_NEG: ROC for p2p negotiation (used also for TX)
*/
enum iwl_roc_activity {
ROC_ACTIVITY_HOTSPOT,
ROC_ACTIVITY_P2P_DISC,
ROC_ACTIVITY_P2P_TXRX,
ROC_ACTIVITY_P2P_NEG,
ROC_NUM_ACTIVITIES
}; /* ROC_ACTIVITY_API_E_VER_1 */


@ -698,6 +698,7 @@ enum iwl_mvm_ba_resp_flags {
* @query_frame_cnt: SCD query frame count
* @txed: number of frames sent in the aggregation (all-TIDs)
* @done: number of frames that were Acked by the BA (all-TIDs)
* @rts_retry_cnt: RTS retry count
* @reserved: reserved (for alignment)
* @wireless_time: Wireless-media time
* @tx_rate: the rate the aggregation was sent at
@ -718,7 +719,8 @@ struct iwl_mvm_compressed_ba_notif {
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
__le16 reserved;
u8 rts_retry_cnt;
u8 reserved;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
@ -864,7 +866,7 @@ enum iwl_dump_control {
};
/**
* struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
* struct iwl_tx_path_flush_cmd_v1 -- queue/FIFO flush command
* @queues_ctl: bitmap of queues to flush
* @flush_ctl: control flags
* @reserved: reserved

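The BA notification change above extends the firmware API without altering its wire format: the old __le16 reserved field is split into u8 rts_retry_cnt plus a u8 reserved, keeping the struct size and the offsets of every following field. The technique in miniature (hypothetical struct):

#include <linux/types.h>

struct demo_ba_notif {
	__le16 txed;
	__le16 done;
	u8 rts_retry_cnt;	/* carved out of the old __le16 reserved */
	u8 reserved;		/* remaining half keeps the layout */
	__le32 wireless_time;
} __packed;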

@ -1168,17 +1168,13 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
le32_to_cpu(reg->dev_addr.offset);
int i;
/* we shouldn't get here if the trans doesn't have read_config32 */
if (WARN_ON_ONCE(!trans->ops->read_config32))
return -EOPNOTSUPP;
range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = reg->dev_addr.size;
for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
int ret;
u32 tmp;
ret = trans->ops->read_config32(trans, addr + i, &tmp);
ret = iwl_trans_read_config32(trans, addr + i, &tmp);
if (ret < 0)
return ret;


@ -248,7 +248,7 @@ struct iwl_fw_error_dump_mem {
#define IWL_INI_DUMP_NAME_TYPE (BIT(31) | BIT(24))
/**
* struct iwl_fw_error_dump_data - data for one type
* struct iwl_fw_ini_error_dump_data - data for one type
* @type: &enum iwl_fw_ini_region_type
* @sub_type: sub type id
* @sub_type_ver: sub type version
@ -278,7 +278,7 @@ struct iwl_fw_ini_dump_entry {
} __packed;
/**
* struct iwl_fw_error_dump_file - header of dump file
* struct iwl_fw_ini_dump_file_hdr - header of dump file
* @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER
* @file_len: the length of all the file including the header
*/


@ -103,7 +103,6 @@ struct iwl_txf_iter_data {
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
* @dump: debug dump data
* @uats_enabled: VLP or AFC AP is enabled
* @uats_table: AP type table
* @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
* 0: Unlocked, 1 and 2: Locked.
@ -183,7 +182,6 @@ struct iwl_fw_runtime {
bool sgom_enabled;
struct iwl_mcc_allowed_ap_type_cmd uats_table;
u8 uefi_tables_lock_status;
bool uats_enabled;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,


@ -241,7 +241,7 @@ enum iwl_cfg_trans_ltr_delay {
};
/**
* struct iwl_cfg_trans - information needed to start the trans
* struct iwl_cfg_trans_params - information needed to start the trans
*
* These values are specific to the device ID and do not change when
* multiple configs are used for a single device ID. They values are
@ -258,6 +258,7 @@ enum iwl_cfg_trans_ltr_delay {
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
* @imr_enabled: use the IMR if supported.
*/


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2023 Intel Corporation
* Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@ -304,9 +304,7 @@
#define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28)
#define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29)
/**
* hw_rev values
*/
/* hw_rev values */
enum {
SILICON_A_STEP = 0,
SILICON_B_STEP,


@ -223,12 +223,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EINVAL;
}
if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
!trans->ops->read_config32) {
IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
return -EOPNOTSUPP;
}
if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
trans->dbg.imr_data.sram_addr =
le32_to_cpu(reg->internal_buffer.base_addr);


@ -1836,7 +1836,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
mutex_unlock(&iwlwifi_opmode_table_mtx);
#ifdef CONFIG_IWLWIFI_DEBUGFS
drv->trans->ops->debugfs_cleanup(drv->trans);
iwl_trans_debugfs_cleanup(drv->trans);
debugfs_remove_recursive(drv->dbgfs_drv);
#endif


@ -15,7 +15,7 @@
/* Flow Handler Definitions */
/****************************/
/**
/*
* This I/O area is directly read/writable by driver (e.g. Linux uses writel())
* Addresses are offsets from device's PCI hardware base address.
*/
@ -24,7 +24,7 @@
#define FH_MEM_LOWER_BOUND_GEN2 (0xa06000)
#define FH_MEM_UPPER_BOUND_GEN2 (0xa08000)
/**
/*
* Keep-Warm (KW) buffer base address.
*
* Driver must allocate a 4KByte buffer that is for keeping the
@ -44,7 +44,7 @@
#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
/**
/*
* TFD Circular Buffers Base (CBBC) addresses
*
* Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
@ -143,7 +143,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*/
#define TFH_SRV_DMA_CHNL0_BC (0x1F70)
/**
/*
* Rx SRAM Control and Status Registers (RSCSR)
*
* These registers provide handshake between driver and device for the Rx queue
@ -216,21 +216,21 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
/**
/*
* Physical base address of 8-byte Rx Status buffer.
* Bit fields:
* 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
*/
#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
/**
/*
* Physical base address of Rx Buffer Descriptor Circular Buffer.
* Bit fields:
* 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
*/
#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
/**
/*
* Rx write pointer (index, really!).
* Bit fields:
* 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
@ -242,7 +242,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c)
#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
/**
/*
* Rx Config/Status Registers (RCSR)
* Rx Config Reg for channel 0 (only channel used)
*
@ -300,7 +300,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
/**
/*
* Rx Shared Status Registers (RSSR)
*
* After stopping Rx DMA channel (writing 0 to
@ -356,7 +356,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RFH_RBDBUF_RBD0_LSB 0xA08300
#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
/**
/*
* RFH Status Register
*
* Bit fields:
@ -440,7 +440,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
/**
/*
* Transmit DMA Channel Control/Status Registers (TCSR)
*
* Device has one configuration register for each of 8 Tx DMA/FIFO channels
@ -501,7 +501,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
/**
/*
* Tx Shared Status Registers (TSSR)
*
* After stopping Tx DMA channel (writing 0 to
@ -518,7 +518,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
/**
/*
* Bit fields for TSSR(Tx Shared Status & Control) error status register:
* 31: Indicates an address error when accessed to internal memory
* uCode/driver must write "1" in order to clear this flag
@ -634,7 +634,7 @@ enum iwl_tfd_tb_hi_n_len {
};
/**
* struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
* struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
*
* This structure contains dma address and length of transmission address
*
@ -648,7 +648,7 @@ struct iwl_tfd_tb {
} __packed;
/**
* struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor
* struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
*
* This structure contains dma address and length of transmission address
*
@ -717,7 +717,7 @@ struct iwl_tfh_tfd {
/* Fixed (non-configurable) rx data from phy */
/**
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* struct iwlagn_scd_bc_tbl - scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
* @tfd_offset:
@ -734,7 +734,7 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
* struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3
* struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2003-2014, 2018-2022 Intel Corporation
* Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
#include <linux/delay.h>
@ -460,7 +460,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
*/
if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
} else {


@ -370,7 +370,9 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
flags |= IEEE80211_CHAN_IR_CONCURRENT;
/* Set the AP type for the UHB case. */
if (!(nvm_flags & NVM_CHANNEL_VLP))
if (nvm_flags & NVM_CHANNEL_VLP)
flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
else
flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
if (!(nvm_flags & NVM_CHANNEL_AFC))
flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
@ -1608,8 +1610,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
const struct iwl_cfg *cfg,
bool uats_enabled)
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@ -1660,13 +1661,13 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
}
/* Set the AP type for the UHB case. */
if (uats_enabled) {
if (!(nvm_flags & NVM_CHANNEL_VLP))
flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
if (nvm_flags & NVM_CHANNEL_VLP)
flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP;
else
flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
if (!(nvm_flags & NVM_CHANNEL_AFC))
flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
}
if (!(nvm_flags & NVM_CHANNEL_AFC))
flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
/*
* reg_capa is per regulatory domain so apply it for every channel
@ -1722,7 +1723,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled)
u16 geo_info, u32 cap, u8 resp_ver)
{
int ch_idx;
u16 ch_flags;
@ -1789,7 +1790,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
ch_flags, reg_capa,
cfg, uats_enabled);
cfg);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||

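The iwl-nvm-parse hunks above belong to the 6 GHz regulatory flexibility work from the highlights: the uats_enabled gating is removed, and each channel's NVM bits now map unconditionally to either an allow-VLP-AP or a no-VLP-client regulatory flag. A condensed sketch of the mapping, assuming the NVM_CHANNEL_* bits defined in this file and the nl80211 flag names visible in the diff:

static u32 demo_6ghz_reg_flags(u16 nvm_flags)
{
	u32 flags = 0;

	if (nvm_flags & NVM_CHANNEL_VLP)
		flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP;
	else
		flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;

	if (!(nvm_flags & NVM_CHANNEL_AFC))
		flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;

	return flags;
}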

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2015, 2018-2023 Intel Corporation
* Copyright (C) 2005-2015, 2018-2024 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
@ -38,7 +38,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u8 tx_chains, u8 rx_chains);
/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
* iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW
*
* This function parses the regulatory channel data received as a
* MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain,
@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled);
u16 geo_info, u32 cap, u8 resp_ver);
/**
* struct iwl_nvm_section - describes an NVM section in memory.


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@ -185,7 +185,8 @@ static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
might_sleep();
op_mode->ops->nic_config(op_mode);
if (op_mode->ops->nic_config)
op_mode->ops->nic_config(op_mode);
}
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)


@ -96,7 +96,7 @@
#define DTSC_PTAT_AVG (0x00a10650)
/**
/*
* Tx Scheduler
*
* The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
@ -169,7 +169,7 @@
*/
#define SCD_MEM_LOWER_BOUND (0x0000)
/**
/*
* Max Tx window size is the max number of contiguous TFDs that the scheduler
* can keep track of at one time when creating block-ack chains of frames.
* Note that "64" matches the number of ack bits in a block-ack packet.


@ -2,7 +2,7 @@
/*
* Copyright (C) 2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
* Copyright (C) 2019-2021, 2023 Intel Corporation
* Copyright (C) 2019-2021, 2023-2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
@ -11,13 +11,13 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
#include "queue/tx.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/internal.h"
#include "iwl-context-info-gen3.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
@ -37,22 +37,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
#endif
trans->dev = dev;
trans->ops = ops;
trans->num_rx_queues = 1;
WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
if (trans->trans_cfg->gen2) {
trans->txqs.tfd.addr_size = 64;
trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
} else {
trans->txqs.tfd.addr_size = 36;
trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
trans->txqs.tfd.size = sizeof(struct iwl_tfd);
}
trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
return trans;
}
@ -78,31 +64,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
trans->txqs.bc_tbl_size =
sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans->txqs.bc_tbl_size =
sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
else
trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
/*
* For gen2 devices, we use a single allocation for each byte-count
* table, but they're pretty small (1k) so use a DMA pool that we
* allocate here.
*/
if (trans->trans_cfg->gen2) {
trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
trans->txqs.bc_tbl_size,
256, 0);
if (!trans->txqs.bc_pool)
return -ENOMEM;
}
/* Some things must not change even if the config does */
WARN_ON(trans->txqs.tfd.addr_size !=
(trans->trans_cfg->gen2 ? 64 : 36));
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
@ -112,12 +73,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (!trans->dev_cmd_pool)
return -ENOMEM;
trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans->txqs.tso_hdr_page) {
kmem_cache_destroy(trans->dev_cmd_pool);
return -ENOMEM;
}
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans->wait_command_queue);
@ -126,20 +81,6 @@ int iwl_trans_init(struct iwl_trans *trans)
void iwl_trans_free(struct iwl_trans *trans)
{
int i;
if (trans->txqs.tso_hdr_page) {
for_each_possible_cpu(i) {
struct iwl_tso_hdr_page *p =
per_cpu_ptr(trans->txqs.tso_hdr_page, i);
if (p && p->page)
__free_page(p->page);
}
free_percpu(trans->txqs.tso_hdr_page);
}
kmem_cache_destroy(trans->dev_cmd_pool);
}
@ -167,10 +108,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return -EIO;
}
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
@ -180,7 +120,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
cmd->id = DEF_ID(cmd->id);
}
ret = iwl_trans_txq_send_hcmd(trans, cmd);
ret = iwl_trans_pcie_send_hcmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
lock_map_release(&trans->sync_cmd_lockdep_map);
@ -247,3 +187,379 @@ int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
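/*
 * The wrappers that follow replace the trans->ops indirection: with
 * PCIe the only remaining transport, each exported iwl_trans_* helper
 * calls its iwl_trans_pcie_* counterpart directly (the Makefile hunk
 * dropping queue/tx.o earlier is part of the same consolidation).
 * Hypothetical sketch of the pattern:
 *
 *	u32 demo_trans_read32(struct demo_trans *trans, u32 ofs)
 *	{
 *		// was: return trans->ops->read32(trans, ofs);
 *		return demo_trans_pcie_read32(trans, ofs);
 *	}
 */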
void iwl_trans_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
trans->op_mode = trans_cfg->op_mode;
iwl_trans_pcie_configure(trans, trans_cfg);
WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
IWL_EXPORT_SYMBOL(iwl_trans_configure);
int iwl_trans_start_hw(struct iwl_trans *trans)
{
might_sleep();
return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);
void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
might_sleep();
iwl_trans_pcie_op_mode_leave(trans);
trans->op_mode = NULL;
trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);
void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
iwl_trans_pcie_write8(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write8);
void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
iwl_trans_pcie_write32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write32);
u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
return iwl_trans_pcie_read32(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read32);
u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
return iwl_trans_pcie_read_prph(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_prph);
void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
return iwl_trans_pcie_write_prph(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_prph);
int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
{
return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_mem);
int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords)
{
return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_mem);
void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
if (state)
set_bit(STATUS_TPOWER_PMI, &trans->status);
else
clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);
int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
return iwl_trans_pcie_sw_reset(trans, retake_ownership);
}
IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);
struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx)
{
return iwl_trans_pcie_dump_data(trans, dump_mask,
sanitize_ops, sanitize_ctx);
}
IWL_EXPORT_SYMBOL(iwl_trans_dump_data);
int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
might_sleep();
return iwl_trans_pcie_d3_suspend(trans, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
bool test, bool reset)
{
might_sleep();
return iwl_trans_pcie_d3_resume(trans, status, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
iwl_trans_pci_interrupts(trans, enable);
}
IWL_EXPORT_SYMBOL(iwl_trans_interrupts);
void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
iwl_trans_pcie_sync_nmi(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);
int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
u64 src_addr, u32 byte_cnt)
{
return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);
void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
u32 mask, u32 value)
{
iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);
int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
u32 *val)
{
return iwl_trans_pcie_read_config32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_config32);
bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
{
return iwl_trans_pcie_grab_nic_access(trans);
}
IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);
void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
iwl_trans_pcie_release_nic_access(trans);
__release(nic_access);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
might_sleep();
trans->state = IWL_TRANS_FW_ALIVE;
if (trans->trans_cfg->gen2)
iwl_trans_pcie_gen2_fw_alive(trans);
else
iwl_trans_pcie_fw_alive(trans, scd_addr);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);
int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill)
{
int ret;
might_sleep();
WARN_ON_ONCE(!trans->rx_mpdu_cmd);
clear_bit(STATUS_FW_ERROR, &trans->status);
if (trans->trans_cfg->gen2)
ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
else
ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);
if (ret == 0)
trans->state = IWL_TRANS_FW_STARTED;
return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_start_fw);
void iwl_trans_stop_device(struct iwl_trans *trans)
{
might_sleep();
if (trans->trans_cfg->gen2)
iwl_trans_pcie_gen2_stop_device(trans);
else
iwl_trans_pcie_stop_device(trans);
trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_stop_device);
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int queue)
{
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return -EIO;
if (trans->trans_cfg->gen2)
return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);
return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_tx);
void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs, bool is_flush)
{
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return;
iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
}
IWL_EXPORT_SYMBOL(iwl_trans_reclaim);
void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd)
{
iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);
bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout)
{
might_sleep();
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return false;
return iwl_trans_pcie_txq_enable(trans, queue, ssn,
cfg, queue_wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return -EIO;
return iwl_trans_pcie_wait_txq_empty(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);
int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
{
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return -EIO;
return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);
void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs, bool freeze)
{
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return;
iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
}
IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);
void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
int txq_id, bool shared_mode)
{
iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
iwl_trans_pcie_debugfs_cleanup(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
#endif
void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
{
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return;
iwl_pcie_set_q_ptrs(trans, queue, ptr);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);
int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
u8 tid, int size, unsigned int wdg_timeout)
{
might_sleep();
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return -EIO;
return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
size, wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);
void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
iwl_txq_dyn_free(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_free);
int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data)
{
return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}
IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);
int iwl_trans_load_pnvm(struct iwl_trans *trans,
const struct iwl_pnvm_image *pnvm_data,
const struct iwl_ucode_capabilities *capa)
{
return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
void iwl_trans_set_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
int iwl_trans_load_reduce_power(struct iwl_trans *trans,
const struct iwl_pnvm_image *payloads,
const struct iwl_ucode_capabilities *capa)
{
return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
void iwl_trans_set_reduce_power(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);


@ -26,11 +26,9 @@
* DOC: Transport layer - what is it ?
*
* The transport layer is the layer that deals with the HW directly. It provides
* an abstraction of the underlying HW to the upper layer. The transport layer
* doesn't provide any policy, algorithm or anything of this kind, but only
* mechanisms to make the HW do something. It is not completely stateless but
* close to it.
* We will have an implementation for each different supported bus.
* the PCIe access to the underlying hardware. The transport layer doesn't
* provide any policy, algorithm or anything of this kind, but only mechanisms
* to make the HW do something. It is not completely stateless but close to it.
*/
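To make the mechanism-vs-policy split concrete, here is a minimal bring-up sketch against the direct-call API this series exports (an illustrative sketch only: error handling is simplified, the op_mode wiring is omitted, example_bring_up() is a hypothetical name, and 0 is passed as the SCD address as the fw_alive documentation allows):

static int example_bring_up(struct iwl_trans *trans,
			    const struct iwl_trans_config *cfg,
			    const struct fw_img *fw)
{
	int ret;

	/* bind the op_mode parameters to the transport */
	iwl_trans_configure(trans, cfg);

	/* from this point on the HW can raise interrupts */
	ret = iwl_trans_start_hw(trans);
	if (ret)
		return ret;

	/* allocate transport resources and kick the fw image */
	ret = iwl_trans_start_fw(trans, fw, false);
	if (ret) {
		iwl_trans_stop_device(trans);
		return ret;
	}

	/* called once the fw has sent its alive notification */
	iwl_trans_fw_alive(trans, 0);
	return 0;
}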
/**
@ -131,6 +129,11 @@ enum CMD_MODE {
* For allocation of the command and tx queues, this establishes the overall
* size of the largest command we send to uCode, except for commands that
* aren't fully copied and use other TFD space.
*
* @hdr: command header
* @payload: payload for the command
* @hdr_wide: wide command header
* @payload_wide: payload for the wide command
*/
struct iwl_device_cmd {
union {
@ -167,12 +170,6 @@ struct iwl_device_tx_cmd {
*/
#define IWL_MAX_CMD_TBS_PER_TFD 2
/* We need 2 entries for the TX command and header, and another one might
* be needed for potential data in the SKB's head. The remaining ones can
* be used for frags.
*/
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
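As a worked example of the arithmetic above (assuming the usual TB counts behind the max_tbs assignments in iwl_trans_alloc(), believed to be IWL_TFH_NUM_TBS == 25 and IWL_NUM_OF_TBS == 20):

/* gen2: 25 TBs per TFD -> 25 - 3 = 22 TBs available for frags
 * gen1: 20 TBs per TFD -> 20 - 3 = 17 TBs available for frags
 * (3 reserved: 2 for the TX command and header, 1 for data that
 *  may live in the skb's head)
 */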
/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
@ -281,7 +278,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_9000_MAX_RX_HW_QUEUES 1
/**
* enum iwl_wowlan_status - WoWLAN image/device status
* enum iwl_d3_status - WoWLAN image/device status
* @IWL_D3_STATUS_ALIVE: firmware is still running after resume
* @IWL_D3_STATUS_RESET: device was reset while suspended
*/
@ -299,9 +296,6 @@ enum iwl_d3_status {
* @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
* @STATUS_FW_ERROR: the fw is in error state
* @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
* are sent
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
* @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
* e.g. for testing
@ -314,8 +308,6 @@ enum iwl_trans_status {
STATUS_RFKILL_HW,
STATUS_RFKILL_OPMODE,
STATUS_FW_ERROR,
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
STATUS_SUPPRESS_CMD_ERROR_ONCE,
};
@ -481,183 +473,6 @@ struct iwl_pnvm_image {
u32 version;
};
/**
* struct iwl_trans_ops - transport specific operations
*
* All the handlers MUST be implemented
*
* @start_hw: starts the HW. From that point on, the HW can send interrupts.
* May sleep.
* @op_mode_leave: Turn off the HW RF kill indication if on
* May sleep
* @start_fw: allocates and inits all the resources for the transport
* layer. Also kick a fw image.
* May sleep
* @fw_alive: called when the fw sends alive notification. If the fw provides
* the SCD base address in SRAM, then provide it here, or 0 otherwise.
* May sleep
* @stop_device: stops the whole device (embedded CPU put to reset) and stops
* the HW. From that point on, the HW will be stopped but will still issue
* an interrupt if the HW RF kill switch is triggered.
* This callback must do the right thing and not crash even if %start_hw()
* was called but not &start_fw(). May sleep.
* @d3_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
* @d3_resume: resume the device after WoWLAN, enabling the opmode to
* talk to the WoWLAN image to get its status. This is optional, if not
* implemented WoWLAN will not be supported. This callback may sleep.
* @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
* If RFkill is asserted in the middle of a SYNC host command, it must
* return -ERFKILL straight away.
* May sleep only if CMD_ASYNC is not set
* @tx: send an skb. The transport relies on the op_mode to zero the
* the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
* the CSUM will be taken care of (TCP CSUM and IP header in case of
* IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
* header if it is IPv4.
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
* @set_q_ptrs: set queue pointers internally, after D3 when HW state changed
* @txq_enable: setup a queue. To setup an AC queue, use the
* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. The scheduler
* configuration may be %NULL, in which case the hardware will not be
* configured. If true is returned, the operation mode needs to increment
* the sequence number of the packets routed to this queue because of a
* hardware scheduler bug. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @txq_alloc: Allocate a new TX queue, may sleep.
* @txq_free: Free a previously allocated TX queue.
* @txq_set_shared_mode: change Tx queue shared/unshared marking
* @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
* @wait_txq_empty: wait until specific tx queue is empty. May sleep.
* @freeze_txq_timer: prevents the timer of the queue from firing until the
* queue is set to awake. Must be atomic.
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
* @read_prph: read a DWORD from a periphery register
* @write_prph: write a DWORD to a periphery register
* @read_mem: read device's SRAM in DWORD
* @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
* will be zeroed.
* @read_config32: read a u32 value from the device's config space at
* the given offset.
* @configure: configure parameters required by the transport layer from
* the op_mode. May be called several times before start_fw, can't be
* called after that.
* @set_pmi: set the power pmi state
* @sw_reset: trigger software reset of the NIC
* @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
* Sleeping is not allowed between grab_nic_access and
* release_nic_access.
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
* @set_bits_mask: set SRAM register according to value and mask.
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
* @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
* of the trans debugfs
* @sync_nmi: trigger a firmware NMI and wait for it to complete
* @load_pnvm: save the pnvm data in DRAM
* @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
* context info.
* @load_reduce_power: copy reduce power table to the corresponding DRAM memory
* @set_reduce_power: set reduce power table addresses in the sratch buffer
* @interrupts: disable/enable interrupts to transport
* @imr_dma_data: set up IMR DMA
* @rxq_dma_data: retrieve RX queue DMA data, see @struct iwl_trans_rxq_dma_data
*/
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
bool test, bool reset);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int queue);
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs, bool is_flush);
void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
/* 22000 functions */
int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
u32 sta_mask, u8 tid,
int size, unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
int (*read_mem)(struct iwl_trans *trans, u32 addr,
void *buf, int dwords);
int (*write_mem)(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords);
int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
bool (*grab_nic_access)(struct iwl_trans *trans);
void (*release_nic_access)(struct iwl_trans *trans);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
u32 value);
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx);
void (*debugfs_cleanup)(struct iwl_trans *trans);
void (*sync_nmi)(struct iwl_trans *trans);
int (*load_pnvm)(struct iwl_trans *trans,
const struct iwl_pnvm_image *pnvm_payloads,
const struct iwl_ucode_capabilities *capa);
void (*set_pnvm)(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
int (*load_reduce_power)(struct iwl_trans *trans,
const struct iwl_pnvm_image *payloads,
const struct iwl_ucode_capabilities *capa);
void (*set_reduce_power)(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
void (*interrupts)(struct iwl_trans *trans, bool enable);
int (*imr_dma_data)(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr,
u32 byte_cnt);
};
/**
* enum iwl_trans_state - state of the transport layer
*
@ -998,59 +813,10 @@ struct iwl_txq {
bool overflow_tx;
};
/**
* struct iwl_trans_txqs - transport tx queues data
*
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @page_offs: offset from skb->cb to mac header page pointer
* @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
* @queue_used: bit mask of used queues
* @queue_stopped: bit mask of stopped queues
* @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
* @queue_alloc_cmd_ver: queue allocation command version
* @bc_pool: bytecount DMA allocations pool
* @bc_tbl_size: bytecount table size
* @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
* (and similar usage)
* @tfd: TFD data
* @tfd.max_tbs: max number of buffers per TFD
* @tfd.size: TFD size
* @tfd.addr_size: TFD/TB address size
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
struct dma_pool *bc_pool;
size_t bc_tbl_size;
bool bc_table_dword;
u8 page_offs;
u8 dev_cmd_offs;
struct iwl_tso_hdr_page __percpu *tso_hdr_page;
struct {
u8 fifo;
u8 q_id;
unsigned int wdg_timeout;
} cmd;
struct {
u8 max_tbs;
u16 size;
u8 addr_size;
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
u8 queue_alloc_cmd_ver;
};
/**
* struct iwl_trans - transport common data
*
* @csme_own: true if we couldn't get ownership on the device
* @ops: pointer to iwl_trans_ops
* @op_mode: pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
* @cfg: pointer to the configuration
@ -1099,7 +865,6 @@ struct iwl_trans_txqs {
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
* @name: the device name
* @txqs: transport tx queues data.
* @mbx_addr_0_step: step address data 0
* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
@ -1112,7 +877,6 @@ struct iwl_trans_txqs {
*/
struct iwl_trans {
bool csme_own;
const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg_trans_params *trans_cfg;
const struct iwl_cfg *cfg;
@ -1169,7 +933,6 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
const char *name;
struct iwl_trans_txqs txqs;
u32 mbx_addr_0_step;
u32 mbx_addr_1_step;
@ -1185,101 +948,29 @@ struct iwl_trans {
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
static inline void iwl_trans_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
trans->op_mode = trans_cfg->op_mode;
void iwl_trans_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
trans->ops->configure(trans, trans_cfg);
WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
int iwl_trans_start_hw(struct iwl_trans *trans);
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
might_sleep();
void iwl_trans_op_mode_leave(struct iwl_trans *trans);
return trans->ops->start_hw(trans);
}
void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
might_sleep();
int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
if (trans->ops->op_mode_leave)
trans->ops->op_mode_leave(trans);
void iwl_trans_stop_device(struct iwl_trans *trans);
trans->op_mode = NULL;
int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
trans->state = IWL_TRANS_NO_FW;
}
int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
bool test, bool reset);
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
might_sleep();
trans->state = IWL_TRANS_FW_ALIVE;
trans->ops->fw_alive(trans, scd_addr);
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
const struct fw_img *fw,
bool run_in_rfkill)
{
int ret;
might_sleep();
WARN_ON_ONCE(!trans->rx_mpdu_cmd);
clear_bit(STATUS_FW_ERROR, &trans->status);
ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
if (ret == 0)
trans->state = IWL_TRANS_FW_STARTED;
return ret;
}
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
might_sleep();
trans->ops->stop_device(trans);
trans->state = IWL_TRANS_NO_FW;
}
static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
bool reset)
{
might_sleep();
if (!trans->ops->d3_suspend)
return -EOPNOTSUPP;
return trans->ops->d3_suspend(trans, test, reset);
}
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
bool test, bool reset)
{
might_sleep();
if (!trans->ops->d3_resume)
return -EOPNOTSUPP;
return trans->ops->d3_resume(trans, status, test, reset);
}
static inline struct iwl_trans_dump_data *
struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx)
{
if (!trans->ops->dump_data)
return NULL;
return trans->ops->dump_data(trans, dump_mask,
sanitize_ops, sanitize_ctx);
}
void *sanitize_ctx);
static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
@ -1295,109 +986,31 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int queue)
{
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int queue);
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs, bool is_flush);
return trans->ops->tx(trans, skb, dev_cmd, queue);
}
void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
int ssn, struct sk_buff_head *skbs,
bool is_flush)
{
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return;
}
void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
}
bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout);
static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
int ptr)
{
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return;
}
int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
trans->ops->set_q_ptrs(trans, queue, ptr);
}
void iwl_trans_txq_free(struct iwl_trans *trans, int queue);
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd)
{
trans->ops->txq_disable(trans, queue, configure_scd);
}
int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
u8 tid, int size, unsigned int wdg_timeout);
static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int queue_wdg_timeout)
{
might_sleep();
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return false;
}
return trans->ops->txq_enable(trans, queue, ssn,
cfg, queue_wdg_timeout);
}
static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data)
{
if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
return -EOPNOTSUPP;
return trans->ops->rxq_dma_data(trans, queue, data);
}
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
if (WARN_ON_ONCE(!trans->ops->txq_free))
return;
trans->ops->txq_free(trans, queue);
}
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
u32 flags, u32 sta_mask, u8 tid,
int size, unsigned int wdg_timeout)
{
might_sleep();
if (WARN_ON_ONCE(!trans->ops->txq_alloc))
return -EOPNOTSUPP;
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
size, wdg_timeout);
}
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
int queue, bool shared_mode)
{
if (trans->ops->txq_set_shared_mode)
trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
int txq_id, bool shared_mode);
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
@ -1430,78 +1043,32 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs,
bool freeze)
{
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return;
}
void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs, bool freeze);
if (trans->ops->freeze_txq_timer)
trans->ops->freeze_txq_timer(trans, txqs, freeze);
}
int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);
static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
u32 txqs)
{
if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
return -EOPNOTSUPP;
int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);
/* No need to wait if the firmware is not alive */
if (trans->state != IWL_TRANS_FW_ALIVE) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);
return trans->ops->wait_tx_queues_empty(trans, txqs);
}
void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);
static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
return -EOPNOTSUPP;
u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);
return trans->ops->wait_txq_empty(trans, queue);
}
void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trans->ops->write8(trans, ofs, val);
}
int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords);
static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
trans->ops->write32(trans, ofs, val);
}
int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
u32 *val);
static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
return trans->ops->read32(trans, ofs);
}
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
return trans->ops->read_prph(trans, ofs);
}
static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
u32 val)
{
return trans->ops->write_prph(trans, ofs, val);
}
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
{
return trans->ops->read_mem(trans, addr, buf, dwords);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
#endif
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
do { \
@ -1510,14 +1077,8 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
} while (0)
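A hypothetical call site for the wrapper above; note that bufsize must be a whole number of dwords, since the macro divides by sizeof(u32):

u32 table[4];

/* reads sizeof(table) bytes, i.e. 4 dwords, out of device SRAM */
iwl_trans_read_mem_bytes(trans, addr, table, sizeof(table));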
static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr,
u32 byte_cnt)
{
if (trans->ops->imr_dma_data)
return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
return 0;
}
int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
u64 src_addr, u32 byte_cnt);
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
@ -1529,11 +1090,8 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
return value;
}
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords)
{
return trans->ops->write_mem(trans, addr, buf, dwords);
}
int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords);
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
u32 val)
@ -1541,36 +1099,21 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
return iwl_trans_write_mem(trans, addr, &val, 1);
}
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
if (trans->ops->set_pmi)
trans->ops->set_pmi(trans, state);
}
void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);
static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
bool retake_ownership)
{
if (trans->ops->sw_reset)
return trans->ops->sw_reset(trans, retake_ownership);
return 0;
}
int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership);
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
trans->ops->set_bits_mask(trans, reg, mask, value);
}
void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
u32 mask, u32 value);
bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);
#define iwl_trans_grab_nic_access(trans) \
__cond_lock(nic_access, \
likely((trans)->ops->grab_nic_access(trans)))
likely(_iwl_trans_grab_nic_access(trans)))
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
trans->ops->release_nic_access(trans);
__release(nic_access);
}
void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans);
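For reference, a minimal sketch of the intended pairing (illustrative; as the removed kernel-doc notes, sleeping is not allowed between grab and release):

u32 val = 0;

if (iwl_trans_grab_nic_access(trans)) {
	/* NIC is awake, non-HBUS registers can be accessed */
	val = iwl_trans_read_prph(trans, ofs);
	iwl_trans_release_nic_access(trans);
}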
static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
@ -1589,44 +1132,24 @@ static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
return trans->state == IWL_TRANS_FW_ALIVE;
}
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
if (trans->ops->sync_nmi)
trans->ops->sync_nmi(trans);
}
void iwl_trans_sync_nmi(struct iwl_trans *trans);
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit);
static inline int iwl_trans_load_pnvm(struct iwl_trans *trans,
const struct iwl_pnvm_image *pnvm_data,
const struct iwl_ucode_capabilities *capa)
{
return trans->ops->load_pnvm(trans, pnvm_data, capa);
}
int iwl_trans_load_pnvm(struct iwl_trans *trans,
const struct iwl_pnvm_image *pnvm_data,
const struct iwl_ucode_capabilities *capa);
static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
if (trans->ops->set_pnvm)
trans->ops->set_pnvm(trans, capa);
}
void iwl_trans_set_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
static inline int iwl_trans_load_reduce_power
(struct iwl_trans *trans,
const struct iwl_pnvm_image *payloads,
const struct iwl_ucode_capabilities *capa)
{
return trans->ops->load_reduce_power(trans, payloads, capa);
}
int iwl_trans_load_reduce_power(struct iwl_trans *trans,
const struct iwl_pnvm_image *payloads,
const struct iwl_ucode_capabilities *capa);
static inline void
iwl_trans_set_reduce_power(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
if (trans->ops->set_reduce_power)
trans->ops->set_reduce_power(trans, capa);
}
void iwl_trans_set_reduce_power(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
@ -1634,18 +1157,13 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}
static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
if (trans->ops->interrupts)
trans->ops->interrupts(trans, enable);
}
void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);
/*****************************************************
* transport helper functions
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);
@ -1656,10 +1174,13 @@ static inline bool iwl_trans_is_hw_error_value(u32 val)
}
/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
* PCIe handling
*****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_h__ */


@ -456,8 +456,11 @@ void iwl_mei_device_state(bool up);
/**
* iwl_mei_pldr_req() - must be called before loading the fw
*
* Return: 0 if the PLDR flow was successful and the fw can be loaded, negative
* value otherwise.
* Asks the ME to release its potential bus access to
* the WiFi NIC so that the device can safely undergo product reset.
*
* Return: 0 if the request was successful and the device can be
* reset, a negative error value otherwise
*/
int iwl_mei_pldr_req(void);
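A sketch of the expected call pattern, mirroring the mvm usage visible later in this diff:

/* ask CSME to release the bus before a product reset; bail out if it won't */
if (mvm->fw_product_reset && iwl_mei_pldr_req())
	return -EBUSY;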


@ -56,7 +56,6 @@
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_HW_CSUM_DISABLE 0
#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_ADWELL_ENABLE 1
#define IWL_MVM_ADWELL_MAX_BUDGET 0
#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */


@ -2493,6 +2493,9 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
return;
}
if (mvm->fast_resume)
return;
iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
iwl_mvm_convert_gtk_v3(status, data->gtk);
iwl_mvm_convert_igtk(status, &data->igtk[0]);
@ -3049,7 +3052,7 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
if (iwl_mvm_rt_status(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[0],
&err_id)) {
if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
@ -3366,7 +3369,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
return ret;
}
#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5)
#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 3)
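/* For scale: HZ/5 is 200 ms and HZ/3 is ~333 ms regardless of CONFIG_HZ;
 * the longer wait presumably gives the firmware more headroom to deliver
 * the D3 notifications (rationale assumed, not stated in this hunk).
 */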
static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
struct iwl_d3_data *d3_data)
@ -3377,12 +3380,22 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
};
static const u16 d3_fast_resume_notif[] = {
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
};
struct iwl_notification_wait wait_d3_notif;
int ret;
iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
d3_resume_notif, ARRAY_SIZE(d3_resume_notif),
iwl_mvm_wait_d3_notif, d3_data);
if (mvm->fast_resume)
iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
d3_fast_resume_notif,
ARRAY_SIZE(d3_fast_resume_notif),
iwl_mvm_wait_d3_notif, d3_data);
else
iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
d3_resume_notif,
ARRAY_SIZE(d3_resume_notif),
iwl_mvm_wait_d3_notif, d3_data);
ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
if (ret) {
@ -3567,6 +3580,68 @@ void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
device_set_wakeup_enable(mvm->trans->dev, enabled);
}
void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
{
struct iwl_d3_manager_config d3_cfg_cmd_data = {};
int ret;
lockdep_assert_held(&mvm->mutex);
IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n");
mvm->fast_resume = true;
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
WARN_ON(iwl_mvm_power_update_device(mvm));
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3,
sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data);
if (ret)
IWL_ERR(mvm,
"fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
WARN_ON(iwl_mvm_power_update_mac(mvm));
ret = iwl_trans_d3_suspend(mvm->trans, false, false);
if (ret)
IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
}
int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
{
struct iwl_d3_data d3_data = {
.notif_expected =
IWL_D3_NOTIF_D3_END_NOTIF,
};
int ret;
lockdep_assert_held(&mvm->mutex);
IWL_DEBUG_WOWLAN(mvm, "Starting the fast resume flow\n");
mvm->last_reset_or_resume_time_jiffies = jiffies;
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
if (iwl_mvm_check_rt_status(mvm, NULL)) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
return -ENODEV;
}
ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
mvm->fast_resume = false;
if (ret)
IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
return ret;
}
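For orientation, a rough sketch of how the two entry points pair up across a suspend/resume cycle (hypothetical call sites; the resume half mirrors __iwl_mvm_mac_start() further below, the suspend half is assumed):

/* suspend path (assumed): power down while keeping the fw configured */
mutex_lock(&mvm->mutex);
iwl_mvm_fast_suspend(mvm);
mutex_unlock(&mvm->mutex);

/* ... system sleeps and wakes ... */

/* resume path, as in __iwl_mvm_mac_start() */
mutex_lock(&mvm->mutex);
if (iwl_mvm_fast_resume(mvm))
	iwl_mvm_stop_device(mvm);	/* fall back to a full restart */
mutex_unlock(&mvm->mutex);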
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{


@ -151,37 +151,6 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
return ret;
}
static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm_sta *mvmsta;
int sta_id, drain, ret;
if (!iwl_mvm_firmware_running(mvm) ||
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
return -EIO;
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
return -EINVAL;
if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations)
return -EINVAL;
if (drain < 0 || drain > 1)
return -EINVAL;
mutex_lock(&mvm->mutex);
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
ret = -ENOENT;
else
ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
mutex_unlock(&mvm->mutex);
return ret;
}
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@ -568,186 +537,6 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
static
int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
int pos, int bufsz)
{
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
BT_MBOX_PRINT(0, LE_PROF1, false);
BT_MBOX_PRINT(0, LE_PROF2, false);
BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
BT_MBOX_PRINT(0, CHL_SEQ_N, false);
BT_MBOX_PRINT(0, INBAND_S, false);
BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
BT_MBOX_PRINT(0, LE_SCAN, false);
BT_MBOX_PRINT(0, LE_ADV, false);
BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
BT_MBOX_PRINT(0, OPEN_CON_1, true);
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
BT_MBOX_PRINT(1, IP_SR, false);
BT_MBOX_PRINT(1, LE_MSTR, false);
BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
BT_MBOX_PRINT(1, MSG_TYPE, false);
BT_MBOX_PRINT(1, SSN, true);
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
BT_MBOX_PRINT(2, SNIFF_ACT, false);
BT_MBOX_PRINT(2, PAG, false);
BT_MBOX_PRINT(2, INQUIRY, false);
BT_MBOX_PRINT(2, CONN, false);
BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
BT_MBOX_PRINT(2, DISC, false);
BT_MBOX_PRINT(2, SCO_TX_ACT, false);
BT_MBOX_PRINT(2, SCO_RX_ACT, false);
BT_MBOX_PRINT(2, ESCO_RE_TX, false);
BT_MBOX_PRINT(2, SCO_DURATION, true);
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
BT_MBOX_PRINT(3, SCO_STATE, false);
BT_MBOX_PRINT(3, SNIFF_STATE, false);
BT_MBOX_PRINT(3, A2DP_STATE, false);
BT_MBOX_PRINT(3, A2DP_SRC, false);
BT_MBOX_PRINT(3, ACL_STATE, false);
BT_MBOX_PRINT(3, MSTR_STATE, false);
BT_MBOX_PRINT(3, OBX_STATE, false);
BT_MBOX_PRINT(3, OPEN_CON_2, false);
BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
BT_MBOX_PRINT(3, INBAND_P, false);
BT_MBOX_PRINT(3, MSG_TYPE_2, false);
BT_MBOX_PRINT(3, SSN_2, false);
BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
return pos;
}
static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
char *buf;
int ret, pos = 0, bufsz = sizeof(char) * 1024;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&mvm->mutex);
pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
notif->bt_ci_compliance);
pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
le32_to_cpu(notif->primary_ch_lut));
pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
le32_to_cpu(notif->secondary_ch_lut));
pos += scnprintf(buf + pos,
bufsz - pos, "bt_activity_grading = %d\n",
le32_to_cpu(notif->bt_activity_grading));
pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
notif->rrc_status & 0xF);
pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
notif->ttc_status & 0xF);
pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
IWL_MVM_BT_COEX_SYNC2SCO);
pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n",
IWL_MVM_BT_COEX_MPLUT);
mutex_unlock(&mvm->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
}
#undef BT_MBOX_PRINT
static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
char buf[256];
int bufsz = sizeof(buf);
int pos = 0;
mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
pos += scnprintf(buf + pos, bufsz - pos,
"\tPrimary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_primary_ci));
pos += scnprintf(buf + pos, bufsz - pos,
"\tSecondary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_secondary_ci));
mutex_unlock(&mvm->mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t
iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
u32 bt_tx_prio;
if (sscanf(buf, "%u", &bt_tx_prio) != 1)
return -EINVAL;
if (bt_tx_prio > 4)
return -EINVAL;
mvm->bt_tx_prio = bt_tx_prio;
return count;
}
static ssize_t
iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
static const char * const modes_str[BT_FORCE_ANT_MAX] = {
[BT_FORCE_ANT_DIS] = "dis",
[BT_FORCE_ANT_AUTO] = "auto",
[BT_FORCE_ANT_BT] = "bt",
[BT_FORCE_ANT_WIFI] = "wifi",
};
int ret, bt_force_ant_mode;
ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
if (ret < 0)
return ret;
bt_force_ant_mode = ret;
ret = 0;
mutex_lock(&mvm->mutex);
if (mvm->bt_force_ant_mode == bt_force_ant_mode)
goto out;
mvm->bt_force_ant_mode = bt_force_ant_mode;
IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
modes_str[mvm->bt_force_ant_mode]);
if (iwl_mvm_firmware_running(mvm))
ret = iwl_mvm_send_bt_init_conf(mvm);
else
ret = 0;
out:
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@ -2165,15 +1954,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
@ -2183,8 +1969,6 @@ MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
MVM_DEBUGFS_READ_FILE_OPS(tas_get_status);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
@ -2371,7 +2155,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
spin_lock_init(&mvm->drv_stats_lock);
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400);
@ -2380,8 +2163,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
@ -2389,8 +2170,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);


@ -732,10 +732,7 @@ static void iter(struct ieee80211_hw *hw,
WARN_ON(!sta->mfp);
if (WARN_ON(key->keylen > sizeof(target->tk)))
return;
memcpy(target->tk, key->key, key->keylen);
target->tk = key->key;
*target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
}
@ -774,9 +771,7 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
!memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
struct iwl_mvm_ftm_iter_data target;
target.cipher = cipher;
target.bssid = bssid;
target.tk = tk;
ieee80211_iter_keys(mvm->hw, vif, iter, &target);
} else {
memcpy(tk, entry->tk, sizeof(entry->tk));


@ -28,9 +28,6 @@
#define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
#define IWL_UATS_VLP_AP_SUPPORTED BIT(29)
#define IWL_UATS_AFC_AP_SUPPORTED BIT(30)
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@ -408,7 +405,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
UREG_LMAC2_CURRENT_PC));
}
if (ret == -ETIMEDOUT && !mvm->pldr_sync)
if (ret == -ETIMEDOUT && !mvm->fw_product_reset)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
@ -460,12 +457,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
#endif
/*
* For pre-MLD API (MLD API doesn't use the timestamps):
* All the BSSes in the BSS table include the GP2 in the system
* at the beacon Rx time; this is of course no longer relevant
* since we are resetting the firmware.
* Purge the whole BSS table.
*/
cfg80211_bss_flush(mvm->hw->wiphy);
if (!mvm->mld_api_is_used)
cfg80211_bss_flush(mvm->hw->wiphy);
return 0;
}
@ -491,17 +490,11 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
if (!(mvm->trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210)) {
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
return;
}
if (!mvm->fwrt.uats_enabled) {
IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n");
return;
}
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 1) {
@ -513,7 +506,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
if (ret < 0) {
IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret);
IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret);
return;
}
@ -627,8 +620,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
/* if needed, we'll reset this on our way out later */
mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM;
if (mvm->pldr_sync && iwl_mei_pldr_req())
mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
if (mvm->fw_product_reset && iwl_mei_pldr_req())
return -EBUSY;
}
@ -647,7 +640,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
/* if we needed reset then fail here, but notify and remove */
if (mvm->pldr_sync) {
if (mvm->fw_product_reset) {
iwl_mei_alive_notif(false);
iwl_trans_pcie_remove(mvm->trans, true);
}
@ -686,14 +679,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
goto error;
}
if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
ret = iwl_nvm_init(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
goto error;
}
}
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
NVM_ACCESS_COMPLETE),
CMD_SEND_IN_RFKILL,
@ -718,7 +703,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
return ret;
/* Read the NVM only at driver load time, no need to do this twice */
if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
if (!mvm->nvm_data) {
mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw,
mvm->set_tx_ant, mvm->set_rx_ant);
if (IS_ERR(mvm->nvm_data)) {
@ -843,7 +828,7 @@ remove_notif:
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
mvm->rfkill_safe_init_done = false;
if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
if (!mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
sizeof(struct ieee80211_channel) +
@ -1231,10 +1216,6 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
ret);
}
if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED ||
le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED)
mvm->fwrt.uats_enabled = true;
}
void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
@ -1376,9 +1357,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
if (iwlmvm_mod_params.init_dbg)
return 0;
return ret;
}
@ -1415,14 +1393,14 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
if (ret != -ERFKILL && !mvm->pldr_sync)
if (ret != -ERFKILL && !mvm->fw_product_reset)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_DRIVER);
goto error;
}
/* FW loaded successfully */
mvm->pldr_sync = false;
mvm->fw_product_reset = false;
iwl_fw_disable_dbg_asserts(&mvm->fwrt);
iwl_get_shared_mem_conf(&mvm->fwrt);
@ -1617,8 +1595,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
if (!iwlmvm_mod_params.init_dbg || !ret)
iwl_mvm_stop_device(mvm);
iwl_mvm_stop_device(mvm);
return ret;
}


@ -1086,8 +1086,10 @@ static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
/* We exited due to an EXIT reason, so MLO scan was scheduled already */
if (mvmvif->last_esr_exit.reason &&
/* If we exited due to an EXIT reason, and the exit was less than
* 30 seconds ago, then an MLO scan was scheduled already.
*/
if (!need_new_sel &&
!(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) {
IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n");
return;


@ -296,6 +296,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvmvif->time_event_data.list);
mvmvif->time_event_data.id = TE_MAX;
mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;


@ -30,21 +30,28 @@
#include "iwl-nvm-parse.h"
#include "time-sync.h"
#define IWL_MVM_LIMITS(ap) \
{ \
.max = 1, \
.types = BIT(NL80211_IFTYPE_STATION), \
}, \
{ \
.max = 1, \
.types = ap | \
BIT(NL80211_IFTYPE_P2P_CLIENT) | \
BIT(NL80211_IFTYPE_P2P_GO), \
}, \
{ \
.max = 1, \
.types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
}
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
{
.max = 1,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
},
IWL_MVM_LIMITS(0)
};
static const struct ieee80211_iface_limit iwl_mvm_limits_ap[] = {
IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP))
};
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
@ -52,7 +59,13 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
.num_different_channels = 2,
.max_interfaces = 3,
.limits = iwl_mvm_limits,
.n_limits = ARRAY_SIZE(iwl_mvm_limits),
.n_limits = ARRAY_SIZE(iwl_mvm_limits) - 1,
},
{
.num_different_channels = 1,
.max_interfaces = 3,
.limits = iwl_mvm_limits_ap,
.n_limits = ARRAY_SIZE(iwl_mvm_limits_ap) - 1,
},
};
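
To see how the two limit arrays above stay in sync, it helps to expand IWL_MVM_LIMITS() by hand: each expansion yields three ieee80211_iface_limit entries (station, the AP/P2P group, P2P Device), and both combinations advertise ARRAY_SIZE - 1 of them, leaving the trailing P2P Device limit uncounted. A standalone sketch of the expansion; the NL80211_* numbers are stand-ins, not the real nl80211 values:

#include <stdio.h>

#define BIT(n) (1U << (n))
/* Stand-in interface type bits, NOT the real nl80211 values */
#define NL80211_IFTYPE_STATION    2
#define NL80211_IFTYPE_AP         3
#define NL80211_IFTYPE_P2P_CLIENT 8
#define NL80211_IFTYPE_P2P_GO     9
#define NL80211_IFTYPE_P2P_DEVICE 10

struct ieee80211_iface_limit {
	unsigned short max;
	unsigned int types;
};

#define IWL_MVM_LIMITS(ap)					\
	{							\
		.max = 1,					\
		.types = BIT(NL80211_IFTYPE_STATION),		\
	},							\
	{							\
		.max = 1,					\
		.types = ap |					\
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |	\
			 BIT(NL80211_IFTYPE_P2P_GO),		\
	},							\
	{							\
		.max = 1,					\
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),	\
	}

static const struct ieee80211_iface_limit limits_ap[] = {
	IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP))
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* expands to three entries; only the first two are advertised */
	printf("entries: %zu, advertised: %zu\n",
	       ARRAY_SIZE(limits_ap), ARRAY_SIZE(limits_ap) - 1);
	return 0;
}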
@ -138,8 +151,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
le32_to_cpu(resp->cap), resp_ver,
mvm->fwrt.uats_enabled);
le32_to_cpu(resp->cap), resp_ver);
/* Store the return source id */
src_id = resp->source_id;
if (IS_ERR_OR_NULL(regd)) {
@ -579,13 +591,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
BUILD_BUG_ON(IWL_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
IWL_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
mvm->max_scans = IWL_MAX_UMAC_SCANS;
else
mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
mvm->max_scans = IWL_MAX_LMAC_SCANS;
if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
hw->wiphy->bands[NL80211_BAND_2GHZ] =
@ -727,8 +739,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
#ifdef CONFIG_PM_SLEEP
if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
mvm->trans->ops->d3_suspend &&
mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@ -823,7 +833,7 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
}
if (offchannel &&
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
goto drop;
@ -1104,6 +1114,8 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
spin_unlock_bh(&mvm->time_event_lock);
mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
mvmvif->bf_enabled = false;
mvmvif->ba_enabled = false;
mvmvif->ap_sta = NULL;
@ -1209,6 +1221,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
{
bool fast_resume = false;
int ret;
lockdep_assert_held(&mvm->mutex);
@ -1234,6 +1247,30 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
mvm->nvm_data = NULL;
}
#ifdef CONFIG_PM
/* fast_resume will be cleared by iwl_mvm_fast_resume */
fast_resume = mvm->fast_resume;
if (fast_resume) {
ret = iwl_mvm_fast_resume(mvm);
if (ret) {
iwl_mvm_stop_device(mvm);
/* iwl_mvm_up() will be called further down */
} else {
/*
* We clear IWL_MVM_STATUS_FIRMWARE_RUNNING upon
* mac_down() so that debugfs will stop honoring
* requests after we flush all the workers.
* Set the IWL_MVM_STATUS_FIRMWARE_RUNNING bit again
* now that we are back. This is a bit abusing the
* flag since the firmware wasn't really ever stopped,
* but this still serves the purpose.
*/
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
}
}
#endif /* CONFIG_PM */
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
* Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
@ -1244,7 +1281,10 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
/* Clean up some internal and mac80211 state on restart */
iwl_mvm_restart_cleanup(mvm);
}
ret = iwl_mvm_up(mvm);
/* we also want to load the firmware if fast_resume failed */
if (!fast_resume || ret)
ret = iwl_mvm_up(mvm);
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
NULL);
@ -1327,7 +1367,7 @@ void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
}
}
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
{
lockdep_assert_held(&mvm->mutex);
@ -1343,7 +1383,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
if (!iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
iwl_mvm_stop_device(mvm);
if (suspend &&
mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_mvm_fast_suspend(mvm);
else
iwl_mvm_stop_device(mvm);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@ -1376,7 +1420,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
}
}
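
Paired with the __iwl_mvm_mac_start() hunk above, this gives the fast-resume round trip: a suspend-initiated stop calls iwl_mvm_fast_suspend() and leaves the firmware configured, and the next start tries iwl_mvm_fast_resume() before falling back to a full iwl_mvm_up(). A compilable sketch of that control flow; the stub bodies are illustrative, not the driver's code:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the driver functions referenced in the diff */
static void iwl_mvm_fast_suspend(void) { puts("fast suspend: fw stays up"); }
static int  iwl_mvm_fast_resume(void)  { return 0; /* 0 = resumed */ }
static void iwl_mvm_stop_device(void)  { puts("full device stop"); }
static int  iwl_mvm_up(void)           { puts("full fw load"); return 0; }

static bool fast_resume_pending;

static void mac_stop(bool suspend, bool new_enough_hw)
{
	if (suspend && new_enough_hw) {
		iwl_mvm_fast_suspend();	/* device keeps its config */
		fast_resume_pending = true;
	} else {
		iwl_mvm_stop_device();
	}
}

static int mac_start(void)
{
	int ret = -1;

	if (fast_resume_pending) {
		ret = iwl_mvm_fast_resume();
		if (ret)
			iwl_mvm_stop_device(); /* then reload below */
		fast_resume_pending = false;
	}
	/* load firmware if there was no fast resume, or it failed */
	if (ret)
		ret = iwl_mvm_up();
	return ret;
}

int main(void)
{
	mac_stop(true, true);
	return mac_start();
}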
void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@ -1412,7 +1456,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
iwl_mvm_mei_set_sw_rfkill_state(mvm);
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
__iwl_mvm_mac_stop(mvm, suspend);
mutex_unlock(&mvm->mutex);
/*
@ -1854,12 +1898,8 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
cancel_delayed_work_sync(&mvmvif->csa_work);
}
/* This function is doing the common part of removing the interface for
* both - MLD and non-MLD modes. Returns true if removing the interface
* is done
*/
static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@ -1907,21 +1947,10 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
mvm->noa_duration = 0;
}
#endif
return true;
goto out;
}
iwl_mvm_power_update_mac(mvm);
return false;
}
static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (iwl_mvm_mac_remove_interface_common(hw, vif))
goto out;
/* Before the interface removal, mac80211 would cancel the ROC, and the
* ROC worker would be scheduled if needed. The worker would be flushed
@ -4773,6 +4802,37 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
return ret;
}
static int iwl_mvm_roc_p2p(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
int duration,
enum ieee80211_roc_type type)
{
enum iwl_roc_activity activity;
int ret;
lockdep_assert_held(&mvm->mutex);
switch (type) {
case IEEE80211_ROC_TYPE_NORMAL:
activity = ROC_ACTIVITY_P2P_DISC;
break;
case IEEE80211_ROC_TYPE_MGMT_TX:
activity = ROC_ACTIVITY_P2P_NEG;
break;
default:
WARN_ONCE(1, "Got an invalid P2P ROC type\n");
return -EINVAL;
}
ret = iwl_mvm_mld_add_aux_sta(mvm,
iwl_mvm_get_lmac_id(mvm, channel->band));
if (ret)
return ret;
return iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, activity);
}
static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel *channel)
@ -4826,6 +4886,7 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct iwl_mvm_roc_ops *ops)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
u32 lmac_id;
int ret;
@ -4838,9 +4899,12 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
ret = iwl_mvm_esr_non_bss_link(mvm, vif, 0, true);
if (ret)
return ret;
if (!IS_ERR_OR_NULL(bss_vif)) {
ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
IWL_MVM_ESR_BLOCKED_ROC);
if (ret)
return ret;
}
guard(mvm)(mvm);
@ -4861,6 +4925,10 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -EINVAL;
}
if (iwl_mvm_has_p2p_over_aux(mvm)) {
ret = iwl_mvm_roc_p2p(mvm, channel, vif, duration, type);
return ret;
}
ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel);
if (ret)


@ -83,14 +83,9 @@ extern const struct ieee80211_ops iwl_mvm_mld_hw_ops;
/**
* struct iwl_mvm_mod_params - module parameters for iwlmvm
* @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
* We will register to mac80211 to have testmode working. The NIC must not
* be up'ed after the INIT fw asserted. This is useful to be able to use
* proprietary tools over testmode to debug the INIT fw.
* @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
bool init_dbg;
int power_scheme;
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
@ -361,7 +356,9 @@ struct iwl_mvm_vif_link_info {
* @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
* @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
* @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommended/forced exit from EMLSR
* @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-bssid link's preventing EMLSR
* @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is
* preventing EMLSR
* @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
* @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
* @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
* due to low RSSI.
@ -378,6 +375,7 @@ enum iwl_mvm_esr_state {
IWL_MVM_ESR_BLOCKED_TPT = 0x4,
IWL_MVM_ESR_BLOCKED_FW = 0x8,
IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
IWL_MVM_ESR_BLOCKED_ROC = 0x20,
IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
IWL_MVM_ESR_EXIT_COEX = 0x40000,
@ -450,6 +448,8 @@ struct iwl_mvm_esr_exit {
* @prevent_esr_done_wk: work that should be done when esr prevention ends.
* @mlo_int_scan_wk: work for the internal MLO scan.
* @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
* @roc_activity: currently running ROC activity for this vif (or
* ROC_NUM_ACTIVITIES if no activity is running).
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
@ -527,6 +527,7 @@ struct iwl_mvm_vif {
struct iwl_mvm_time_event_data time_event_data;
struct iwl_mvm_time_event_data hs_time_event_data;
enum iwl_roc_activity roc_activity;
/* TCP Checksum Offload */
netdev_features_t features;
@ -611,7 +612,7 @@ enum iwl_mvm_sched_scan_pass_all_states {
};
/**
* struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
* struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: Is thermal throttling enabled dynamic_smps?
* @tx_backoff: The current thremal throttling tx backoff in uSec.
@ -1065,7 +1066,7 @@ struct iwl_mvm {
unsigned int max_scans;
/* UMAC scan tracking */
u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
u32 scan_uid_status[IWL_MAX_UMAC_SCANS];
/* start time of last scan in TSF of the mac that requested the scan */
u64 scan_start;
@ -1155,6 +1156,7 @@ struct iwl_mvm {
struct ieee80211_channel **nd_channels;
int n_nd_channels;
bool net_detect;
bool fast_resume;
u8 offload_tid;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool d3_wake_sysassert;
@ -1318,7 +1320,12 @@ struct iwl_mvm {
bool sta_remove_requires_queue_remove;
bool mld_api_is_used;
bool pldr_sync;
/*
* Indicates that firmware will do a product reset (and then
* therefore fail to load) when we start it (due to OTP burn),
* if so don't dump errors etc. since this is expected.
*/
bool fw_product_reset;
struct iwl_time_sync_data time_sync;
@ -1342,7 +1349,8 @@ DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mu
* enum iwl_mvm_status - MVM status bits
* @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
* @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
* @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
* @IWL_MVM_STATUS_ROC_P2P_RUNNING: remain-on-channel on P2P is running (when
* P2P is not over AUX)
* @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
@ -1356,7 +1364,7 @@ DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mu
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
IWL_MVM_STATUS_HW_CTKILL,
IWL_MVM_STATUS_ROC_RUNNING,
IWL_MVM_STATUS_ROC_P2P_RUNNING,
IWL_MVM_STATUS_HW_RESTART_REQUESTED,
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
@ -1447,7 +1455,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
static inline struct ieee80211_bss_conf *
iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu)
{
if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf),
"erroneous FW link ID: %d\n", link_id))
return NULL;
if (rcu)
@ -1732,7 +1741,7 @@ struct iwl_rate_info {
u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
};
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend);
int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
/******************
@ -1868,10 +1877,10 @@ static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
{
u8 rx_ant = mvm->fw->valid_tx_ant;
u8 rx_ant = mvm->fw->valid_rx_ant;
if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant)
rx_ant &= mvm->nvm_data->valid_tx_ant;
rx_ant &= mvm->nvm_data->valid_rx_ant;
if (mvm->set_rx_ant)
rx_ant &= mvm->set_rx_ant;
@ -2254,11 +2263,22 @@ extern const struct file_operations iwl_dbgfs_d3_test_ops;
#ifdef CONFIG_PM
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
int iwl_mvm_fast_resume(struct iwl_mvm *mvm);
#else
static inline void
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
static inline void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
{
}
static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
{
return 0;
}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd *cmd);
@ -2770,6 +2790,13 @@ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
sw_rfkill);
}
static inline bool iwl_mvm_has_p2p_over_aux(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
return iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) >= 4;
}
static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
struct sk_buff *skb)
{
@ -2804,7 +2831,7 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int iwl_mvm_mac_start(struct ieee80211_hw *hw);
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
void iwl_mvm_mac_stop(struct ieee80211_hw *hw);
void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend);
static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
return 0;
@ -2935,7 +2962,7 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
int duration, u32 activity);
int duration, enum iwl_roc_activity activity);
/* EMLSR */
bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);


@ -41,12 +41,8 @@ static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
struct iwl_mvm_mod_params iwlmvm_mod_params = {
.power_scheme = IWL_POWER_SCHEME_BPS,
/* rest of fields are 0 by default */
};
module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
MODULE_PARM_DESC(init_dbg,
"set to true to debug an ASSERT in INIT fw (default: false");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
"power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
@ -471,7 +467,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC,
struct iwl_time_msmt_cfm_notify),
RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF,
iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC,
iwl_mvm_rx_roc_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_roc_notif),
RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
@ -864,8 +860,7 @@ get_nvm_from_fw:
ret = iwl_mvm_init_mcc(mvm);
}
if (!iwlmvm_mod_params.init_dbg || !ret)
iwl_mvm_stop_device(mvm);
iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
wiphy_unlock(mvm->hw->wiphy);
@ -875,7 +870,7 @@ get_nvm_from_fw:
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
/* no longer need this regardless of failure or not */
mvm->pldr_sync = false;
mvm->fw_product_reset = false;
return ret;
}
@ -1500,8 +1495,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
return op_mode;
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
iwl_trans_op_mode_leave(trans);


@ -282,62 +282,64 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
return data.allow_uapsd;
}
static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
static bool iwl_mvm_power_is_radar(struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_bss_conf *link_conf;
bool radar_detect = false;
unsigned int link_id;
rcu_read_lock();
for_each_vif_active_link(vif, link_conf, link_id) {
chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
/* this happens on link switching, just ignore inactive ones */
if (!chanctx_conf)
continue;
chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
radar_detect = !!(chanctx_conf->def.chan->flags &
IEEE80211_CHAN_RADAR);
if (radar_detect)
goto out;
}
/* this happens on link switching, just ignore inactive ones */
if (!chanctx_conf)
return false;
out:
rcu_read_unlock();
return radar_detect;
return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR;
}
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
{
int dtimper = vif->bss_conf.dtim_period ?: 1;
int skip;
struct ieee80211_bss_conf *link_conf;
unsigned int min_link_skip = ~0;
unsigned int link_id;
/* disable, in case we're supposed to override */
cmd->skip_dtim_periods = 0;
cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
if (iwl_mvm_power_is_radar(vif))
return;
if (dtimper >= 10)
return;
if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
return;
skip = 2;
} else {
int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
if (WARN_ON(!dtimper_tu))
return;
/* configure skip over dtim up to 900 TU DTIM interval */
skip = max_t(u8, 1, 900 / dtimper_tu);
cmd->skip_dtim_periods = 2;
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
return;
}
cmd->skip_dtim_periods = skip;
rcu_read_lock();
for_each_vif_active_link(vif, link_conf, link_id) {
unsigned int dtimper = link_conf->dtim_period ?: 1;
unsigned int dtimper_tu = dtimper * link_conf->beacon_int;
unsigned int skip;
if (dtimper >= 10 || iwl_mvm_power_is_radar(link_conf)) {
rcu_read_unlock();
return;
}
if (WARN_ON(!dtimper_tu))
continue;
/* configure skip over dtim up to 900 TU DTIM interval */
skip = max_t(int, 1, 900 / dtimper_tu);
min_link_skip = min(min_link_skip, skip);
}
rcu_read_unlock();
/* no WARN_ON, can only happen with WARN_ON above */
if (min_link_skip == ~0)
return;
cmd->skip_dtim_periods = min_link_skip;
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
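
The per-link arithmetic above is easy to check by hand: with a DTIM period of 2 and a 100 TU beacon interval, dtimper_tu = 200 and skip = max(1, 900 / 200) = 4 DTIM periods, and the command then carries the minimum across all active links. A standalone check:

#include <stdio.h>

static unsigned int link_skip(unsigned int dtim_period,
			      unsigned int beacon_int)
{
	unsigned int dtimper_tu = dtim_period * beacon_int;

	/* skip over DTIM up to a 900 TU interval, at least one period */
	unsigned int skip = 900 / dtimper_tu;
	return skip ? skip : 1;
}

int main(void)
{
	unsigned int a = link_skip(2, 100);	/* 900/200 -> 4 */
	unsigned int b = link_skip(3, 100);	/* 900/300 -> 3 */
	unsigned int min = a < b ? a : b;

	printf("link skips: %u, %u -> cmd uses %u\n", a, b, min);
	return 0;
}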


@ -3,7 +3,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright (C) 2003 - 2014, 2018 - 2023 Intel Corporation
* Copyright (C) 2003 - 2014, 2018 - 2024 Intel Corporation
*****************************************************************************/
#ifndef __rs_h__
@ -198,11 +198,12 @@ struct rs_rate {
/**
* struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
* @last_rate_n_flags: last rate reported by FW
* @pers: persistent fields
* @pers.sta_id: the id of the station
* @chains: bitmask of chains reported in %chain_signal
* @chain_signal: per chain signal strength
* @last_rssi: last rssi reported
* @drv: pointer back to the driver data
* @pers.chains: bitmask of chains reported in %chain_signal
* @pers.chain_signal: per chain signal strength
* @pers.last_rssi: last rssi reported
* @pers.drv: pointer back to the driver data
*/
struct iwl_lq_sta_rs_fw {
/* last tx rate_n_flags */
@ -213,11 +214,11 @@ struct iwl_lq_sta_rs_fw {
u32 sta_id;
#ifdef CONFIG_MAC80211_DEBUGFS
/**
* @dbg_fixed_rate: for debug, use fixed rate if not 0
* @pers.dbg_fixed_rate: for debug, use fixed rate if not 0
*/
u32 dbg_fixed_rate;
/**
* @dbg_agg_frame_count_lim: for debug, max number of
* @pers.dbg_agg_frame_count_lim: for debug, max number of
* frames in A-MPDU
*/
u16 dbg_agg_frame_count_lim;
@ -402,7 +403,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, struct ieee80211_tx_info *info, bool ndp);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
* iwl_mvm_rate_control_register - Register the rate control algorithm callbacks
*
* Since the rate control algorithm is hardware specific, there is no need
* or reason to place it as a stand alone module. The driver can call
@ -414,7 +415,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int iwl_mvm_rate_control_register(void);
/**
* iwl_rate_control_unregister - Unregister the rate control callbacks
* iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
*
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
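
Several hunks in this file are pure kernel-doc repairs: a member of a named sub-struct is documented as @outer.member, and a member declared under #ifdef can carry an inline /** @name: ... */ comment, as the pers struct above does. A minimal example of both forms; the struct and field names are made up for illustration:

typedef unsigned int u32;	/* stand-in for the kernel type */

/**
 * struct foo_stats - example for kernel-doc member syntax
 * @last_rate: last rate reported
 * @pers: persistent fields
 * @pers.sta_id: documented with the sub-struct prefix
 */
struct foo_stats {
	u32 last_rate;
	struct {
		u32 sta_id;
#ifdef CONFIG_MAC80211_DEBUGFS
		/**
		 * @pers.dbg_fixed_rate: inline doc for an #ifdef'd member
		 */
		u32 dbg_fixed_rate;
#endif
	} pers;
};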


@ -857,12 +857,6 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
size = iwl_mvm_get_queue_size(sta);
}
/* take the min with bc tbl entries allowed */
size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
/* size needs to be power of 2 values for calculating read/write pointers */
size = rounddown_pow_of_two(size);
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_link_sta *link_sta;
@ -887,22 +881,13 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
if (!sta_mask)
return -EINVAL;
do {
queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
tid, size, timeout);
queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
tid, size, timeout);
if (queue < 0)
IWL_DEBUG_TX_QUEUES(mvm,
"Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n",
size, sta_mask, tid, queue);
size /= 2;
} while (queue < 0 && size >= 16);
if (queue < 0)
return queue;
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
queue, sta_mask, tid);
if (queue >= 0)
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d for sta mask 0x%x tid %d\n",
queue, sta_mask, tid);
return queue;
}
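
The deleted comment still names a real constraint, it has just moved into the transport layer: ring indices are wrapped by masking with size - 1 (see iwl_txq_get_cmd_index() later in this merge), which only matches modulo arithmetic when the size is a power of two. A quick standalone demonstration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int pow2 = 256, idx = 255;

	/* mask-wrapping equals modulo only when the size is a power of 2 */
	assert(((idx + 1) & (pow2 - 1)) == (idx + 1) % pow2);

	unsigned int not_pow2 = 300, idx2 = 299;

	printf("mask: %u vs modulo: %u\n",
	       (idx2 + 1) & (not_pow2 - 1), (idx2 + 1) % not_pow2);
	return 0;
}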


@ -478,7 +478,7 @@ struct iwl_mvm_int_sta {
};
/**
* Send the STA info to the FW.
* iwl_mvm_sta_send_to_fw - Send the STA info to the FW.
*
* @mvm: the iwl_mvm* to use
* @sta: the STA


@ -47,12 +47,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
struct ieee80211_vif *vif = mvm->p2p_device_vif;
lockdep_assert_held(&mvm->mutex);
/*
* Clear the ROC_RUNNING status bit.
* Clear the ROC_P2P_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware.
@ -62,7 +63,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
* won't get stuck on the queue and be transmitted in the next
* time event.
*/
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
synchronize_net();
@ -99,7 +100,14 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
}
}
/* Do the same for AUX ROC */
/*
* P2P AUX ROC and HS2.0 ROC do not run simultaneously.
* Clear the ROC_AUX_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
synchronize_net();
@ -119,9 +127,9 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
iwl_mvm_rm_aux_sta(mvm);
}
if (!IS_ERR_OR_NULL(bss_vif))
iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC);
mutex_unlock(&mvm->mutex);
if (vif)
iwl_mvm_esr_non_bss_link(mvm, vif, 0, false);
}
void iwl_mvm_roc_done_wk(struct work_struct *wk)
@ -378,7 +386,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw);
} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
@ -388,14 +396,51 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
}
}
struct iwl_mvm_rx_roc_iterator_data {
u32 activity;
bool end_activity;
bool found;
};
static void iwl_mvm_rx_roc_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_rx_roc_iterator_data *data = _data;
if (mvmvif->roc_activity == data->activity) {
data->found = true;
if (data->end_activity)
mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
}
}
void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_roc_notif *notif = (void *)pkt->data;
u32 activity = le32_to_cpu(notif->activity);
bool started = le32_to_cpu(notif->success) &&
le32_to_cpu(notif->started);
struct iwl_mvm_rx_roc_iterator_data data = {
.activity = activity,
.end_activity = !started,
};
if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) &&
le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) {
/* Clear vif roc_activity if done (set to ROC_NUM_ACTIVITIES) */
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_rx_roc_iterator,
&data);
/*
* It is possible that the ROC was canceled
* but the notification was already fired.
*/
if (!data.found)
return;
if (started) {
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw);
} else {
@ -724,6 +769,21 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
}
static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
{
struct iwl_roc_req roc_cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.activity = cpu_to_le32(activity),
};
int ret;
lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0,
sizeof(roc_cmd), &roc_cmd);
if (ret)
IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret);
}
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data,
u32 *uid)
@ -733,6 +793,9 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
s8 link_id;
bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
if (!vif)
return false;
@ -757,14 +820,22 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
/* When session protection is used, the te_data->id field
* is reused to save session protection's configuration.
* For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
* to HOT_SPOT_CMD.
*/
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
id != HOT_SPOT_CMD) {
if ((p2p_aux && iftype == NL80211_IFTYPE_P2P_DEVICE) ||
(roc_ver >= 3 && mvmvif->roc_activity == ROC_ACTIVITY_HOTSPOT)) {
if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
iwl_mvm_roc_finished(mvm);
}
return false;
} else if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
id != HOT_SPOT_CMD) {
/* When session protection is used, the te_data->id field
* is reused to save session protection's configuration.
* For AUX ROC, HOT_SPOT_CMD is used and the te_data->id
* field is set to HOT_SPOT_CMD.
*/
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
iwl_mvm_cancel_session_protection(mvm, vif, id,
@ -965,7 +1036,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (WARN_ON(mvmvif->time_event_data.id !=
le32_to_cpu(notif->conf_id)))
goto out_unlock;
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
}
@ -1014,7 +1085,7 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
int duration, u32 activity)
int duration, enum iwl_roc_activity activity)
{
int res;
u32 duration_tu, delay;
@ -1023,9 +1094,13 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
.activity = cpu_to_le32(activity),
.sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
if (WARN_ON(mvmvif->roc_activity != ROC_NUM_ACTIVITIES))
return -EBUSY;
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
channel->hw_value,
@ -1041,14 +1116,16 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
"\t(requested = %ums, max_delay = %ums)\n",
duration, delay);
IWL_DEBUG_TE(mvm,
"Requesting to remain on channel %u for %utu\n",
channel->hw_value, duration_tu);
"Requesting to remain on channel %u for %utu. activity %u\n",
channel->hw_value, duration_tu, activity);
/* Set the node address */
memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
0, sizeof(roc_req), &roc_req);
if (!res)
mvmvif->roc_activity = activity;
return res;
}
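
iwl_mvm_roc_add_cmd() now records the running activity on the vif and rejects a second request with -EBUSY; iwl_mvm_roc_rm_cmd() is its counterpart, and the callers above reset roc_activity to ROC_NUM_ACTIVITIES once the activity is torn down. A compact sketch of that lifecycle with stand-in types:

#include <errno.h>
#include <stdio.h>

enum { ROC_ACTIVITY_HOTSPOT, ROC_ACTIVITY_P2P_DISC, ROC_NUM_ACTIVITIES };

struct vif { int roc_activity; };

static int roc_add(struct vif *v, int activity)
{
	if (v->roc_activity != ROC_NUM_ACTIVITIES)
		return -EBUSY;	/* one ROC activity per vif */
	v->roc_activity = activity;
	return 0;
}

static void roc_remove(struct vif *v)
{
	if (v->roc_activity < ROC_NUM_ACTIVITIES)
		v->roc_activity = ROC_NUM_ACTIVITIES;
}

int main(void)
{
	struct vif v = { .roc_activity = ROC_NUM_ACTIVITIES };

	printf("add: %d\n", roc_add(&v, ROC_ACTIVITY_P2P_DISC)); /* 0 */
	printf("add again: %d\n", roc_add(&v, ROC_ACTIVITY_HOTSPOT)); /* -16 */
	roc_remove(&v);
	printf("add after remove: %d\n", roc_add(&v, ROC_ACTIVITY_HOTSPOT));
	return 0;
}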
@ -1191,62 +1268,40 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
{
int ret;
struct iwl_roc_req roc_cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.activity = cpu_to_le32(activity),
};
lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
0, sizeof(roc_cmd), &roc_cmd);
WARN_ON(ret);
}
static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif)
{
u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (fw_ver == IWL_FW_CMD_VER_UNKNOWN)
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
&mvmvif->hs_time_event_data);
else if (fw_ver == 3)
iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT);
else
IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
}
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data;
bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
int iftype = vif->type;
mutex_lock(&mvm->mutex);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (p2p_aux || (roc_ver >= 3 && iftype != NL80211_IFTYPE_P2P_DEVICE)) {
if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
}
goto cleanup_roc;
} else if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
te_data = &mvmvif->time_event_data;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
IWL_DEBUG_TE(mvm,
"No remain on channel event\n");
mutex_unlock(&mvm->mutex);
return;
}
iwl_mvm_cancel_session_protection(mvm, vif,
te_data->id,
te_data->link_id);
} else {
iwl_mvm_roc_station_remove(mvm, mvmvif);
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
&mvmvif->hs_time_event_data);
}
goto cleanup_roc;
}
@ -1259,8 +1314,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
iftype = te_data->vif->type;
if (iftype == NL80211_IFTYPE_P2P_DEVICE)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
@ -1271,9 +1326,10 @@ cleanup_roc:
* (so the status bit isn't set) set it here so iwl_mvm_cleanup_roc will
* cleanup things properly
*/
set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
&mvm->status);
if (p2p_aux || iftype != NL80211_IFTYPE_P2P_DEVICE)
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
else
set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
/* Mutex is released inside this function */
iwl_mvm_cleanup_roc(mvm);


@ -299,7 +299,7 @@ static void check_exit_ctkill(struct work_struct *work)
ret = iwl_mvm_get_temp(mvm, &temp);
__iwl_mvm_mac_stop(mvm);
__iwl_mvm_mac_stop(mvm, false);
if (ret)
goto reschedule;


@ -802,10 +802,30 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (info.control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info.control.vif);
bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
info.control.vif->type == NL80211_IFTYPE_AP ||
info.control.vif->type == NL80211_IFTYPE_ADHOC) {
if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE &&
p2p_aux) ||
(info.control.vif->type == NL80211_IFTYPE_STATION &&
offchannel)) {
/*
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
* that can be used in 2 different types of vifs, P2P
* Device and STATION.
* P2P Device uses the offchannel queue.
* STATION (HS2.0) uses the auxiliary context of the FW,
* and hence needs to be sent on the aux queue.
* If P2P_DEV_OVER_AUX is supported (p2p_aux = true)
* also P2P Device uses the aux queue.
*/
sta_id = mvm->aux_sta.sta_id;
queue = mvm->aux_queue;
if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE))
return -1;
} else if (info.control.vif->type ==
NL80211_IFTYPE_P2P_DEVICE ||
info.control.vif->type == NL80211_IFTYPE_AP ||
info.control.vif->type == NL80211_IFTYPE_ADHOC) {
u32 link_id = u32_get_bits(info.control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
struct iwl_mvm_vif_link_info *link;
@ -831,18 +851,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
offchannel) {
/*
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
* that can be used in 2 different types of vifs, P2P &
* STATION.
* P2P uses the offchannel queue.
* STATION (HS2.0) uses the auxiliary context of the FW,
* and hence needs to be sent on the aux queue.
*/
sta_id = mvm->aux_sta.sta_id;
queue = mvm->aux_queue;
}
}
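
The reshuffled branch above amounts to a routing rule for non-station TX: P2P Device frames use the AUX queue when the firmware supports P2P-over-AUX, offchannel station (HS2.0) frames always do, AP/ADHOC/P2P frames otherwise use their link's mcast queue, and monitor uses the sniffer queue. A condensed decision sketch; types and names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum iftype { IF_STATION, IF_AP, IF_ADHOC, IF_P2P_DEVICE, IF_MONITOR };

static const char *pick_queue(enum iftype type, bool offchannel,
			      bool p2p_over_aux)
{
	if ((type == IF_P2P_DEVICE && p2p_over_aux) ||
	    (type == IF_STATION && offchannel))
		return "aux queue / aux sta";
	if (type == IF_P2P_DEVICE || type == IF_AP || type == IF_ADHOC)
		return "link mcast queue";
	if (type == IF_MONITOR)
		return "sniffer queue";
	return "drop";
}

int main(void)
{
	printf("%s\n", pick_queue(IF_P2P_DEVICE, true, true));
	printf("%s\n", pick_queue(IF_STATION, true, false));
	printf("%s\n", pick_queue(IF_P2P_DEVICE, true, false));
	return 0;
}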


@ -216,7 +216,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
ctxt_info_gen3->mtr_base_addr =
cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2023 Intel Corporation
* Copyright (C) 2018-2024 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@ -218,7 +218,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);


@ -503,7 +503,37 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)},


@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2003-2015, 2018-2023 Intel Corporation
* Copyright (C) 2003-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -22,7 +22,6 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "queue/tx.h"
#include "iwl-context-info.h"
/*
@ -273,7 +272,7 @@ enum iwl_pcie_fw_reset_state {
};
/**
* enum wl_pcie_imr_status - imr dma transfer state
* enum iwl_pcie_imr_status - imr dma transfer state
* @IMR_D2S_IDLE: default value of the dma transfer
* @IMR_D2S_REQUESTED: dma transfer requested
* @IMR_D2S_COMPLETED: dma transfer completed
@ -286,6 +285,58 @@ enum iwl_pcie_imr_status {
IMR_D2S_ERROR,
};
/**
* struct iwl_pcie_txqs - TX queues data
*
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @page_offs: offset from skb->cb to mac header page pointer
* @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
* @queue_used: bit mask of used queues
* @queue_stopped: bit mask of stopped queues
* @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
* @queue_alloc_cmd_ver: queue allocation command version
* @bc_pool: bytecount DMA allocations pool
* @bc_tbl_size: bytecount table size
* @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
* (and similar usage)
* @cmd: command queue data
* @cmd.fifo: FIFO number
* @cmd.q_id: queue ID
* @cmd.wdg_timeout: watchdog timeout
* @tfd: TFD data
* @tfd.max_tbs: max number of buffers per TFD
* @tfd.size: TFD size
* @tfd.addr_size: TFD/TB address size
*/
struct iwl_pcie_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
struct dma_pool *bc_pool;
size_t bc_tbl_size;
bool bc_table_dword;
u8 page_offs;
u8 dev_cmd_offs;
struct iwl_tso_hdr_page __percpu *tso_hdr_page;
struct {
u8 fifo;
u8 q_id;
unsigned int wdg_timeout;
} cmd;
struct {
u8 max_tbs;
u16 size;
u8 addr_size;
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
u8 queue_alloc_cmd_ver;
};
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
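
The new iwl_pcie_txqs struct is the destination of the recurring trans->txqs to trans_pcie->txqs rewrites in this merge: TX queue bookkeeping becomes PCIe-private state reached through IWL_TRANS_GET_PCIE_TRANS(). A reduced sketch of that containment; the field layout is abridged and the accessor is assumed to resolve trans_specific as shown:

#include <stdio.h>

struct iwl_txq { int id; };

struct iwl_pcie_txqs {		/* abridged */
	struct iwl_txq *txq[4];
	struct { int q_id; } cmd;
};

struct iwl_trans { void *trans_specific; };

struct iwl_trans_pcie {
	struct iwl_pcie_txqs txqs;
};

/* assumption: the real macro resolves trans_specific like this */
#define IWL_TRANS_GET_PCIE_TRANS(t) \
	((struct iwl_trans_pcie *)((t)->trans_specific))

int main(void)
{
	struct iwl_txq cmdq = { .id = 0 };
	struct iwl_trans_pcie pcie = { .txqs.cmd.q_id = 0 };
	struct iwl_trans trans = { .trans_specific = &pcie };

	pcie.txqs.txq[0] = &cmdq;

	struct iwl_trans_pcie *tp = IWL_TRANS_GET_PCIE_TRANS(&trans);
	printf("cmd queue id: %d\n", tp->txqs.txq[tp->txqs.cmd.q_id]->id);
	return 0;
}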
@ -367,6 +418,7 @@ enum iwl_pcie_imr_status {
* @is_down: indicates the NIC is down
* @isr_stats: interrupt statistics
* @napi_dev: (fake) netdev for NAPI registration
* @txqs: transport tx queues data.
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@ -464,6 +516,8 @@ struct iwl_trans_pcie {
enum iwl_pcie_imr_status imr_status;
wait_queue_head_t imr_waitq;
char rf_name[32];
struct iwl_pcie_txqs txqs;
};
static inline struct iwl_trans_pcie *
@ -538,6 +592,17 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
/* We need 2 entries for the TX command and header, and another one might
* be needed for potential data in the SKB's head. The remaining ones can
* be used for frags.
*/
#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
struct iwl_tso_hdr_page {
struct page *page;
u8 *pos;
};
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
@ -552,10 +617,156 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue);
struct iwl_tso_hdr_page *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
size_t len, struct sk_buff *skb);
void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
return txq->first_tb_dma +
sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
struct iwl_txq *txq, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (trans->trans_cfg->gen2)
idx = iwl_txq_get_cmd_index(txq, idx);
return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
}
/*
* We need this inline in case dma_addr_t is only 32-bits - since the
* hardware is always 64-bit, the issue can still occur in that case,
* so use u64 for 'phys' here to force the addition in 64-bit.
*/
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
return upper_32_bits(phys) != upper_32_bits(phys + len);
}
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
iwl_op_mode_queue_full(trans->op_mode, txq->id);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else {
IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
txq->id);
}
}
/**
* iwl_txq_inc_wrap - increment queue index, wrap back to beginning
* @trans: the transport (for configuration data)
* @index: current index
*/
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
return ++index &
(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
/**
* iwl_txq_dec_wrap - decrement queue index, wrap back to end
* @trans: the transport (for configuration data)
* @index: current index
*/
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
return --index &
(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
static inline void
iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
}
int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
struct iwl_tfh_tfd *tfd, dma_addr_t addr,
u16 len);
static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
struct iwl_tfh_tfd *tfd)
{
tfd->num_tbs = 0;
iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
trans->invalid_tx_cmd.size);
}
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd);
int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
u32 sta_mask, u8 tid,
int size, unsigned int timeout);
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id,
int queue_size);
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
void *_tfd, u8 idx)
{
struct iwl_tfd *tfd;
struct iwl_tfd_tb *tb;
if (trans->trans_cfg->gen2) {
struct iwl_tfh_tfd *tfh_tfd = _tfd;
struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
return le16_to_cpu(tfh_tb->tb_len);
}
tfd = (struct iwl_tfd *)_tfd;
tb = &tfd->tbs[idx];
return le16_to_cpu(tb->hi_n_len) >> 4;
}
void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs, bool is_flush);
void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs, bool freeze);
int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx);
int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm);
/*****************************************************
* Error handling
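
Of the helpers gathered into this header, iwl_txq_crosses_4g_boundary() deserves a worked example: doing the addition in u64 matters because with a 32-bit dma_addr_t the sum would truncate before the comparison. A standalone check:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t upper_32_bits(uint64_t v) { return v >> 32; }

static inline int crosses_4g_boundary(uint64_t phys, uint16_t len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int main(void)
{
	/* 0xFFFFF000 + 0x2000 wraps past the 4 GiB line */
	printf("%d\n", crosses_4g_boundary(0xFFFFF000ULL, 0x2000)); /* 1 */
	printf("%d\n", crosses_4g_boundary(0x10000000ULL, 0x2000)); /* 0 */
	return 0;
}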
@ -822,12 +1033,51 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common trans ops for all generations transports */
void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val);
u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs);
u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords);
int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords);
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
struct iwl_trans_dump_data *
iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx);
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
bool test, bool reset);
int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
u32 mask, u32 value);
int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
u32 *val);
bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
/* transport gen 1 exported functions */
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
@ -849,7 +1099,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
@ -864,5 +1114,7 @@ void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
#endif /* __iwl_trans_int_pcie_h__ */


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2003-2014, 2018-2023 Intel Corporation
* Copyright (C) 2003-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -1301,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@ -1678,6 +1678,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
*/
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
@ -1694,9 +1695,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
}
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
if (!trans->txqs.txq[i])
if (!trans_pcie->txqs.txq[i])
continue;
del_timer(&trans->txqs.txq[i]->stuck_timer);
del_timer(&trans_pcie->txqs.txq[i]->stuck_timer);
}
/* The STATUS_FW_ERROR bit is set in this function. This must happen


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
* Copyright (C) 2018-2023 Intel Corporation
* Copyright (C) 2018-2024 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-prph.h"
@ -247,7 +247,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@ -339,16 +339,17 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
pos += scnprintf(buf + pos, buflen - pos, "\n");
}
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_pcie_reset_ict(trans);
/* make sure all queue are not stopped/used */
memset(trans->txqs.queue_stopped, 0,
sizeof(trans->txqs.queue_stopped));
memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
memset(trans_pcie->txqs.queue_stopped, 0,
sizeof(trans_pcie->txqs.queue_stopped));
memset(trans_pcie->txqs.queue_used, 0,
sizeof(trans_pcie->txqs.queue_used));
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2007-2015, 2018-2023 Intel Corporation
* Copyright (C) 2007-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -127,8 +127,7 @@ out:
kfree(buf);
}
static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
bool retake_ownership)
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
@ -1336,8 +1335,8 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
}
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
@ -1423,7 +1422,7 @@ out:
return ret;
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
iwl_pcie_reset_ict(trans);
iwl_pcie_tx_start(trans, scd_addr);
@ -1458,7 +1457,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool was_in_rfkill;
@ -1505,9 +1504,17 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
} else {
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
if (reset) {
/*
@ -1552,8 +1559,7 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
return 0;
}
static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
bool reset)
int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
int ret;
@ -1571,9 +1577,9 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
return 0;
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
bool test, bool reset)
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
bool test, bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
@ -1586,8 +1592,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
goto out;
}
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans);
if (ret)
@ -1874,7 +1884,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return 0;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@ -1886,7 +1896,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return ret;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1906,17 +1916,17 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
@@ -1929,7 +1939,7 @@ static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
return 0x000FFFFF;
}
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
@@ -1938,8 +1948,7 @@ static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
u32 val)
void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
@@ -1948,20 +1957,20 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans->txqs.page_offs = trans_cfg->cb_data_offs;
trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
@@ -1980,7 +1989,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans->command_groups = trans_cfg->command_groups;
@@ -2079,15 +2088,20 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
trans->dev);
mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
if (state)
set_bit(STATUS_TPOWER_PMI, &trans->status);
else
clear_bit(STATUS_TPOWER_PMI, &trans->status);
if (trans_pcie->txqs.tso_hdr_page) {
for_each_possible_cpu(i) {
struct iwl_tso_hdr_page *p =
per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
if (p && p->page)
__free_page(p->page);
}
free_percpu(trans_pcie->txqs.tso_hdr_page);
}
iwl_trans_free(trans);
}
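/*
 * The tso_hdr_page teardown above pairs with the alloc_percpu() call in
 * iwl_trans_pcie_alloc() further down. A minimal sketch of that percpu
 * allocate/walk/free pattern, assuming <linux/percpu.h> and <linux/gfp.h>;
 * struct demo_page and the demo_* names are hypothetical, not driver code:
 */
struct demo_page {
	struct page *page;	/* lazily populated per CPU */
};

static struct demo_page __percpu *demo;

static int demo_init(void)
{
	/* one zero-initialized instance per possible CPU */
	demo = alloc_percpu(struct demo_page);
	return demo ? 0 : -ENOMEM;
}

static void demo_exit(void)
{
	int cpu;

	/* visit every possible CPU and release whatever was populated */
	for_each_possible_cpu(cpu) {
		struct demo_page *p = per_cpu_ptr(demo, cpu);

		if (p->page)
			__free_page(p->page);
	}
	free_percpu(demo);
}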
struct iwl_trans_pcie_removal {
@@ -2253,7 +2267,7 @@ out:
return true;
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
bool ret;
@@ -2267,7 +2281,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
return false;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2297,8 +2311,8 @@ out:
spin_unlock_bh(&trans_pcie->reg_lock);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
{
#define IWL_MAX_HW_ERRS 5
unsigned int num_consec_hw_errors = 0;
@@ -2347,8 +2361,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
return 0;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords)
int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords)
{
int offs, ret = 0;
const u32 *vals = buf;
@@ -2365,8 +2379,8 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
return ret;
}
static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
u32 *val)
int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
u32 *val)
{
return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
ofs, val);
@@ -2374,8 +2388,8 @@ static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
#define IWL_FLUSH_WAIT_MS 2000
static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data)
int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2390,8 +2404,9 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
return 0;
}
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
unsigned long now = jiffies;
bool overflow_tx;
@@ -2401,11 +2416,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
if (!test_bit(txq_idx, trans->txqs.queue_used))
if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
return -EINVAL;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
txq = trans->txqs.txq[txq_idx];
txq = trans_pcie->txqs.txq[txq_idx];
spin_lock_bh(&txq->lock);
overflow_tx = txq->overflow_tx ||
@@ -2451,8 +2466,9 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
return 0;
}
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cnt;
int ret = 0;
@@ -2461,9 +2477,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
if (cnt == trans->txqs.cmd.q_id)
if (cnt == trans_pcie->txqs.cmd.q_id)
continue;
if (!test_bit(cnt, trans->txqs.queue_used))
if (!test_bit(cnt, trans_pcie->txqs.queue_used))
continue;
if (!(BIT(cnt) & txq_bm))
continue;
@@ -2476,8 +2492,8 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
u32 mask, u32 value)
void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
u32 mask, u32 value)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2636,12 +2652,13 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state = v;
struct iwl_trans *trans = priv->trans;
struct iwl_txq *txq = trans->txqs.txq[state->pos];
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
(unsigned int)state->pos,
!!test_bit(state->pos, trans->txqs.queue_used),
!!test_bit(state->pos, trans->txqs.queue_stopped));
!!test_bit(state->pos, trans_pcie->txqs.queue_used),
!!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
if (txq)
seq_printf(seq,
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
@@ -2651,7 +2668,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
else
seq_puts(seq, "(unallocated)");
if (state->pos == trans->txqs.cmd.q_id)
if (state->pos == trans_pcie->txqs.cmd.q_id)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
@@ -3055,7 +3072,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(rf, dir, 0400);
}
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct cont_rec *data = &trans_pcie->fw_mon_data;
@@ -3068,10 +3085,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
@@ -3332,15 +3350,14 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
return 0;
}
static struct iwl_trans_dump_data *
iwl_trans_pcie_dump_data(struct iwl_trans *trans,
u32 dump_mask,
struct iwl_trans_dump_data *
iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
@@ -3407,7 +3424,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
data = (void *)dump_data->data;
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
u16 tfd_size = trans->txqs.tfd.size;
u16 tfd_size = trans_pcie->txqs.tfd.size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
@@ -3483,7 +3500,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
return dump_data;
}
static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
if (enable)
iwl_enable_interrupts(trans);
@@ -3491,7 +3508,7 @@ static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
iwl_disable_interrupts(trans);
}
static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
u32 inta_addr, sw_err_bit;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -3510,81 +3527,6 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
#define IWL_TRANS_COMMON_OPS \
.op_mode_leave = iwl_trans_pcie_op_mode_leave, \
.write8 = iwl_trans_pcie_write8, \
.write32 = iwl_trans_pcie_write32, \
.read32 = iwl_trans_pcie_read32, \
.read_prph = iwl_trans_pcie_read_prph, \
.write_prph = iwl_trans_pcie_write_prph, \
.read_mem = iwl_trans_pcie_read_mem, \
.write_mem = iwl_trans_pcie_write_mem, \
.read_config32 = iwl_trans_pcie_read_config32, \
.configure = iwl_trans_pcie_configure, \
.set_pmi = iwl_trans_pcie_set_pmi, \
.sw_reset = iwl_trans_pcie_sw_reset, \
.grab_nic_access = iwl_trans_pcie_grab_nic_access, \
.release_nic_access = iwl_trans_pcie_release_nic_access, \
.set_bits_mask = iwl_trans_pcie_set_bits_mask, \
.dump_data = iwl_trans_pcie_dump_data, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume, \
.interrupts = iwl_trans_pci_interrupts, \
.sync_nmi = iwl_trans_pcie_sync_nmi, \
.imr_dma_data = iwl_trans_pcie_copy_imr \
static const struct iwl_trans_ops trans_ops_pcie = {
IWL_TRANS_COMMON_OPS,
.start_hw = iwl_trans_pcie_start_hw,
.fw_alive = iwl_trans_pcie_fw_alive,
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
.send_cmd = iwl_pcie_enqueue_hcmd,
.tx = iwl_trans_pcie_tx,
.reclaim = iwl_txq_reclaim,
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
.freeze_txq_timer = iwl_trans_txq_freeze_timer,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};
static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
IWL_TRANS_COMMON_OPS,
.start_hw = iwl_trans_pcie_start_hw,
.fw_alive = iwl_trans_pcie_gen2_fw_alive,
.start_fw = iwl_trans_pcie_gen2_start_fw,
.stop_device = iwl_trans_pcie_gen2_stop_device,
.send_cmd = iwl_pcie_gen2_enqueue_hcmd,
.tx = iwl_txq_gen2_tx,
.reclaim = iwl_txq_reclaim,
.set_q_ptrs = iwl_txq_set_q_ptrs,
.txq_alloc = iwl_txq_dyn_alloc,
.txq_free = iwl_txq_dyn_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
.load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm,
.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
.load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power,
.set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_cfg_trans_params *cfg_trans)
@@ -3592,13 +3534,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie, **priv;
struct iwl_trans *trans;
int ret, addr_size;
const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
void __iomem * const *table;
u32 bar0;
if (!cfg_trans->gen2)
ops = &trans_ops_pcie;
/* reassign our BAR 0 if invalid due to possible runtime PM races */
pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
@@ -3611,20 +3549,65 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
cfg_trans);
if (!trans)
return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (trans->trans_cfg->gen2) {
trans_pcie->txqs.tfd.addr_size = 64;
trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
} else {
trans_pcie->txqs.tfd.addr_size = 36;
trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
}
trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->txqs.tso_hdr_page) {
ret = -ENOMEM;
goto out_free_trans;
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
trans_pcie->txqs.bc_tbl_size =
sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->txqs.bc_tbl_size =
sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
else
trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
/*
* For gen2 devices, we use a single allocation for each byte-count
* table, but they're pretty small (1k) so use a DMA pool that we
* allocate here.
*/
if (trans->trans_cfg->gen2) {
trans_pcie->txqs.bc_pool =
dmam_pool_create("iwlwifi:bc", trans->dev,
trans_pcie->txqs.bc_tbl_size,
256, 0);
if (!trans_pcie->txqs.bc_pool) {
ret = -ENOMEM;
goto out_free_tso;
}
}
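	/*
	 * Queues later carve their byte-count tables out of this pool via
	 * dma_pool_alloc()/dma_pool_free() (not shown in this hunk). Using
	 * the managed dmam_pool_create() variant ties the pool's lifetime
	 * to the device, so teardown needs no explicit dma_pool_destroy().
	 */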
/* Some things must not change even if the config does */
WARN_ON(trans_pcie->txqs.tfd.addr_size !=
(trans->trans_cfg->gen2 ? 64 : 36));
/* Initialize NAPI here - it should be before registering to mac80211
* in the opmode but after the HW struct is allocated.
*/
trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
if (!trans_pcie->napi_dev) {
ret = -ENOMEM;
goto out_free_trans;
goto out_free_tso;
}
/* The private struct in netdev is a pointer to struct iwl_trans_pcie */
priv = netdev_priv(trans_pcie->napi_dev);
@@ -3663,7 +3646,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev);
addr_size = trans->txqs.tfd.addr_size;
addr_size = trans_pcie->txqs.tfd.addr_size;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -3766,6 +3749,8 @@ out_no_pci:
destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_ndev:
free_netdev(trans_pcie->napi_dev);
out_free_tso:
free_percpu(trans_pcie->txqs.tso_hdr_page);
out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
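
The new out_free_tso label above follows the usual kernel unwind idiom: error labels are ordered in reverse of the acquisitions, so a failure at any step releases exactly what was set up before it. A minimal sketch of the pattern, with hypothetical acquire_*/release_* helpers:

static int demo_probe(void)
{
	int ret;

	ret = acquire_a();	/* first resource */
	if (ret)
		return ret;

	ret = acquire_b();	/* needs a */
	if (ret)
		goto out_release_a;

	ret = acquire_c();	/* needs b */
	if (ret)
		goto out_release_b;

	return 0;

out_release_b:
	release_b();		/* unwind in reverse order */
out_release_a:
	release_a();
	return ret;
}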

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,191 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"
struct iwl_tso_hdr_page {
struct page *page;
u8 *pos;
};
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
return txq->first_tb_dma +
sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
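/*
 * Example: with n_window == 64 the mask is 63, so a raw ring index of 70
 * maps to command slot 70 & 63 == 6. The masking assumes n_window is a
 * power of two.
 */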
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_txq *txq)
{
if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
}
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
struct iwl_txq *txq, int idx)
{
if (trans->trans_cfg->gen2)
idx = iwl_txq_get_cmd_index(txq, idx);
return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
bool cmd_queue);
/*
* We need this inline in case dma_addr_t is only 32-bits - since the
* hardware is always 64-bit, the issue can still occur in that case,
* so use u64 for 'phys' here to force the addition in 64-bit.
*/
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
return upper_32_bits(phys) != upper_32_bits(phys + len);
}
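/*
 * Example: phys == 0xfffff000 with len == 0x2000 ends at 0x100001000, so
 * upper_32_bits() yields 0 for the start but 1 for the end: the buffer
 * crosses a 4 GiB boundary and must be split. Doing the addition in u64
 * keeps the check correct even where dma_addr_t is only 32 bits wide.
 */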
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
iwl_op_mode_queue_full(trans->op_mode, txq->id);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else {
IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
txq->id);
}
}
/**
* iwl_txq_inc_wrap - increment queue index, wrap back to beginning
* @trans: the transport (for configuration data)
* @index: current index
*/
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
return ++index &
(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
/**
* iwl_txq_dec_wrap - decrement queue index, wrap back to end
* @trans: the transport (for configuration data)
* @index: current index
*/
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
return --index &
(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
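/*
 * Example: with max_tfd_queue_size == 256, iwl_txq_inc_wrap() takes index
 * 255 back to 0 and iwl_txq_dec_wrap() takes index 0 back to 255; as with
 * iwl_txq_get_cmd_index(), the & (size - 1) masking assumes a
 * power-of-two queue size.
 */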
static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
int index = iwl_txq_get_cmd_index(q, i);
int r = iwl_txq_get_cmd_index(q, q->read_ptr);
int w = iwl_txq_get_cmd_index(q, q->write_ptr);
return w >= r ?
(index >= r && index < w) :
!(index < r && index >= w);
}
void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
struct iwl_tfh_tfd *tfd, dma_addr_t addr,
u16 len);
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd);
int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
u32 sta_mask, u8 tid,
int size, unsigned int timeout);
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
struct sk_buff *skb);
#endif
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
struct iwl_tfd *tfd)
{
return tfd->num_tbs & 0x1f;
}
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
void *_tfd, u8 idx)
{
struct iwl_tfd *tfd;
struct iwl_tfd_tb *tb;
if (trans->trans_cfg->gen2) {
struct iwl_tfh_tfd *tfh_tfd = _tfd;
struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
return le16_to_cpu(tfh_tb->tb_len);
}
tfd = (struct iwl_tfd *)_tfd;
tb = &tfd->tbs[idx];
return le16_to_cpu(tb->hi_n_len) >> 4;
}
static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_trans *trans,
struct iwl_tfd *tfd,
u8 idx, dma_addr_t addr, u16 len)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
u16 hi_n_len = len << 4;
put_unaligned_le32(addr, &tb->lo);
hi_n_len |= iwl_get_dma_hi_addr(addr);
tb->hi_n_len = cpu_to_le16(hi_n_len);
tfd->num_tbs = idx + 1;
}
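/*
 * Example: for the 36-bit DMA address addr == 0x812345600 with
 * len == 0x1a0, tb->lo holds the low word 0x12345600 and hi_n_len packs
 * (0x1a0 << 4) | 0x8 == 0x1a08: the length in bits 15:4 and address bits
 * 35:32 in the bottom nibble.
 */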
void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs);
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs, bool is_flush);
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_queue_tx_h__ */


@@ -216,7 +216,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
struct sk_buff *skb;
size_t eeprom_hdr_size;
int ret = 0;
long timeout;
long time_left;
if (priv->fw_var >= 0x509)
eeprom_hdr_size = sizeof(*eeprom_hdr);
@@ -245,9 +245,9 @@ p54_tx(priv, skb);
p54_tx(priv, skb);
timeout = wait_for_completion_interruptible_timeout(
time_left = wait_for_completion_interruptible_timeout(
&priv->eeprom_comp, HZ);
if (timeout <= 0) {
if (time_left <= 0) {
wiphy_err(priv->hw->wiphy,
"device does not respond or signal received!\n");
ret = -EBUSY;
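
The timeout to time_left renames here and in the hunks that follow are more than cosmetic: wait_for_completion_interruptible_timeout() returns a signed long that is negative (-ERESTARTSYS) when a signal interrupts the wait, 0 on timeout, and the remaining jiffies on success, so storing the result in an unsigned variable named timeout both misreads the value and can hide the signal case. A minimal sketch of the intended three-way handling, with a hypothetical demo_wait() around an arbitrary completion:

static int demo_wait(struct completion *done)
{
	long time_left;	/* signed: <0 interrupted, 0 timed out, >0 jiffies left */

	time_left = wait_for_completion_interruptible_timeout(done, HZ);
	if (time_left < 0)
		return time_left;	/* -ERESTARTSYS from a signal */
	if (!time_left)
		return -ETIMEDOUT;	/* completion never fired */
	return 0;			/* completed with time to spare */
}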


@@ -197,7 +197,7 @@ out:
return err;
}
static void p54_stop(struct ieee80211_hw *dev)
static void p54_stop(struct ieee80211_hw *dev, bool suspend)
{
struct p54_common *priv = dev->priv;
int i;


@@ -434,7 +434,7 @@ static int p54p_open(struct ieee80211_hw *dev)
{
struct p54p_priv *priv = dev->priv;
int err;
long timeout;
long time_left;
init_completion(&priv->boot_comp);
err = request_irq(priv->pdev->irq, p54p_interrupt,
@@ -472,12 +472,12 @@ P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
P54P_READ(dev_int);
timeout = wait_for_completion_interruptible_timeout(
time_left = wait_for_completion_interruptible_timeout(
&priv->boot_comp, HZ);
if (timeout <= 0) {
if (time_left <= 0) {
wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
p54p_stop(dev);
return timeout ? -ERESTARTSYS : -ETIMEDOUT;
return time_left ? -ERESTARTSYS : -ETIMEDOUT;
}
P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));


@@ -518,7 +518,7 @@ out:
static int p54spi_op_start(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
unsigned long timeout;
long time_left;
int ret = 0;
if (mutex_lock_interruptible(&priv->mutex)) {
@@ -538,10 +538,10 @@ static int p54spi_op_start(struct ieee80211_hw *dev)
mutex_unlock(&priv->mutex);
timeout = msecs_to_jiffies(2000);
timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp,
timeout);
if (!timeout) {
time_left = msecs_to_jiffies(2000);
time_left = wait_for_completion_interruptible_timeout(&priv->fw_comp,
time_left);
if (!time_left) {
dev_err(&priv->spi->dev, "firmware boot failed");
p54spi_power_off(priv);
ret = -1;


@@ -267,7 +267,7 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
return 0;
}
static void lbtf_op_stop(struct ieee80211_hw *hw)
static void lbtf_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct lbtf_private *priv = hw->priv;
unsigned long flags;


@@ -2211,7 +2211,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd
dma_addr_t dma_addr;
unsigned int dma_size;
int rc;
unsigned long timeout = 0;
unsigned long time_left = 0;
u8 buf[32];
u32 bitmap = 0;
@@ -2258,8 +2258,8 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd
iowrite32(MWL8K_H2A_INT_DUMMY,
regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
timeout = wait_for_completion_timeout(&cmd_wait,
msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
time_left = wait_for_completion_timeout(&cmd_wait,
msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
priv->hostcmd_wait = NULL;
@@ -2267,7 +2267,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd
dma_unmap_single(&priv->pdev->dev, dma_addr, dma_size,
DMA_BIDIRECTIONAL);
if (!timeout) {
if (!time_left) {
wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
MWL8K_CMD_TIMEOUT_MS);
@@ -2275,7 +2275,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd
} else {
int ms;
ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(timeout);
ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(time_left);
rc = cmd->result ? -EINVAL : 0;
if (rc)
@@ -4768,7 +4768,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
return rc;
}
static void mwl8k_stop(struct ieee80211_hw *hw)
static void mwl8k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mwl8k_priv *priv = hw->priv;
int i;
@@ -6023,7 +6023,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *vif, *tmp_vif;
mwl8k_stop(hw);
mwl8k_stop(hw, false);
mwl8k_rxq_deinit(hw, 0);
/*


@@ -23,7 +23,7 @@ mt7603_start(struct ieee80211_hw *hw)
}
static void
mt7603_stop(struct ieee80211_hw *hw)
mt7603_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7603_dev *dev = hw->priv;

Some files were not shown because too many files have changed in this diff