Merge tag 'wireless-drivers-next-for-davem-2016-09-15' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.9

Major changes:

iwlwifi

* preparation for new a000 HW continues
* some DQA improvements
* add support for GMAC
* add support for 9460, 9270 and 9170 series

mwifiex

* support random MAC address for scanning
* add HT aggregation support for adhoc mode
* add custom regulatory domain support
* add manufacturing mode support via nl80211 testmode interface

bcma

* support BCM53573 series of wireless SoCs

bitfield.h

* add FIELD_PREP() and FIELD_GET() macros

mt7601u

* convert to use the new bitfield.h macros

brcmfmac

* add support for bcm4339 chip with modalias sdio:c00v02D0d4339

ath10k

* add nl80211 testmode support for 10.4 firmware
* hide kernel addresses from logs using %pK format specifier
* implement NAPI support
* enable peer stats by default

ath9k

* use ieee80211_tx_status_noskb where possible

wil6210

* extract firmware capabilities from the firmware file

ath6kl

* enable firmware crash dumps on the AR6004

ath-current is also merged to fix a conflict in ath10k.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
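The bitfield.h entry above refers to the new FIELD_PREP()/FIELD_GET() helpers that mt7601u is converted to use later in this series. The snippet below is a simplified userspace sketch of how those macros behave, assuming a GCC/Clang __builtin_ctz-based shift derivation; the real definitions live in include/linux/bitfield.h and add compile-time mask checking, and MT_RATE_MCS here is a made-up example mask, not a real register field.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's FIELD_PREP()/FIELD_GET().
 * The field position is derived from the mask's lowest set bit,
 * just as bitfield.h does, so callers never spell out a shift.
 */
#define BF_SHIFT(mask)        (__builtin_ctz(mask))
#define FIELD_PREP(mask, val) (((uint32_t)(val) << BF_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)  (((reg) & (mask)) >> BF_SHIFT(mask))

#define MT_RATE_MCS 0x0000ff00u        /* hypothetical example mask, bits 15:8 */

int main(void)
{
        uint32_t reg = 0;

        reg |= FIELD_PREP(MT_RATE_MCS, 7);        /* pack the value 7 into bits 15:8 */
        printf("reg=0x%08x mcs=%u\n", reg, (unsigned)FIELD_GET(MT_RATE_MCS, reg));
        return 0;
}

The point of the conversion is that the field position follows from the mask itself, so the open-coded shift/mask pairs disappear from drivers such as mt7601u.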
commit e812bd905a
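Many of the ath10k hunks below only change a debug-print format specifier from %p to %pK ("hide kernel addresses from logs" in the changelog). The fragment below is a hedged sketch of that pattern rather than code from the patch; example_log_ring is an illustrative name.

#include <linux/printk.h>

/* Sketch only: same call, different specifier. With "%p" the raw pointer
 * value is printed; with "%pK" printk honours the kptr_restrict sysctl
 * and censors the pointer (e.g. prints zeros for unprivileged readers),
 * so ring and buffer addresses no longer leak into the kernel log.
 */
static void example_log_ring(void *base_addr, int nentries)
{
        pr_debug("ce ring entries %d base_addr %pK\n", nentries, base_addr);
}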
@@ -36,12 +36,31 @@ u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
}
EXPORT_SYMBOL_GPL(bcma_chipco_get_alp_clock);

static bool bcma_core_cc_has_pmu_watchdog(struct bcma_drv_cc *cc)
{
        struct bcma_bus *bus = cc->core->bus;

        if (cc->capabilities & BCMA_CC_CAP_PMU) {
                if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573) {
                        WARN(bus->chipinfo.rev <= 1, "No watchdog available\n");
                        /* 53573B0 and 53573B1 have bugged PMU watchdog. It can
                         * be enabled but timer can't be bumped. Use CC one
                         * instead.
                         */
                        return false;
                }
                return true;
        } else {
                return false;
        }
}

static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
{
        struct bcma_bus *bus = cc->core->bus;
        u32 nb;

-       if (cc->capabilities & BCMA_CC_CAP_PMU) {
+       if (bcma_core_cc_has_pmu_watchdog(cc)) {
                if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
                        nb = 32;
                else if (cc->core->id.rev < 26)
@@ -95,9 +114,16 @@ static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)

int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
{
        struct bcma_bus *bus = cc->core->bus;
        struct bcm47xx_wdt wdt = {};
        struct platform_device *pdev;

        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573 &&
            bus->chipinfo.rev <= 1) {
                pr_debug("No watchdog on 53573A0 / 53573A1\n");
                return 0;
        }

        wdt.driver_data = cc;
        wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
        wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
@@ -105,7 +131,7 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
                bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;

        pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
-                                            cc->core->bus->num, &wdt,
+                                            bus->num, &wdt,
                                             sizeof(wdt));
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);
@@ -217,7 +243,7 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
        u32 maxt;

        maxt = bcma_chipco_watchdog_get_max_timer(cc);
-       if (cc->capabilities & BCMA_CC_CAP_PMU) {
+       if (bcma_core_cc_has_pmu_watchdog(cc)) {
                if (ticks == 1)
                        ticks = 2;
                else if (ticks > maxt)
@@ -209,6 +209,8 @@ static void bcma_of_fill_device(struct platform_device *parent,
        core->dev.of_node = node;

        core->irq = bcma_of_get_irq(parent, core, 0);

        of_dma_configure(&core->dev, node);
}

unsigned int bcma_core_irq(struct bcma_device *core, int num)
@@ -248,12 +250,12 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
                core->irq = bus->host_pci->irq;
                break;
        case BCMA_HOSTTYPE_SOC:
                core->dev.dma_mask = &core->dev.coherent_dma_mask;
-               if (bus->host_pdev) {
+               if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
                        core->dma_dev = &bus->host_pdev->dev;
                        core->dev.parent = &bus->host_pdev->dev;
                        bcma_of_fill_device(bus->host_pdev, core);
                } else {
                        core->dev.dma_mask = &core->dev.coherent_dma_mask;
                        core->dma_dev = &core->dev;
                }
                break;
@@ -462,13 +462,13 @@ static void ath10k_ahb_halt_chip(struct ath10k *ar)
static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (!ath10k_pci_irq_pending(ar))
                return IRQ_NONE;

        ath10k_pci_disable_and_clear_legacy_irq(ar);
        tasklet_schedule(&ar_pci->intr_tq);
        ath10k_pci_irq_msi_fw_mask(ar);
        napi_schedule(&ar->napi);

        return IRQ_HANDLED;
}
@@ -577,7 +577,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);

-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
                   ar_ahb->mem, ar_ahb->mem_len,
                   ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
        return 0;
@@ -717,6 +717,9 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
        synchronize_irq(ar_ahb->irq);

        ath10k_pci_flush(ar);

        napi_synchronize(&ar->napi);
        napi_disable(&ar->napi);
}

static int ath10k_ahb_hif_power_up(struct ath10k *ar)
@@ -748,6 +751,7 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
                goto err_ce_deinit;
        }
        napi_enable(&ar->napi);

        return 0;

@@ -831,7 +835,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
                goto err_resource_deinit;
        }

-       ath10k_pci_init_irq_tasklets(ar);
+       ath10k_pci_init_napi(ar);

        ret = ath10k_ahb_request_irq_legacy(ar);
        if (ret)
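The ahb.c changes above belong to the "implement NAPI support" item: the legacy interrupt handler now calls napi_schedule() and the HIF start/stop paths enable, disable and synchronize the NAPI context. Below is a generic sketch of the NAPI wiring pattern of this kernel generation, offered under clearly labelled assumptions: the my_* names are illustrative stand-ins rather than ath10k symbols, and only the budget value of 64 is taken from the patch.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

#define MY_NAPI_BUDGET 64                /* mirrors ATH10K_NAPI_BUDGET in this series */

struct my_dev {
        struct net_device napi_dev;      /* dummy netdev that hosts the NAPI context */
        struct napi_struct napi;
};

/* Illustrative stubs standing in for real RX processing and IRQ control. */
static int my_process_rx(struct my_dev *dev, int budget) { return 0; }
static void my_irq_enable(struct my_dev *dev) { }
static void my_irq_disable(struct my_dev *dev) { }

/* Poll callback: handle up to 'budget' units of work; when done early,
 * complete NAPI and let the interrupt fire again.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_dev *dev = container_of(napi, struct my_dev, napi);
        int done = my_process_rx(dev, budget);

        if (done < budget) {
                napi_complete_done(napi, done);
                my_irq_enable(dev);
        }
        return done;
}

static irqreturn_t my_isr(int irq, void *arg)
{
        struct my_dev *dev = arg;

        my_irq_disable(dev);             /* mask the source, as the ahb handler does */
        napi_schedule(&dev->napi);       /* defer the heavy lifting to my_poll() */
        return IRQ_HANDLED;
}

static void my_napi_setup(struct my_dev *dev)
{
        init_dummy_netdev(&dev->napi_dev);
        netif_napi_add(&dev->napi_dev, &dev->napi, my_poll, MY_NAPI_BUDGET);
}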
@@ -221,7 +221,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
        u32 txlen;
        int ret;

-       ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+       ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
                   buffer, length);

        if (ar->bmi.done_sent) {
@@ -287,7 +287,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
        int ret;

        ath10k_dbg(ar, ATH10K_DBG_BMI,
-                  "bmi fast download address 0x%x buffer 0x%p length %d\n",
+                  "bmi fast download address 0x%x buffer 0x%pK length %d\n",
                   address, buffer, length);

        ret = ath10k_bmi_lz_stream_start(ar, address);
@@ -840,7 +840,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

        ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                  "boot init ce src ring id %d entries %d base_addr %p\n",
+                  "boot init ce src ring id %d entries %d base_addr %pK\n",
                   ce_id, nentries, src_ring->base_addr_owner_space);

        return 0;
@@ -874,7 +874,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

        ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                  "boot ce dest ring id %d entries %d base_addr %p\n",
+                  "boot ce dest ring id %d entries %d base_addr %pK\n",
                   ce_id, nentries, dest_ring->base_addr_owner_space);

        return 0;
@ -60,7 +60,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.otp_exe_param = 0,
|
||||
.channel_counters_freq_hz = 88000,
|
||||
.max_probe_resp_desc_thres = 0,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
|
||||
.cal_data_len = 2116,
|
||||
.fw = {
|
||||
.dir = QCA988X_HW_2_0_FW_DIR,
|
||||
@ -68,6 +67,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA988X_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA9887_HW_1_0_VERSION,
|
||||
@ -79,7 +79,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.otp_exe_param = 0,
|
||||
.channel_counters_freq_hz = 88000,
|
||||
.max_probe_resp_desc_thres = 0,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
|
||||
.cal_data_len = 2116,
|
||||
.fw = {
|
||||
.dir = QCA9887_HW_1_0_FW_DIR,
|
||||
@ -87,6 +86,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA9887_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA6174_HW_2_1_VERSION,
|
||||
@ -104,6 +104,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA6174_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA6174_HW_2_1_VERSION,
|
||||
@ -114,7 +115,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.otp_exe_param = 0,
|
||||
.channel_counters_freq_hz = 88000,
|
||||
.max_probe_resp_desc_thres = 0,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
|
||||
.cal_data_len = 8124,
|
||||
.fw = {
|
||||
.dir = QCA6174_HW_2_1_FW_DIR,
|
||||
@ -122,6 +122,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA6174_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA6174_HW_3_0_VERSION,
|
||||
@ -132,7 +133,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.otp_exe_param = 0,
|
||||
.channel_counters_freq_hz = 88000,
|
||||
.max_probe_resp_desc_thres = 0,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
|
||||
.cal_data_len = 8124,
|
||||
.fw = {
|
||||
.dir = QCA6174_HW_3_0_FW_DIR,
|
||||
@ -140,6 +140,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA6174_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA6174_HW_3_2_VERSION,
|
||||
@ -150,7 +151,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.otp_exe_param = 0,
|
||||
.channel_counters_freq_hz = 88000,
|
||||
.max_probe_resp_desc_thres = 0,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
|
||||
.cal_data_len = 8124,
|
||||
.fw = {
|
||||
/* uses same binaries as hw3.0 */
|
||||
@ -159,6 +159,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA6174_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA99X0_HW_2_0_DEV_VERSION,
|
||||
@ -171,7 +172,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.cck_rate_map_rev2 = true,
|
||||
.channel_counters_freq_hz = 150000,
|
||||
.max_probe_resp_desc_thres = 24,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
|
||||
.tx_chain_mask = 0xf,
|
||||
.rx_chain_mask = 0xf,
|
||||
.max_spatial_stream = 4,
|
||||
@ -182,6 +182,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA99X0_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.sw_decrypt_mcast_mgmt = true,
|
||||
.hw_ops = &qca99x0_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA9984_HW_1_0_DEV_VERSION,
|
||||
@ -194,7 +196,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.cck_rate_map_rev2 = true,
|
||||
.channel_counters_freq_hz = 150000,
|
||||
.max_probe_resp_desc_thres = 24,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
|
||||
.tx_chain_mask = 0xf,
|
||||
.rx_chain_mask = 0xf,
|
||||
.max_spatial_stream = 4,
|
||||
@ -205,6 +206,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA99X0_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.sw_decrypt_mcast_mgmt = true,
|
||||
.hw_ops = &qca99x0_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA9888_HW_2_0_DEV_VERSION,
|
||||
@ -216,7 +219,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.continuous_frag_desc = true,
|
||||
.channel_counters_freq_hz = 150000,
|
||||
.max_probe_resp_desc_thres = 24,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
|
||||
.tx_chain_mask = 3,
|
||||
.rx_chain_mask = 3,
|
||||
.max_spatial_stream = 2,
|
||||
@ -227,6 +229,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA99X0_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.sw_decrypt_mcast_mgmt = true,
|
||||
.hw_ops = &qca99x0_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA9377_HW_1_0_DEV_VERSION,
|
||||
@ -244,6 +248,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA9377_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA9377_HW_1_1_DEV_VERSION,
|
||||
@ -261,6 +266,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA9377_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.hw_ops = &qca988x_ops,
|
||||
},
|
||||
{
|
||||
.id = QCA4019_HW_1_0_DEV_VERSION,
|
||||
@ -274,7 +280,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.cck_rate_map_rev2 = true,
|
||||
.channel_counters_freq_hz = 125000,
|
||||
.max_probe_resp_desc_thres = 24,
|
||||
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
|
||||
.tx_chain_mask = 0x3,
|
||||
.rx_chain_mask = 0x3,
|
||||
.max_spatial_stream = 2,
|
||||
@ -285,6 +290,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.board_size = QCA4019_BOARD_DATA_SZ,
|
||||
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
|
||||
},
|
||||
.sw_decrypt_mcast_mgmt = true,
|
||||
.hw_ops = &qca99x0_ops,
|
||||
},
|
||||
};
|
||||
|
||||
@ -304,6 +311,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
|
||||
[ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
|
||||
[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
|
||||
[ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
|
||||
[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
|
||||
};
|
||||
|
||||
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
|
||||
@ -699,7 +707,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
|
||||
|
||||
if (!ar->running_fw->fw_file.otp_data ||
|
||||
!ar->running_fw->fw_file.otp_len) {
|
||||
ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
|
||||
ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
|
||||
ar->running_fw->fw_file.otp_data,
|
||||
ar->running_fw->fw_file.otp_len);
|
||||
return 0;
|
||||
@ -745,7 +753,7 @@ static int ath10k_download_fw(struct ath10k *ar)
|
||||
data = ar->running_fw->fw_file.firmware_data;
|
||||
data_len = ar->running_fw->fw_file.firmware_len;
|
||||
|
||||
ret = ath10k_swap_code_seg_configure(ar);
|
||||
ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to configure fw code swap: %d\n",
|
||||
ret);
|
||||
@ -753,7 +761,7 @@ static int ath10k_download_fw(struct ath10k *ar)
|
||||
}
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
||||
"boot uploading firmware image %p len %d\n",
|
||||
"boot uploading firmware image %pK len %d\n",
|
||||
data, data_len);
|
||||
|
||||
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
|
||||
@ -787,7 +795,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
|
||||
if (!IS_ERR(ar->pre_cal_file))
|
||||
release_firmware(ar->pre_cal_file);
|
||||
|
||||
ath10k_swap_code_seg_release(ar);
|
||||
ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
|
||||
|
||||
ar->normal_mode_fw.fw_file.otp_data = NULL;
|
||||
ar->normal_mode_fw.fw_file.otp_len = 0;
|
||||
@ -1497,14 +1505,14 @@ static void ath10k_core_restart(struct work_struct *work)
|
||||
|
||||
ieee80211_stop_queues(ar->hw);
|
||||
ath10k_drain_tx(ar);
|
||||
complete_all(&ar->scan.started);
|
||||
complete_all(&ar->scan.completed);
|
||||
complete_all(&ar->scan.on_channel);
|
||||
complete_all(&ar->offchan_tx_completed);
|
||||
complete_all(&ar->install_key_done);
|
||||
complete_all(&ar->vdev_setup_done);
|
||||
complete_all(&ar->thermal.wmi_sync);
|
||||
complete_all(&ar->bss_survey_done);
|
||||
complete(&ar->scan.started);
|
||||
complete(&ar->scan.completed);
|
||||
complete(&ar->scan.on_channel);
|
||||
complete(&ar->offchan_tx_completed);
|
||||
complete(&ar->install_key_done);
|
||||
complete(&ar->vdev_setup_done);
|
||||
complete(&ar->thermal.wmi_sync);
|
||||
complete(&ar->bss_survey_done);
|
||||
wake_up(&ar->htt.empty_tx_wq);
|
||||
wake_up(&ar->wmi.tx_credits_wq);
|
||||
wake_up(&ar->peer_mapping_wq);
|
||||
@ -1705,6 +1713,55 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath10k_core_reset_rx_filter(struct ath10k *ar)
|
||||
{
|
||||
int ret;
|
||||
int vdev_id;
|
||||
int vdev_type;
|
||||
int vdev_subtype;
|
||||
const u8 *vdev_addr;
|
||||
|
||||
vdev_id = 0;
|
||||
vdev_type = WMI_VDEV_TYPE_STA;
|
||||
vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
|
||||
vdev_addr = ar->mac_addr;
|
||||
|
||||
ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
|
||||
vdev_addr);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_vdev_delete(ar, vdev_id);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* WMI and HTT may use separate HIF pipes and are not guaranteed to be
|
||||
* serialized properly implicitly.
|
||||
*
|
||||
* Moreover (most) WMI commands have no explicit acknowledges. It is
|
||||
* possible to infer it implicitly by poking firmware with echo
|
||||
* command - getting a reply means all preceding comments have been
|
||||
* (mostly) processed.
|
||||
*
|
||||
* In case of vdev create/delete this is sufficient.
|
||||
*
|
||||
* Without this it's possible to end up with a race when HTT Rx ring is
|
||||
* started before vdev create/delete hack is complete allowing a short
|
||||
* window of opportunity to receive (and Tx ACK) a bunch of frames.
|
||||
*/
|
||||
ret = ath10k_wmi_barrier(ar);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to ping firmware: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
|
||||
const struct ath10k_fw_components *fw)
|
||||
{
|
||||
@ -1872,6 +1929,25 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
|
||||
goto err_hif_stop;
|
||||
}
|
||||
|
||||
/* Some firmware revisions do not properly set up hardware rx filter
|
||||
* registers.
|
||||
*
|
||||
* A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK
|
||||
* is filled with 0s instead of 1s allowing HW to respond with ACKs to
|
||||
* any frames that matches MAC_PCU_RX_FILTER which is also
|
||||
* misconfigured to accept anything.
|
||||
*
|
||||
* The ADDR1 is programmed using internal firmware structure field and
|
||||
* can't be (easily/sanely) reached from the driver explicitly. It is
|
||||
* possible to implicitly make it correct by creating a dummy vdev and
|
||||
* then deleting it.
|
||||
*/
|
||||
status = ath10k_core_reset_rx_filter(ar);
|
||||
if (status) {
|
||||
ath10k_err(ar, "failed to reset rx filter: %d\n", status);
|
||||
goto err_hif_stop;
|
||||
}
|
||||
|
||||
/* If firmware indicates Full Rx Reorder support it must be used in a
|
||||
* slightly different manner. Let HTT code know.
|
||||
*/
|
||||
@ -2031,7 +2107,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
|
||||
goto err_free_firmware_files;
|
||||
}
|
||||
|
||||
ret = ath10k_swap_code_seg_init(ar);
|
||||
ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to initialize code swap segment: %d\n",
|
||||
ret);
|
||||
@ -2072,6 +2148,9 @@ static void ath10k_core_register_work(struct work_struct *work)
|
||||
struct ath10k *ar = container_of(work, struct ath10k, register_work);
|
||||
int status;
|
||||
|
||||
/* peer stats are enabled by default */
|
||||
set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
|
||||
|
||||
status = ath10k_core_probe_fw(ar);
|
||||
if (status) {
|
||||
ath10k_err(ar, "could not probe fw (%d)\n", status);
|
||||
@ -2249,6 +2328,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
|
||||
INIT_WORK(&ar->register_work, ath10k_core_register_work);
|
||||
INIT_WORK(&ar->restart_work, ath10k_core_restart);
|
||||
|
||||
init_dummy_netdev(&ar->napi_dev);
|
||||
|
||||
ret = ath10k_debug_create(ar);
|
||||
if (ret)
|
||||
goto err_free_aux_wq;
|
||||
|
@ -65,6 +65,10 @@
|
||||
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
|
||||
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
|
||||
|
||||
/* NAPI poll budget */
|
||||
#define ATH10K_NAPI_BUDGET 64
|
||||
#define ATH10K_NAPI_QUOTA_LIMIT 60
|
||||
|
||||
struct ath10k;
|
||||
|
||||
enum ath10k_bus {
|
||||
@ -142,6 +146,7 @@ struct ath10k_wmi {
|
||||
enum ath10k_htc_ep_id eid;
|
||||
struct completion service_ready;
|
||||
struct completion unified_ready;
|
||||
struct completion barrier;
|
||||
wait_queue_head_t tx_credits_wq;
|
||||
DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
|
||||
struct wmi_cmd_map *cmd;
|
||||
@ -440,7 +445,7 @@ struct ath10k_debug {
|
||||
struct completion tpc_complete;
|
||||
|
||||
/* protected by conf_mutex */
|
||||
u32 fw_dbglog_mask;
|
||||
u64 fw_dbglog_mask;
|
||||
u32 fw_dbglog_level;
|
||||
u32 pktlog_filter;
|
||||
u32 reg_addr;
|
||||
@ -551,6 +556,13 @@ enum ath10k_fw_features {
|
||||
*/
|
||||
ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
|
||||
|
||||
/* Older firmware with HTT delivers incorrect tx status for null func
|
||||
* frames to driver, but this fixed in 10.2 and 10.4 firmware versions.
|
||||
* Also this workaround results in reporting of incorrect null func
|
||||
* status for 10.4. This flag is used to skip the workaround.
|
||||
*/
|
||||
ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
|
||||
|
||||
/* keep last */
|
||||
ATH10K_FW_FEATURE_COUNT,
|
||||
};
|
||||
@ -663,6 +675,15 @@ struct ath10k_fw_file {
|
||||
|
||||
const void *codeswap_data;
|
||||
size_t codeswap_len;
|
||||
|
||||
/* The original idea of struct ath10k_fw_file was that it only
|
||||
* contains struct firmware and pointers to various parts (actual
|
||||
* firmware binary, otp, metadata etc) of the file. This seg_info
|
||||
* is actually created separate but as this is used similarly as
|
||||
* the other firmware components it's more convenient to have it
|
||||
* here.
|
||||
*/
|
||||
struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
|
||||
};
|
||||
|
||||
struct ath10k_fw_components {
|
||||
@ -715,53 +736,7 @@ struct ath10k {
|
||||
struct ath10k_htc htc;
|
||||
struct ath10k_htt htt;
|
||||
|
||||
struct ath10k_hw_params {
|
||||
u32 id;
|
||||
u16 dev_id;
|
||||
const char *name;
|
||||
u32 patch_load_addr;
|
||||
int uart_pin;
|
||||
u32 otp_exe_param;
|
||||
|
||||
/* Type of hw cycle counter wraparound logic, for more info
|
||||
* refer enum ath10k_hw_cc_wraparound_type.
|
||||
*/
|
||||
enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
|
||||
|
||||
/* Some of chip expects fragment descriptor to be continuous
|
||||
* memory for any TX operation. Set continuous_frag_desc flag
|
||||
* for the hardware which have such requirement.
|
||||
*/
|
||||
bool continuous_frag_desc;
|
||||
|
||||
/* CCK hardware rate table mapping for the newer chipsets
|
||||
* like QCA99X0, QCA4019 got revised. The CCK h/w rate values
|
||||
* are in a proper order with respect to the rate/preamble
|
||||
*/
|
||||
bool cck_rate_map_rev2;
|
||||
|
||||
u32 channel_counters_freq_hz;
|
||||
|
||||
/* Mgmt tx descriptors threshold for limiting probe response
|
||||
* frames.
|
||||
*/
|
||||
u32 max_probe_resp_desc_thres;
|
||||
|
||||
/* The padding bytes's location is different on various chips */
|
||||
enum ath10k_hw_4addr_pad hw_4addr_pad;
|
||||
|
||||
u32 tx_chain_mask;
|
||||
u32 rx_chain_mask;
|
||||
u32 max_spatial_stream;
|
||||
u32 cal_data_len;
|
||||
|
||||
struct ath10k_hw_params_fw {
|
||||
const char *dir;
|
||||
const char *board;
|
||||
size_t board_size;
|
||||
size_t board_ext_size;
|
||||
} fw;
|
||||
} hw_params;
|
||||
struct ath10k_hw_params hw_params;
|
||||
|
||||
/* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
|
||||
struct ath10k_fw_components normal_mode_fw;
|
||||
@ -774,10 +749,6 @@ struct ath10k {
|
||||
const struct firmware *pre_cal_file;
|
||||
const struct firmware *cal_file;
|
||||
|
||||
struct {
|
||||
struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
|
||||
} swap;
|
||||
|
||||
struct {
|
||||
u32 vendor;
|
||||
u32 device;
|
||||
@ -936,6 +907,10 @@ struct ath10k {
|
||||
struct ath10k_thermal thermal;
|
||||
struct ath10k_wow wow;
|
||||
|
||||
/* NAPI */
|
||||
struct net_device napi_dev;
|
||||
struct napi_struct napi;
|
||||
|
||||
/* must be last */
|
||||
u8 drv_priv[0] __aligned(sizeof(void *));
|
||||
};
|
||||
|
@ -1228,9 +1228,9 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
|
||||
{
|
||||
struct ath10k *ar = file->private_data;
|
||||
unsigned int len;
|
||||
char buf[64];
|
||||
char buf[96];
|
||||
|
||||
len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
|
||||
len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
|
||||
ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
|
||||
|
||||
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
||||
@ -1242,15 +1242,16 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
|
||||
{
|
||||
struct ath10k *ar = file->private_data;
|
||||
int ret;
|
||||
char buf[64];
|
||||
unsigned int log_level, mask;
|
||||
char buf[96];
|
||||
unsigned int log_level;
|
||||
u64 mask;
|
||||
|
||||
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
|
||||
|
||||
/* make sure that buf is null terminated */
|
||||
buf[sizeof(buf) - 1] = 0;
|
||||
|
||||
ret = sscanf(buf, "%x %u", &mask, &log_level);
|
||||
ret = sscanf(buf, "%llx %u", &mask, &log_level);
|
||||
|
||||
if (!ret)
|
||||
return -EINVAL;
|
||||
|
@ -44,7 +44,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
|
||||
skb_cb = ATH10K_SKB_CB(skb);
|
||||
memset(skb_cb, 0, sizeof(*skb_cb));
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
|
||||
return skb;
|
||||
}
|
||||
|
||||
@ -62,7 +62,7 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
|
||||
{
|
||||
struct ath10k *ar = ep->htc->ar;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
|
||||
ep->eid, skb);
|
||||
|
||||
ath10k_htc_restore_tx_skb(ep->htc, skb);
|
||||
@ -404,7 +404,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
|
||||
eid, skb);
|
||||
ep->ep_ops.ep_rx_complete(ar, skb);
|
||||
|
||||
|
@ -1665,7 +1665,6 @@ struct ath10k_htt {
|
||||
|
||||
/* This is used to group tx/rx completions separately and process them
|
||||
* in batches to reduce cache stalls */
|
||||
struct tasklet_struct txrx_compl_task;
|
||||
struct sk_buff_head rx_compl_q;
|
||||
struct sk_buff_head rx_in_ord_compl_q;
|
||||
struct sk_buff_head tx_fetch_ind_q;
|
||||
@ -1798,5 +1797,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt,
|
||||
struct sk_buff *msdu);
|
||||
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
|
||||
struct sk_buff *skb);
|
||||
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
|
||||
|
||||
#endif
|
||||
|
@ -34,7 +34,6 @@
|
||||
#define HTT_RX_RING_REFILL_RESCHED_MS 5
|
||||
|
||||
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
|
||||
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
|
||||
|
||||
static struct sk_buff *
|
||||
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
|
||||
@ -226,7 +225,6 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
|
||||
void ath10k_htt_rx_free(struct ath10k_htt *htt)
|
||||
{
|
||||
del_timer_sync(&htt->rx_ring.refill_retry_timer);
|
||||
tasklet_kill(&htt->txrx_compl_task);
|
||||
|
||||
skb_queue_purge(&htt->rx_compl_q);
|
||||
skb_queue_purge(&htt->rx_in_ord_compl_q);
|
||||
@ -520,9 +518,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
|
||||
skb_queue_head_init(&htt->tx_fetch_ind_q);
|
||||
atomic_set(&htt->num_mpdus_ready, 0);
|
||||
|
||||
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
|
||||
(unsigned long)htt);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
|
||||
htt->rx_ring.size, htt->rx_ring.fill_level);
|
||||
return 0;
|
||||
@ -931,7 +926,7 @@ static void ath10k_process_rx(struct ath10k *ar,
|
||||
*status = *rx_status;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_DATA,
|
||||
"rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
|
||||
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
|
||||
skb,
|
||||
skb->len,
|
||||
ieee80211_get_SA(hdr),
|
||||
@ -958,7 +953,7 @@ static void ath10k_process_rx(struct ath10k *ar,
|
||||
trace_ath10k_rx_hdr(ar, skb->data, skb->len);
|
||||
trace_ath10k_rx_payload(ar, skb->data, skb->len);
|
||||
|
||||
ieee80211_rx(ar->hw, skb);
|
||||
ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
|
||||
}
|
||||
|
||||
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
|
||||
@ -1056,9 +1051,11 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
|
||||
const u8 first_hdr[64])
|
||||
{
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct htt_rx_desc *rxd;
|
||||
size_t hdr_len;
|
||||
u8 da[ETH_ALEN];
|
||||
u8 sa[ETH_ALEN];
|
||||
int l3_pad_bytes;
|
||||
|
||||
/* Delivered decapped frame:
|
||||
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
|
||||
@ -1072,19 +1069,12 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
|
||||
*/
|
||||
|
||||
/* pull decapped header and copy SA & DA */
|
||||
if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
|
||||
ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
|
||||
/* The QCA99X0 4 address mode pad 2 bytes at the
|
||||
* beginning of MSDU
|
||||
*/
|
||||
hdr = (struct ieee80211_hdr *)(msdu->data + 2);
|
||||
/* The skb length need be extended 2 as the 2 bytes at the tail
|
||||
* be excluded due to the padding
|
||||
*/
|
||||
skb_put(msdu, 2);
|
||||
} else {
|
||||
hdr = (struct ieee80211_hdr *)(msdu->data);
|
||||
}
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
|
||||
l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
|
||||
skb_put(msdu, l3_pad_bytes);
|
||||
|
||||
hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
|
||||
|
||||
hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
|
||||
ether_addr_copy(da, ieee80211_get_DA(hdr));
|
||||
@ -1151,6 +1141,8 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
|
||||
void *rfc1042;
|
||||
u8 da[ETH_ALEN];
|
||||
u8 sa[ETH_ALEN];
|
||||
int l3_pad_bytes;
|
||||
struct htt_rx_desc *rxd;
|
||||
|
||||
/* Delivered decapped frame:
|
||||
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
|
||||
@ -1161,6 +1153,11 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
|
||||
if (WARN_ON_ONCE(!rfc1042))
|
||||
return;
|
||||
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
|
||||
skb_put(msdu, l3_pad_bytes);
|
||||
skb_pull(msdu, l3_pad_bytes);
|
||||
|
||||
/* pull decapped header and copy SA & DA */
|
||||
eth = (struct ethhdr *)msdu->data;
|
||||
ether_addr_copy(da, eth->h_dest);
|
||||
@ -1191,6 +1188,8 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
|
||||
{
|
||||
struct ieee80211_hdr *hdr;
|
||||
size_t hdr_len;
|
||||
int l3_pad_bytes;
|
||||
struct htt_rx_desc *rxd;
|
||||
|
||||
/* Delivered decapped frame:
|
||||
* [amsdu header] <-- replaced with 802.11 hdr
|
||||
@ -1198,7 +1197,11 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
|
||||
* [payload]
|
||||
*/
|
||||
|
||||
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
|
||||
|
||||
skb_put(msdu, l3_pad_bytes);
|
||||
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
|
||||
|
||||
hdr = (struct ieee80211_hdr *)first_hdr;
|
||||
hdr_len = ieee80211_hdrlen(hdr->frame_control);
|
||||
@ -1527,7 +1530,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
|
||||
struct ath10k *ar = htt->ar;
|
||||
struct ieee80211_rx_status *rx_status = &htt->rx_status;
|
||||
struct sk_buff_head amsdu;
|
||||
int ret;
|
||||
int ret, num_msdus;
|
||||
|
||||
__skb_queue_head_init(&amsdu);
|
||||
|
||||
@ -1549,13 +1552,14 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
|
||||
return ret;
|
||||
}
|
||||
|
||||
num_msdus = skb_queue_len(&amsdu);
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
|
||||
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
|
||||
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
|
||||
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
|
||||
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
|
||||
|
||||
return 0;
|
||||
return num_msdus;
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
|
||||
@ -1579,15 +1583,6 @@ static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
|
||||
mpdu_count += mpdu_ranges[i].mpdu_count;
|
||||
|
||||
atomic_add(mpdu_count, &htt->num_mpdus_ready);
|
||||
|
||||
tasklet_schedule(&htt->txrx_compl_task);
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
|
||||
{
|
||||
atomic_inc(&htt->num_mpdus_ready);
|
||||
|
||||
tasklet_schedule(&htt->txrx_compl_task);
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
|
||||
@ -1772,7 +1767,7 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
|
||||
RX_FLAG_MMIC_STRIPPED;
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
|
||||
static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
|
||||
struct sk_buff_head *list)
|
||||
{
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
@ -1780,6 +1775,7 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
|
||||
struct htt_rx_offload_msdu *rx;
|
||||
struct sk_buff *msdu;
|
||||
size_t offset;
|
||||
int num_msdu = 0;
|
||||
|
||||
while ((msdu = __skb_dequeue(list))) {
|
||||
/* Offloaded frames don't have Rx descriptor. Instead they have
|
||||
@ -1819,10 +1815,12 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
|
||||
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
|
||||
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
|
||||
ath10k_process_rx(ar, status, msdu);
|
||||
num_msdu++;
|
||||
}
|
||||
return num_msdu;
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
struct htt_resp *resp = (void *)skb->data;
|
||||
@ -1835,12 +1833,12 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
u8 tid;
|
||||
bool offload;
|
||||
bool frag;
|
||||
int ret;
|
||||
int ret, num_msdus = 0;
|
||||
|
||||
lockdep_assert_held(&htt->rx_ring.lock);
|
||||
|
||||
if (htt->rx_confused)
|
||||
return;
|
||||
return -EIO;
|
||||
|
||||
skb_pull(skb, sizeof(resp->hdr));
|
||||
skb_pull(skb, sizeof(resp->rx_in_ord_ind));
|
||||
@ -1859,7 +1857,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
|
||||
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
|
||||
ath10k_warn(ar, "dropping invalid in order rx indication\n");
|
||||
return;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
|
||||
@ -1870,14 +1868,14 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
if (ret < 0) {
|
||||
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
|
||||
htt->rx_confused = true;
|
||||
return;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Offloaded frames are very different and need to be handled
|
||||
* separately.
|
||||
*/
|
||||
if (offload)
|
||||
ath10k_htt_rx_h_rx_offload(ar, &list);
|
||||
num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
|
||||
|
||||
while (!skb_queue_empty(&list)) {
|
||||
__skb_queue_head_init(&amsdu);
|
||||
@ -1890,6 +1888,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
* better to report something than nothing though. This
|
||||
* should still give an idea about rx rate to the user.
|
||||
*/
|
||||
num_msdus += skb_queue_len(&amsdu);
|
||||
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
|
||||
ath10k_htt_rx_h_filter(ar, &amsdu, status);
|
||||
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
|
||||
@ -1902,9 +1901,10 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
|
||||
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
|
||||
htt->rx_confused = true;
|
||||
__skb_queue_purge(&list);
|
||||
return;
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
return num_msdus;
|
||||
}
|
||||
|
||||
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
|
||||
@ -2267,7 +2267,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
|
||||
ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
|
||||
tasklet_schedule(&htt->txrx_compl_task);
|
||||
break;
|
||||
case HTT_T2H_MSG_TYPE_SEC_IND: {
|
||||
struct ath10k *ar = htt->ar;
|
||||
@ -2284,7 +2283,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
|
||||
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
|
||||
skb->data, skb->len);
|
||||
ath10k_htt_rx_frag_handler(htt);
|
||||
atomic_inc(&htt->num_mpdus_ready);
|
||||
break;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_TEST:
|
||||
@ -2320,8 +2319,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
break;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
|
||||
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
|
||||
tasklet_schedule(&htt->txrx_compl_task);
|
||||
__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
|
||||
return false;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
|
||||
@ -2347,7 +2345,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
break;
|
||||
}
|
||||
skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
|
||||
tasklet_schedule(&htt->txrx_compl_task);
|
||||
break;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
|
||||
@ -2376,27 +2373,77 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
|
||||
}
|
||||
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
|
||||
|
||||
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
|
||||
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
|
||||
{
|
||||
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
|
||||
struct ath10k *ar = htt->ar;
|
||||
struct ath10k_htt *htt = &ar->htt;
|
||||
struct htt_tx_done tx_done = {};
|
||||
struct sk_buff_head rx_ind_q;
|
||||
struct sk_buff_head tx_ind_q;
|
||||
struct sk_buff *skb;
|
||||
unsigned long flags;
|
||||
int num_mpdus;
|
||||
int quota = 0, done, num_rx_msdus;
|
||||
bool resched_napi = false;
|
||||
|
||||
__skb_queue_head_init(&rx_ind_q);
|
||||
__skb_queue_head_init(&tx_ind_q);
|
||||
|
||||
spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
|
||||
skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
|
||||
spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
|
||||
/* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
|
||||
* process it first to utilize full available quota.
|
||||
*/
|
||||
while (quota < budget) {
|
||||
if (skb_queue_empty(&htt->rx_in_ord_compl_q))
|
||||
break;
|
||||
|
||||
spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
|
||||
skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
|
||||
spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
|
||||
skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
|
||||
if (!skb) {
|
||||
resched_napi = true;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
spin_lock_bh(&htt->rx_ring.lock);
|
||||
num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
|
||||
spin_unlock_bh(&htt->rx_ring.lock);
|
||||
if (num_rx_msdus < 0) {
|
||||
resched_napi = true;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
dev_kfree_skb_any(skb);
|
||||
if (num_rx_msdus > 0)
|
||||
quota += num_rx_msdus;
|
||||
|
||||
if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
|
||||
!skb_queue_empty(&htt->rx_in_ord_compl_q)) {
|
||||
resched_napi = true;
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
while (quota < budget) {
|
||||
/* no more data to receive */
|
||||
if (!atomic_read(&htt->num_mpdus_ready))
|
||||
break;
|
||||
|
||||
num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
|
||||
if (num_rx_msdus < 0) {
|
||||
resched_napi = true;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
quota += num_rx_msdus;
|
||||
atomic_dec(&htt->num_mpdus_ready);
|
||||
if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
|
||||
atomic_read(&htt->num_mpdus_ready)) {
|
||||
resched_napi = true;
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
/* From NAPI documentation:
|
||||
* The napi poll() function may also process TX completions, in which
|
||||
* case if it processes the entire TX ring then it should count that
|
||||
* work as the rest of the budget.
|
||||
*/
|
||||
if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
|
||||
quota = budget;
|
||||
|
||||
/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
|
||||
* From kfifo_get() documentation:
|
||||
@ -2406,27 +2453,24 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
|
||||
while (kfifo_get(&htt->txdone_fifo, &tx_done))
|
||||
ath10k_txrx_tx_unref(htt, &tx_done);
|
||||
|
||||
ath10k_mac_tx_push_pending(ar);
|
||||
|
||||
spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
|
||||
skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
|
||||
spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
|
||||
|
||||
while ((skb = __skb_dequeue(&tx_ind_q))) {
|
||||
ath10k_htt_rx_tx_fetch_ind(ar, skb);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
|
||||
num_mpdus = atomic_read(&htt->num_mpdus_ready);
|
||||
|
||||
while (num_mpdus) {
|
||||
if (ath10k_htt_rx_handle_amsdu(htt))
|
||||
break;
|
||||
|
||||
num_mpdus--;
|
||||
atomic_dec(&htt->num_mpdus_ready);
|
||||
}
|
||||
|
||||
while ((skb = __skb_dequeue(&rx_ind_q))) {
|
||||
spin_lock_bh(&htt->rx_ring.lock);
|
||||
ath10k_htt_rx_in_ord_ind(ar, skb);
|
||||
spin_unlock_bh(&htt->rx_ring.lock);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
|
||||
exit:
|
||||
ath10k_htt_rx_msdu_buff_replenish(htt);
|
||||
/* In case of rx failure or more data to read, report budget
|
||||
* to reschedule NAPI poll
|
||||
*/
|
||||
done = resched_napi ? budget : quota;
|
||||
|
||||
return done;
|
||||
}
|
||||
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
|
||||
|
@ -390,8 +390,6 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
|
||||
{
|
||||
int size;
|
||||
|
||||
tasklet_kill(&htt->txrx_compl_task);
|
||||
|
||||
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
|
||||
idr_destroy(&htt->pending_tx);
|
||||
|
||||
|
@ -219,3 +219,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
|
||||
survey->time = CCNT_TO_MSEC(ar, cc);
|
||||
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
|
||||
}
|
||||
|
||||
const struct ath10k_hw_ops qca988x_ops = {
|
||||
};
|
||||
|
||||
static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
|
||||
{
|
||||
return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
|
||||
RX_MSDU_END_INFO1_L3_HDR_PAD);
|
||||
}
|
||||
|
||||
const struct ath10k_hw_ops qca99x0_ops = {
|
||||
.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
|
||||
};
|
||||
|
@ -338,11 +338,6 @@ enum ath10k_hw_rate_rev2_cck {
|
||||
ATH10K_HW_RATE_REV2_CCK_SP_11M,
|
||||
};
|
||||
|
||||
enum ath10k_hw_4addr_pad {
|
||||
ATH10K_HW_4ADDR_PAD_AFTER,
|
||||
ATH10K_HW_4ADDR_PAD_BEFORE,
|
||||
};
|
||||
|
||||
enum ath10k_hw_cc_wraparound_type {
|
||||
ATH10K_HW_CC_WRAP_DISABLED = 0,
|
||||
|
||||
@ -363,6 +358,77 @@ enum ath10k_hw_cc_wraparound_type {
|
||||
ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
|
||||
};
|
||||
|
||||
struct ath10k_hw_params {
|
||||
u32 id;
|
||||
u16 dev_id;
|
||||
const char *name;
|
||||
u32 patch_load_addr;
|
||||
int uart_pin;
|
||||
u32 otp_exe_param;
|
||||
|
||||
/* Type of hw cycle counter wraparound logic, for more info
|
||||
* refer enum ath10k_hw_cc_wraparound_type.
|
||||
*/
|
||||
enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
|
||||
|
||||
/* Some of chip expects fragment descriptor to be continuous
|
||||
* memory for any TX operation. Set continuous_frag_desc flag
|
||||
* for the hardware which have such requirement.
|
||||
*/
|
||||
bool continuous_frag_desc;
|
||||
|
||||
/* CCK hardware rate table mapping for the newer chipsets
|
||||
* like QCA99X0, QCA4019 got revised. The CCK h/w rate values
|
||||
* are in a proper order with respect to the rate/preamble
|
||||
*/
|
||||
bool cck_rate_map_rev2;
|
||||
|
||||
u32 channel_counters_freq_hz;
|
||||
|
||||
/* Mgmt tx descriptors threshold for limiting probe response
|
||||
* frames.
|
||||
*/
|
||||
u32 max_probe_resp_desc_thres;
|
||||
|
||||
u32 tx_chain_mask;
|
||||
u32 rx_chain_mask;
|
||||
u32 max_spatial_stream;
|
||||
u32 cal_data_len;
|
||||
|
||||
struct ath10k_hw_params_fw {
|
||||
const char *dir;
|
||||
const char *board;
|
||||
size_t board_size;
|
||||
size_t board_ext_size;
|
||||
} fw;
|
||||
|
||||
/* qca99x0 family chips deliver broadcast/multicast management
|
||||
* frames encrypted and expect software do decryption.
|
||||
*/
|
||||
bool sw_decrypt_mcast_mgmt;
|
||||
|
||||
const struct ath10k_hw_ops *hw_ops;
|
||||
};
|
||||
|
||||
struct htt_rx_desc;
|
||||
|
||||
/* Defines needed for Rx descriptor abstraction */
|
||||
struct ath10k_hw_ops {
|
||||
int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
|
||||
};
|
||||
|
||||
extern const struct ath10k_hw_ops qca988x_ops;
|
||||
extern const struct ath10k_hw_ops qca99x0_ops;
|
||||
|
||||
static inline int
|
||||
ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
|
||||
struct htt_rx_desc *rxd)
|
||||
{
|
||||
if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
|
||||
return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Target specific defines for MAIN firmware */
|
||||
#define TARGET_NUM_VDEVS 8
|
||||
#define TARGET_NUM_PEER_AST 2
|
||||
|
@ -824,7 +824,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
|
||||
*/
|
||||
for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
|
||||
if (ar->peer_map[i] == peer) {
|
||||
ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
|
||||
ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
|
||||
peer->addr, peer, i);
|
||||
ar->peer_map[i] = NULL;
|
||||
}
|
||||
@ -3255,6 +3255,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
|
||||
if (ar->htt.target_version_major < 3 &&
|
||||
(ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
|
||||
!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
|
||||
ar->running_fw->fw_file.fw_features) &&
|
||||
!test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
|
||||
ar->running_fw->fw_file.fw_features))
|
||||
return ATH10K_HW_TXRX_MGMT;
|
||||
|
||||
@ -3524,7 +3526,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
|
||||
if (!ath10k_mac_tx_frm_has_freq(ar)) {
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
|
||||
skb);
|
||||
|
||||
skb_queue_tail(&ar->offchan_tx_queue, skb);
|
||||
@ -3586,7 +3588,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
|
||||
skb);
|
||||
|
||||
hdr = (struct ieee80211_hdr *)skb->data;
|
||||
@ -3643,7 +3645,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
|
||||
time_left =
|
||||
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
|
||||
if (time_left == 0)
|
||||
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
|
||||
ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
|
||||
skb);
|
||||
|
||||
if (!peer && tmp_peer_created) {
|
||||
@ -3777,7 +3779,9 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
|
||||
enum ath10k_hw_txrx_mode txmode;
|
||||
enum ath10k_mac_tx_path txpath;
|
||||
struct sk_buff *skb;
|
||||
struct ieee80211_hdr *hdr;
|
||||
size_t skb_len;
|
||||
bool is_mgmt, is_presp;
|
||||
int ret;
|
||||
|
||||
spin_lock_bh(&ar->htt.tx_lock);
|
||||
@ -3801,6 +3805,22 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
|
||||
skb_len = skb->len;
|
||||
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
|
||||
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
|
||||
is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
|
||||
|
||||
if (is_mgmt) {
|
||||
hdr = (struct ieee80211_hdr *)skb->data;
|
||||
is_presp = ieee80211_is_probe_resp(hdr->frame_control);
|
||||
|
||||
spin_lock_bh(&ar->htt.tx_lock);
|
||||
ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
|
||||
|
||||
if (ret) {
|
||||
ath10k_htt_tx_dec_pending(htt);
|
||||
spin_unlock_bh(&ar->htt.tx_lock);
|
||||
return ret;
|
||||
}
|
||||
spin_unlock_bh(&ar->htt.tx_lock);
|
||||
}
|
||||
|
||||
ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
|
||||
if (unlikely(ret)) {
|
||||
@ -3808,6 +3828,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
|
||||
|
||||
spin_lock_bh(&ar->htt.tx_lock);
|
||||
ath10k_htt_tx_dec_pending(htt);
|
||||
if (is_mgmt)
|
||||
ath10k_htt_tx_mgmt_dec_pending(htt);
|
||||
spin_unlock_bh(&ar->htt.tx_lock);
|
||||
|
||||
return ret;
|
||||
@ -3894,7 +3916,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
|
||||
ar->scan.roc_freq = 0;
ath10k_offchan_tx_purge(ar);
cancel_delayed_work(&ar->scan.timeout);
complete_all(&ar->scan.completed);
complete(&ar->scan.completed);
break;
}
}
@ -4100,13 +4122,29 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_txq *artxq = (void *)txq->drv_priv;
struct ieee80211_txq *f_txq;
struct ath10k_txq *f_artxq;
int ret = 0;
int max = 16;

spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
list_add_tail(&artxq->list, &ar->txqs);

f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
list_del_init(&f_artxq->list);

while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, f_txq);
if (ret)
break;
}
if (ret != -ENOENT)
list_add_tail(&f_artxq->list, &ar->txqs);
spin_unlock_bh(&ar->txqs_lock);

ath10k_mac_tx_push_pending(ar);
ath10k_htt_tx_txq_update(hw, f_txq);
ath10k_htt_tx_txq_update(hw, txq);
}

@ -5186,7 +5224,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,

ret = ath10k_monitor_recalc(ar);
if (ret)
ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);

mutex_unlock(&ar->conf_mutex);
}
@ -5984,8 +6022,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);

ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
@ -6001,7 +6039,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
continue;

if (peer->sta == sta) {
ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
sta->addr, peer, i, arvif->vdev_id);
peer->sta = NULL;

@ -6538,7 +6576,7 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
goto exit;
}

ath10k_mac_update_bss_chan_survey(ar, survey->channel);
ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);

spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
@ -7134,7 +7172,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;

ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx add freq %hu width %d ptr %p\n",
"mac chanctx add freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);

mutex_lock(&ar->conf_mutex);
@ -7158,7 +7196,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;

ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx remove freq %hu width %d ptr %p\n",
"mac chanctx remove freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);

mutex_lock(&ar->conf_mutex);
@ -7223,7 +7261,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);

ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx change freq %hu width %d ptr %p changed %x\n",
"mac chanctx change freq %hu width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);

/* This shouldn't really happen because channel switching should use
@ -7281,7 +7319,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);

ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx assign ptr %p vdev_id %i\n",
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);

if (WARN_ON(arvif->is_started)) {
@ -7342,7 +7380,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);

ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx unassign ptr %p vdev_id %i\n",
"mac chanctx unassign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);

WARN_ON(!arvif->is_started);

@ -1506,12 +1506,10 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
ath10k_ce_per_engine_service(ar, pipe);
}

void ath10k_pci_kill_tasklet(struct ath10k *ar)
static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

tasklet_kill(&ar_pci->intr_tq);

del_timer_sync(&ar_pci->rx_post_retry);
}

@ -1570,7 +1568,7 @@ void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
ul_pipe, dl_pipe);
}

static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
u32 val;

@ -1693,14 +1691,12 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
struct sk_buff *skb;
int i;

ar = pci_pipe->hif_ce_state;
ar_pci = ath10k_pci_priv(ar);
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;

@ -1753,7 +1749,7 @@ void ath10k_pci_ce_deinit(struct ath10k *ar)

void ath10k_pci_flush(struct ath10k *ar)
{
ath10k_pci_kill_tasklet(ar);
ath10k_pci_rx_retry_sync(ar);
ath10k_pci_buffer_cleanup(ar);
}

@ -1780,6 +1776,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);

spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
@ -2533,6 +2531,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
napi_enable(&ar->napi);

return 0;

@ -2725,7 +2724,7 @@ static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
return 0;

err_free:
kfree(data);
kfree(caldata);

return -EINVAL;
}
@ -2772,35 +2771,53 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}

if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
if (!ath10k_pci_irq_pending(ar))
if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
!ath10k_pci_irq_pending(ar))
return IRQ_NONE;

ath10k_pci_disable_and_clear_legacy_irq(ar);
}

tasklet_schedule(&ar_pci->intr_tq);
ath10k_pci_irq_msi_fw_mask(ar);
napi_schedule(&ar->napi);

return IRQ_HANDLED;
}

static void ath10k_pci_tasklet(unsigned long data)
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
struct ath10k *ar = (struct ath10k *)data;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
int done = 0;

if (ath10k_pci_has_fw_crashed(ar)) {
ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
return;
napi_complete(ctx);
return done;
}

ath10k_ce_per_engine_service_any(ar);

/* Re-enable legacy irq that was disabled in the irq handler */
if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
done = ath10k_htt_txrx_compl_task(ar, budget);

if (done < budget) {
napi_complete(ctx);
/* In case of MSI, it is possible that interrupts are received
* while NAPI poll is inprogress. So pending interrupts that are
* received after processing all copy engine pipes by NAPI poll
* will not be handled again. This is causing failure to
* complete boot sequence in x86 platform. So before enabling
* interrupts safer to check for pending interrupts for
* immediate servicing.
*/
if (CE_INTERRUPT_SUMMARY(ar)) {
napi_reschedule(ctx);
goto out;
}
ath10k_pci_enable_legacy_irq(ar);
ath10k_pci_irq_msi_fw_unmask(ar);
}

out:
return done;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
@ -2858,11 +2875,10 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
free_irq(ar_pci->pdev->irq, ar);
}

void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
void ath10k_pci_init_napi(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
ATH10K_NAPI_BUDGET);
}

static int ath10k_pci_init_irq(struct ath10k *ar)
@ -2870,7 +2886,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;

ath10k_pci_init_irq_tasklets(ar);
ath10k_pci_init_napi(ar);

if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
ath10k_info(ar, "limiting irq mode to: %d\n",
@ -3062,7 +3078,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
goto err_master;
}

ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
return 0;

err_master:
@ -3131,7 +3147,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)

void ath10k_pci_release_resource(struct ath10k *ar)
{
ath10k_pci_kill_tasklet(ar);
ath10k_pci_rx_retry_sync(ar);
netif_napi_del(&ar->napi);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
}
@ -3297,7 +3314,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,

err_free_irq:
ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_rx_retry_sync(ar);

err_deinit_irq:
ath10k_pci_deinit_irq(ar);

@ -177,8 +177,6 @@ struct ath10k_pci {
/* Operating interrupt mode */
enum ath10k_pci_irq_mode oper_irq_mode;

struct tasklet_struct intr_tq;

struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];

/* Copy Engine used for Diagnostic Accesses */
@ -294,8 +292,7 @@ void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_rx_replenish_retry(unsigned long ptr);
void ath10k_pci_ce_deinit(struct ath10k *ar);
void ath10k_pci_init_irq_tasklets(struct ath10k *ar);
void ath10k_pci_kill_tasklet(struct ath10k *ar);
void ath10k_pci_init_napi(struct ath10k *ar);
int ath10k_pci_init_pipes(struct ath10k *ar);
int ath10k_pci_init_config(struct ath10k *ar);
void ath10k_pci_rx_post(struct ath10k *ar);
@ -303,6 +300,7 @@ void ath10k_pci_flush(struct ath10k *ar);
void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
bool ath10k_pci_irq_pending(struct ath10k *ar);
void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
int ath10k_pci_wait_for_target_init(struct ath10k *ar);
int ath10k_pci_setup_resource(struct ath10k *ar);
void ath10k_pci_release_resource(struct ath10k *ar);

@ -134,17 +134,18 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
return seg_info;
}

int ath10k_swap_code_seg_configure(struct ath10k *ar)
int ath10k_swap_code_seg_configure(struct ath10k *ar,
const struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;

if (!ar->swap.firmware_swap_code_seg_info)
if (!fw_file->firmware_swap_code_seg_info)
return 0;

ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");

seg_info = ar->swap.firmware_swap_code_seg_info;
seg_info = fw_file->firmware_swap_code_seg_info;

ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
@ -158,28 +159,29 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar)
return 0;
}

void ath10k_swap_code_seg_release(struct ath10k *ar)
void ath10k_swap_code_seg_release(struct ath10k *ar,
struct ath10k_fw_file *fw_file)
{
ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);

/* FIXME: these two assignments look to bein wrong place! Shouldn't
* they be in ath10k_core_free_firmware_files() like the rest?
*/
ar->normal_mode_fw.fw_file.codeswap_data = NULL;
ar->normal_mode_fw.fw_file.codeswap_len = 0;
fw_file->codeswap_data = NULL;
fw_file->codeswap_len = 0;

ar->swap.firmware_swap_code_seg_info = NULL;
fw_file->firmware_swap_code_seg_info = NULL;
}

int ath10k_swap_code_seg_init(struct ath10k *ar)
int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
const void *codeswap_data;
size_t codeswap_len;

codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
codeswap_data = fw_file->codeswap_data;
codeswap_len = fw_file->codeswap_len;

if (!codeswap_len || !codeswap_data)
return 0;
@ -200,7 +202,7 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
return ret;
}

ar->swap.firmware_swap_code_seg_info = seg_info;
fw_file->firmware_swap_code_seg_info = seg_info;

return 0;
}

@ -23,6 +23,8 @@
/* Currently only one swap segment is supported */
#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1

struct ath10k_fw_file;

struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
@ -58,8 +60,11 @@ struct ath10k_swap_code_seg_info {
dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
};

int ath10k_swap_code_seg_configure(struct ath10k *ar);
void ath10k_swap_code_seg_release(struct ath10k *ar);
int ath10k_swap_code_seg_init(struct ath10k *ar);
int ath10k_swap_code_seg_configure(struct ath10k *ar,
const struct ath10k_fw_file *fw_file);
void ath10k_swap_code_seg_release(struct ath10k *ar,
struct ath10k_fw_file *fw_file);
int ath10k_swap_code_seg_init(struct ath10k *ar,
struct ath10k_fw_file *fw_file);

#endif

@ -23,6 +23,7 @@
#include "wmi.h"
#include "hif.h"
#include "hw.h"
#include "core.h"

#include "testmode_i.h"

@ -45,7 +46,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
int ret;

ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode event wmi cmd_id %d skb %p skb->len %d\n",
"testmode event wmi cmd_id %d skb %pK skb->len %d\n",
cmd_id, skb, skb->len);

ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@ -240,6 +241,18 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}

if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
ret = ath10k_swap_code_seg_init(ar,
&ar->testmode.utf_mode_fw.fw_file);
if (ret) {
ath10k_warn(ar,
"failed to init utf code swap segment: %d\n",
ret);
goto err_release_utf_mode_fw;
}
}

spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
@ -279,6 +292,11 @@ err_power_down:
ath10k_hif_power_down(ar);

err_release_utf_mode_fw:
if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
ar->testmode.utf_mode_fw.fw_file.codeswap_len)
ath10k_swap_code_seg_release(ar,
&ar->testmode.utf_mode_fw.fw_file);

release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;

@ -301,6 +319,11 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)

spin_unlock_bh(&ar->data_lock);

if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
ar->testmode.utf_mode_fw.fw_file.codeswap_len)
ath10k_swap_code_seg_release(ar,
&ar->testmode.utf_mode_fw.fw_file);

release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;

@ -360,7 +383,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);

ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
"testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);

ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);

@ -192,7 +192,7 @@ int ath10k_thermal_register(struct ath10k *ar)

/* Avoid linking error on devm_hwmon_device_register_with_groups, I
* guess linux/hwmon.h is missing proper stubs. */
if (!config_enabled(CONFIG_HWMON))
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;

hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,

@ -44,7 +44,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */

ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
@ -119,8 +119,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */

ath10k_mac_tx_push_pending(ar);

return 0;
}

@ -51,6 +51,8 @@ struct wmi_ops {
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_echo_ev_arg *arg);
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@ -123,7 +125,7 @@ struct wmi_ops {
enum wmi_force_fw_hang_type type,
u32 delay_ms);
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
@ -194,6 +196,7 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@ -349,6 +352,16 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_echo_ev_arg *arg)
{
if (!ar->wmi.ops->pull_echo_ev)
return -EOPNOTSUPP;

return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
@ -932,7 +945,7 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
struct sk_buff *skb;

@ -1382,4 +1395,20 @@ ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
struct ath10k_wmi *wmi = &ar->wmi;
struct sk_buff *skb;

if (!wmi->ops->gen_echo)
return -EOPNOTSUPP;

skb = wmi->ops->gen_echo(ar, value);
if (IS_ERR(skb))
return PTR_ERR(skb);

return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

#endif

@ -1223,6 +1223,33 @@ ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}

static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_echo_ev_arg *arg)
{
const void **tb;
const struct wmi_echo_event *ev;
int ret;

tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}

ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
if (!ev) {
kfree(tb);
return -EPROTO;
}

arg->value = ev->value;

kfree(tb);
return 0;
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
@ -2441,7 +2468,7 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level) {
struct wmi_tlv_dbglog_cmd *cmd;
struct wmi_tlv *tlv;
@ -3081,6 +3108,34 @@ ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
return skb;
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
{
struct wmi_echo_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;

len = sizeof(*tlv) + sizeof(*cmd);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);

ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->value = cpu_to_le32(value);

ptr += sizeof(*tlv);
ptr += sizeof(*cmd);

ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
return skb;
}

/****************/
/* TLV mappings */
/****************/
@ -3429,6 +3484,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,

.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
@ -3485,6 +3541,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_tlv_op_gen_echo,
};

static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {

@ -29,6 +29,9 @@
#include "p2p.h"
#include "hw.h"

#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)

/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
.init_cmdid = WMI_INIT_CMDID,
@ -1874,7 +1877,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
memcpy(cmd->buf, msdu->data, msdu->len);

ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@ -2240,6 +2243,29 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
return 0;
}

static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
struct ieee80211_hdr *hdr)
{
if (!ieee80211_has_protected(hdr->frame_control))
return false;

/* FW delivers WEP Shared Auth frame with Protected Bit set and
* encrypted payload. However in case of PMF it delivers decrypted
* frames with Protected Bit set.
*/
if (ieee80211_is_auth(hdr->frame_control))
return false;

/* qca99x0 based FW delivers broadcast or multicast management frames
* (ex: group privacy action frames in mesh) as encrypted payload.
*/
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
ar->hw_params.sw_decrypt_mcast_mgmt)
return false;

return true;
}

int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@ -2326,11 +2352,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)

ath10k_wmi_handle_wep_reauth(ar, skb, status);

/* FW delivers WEP Shared Auth frame with Protected Bit set and
* encrypted payload. However in case of PMF it delivers decrypted
* frames with Protected Bit set. */
if (ieee80211_has_protected(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control)) {
if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
status->flag |= RX_FLAG_DECRYPTED;

if (!ieee80211_is_action(hdr->frame_control) &&
@ -2347,7 +2369,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);

ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

@ -2495,7 +2517,21 @@ exit:

void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
struct wmi_echo_ev_arg arg = {};
int ret;

ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse echo: %d\n", ret);
return;
}

ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event echo value 0x%08x\n",
le32_to_cpu(arg.value));

if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
complete(&ar->wmi.barrier);
}

int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
@ -3527,7 +3563,6 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to map beacon: %d\n",
ret);
dev_kfree_skb_any(bcn);
ret = -EIO;
goto skip;
}

@ -4792,6 +4827,17 @@ static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}

static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_echo_ev_arg *arg)
{
struct wmi_echo_event *ev = (void *)skb->data;

arg->value = ev->value;

return 0;
}

int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
@ -5124,6 +5170,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_2_event_id id;
bool consumed;

cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@ -5133,6 +5180,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)

trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

consumed = ath10k_tm_event_wmi(ar, id, skb);

/* Ready event must be handled normally also in UTF mode so that we
* know the UTF firmware has booted, others we are just bypass WMI
* events to testmode.
*/
if (consumed && id != WMI_10_2_READY_EVENTID) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi testmode consumed 0x%x\n", id);
goto out;
}

switch (id) {
case WMI_10_2_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@ -5248,6 +5307,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_4_event_id id;
bool consumed;

cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@ -5257,6 +5317,18 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)

trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

consumed = ath10k_tm_event_wmi(ar, id, skb);

/* Ready event must be handled normally also in UTF mode so that we
* know the UTF firmware has booted, others we are just bypass WMI
* events to testmode.
*/
if (consumed && id != WMI_10_4_READY_EVENTID) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi testmode consumed 0x%x\n", id);
goto out;
}

switch (id) {
case WMI_10_4_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@ -5306,6 +5378,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
case WMI_10_4_WDS_PEER_EVENTID:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
@ -6863,7 +6936,7 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
}

static struct sk_buff *
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level)
{
struct wmi_dbglog_cfg_cmd *cmd;
@ -6900,6 +6973,44 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
return skb;
}

static struct sk_buff *
ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level)
{
struct wmi_10_4_dbglog_cfg_cmd *cmd;
struct sk_buff *skb;
u32 cfg;

skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);

cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;

if (module_enable) {
cfg = SM(log_level,
ATH10K_DBGLOG_CFG_LOG_LVL);
} else {
/* set back defaults, all modules with WARN level */
cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
ATH10K_DBGLOG_CFG_LOG_LVL);
module_enable = ~0;
}

cmd->module_enable = __cpu_to_le64(module_enable);
cmd->module_valid = __cpu_to_le64(~0);
cmd->config_enable = __cpu_to_le32(cfg);
cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);

ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
__le64_to_cpu(cmd->module_enable),
__le64_to_cpu(cmd->module_valid),
__le32_to_cpu(cmd->config_enable),
__le32_to_cpu(cmd->config_valid));
return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
@ -7649,6 +7760,48 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
return skb;
}

static struct sk_buff *
ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
{
struct wmi_echo_cmd *cmd;
struct sk_buff *skb;

skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);

cmd = (struct wmi_echo_cmd *)skb->data;
cmd->value = cpu_to_le32(value);

ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi echo value 0x%08x\n", value);
return skb;
}

int
ath10k_wmi_barrier(struct ath10k *ar)
{
int ret;
int time_left;

spin_lock_bh(&ar->data_lock);
reinit_completion(&ar->wmi.barrier);
spin_unlock_bh(&ar->data_lock);

ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
if (ret) {
ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
return ret;
}

time_left = wait_for_completion_timeout(&ar->wmi.barrier,
ATH10K_WMI_BARRIER_TIMEOUT_HZ);
if (!time_left)
return -ETIMEDOUT;

return 0;
}

static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@ -7665,6 +7818,7 @@ static const struct wmi_ops wmi_ops = {
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -7709,6 +7863,7 @@ static const struct wmi_ops wmi_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@ -7738,6 +7893,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -7777,6 +7933,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@ -7796,6 +7953,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
.gen_echo = ath10k_wmi_op_gen_echo,

.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@ -7807,6 +7965,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -7862,6 +8021,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
.gen_echo = ath10k_wmi_op_gen_echo,

.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@ -7873,6 +8033,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,

.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -7968,7 +8129,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
@ -7980,10 +8141,12 @@ static const struct wmi_ops wmi_10_4_ops = {
.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,

/* shared with 10.2 */
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
.gen_echo = ath10k_wmi_op_gen_echo,
};

int ath10k_wmi_attach(struct ath10k *ar)
@ -8036,6 +8199,7 @@ int ath10k_wmi_attach(struct ath10k *ar)

init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
init_completion(&ar->wmi.barrier);

INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);

@ -180,6 +180,7 @@ enum wmi_service {
WMI_SERVICE_MESH_NON_11S,
WMI_SERVICE_PEER_STATS,
WMI_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_PULL,
WMI_SERVICE_TX_MODE_DYNAMIC,
@ -305,6 +306,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_10_4_SERVICE_PEER_STATS,
WMI_10_4_SERVICE_MESH_11S,
WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
@ -402,6 +404,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_MESH_NON_11S);
SVCSTR(WMI_SERVICE_PEER_STATS);
SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
@ -652,6 +655,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_PEER_STATS, len);
SVCMAP(WMI_10_4_SERVICE_MESH_11S,
WMI_SERVICE_MESH_11S, len);
SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
@ -6169,6 +6174,20 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;

struct wmi_10_4_dbglog_cfg_cmd {
/* bitmask to hold mod id config*/
__le64 module_enable;

/* see ATH10K_DBGLOG_CFG_ */
__le32 config_enable;

/* mask of module id bits to be changed */
__le64 module_valid;

/* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
__le32 config_valid;
} __packed;

enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
@ -6296,6 +6315,10 @@ struct wmi_roam_ev_arg {
__le32 rssi;
};

struct wmi_echo_ev_arg {
__le32 value;
};

struct wmi_pdev_temperature_event {
/* temperature value in Celcius degree */
__le32 temperature;
@ -6624,5 +6647,6 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
char *buf);
int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
int ath10k_wmi_barrier(struct ath10k *ar);

#endif /* _WMI_H_ */

@ -909,7 +909,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
struct ath5k_hw *ah = inode->i_private;
bool res;
int i, ret;
u32 eesize;
u32 eesize; /* NB: in 16-bit words */
u16 val, *buf;

/* Get eeprom size */
@ -932,7 +932,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)

/* Create buffer and read in eeprom */

buf = vmalloc(eesize);
buf = vmalloc(eesize * 2);
if (!buf) {
ret = -ENOMEM;
goto err;
@ -952,7 +952,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
}

ep->buf = buf;
ep->len = i;
ep->len = eesize * 2;

file->private_data = (void *)ep;

@ -1449,14 +1449,14 @@ static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
return -EIO;

if (test_bit(CONNECTED, &vif->flags)) {
ar->tx_pwr = 0;
ar->tx_pwr = 255;

if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
return -EIO;
}

wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 255,
5 * HZ);

if (signal_pending(current)) {

@ -64,7 +64,7 @@ int ath6kl_hif_rw_comp_handler(void *context, int status)
}
EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);

#define REG_DUMP_COUNT_AR6003 60
#define REGISTER_DUMP_COUNT 60
#define REGISTER_DUMP_LEN_MAX 60

static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
@ -73,9 +73,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
u32 i, address, regdump_addr = 0;
int ret;

if (ar->target_type != TARGET_TYPE_AR6003)
return;

/* the reg dump pointer is copied to the host interest area */
address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
address = TARG_VTOP(ar->target_type, address);
@ -95,7 +92,7 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)

/* fetch register dump data */
ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
REGISTER_DUMP_COUNT * (sizeof(u32)));
if (ret) {
ath6kl_warn("failed to get register dump: %d\n", ret);
return;
@ -105,9 +102,9 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
ar->wiphy->fw_version);

BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
BUILD_BUG_ON(REGISTER_DUMP_COUNT % 4);

for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
for (i = 0; i < REGISTER_DUMP_COUNT; i += 4) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
i,
le32_to_cpu(regdump_val[i]),

@ -260,8 +260,8 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
int cur_bin;
int upper, lower, cur_vit_mask;
int i;
int8_t mask_m[123];
int8_t mask_p[123];
int8_t mask_m[123] = {0};
int8_t mask_p[123] = {0};
int8_t mask_amt;
int tmp_mask;
static const int pilot_mask_reg[4] = {
@ -274,9 +274,6 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
};
static const int inc[4] = { 0, 100, 0, 0 };

memset(&mask_m, 0, sizeof(int8_t) * 123);
memset(&mask_p, 0, sizeof(int8_t) * 123);

cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
@ -302,7 +299,7 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
upper = bin + 120;
lower = bin - 120;

for (i = 0; i < 123; i++) {
for (i = 0; i < ARRAY_SIZE(mask_m); i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);

@ -3252,7 +3252,8 @@ static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
int i;

for (i = 0; i < mdata_size / 2; i++, data++)
ath9k_hw_nvram_read(ah, i, data);
if (!ath9k_hw_nvram_read(ah, i, data))
return -EIO;

return 0;
}
@ -3282,7 +3283,8 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
if (ath9k_hw_use_flash(ah)) {
u8 txrx;

ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
if (ar9300_eeprom_restore_flash(ah, mptr, mdata_size))
return -EIO;

/* check if eeprom contains valid data */
eep = (struct ar9300_eeprom *) mptr;

@ -22,7 +22,7 @@

#ifdef CONFIG_MAC80211_LEDS

void ath_fill_led_pin(struct ath_softc *sc)
static void ath_fill_led_pin(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;

@ -50,9 +50,11 @@ static u16 bits_per_symbol[][2] = {
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq);
int tx_flags, struct ath_txq *txq,
struct ieee80211_sta *sta);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ieee80211_sta *sta,
struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
@ -77,6 +79,22 @@ enum {
/* Aggregation logic */
/*********************/

static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->status.status_driver_data[0];

if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
ieee80211_tx_status(hw, skb);
return;
}

if (sta)
ieee80211_tx_status_noskb(hw, sta, info);

dev_kfree_skb(skb);
}

void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
__acquires(&txq->axq_lock)
{
@ -92,6 +110,7 @@ void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
struct ieee80211_hw *hw = sc->hw;
struct sk_buff_head q;
struct sk_buff *skb;

@ -100,7 +119,7 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
spin_unlock_bh(&txq->axq_lock);

while ((skb = __skb_dequeue(&q)))
ieee80211_tx_status(sc->hw, skb);
ath_tx_status(hw, skb);
}

static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
@ -253,7 +272,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}

list_add_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
}

if (sendbar) {
@ -318,12 +337,12 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;

if (!bf) {
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL);
continue;
}

list_add_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
}
}

@ -426,15 +445,14 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
struct ieee80211_sta *sta,
struct ath_atx_tid *tid,
struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
struct ieee80211_sta *sta;
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
@ -460,12 +478,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
for (i = 0; i < ts->ts_rateindex; i++)
retries += rates[i].count;

rcu_read_lock();

sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (!sta) {
rcu_read_unlock();

INIT_LIST_HEAD(&bf_head);
while (bf) {
bf_next = bf->bf_next;
@ -473,7 +486,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_state.stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);

ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, 0);

bf = bf_next;
}
@ -481,7 +494,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}

an = (struct ath_node *)sta->drv_priv;
tid = ath_get_skb_tid(sc, an, skb);
seq_first = tid->seq_start;
isba = ts->ts_flags & ATH9K_TX_BA;

@ -583,7 +595,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ts);
}

ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
!txfail);
} else {
if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
@ -604,7 +616,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, seqno);

ath_tx_complete_buf(sc, bf, txq,
&bf_head, ts, 0);
&bf_head, NULL, ts,
0);
bar_index = max_t(int, bar_index,
ATH_BA_INDEX(seq_first, seqno));
break;
@ -648,8 +661,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
}

rcu_read_unlock();

if (needreset)
ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
@ -664,7 +675,11 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *info;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct ath_atx_tid *tid = NULL;
bool txok, flush;

txok = !(ts->ts_status & ATH9K_TXERR_MASK);
@ -677,6 +692,16 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,

ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc,
ts->ts_rateindex);

hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (sta) {
struct ath_node *an = (struct ath_node *)sta->drv_priv;
tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->clear_ps_filter = true;
}

if (!bf_isampdu(bf)) {
if (!flush) {
info = IEEE80211_SKB_CB(bf->bf_mpdu);
@ -685,9 +710,9 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
}
ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
ath_tx_complete_aggr(sc, txq, bf, bf_head, sta, tid, ts, txok);

if (!flush)
ath_txq_schedule(sc, txq);
@ -923,7 +948,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
list_add(&bf->list, &bf_head);
__skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}

@ -1832,6 +1857,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
*/
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
rcu_read_lock();
ath_txq_lock(sc, txq);

if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@ -1850,6 +1876,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
ath_drain_txq_list(sc, txq, &txq->axq_q);

ath_txq_unlock_complete(sc, txq);
rcu_read_unlock();
}

bool ath_drain_all_txq(struct ath_softc *sc)
@ -2472,7 +2499,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq)
int tx_flags, struct ath_txq *txq,
struct ieee80211_sta *sta)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@ -2492,6 +2520,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}

if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
padpos = ieee80211_hdrlen(hdr->frame_control);
padsize = padpos & 3;
if (padsize && skb->len>padpos+padsize) {
@ -2502,6 +2531,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
}

spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
@ -2515,12 +2545,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

__skb_queue_tail(&txq->complete_q, skb);
ath_txq_skb_done(sc, txq, skb);
tx_info->status.status_driver_data[0] = sta;
__skb_queue_tail(&txq->complete_q, skb);
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ieee80211_sta *sta,
struct ath_tx_status *ts, int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
@ -2548,7 +2580,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
complete(&sc->paprd_complete);
} else {
ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
ath_tx_complete(sc, skb, tx_flags, txq);
ath_tx_complete(sc, skb, tx_flags, txq, sta);
}
skip_tx_complete:
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@ -2700,10 +2732,12 @@ void ath_tx_tasklet(struct ath_softc *sc)
u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
int i;

rcu_read_lock();
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
ath_tx_processq(sc, &sc->tx.txq[i]);
}
rcu_read_unlock();
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
@ -2717,6 +2751,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
struct list_head *fifo_list;
int status;

rcu_read_lock();
for (;;) {
if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
@ -2787,6 +2822,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
ath_txq_unlock_complete(sc, txq);
}
rcu_read_unlock();
}

/*****************/

@ -670,6 +670,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
ar->readlen = outlen;
spin_unlock_bh(&ar->cmd_lock);

reinit_completion(&ar->cmd_wait);
err = __carl9170_exec_cmd(ar, &ar->cmd, false);

if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
@ -778,10 +779,7 @@ void carl9170_usb_stop(struct ar9170 *ar)
spin_lock_bh(&ar->cmd_lock);
ar->readlen = 0;
spin_unlock_bh(&ar->cmd_lock);
complete_all(&ar->cmd_wait);

/* This is required to prevent an early completion on _start */
reinit_completion(&ar->cmd_wait);
complete(&ar->cmd_wait);

/*
* Note:

@ -338,7 +338,7 @@ static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
return true;
}

static struct dfs_pattern_detector default_dpd = {
static const struct dfs_pattern_detector default_dpd = {
.exit = dpd_exit,
.set_dfs_domain = dpd_set_domain,
.add_pulse = dpd_add_pulse,

@ -354,10 +354,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
__func__, wdev, wdev->iftype);

mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
mutex_unlock(&wil->p2p_wdev_mutex);
return -EAGAIN;
}
mutex_unlock(&wil->p2p_wdev_mutex);

/* check we are client side */
switch (wdev->iftype) {
@ -760,14 +763,11 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
return rc;
}

static struct wil_tid_crypto_rx_single *
wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
static struct wil_sta_info *
wil_find_sta_by_key_usage(struct wil6210_priv *wil,
enum wmi_key_usage key_usage, const u8 *mac_addr)
{
int cid = -EINVAL;
int tid = 0;
struct wil_sta_info *s;
struct wil_tid_crypto_rx *c;

if (key_usage == WMI_KEY_USE_TX_GROUP)
return NULL; /* not needed */
@ -778,18 +778,72 @@ wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
else if (key_usage == WMI_KEY_USE_RX_GROUP)
cid = wil_find_cid_by_idx(wil, 0);
if (cid < 0) {
wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
key_usage_str[key_usage], key_index);
wil_err(wil, "No CID for %pM %s\n", mac_addr,
key_usage_str[key_usage]);
return ERR_PTR(cid);
}

s = &wil->sta[cid];
if (key_usage == WMI_KEY_USE_PAIRWISE)
c = &s->tid_crypto_rx[tid];
else
c = &s->group_crypto_rx;
return &wil->sta[cid];
}

return &c->key_id[key_index];
static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
struct wil_sta_info *cs,
struct key_params *params)
{
struct wil_tid_crypto_rx_single *cc;
int tid;

if (!cs)
return;

switch (key_usage) {
case WMI_KEY_USE_PAIRWISE:
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
cc = &cs->tid_crypto_rx[tid].key_id[key_index];
if (params->seq)
memcpy(cc->pn, params->seq,
IEEE80211_GCMP_PN_LEN);
else
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
cc->key_set = true;
}
break;
case WMI_KEY_USE_RX_GROUP:
cc = &cs->group_crypto_rx.key_id[key_index];
if (params->seq)
memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
else
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
cc->key_set = true;
break;
default:
break;
}
}

static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
struct wil_sta_info *cs)
{
struct wil_tid_crypto_rx_single *cc;
int tid;

if (!cs)
return;

switch (key_usage) {
case WMI_KEY_USE_PAIRWISE:
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
cc = &cs->tid_crypto_rx[tid].key_id[key_index];
cc->key_set = false;
}
break;
case WMI_KEY_USE_RX_GROUP:
cc = &cs->group_crypto_rx.key_id[key_index];
cc->key_set = false;
break;
default:
break;
}
}

static int wil_cfg80211_add_key(struct wiphy *wiphy,
@ -801,24 +855,26 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
|
||||
struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
|
||||
key_index,
|
||||
key_usage,
|
||||
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
|
||||
mac_addr);
|
||||
|
||||
if (!params) {
|
||||
wil_err(wil, "NULL params\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
|
||||
mac_addr, key_usage_str[key_usage], key_index,
|
||||
params->seq_len, params->seq);
|
||||
|
||||
if (IS_ERR(cc)) {
|
||||
if (IS_ERR(cs)) {
|
||||
wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
|
||||
__func__, mac_addr, key_usage_str[key_usage], key_index,
|
||||
params->seq_len, params->seq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (cc)
|
||||
cc->key_set = false;
|
||||
wil_del_rx_key(key_index, key_usage, cs);
|
||||
|
||||
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
|
||||
wil_err(wil,
|
||||
@ -831,13 +887,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
|
||||
|
||||
rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
|
||||
params->key, key_usage);
|
||||
if ((rc == 0) && cc) {
|
||||
if (params->seq)
|
||||
memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
|
||||
else
|
||||
memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
|
||||
cc->key_set = true;
|
||||
}
|
||||
if (!rc)
|
||||
wil_set_crypto_rx(key_index, key_usage, cs, params);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -849,20 +900,18 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
|
||||
{
|
||||
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
|
||||
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
|
||||
struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
|
||||
key_index,
|
||||
key_usage,
|
||||
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
|
||||
mac_addr);
|
||||
|
||||
wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
|
||||
key_usage_str[key_usage], key_index);
|
||||
|
||||
if (IS_ERR(cc))
|
||||
if (IS_ERR(cs))
|
||||
wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
|
||||
mac_addr, key_usage_str[key_usage], key_index);
|
||||
|
||||
if (!IS_ERR_OR_NULL(cc))
|
||||
cc->key_set = false;
|
||||
if (!IS_ERR_OR_NULL(cs))
|
||||
wil_del_rx_key(key_index, key_usage, cs);
|
||||
|
||||
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
|
||||
}
|
||||
@ -1363,23 +1412,16 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
|
||||
struct wireless_dev *wdev)
|
||||
{
|
||||
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
|
||||
u8 started;
|
||||
struct wil_p2p_info *p2p = &wil->p2p;
|
||||
|
||||
if (!p2p->p2p_dev_started)
|
||||
return;
|
||||
|
||||
wil_dbg_misc(wil, "%s: entered\n", __func__);
|
||||
mutex_lock(&wil->mutex);
|
||||
started = wil_p2p_stop_discovery(wil);
|
||||
if (started && wil->scan_request) {
|
||||
struct cfg80211_scan_info info = {
|
||||
.aborted = true,
|
||||
};
|
||||
|
||||
cfg80211_scan_done(wil->scan_request, &info);
|
||||
wil->scan_request = NULL;
|
||||
wil->radio_wdev = wil->wdev;
|
||||
}
|
||||
wil_p2p_stop_radio_operations(wil);
|
||||
p2p->p2p_dev_started = 0;
|
||||
mutex_unlock(&wil->mutex);
|
||||
|
||||
wil->p2p.p2p_dev_started = 0;
|
||||
}
|
||||
|
||||
static struct cfg80211_ops wil_cfg80211_ops = {
|
||||
@ -1464,14 +1506,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev)
|
||||
set_wiphy_dev(wdev->wiphy, dev);
|
||||
wil_wiphy_init(wdev->wiphy);
|
||||
|
||||
rc = wiphy_register(wdev->wiphy);
|
||||
if (rc < 0)
|
||||
goto out_failed_reg;
|
||||
|
||||
return wdev;
|
||||
|
||||
out_failed_reg:
|
||||
wiphy_free(wdev->wiphy);
|
||||
out:
|
||||
kfree(wdev);
|
||||
|
||||
@ -1487,7 +1523,6 @@ void wil_wdev_free(struct wil6210_priv *wil)
|
||||
if (!wdev)
|
||||
return;
|
||||
|
||||
wiphy_unregister(wdev->wiphy);
|
||||
wiphy_free(wdev->wiphy);
|
||||
kfree(wdev);
|
||||
}
|
||||
@ -1498,11 +1533,11 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
|
||||
|
||||
mutex_lock(&wil->p2p_wdev_mutex);
|
||||
p2p_wdev = wil->p2p_wdev;
|
||||
if (p2p_wdev) {
|
||||
wil->p2p_wdev = NULL;
|
||||
wil->radio_wdev = wil_to_wdev(wil);
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
if (p2p_wdev) {
|
||||
cfg80211_unregister_wdev(p2p_wdev);
|
||||
kfree(p2p_wdev);
|
||||
}
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
}
|
||||
|
@ -1553,6 +1553,56 @@ static const struct file_operations fops_led_blink_time = {
|
||||
.open = simple_open,
|
||||
};
|
||||
|
||||
/*---------FW capabilities------------*/
|
||||
static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
|
||||
{
|
||||
struct wil6210_priv *wil = s->private;
|
||||
|
||||
seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX,
|
||||
wil->fw_capabilities);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, wil_fw_capabilities_debugfs_show,
|
||||
inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations fops_fw_capabilities = {
|
||||
.open = wil_fw_capabilities_seq_open,
|
||||
.release = single_release,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
};
|
||||
|
||||
/*---------FW version------------*/
|
||||
static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
|
||||
{
|
||||
struct wil6210_priv *wil = s->private;
|
||||
|
||||
if (wil->fw_version[0])
|
||||
seq_printf(s, "%s\n", wil->fw_version);
|
||||
else
|
||||
seq_puts(s, "N/A\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int wil_fw_version_seq_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, wil_fw_version_debugfs_show,
|
||||
inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations fops_fw_version = {
|
||||
.open = wil_fw_version_seq_open,
|
||||
.release = single_release,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
};
|
||||
|
||||
/*----------------*/
|
||||
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
|
||||
struct dentry *dbg)
|
||||
@ -1603,6 +1653,8 @@ static const struct {
|
||||
{"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
|
||||
{"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
|
||||
{"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
|
||||
{"fw_capabilities", S_IRUGO, &fops_fw_capabilities},
|
||||
{"fw_version", S_IRUGO, &fops_fw_version},
|
||||
};
|
||||
|
||||
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
|
||||
@ -1643,7 +1695,6 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
|
||||
static const struct dbg_off dbg_wil_off[] = {
|
||||
WIL_FIELD(privacy, S_IRUGO, doff_u32),
|
||||
WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
|
||||
WIL_FIELD(fw_version, S_IRUGO, doff_u32),
|
||||
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
|
||||
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
|
||||
WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -58,6 +58,15 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
|
||||
u8 data[0]; /* free-form data [data_size], see above */
|
||||
} __packed;
|
||||
|
||||
/* FW capabilities encoded inside a comment record */
|
||||
#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
|
||||
struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
|
||||
/* identifies capabilities record */
|
||||
__le32 magic;
|
||||
/* capabilities (variable size), see enum wmi_fw_capability */
|
||||
u8 capabilities[0];
|
||||
};
|
||||
|
||||
/* perform action
|
||||
* data_size = @head.size - offsetof(struct wil_fw_record_action, data)
|
||||
*/
|
||||
@ -93,6 +102,9 @@ struct wil_fw_record_verify { /* type == wil_fw_verify */
|
||||
/* file header
|
||||
* First record of every file
|
||||
*/
|
||||
/* the FW version prefix in the comment */
|
||||
#define WIL_FW_VERSION_PREFIX "FW version: "
|
||||
#define WIL_FW_VERSION_PREFIX_LEN (sizeof(WIL_FW_VERSION_PREFIX) - 1)
|
||||
struct wil_fw_record_file_header {
|
||||
__le32 signature ; /* Wilocity signature */
|
||||
__le32 reserved;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -118,6 +118,12 @@ static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
|
||||
return (int)dlen;
|
||||
}
|
||||
|
||||
static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
|
||||
size_t size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
|
||||
size_t size)
|
||||
{
|
||||
@ -126,6 +132,27 @@ static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
|
||||
size_t size)
|
||||
{
|
||||
const struct wil_fw_record_capabilities *rec = data;
|
||||
size_t capa_size;
|
||||
|
||||
if (size < sizeof(*rec) ||
|
||||
le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
|
||||
return 0;
|
||||
|
||||
capa_size = size - offsetof(struct wil_fw_record_capabilities,
|
||||
capabilities);
|
||||
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
|
||||
memcpy(wil->fw_capabilities, rec->capabilities,
|
||||
min(sizeof(wil->fw_capabilities), capa_size));
|
||||
wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
|
||||
rec->capabilities, capa_size, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fw_handle_data(struct wil6210_priv *wil, const void *data,
|
||||
size_t size)
|
||||
{
|
||||
@ -196,6 +223,13 @@ static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
|
||||
wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
|
||||
sizeof(d->comment), true);
|
||||
|
||||
if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX,
|
||||
WIL_FW_VERSION_PREFIX_LEN))
|
||||
memcpy(wil->fw_version,
|
||||
d->comment + WIL_FW_VERSION_PREFIX_LEN,
|
||||
min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN,
|
||||
sizeof(wil->fw_version) - 1));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -383,42 +417,51 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
|
||||
|
||||
static const struct {
|
||||
int type;
|
||||
int (*handler)(struct wil6210_priv *wil, const void *data, size_t size);
|
||||
int (*load_handler)(struct wil6210_priv *wil, const void *data,
|
||||
size_t size);
|
||||
int (*parse_handler)(struct wil6210_priv *wil, const void *data,
|
||||
size_t size);
|
||||
} wil_fw_handlers[] = {
|
||||
{wil_fw_type_comment, fw_handle_comment},
|
||||
{wil_fw_type_data, fw_handle_data},
|
||||
{wil_fw_type_fill, fw_handle_fill},
|
||||
{wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
|
||||
{wil_fw_type_data, fw_handle_data, fw_ignore_section},
|
||||
{wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
|
||||
/* wil_fw_type_action */
|
||||
/* wil_fw_type_verify */
|
||||
{wil_fw_type_file_header, fw_handle_file_header},
|
||||
{wil_fw_type_direct_write, fw_handle_direct_write},
|
||||
{wil_fw_type_gateway_data, fw_handle_gateway_data},
|
||||
{wil_fw_type_gateway_data4, fw_handle_gateway_data4},
|
||||
{wil_fw_type_file_header, fw_handle_file_header,
|
||||
fw_handle_file_header},
|
||||
{wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section},
|
||||
{wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section},
|
||||
{wil_fw_type_gateway_data4, fw_handle_gateway_data4,
|
||||
fw_ignore_section},
|
||||
};
|
||||
|
||||
static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
const void *data, size_t size)
const void *data, size_t size, bool load)
{
int i;

for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) {
for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++)
if (wil_fw_handlers[i].type == type)
return wil_fw_handlers[i].handler(wil, data, size);
}
return load ?
wil_fw_handlers[i].load_handler(
wil, data, size) :
wil_fw_handlers[i].parse_handler(
wil, data, size);

wil_err_fw(wil, "unknown record type: %d\n", type);
return -EINVAL;
}

/**
* wil_fw_load - load FW into device
*
* Load the FW and uCode code and data to the corresponding device
* memory regions
* wil_fw_process - process section from FW file
* if load is true: Load the FW and uCode code and data to the
* corresponding device memory regions,
* otherwise only parse and look for capabilities
*
* Return error code
*/
static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
static int wil_fw_process(struct wil6210_priv *wil, const void *data,
size_t size, bool load)
{
int rc = 0;
const struct wil_fw_record_head *hdr;
@ -437,7 +480,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
return -EINVAL;
}
rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
&hdr[1], hdr_sz);
&hdr[1], hdr_sz, load);
if (rc)
return rc;
}
@ -456,13 +499,16 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
}

/**
* wil_request_firmware - Request firmware and load to device
* wil_request_firmware - Request firmware
*
* Request firmware image from the file and load it to device
* Request firmware image from the file
* If load is true, load firmware to device, otherwise
* only parse and extract capabilities
*
* Return error code
*/
int wil_request_firmware(struct wil6210_priv *wil, const char *name)
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load)
{
int rc, rc1;
const struct firmware *fw;
@ -482,7 +528,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name)
rc = rc1;
goto out;
}
rc = wil_fw_load(wil, d, rc1);
rc = wil_fw_process(wil, d, rc1, load);
if (rc < 0)
goto out;
}
|
||||
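Editor's note: for context, a minimal sketch (not part of the patch) of how the reworked wil_request_firmware() is meant to be called after this change: a parse-only pass extracts capabilities from the FW file without touching device memory, and a load pass programs the device during reset. The wrapper function name below is hypothetical; WIL_FW_NAME, WIL_FW2_NAME and the bool load parameter come from the hunks in this series.

/* Hypothetical helper, illustration only -- mirrors the calls made from
 * wil_set_capabilities() (parse-only) and wil_reset() (real load).
 */
static int example_fw_usage(struct wil6210_priv *wil)
{
	int rc;

	/* parse the FW file and extract capabilities, no device access */
	rc = wil_request_firmware(wil, WIL_FW_NAME, false);
	if (rc)
		return rc;

	/* later, on reset: actually load FW and uCode into device memory */
	rc = wil_request_firmware(wil, WIL_FW_NAME, true);
	if (rc)
		return rc;

	return wil_request_firmware(wil, WIL_FW2_NAME, true);
}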
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -101,7 +101,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
|
||||
mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
|
||||
}
|
||||
|
||||
static void wil6210_mask_halp(struct wil6210_priv *wil)
|
||||
void wil6210_mask_halp(struct wil6210_priv *wil)
|
||||
{
|
||||
wil_dbg_irq(wil, "%s()\n", __func__);
|
||||
|
||||
@ -503,6 +503,13 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
|
||||
offsetof(struct RGF_ICR, ICR));
|
||||
u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
|
||||
offsetof(struct RGF_ICR, IMV));
|
||||
|
||||
/* HALP interrupt can be unmasked when misc interrupts are
|
||||
* masked
|
||||
*/
|
||||
if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP)
|
||||
return 0;
|
||||
|
||||
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
|
||||
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
|
||||
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
|
||||
@ -592,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
|
||||
|
||||
void wil6210_set_halp(struct wil6210_priv *wil)
|
||||
{
|
||||
wil_dbg_misc(wil, "%s()\n", __func__);
|
||||
wil_dbg_irq(wil, "%s()\n", __func__);
|
||||
|
||||
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
|
||||
BIT_DMA_EP_MISC_ICR_HALP);
|
||||
@ -600,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
|
||||
|
||||
void wil6210_clear_halp(struct wil6210_priv *wil)
|
||||
{
|
||||
wil_dbg_misc(wil, "%s()\n", __func__);
|
||||
wil_dbg_irq(wil, "%s()\n", __func__);
|
||||
|
||||
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
|
||||
BIT_DMA_EP_MISC_ICR_HALP);
|
||||
|
@ -232,6 +232,9 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
struct wireless_dev *wdev = wil->wdev;
|
||||
|
||||
if (unlikely(!ndev))
|
||||
return;
|
||||
|
||||
might_sleep();
|
||||
wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
|
||||
reason_code, from_event ? "+" : "-");
|
||||
@ -849,6 +852,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
bitmap_zero(wil->status, wil_status_last);
|
||||
mutex_unlock(&wil->wmi_mutex);
|
||||
|
||||
mutex_lock(&wil->p2p_wdev_mutex);
|
||||
if (wil->scan_request) {
|
||||
struct cfg80211_scan_info info = {
|
||||
.aborted = true,
|
||||
@ -860,6 +864,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
cfg80211_scan_done(wil->scan_request, &info);
|
||||
wil->scan_request = NULL;
|
||||
}
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
|
||||
wil_mask_irq(wil);
|
||||
|
||||
@ -888,11 +893,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
WIL_FW2_NAME);
|
||||
|
||||
wil_halt_cpu(wil);
|
||||
memset(wil->fw_version, 0, sizeof(wil->fw_version));
|
||||
/* Loading f/w from the file */
|
||||
rc = wil_request_firmware(wil, WIL_FW_NAME);
|
||||
rc = wil_request_firmware(wil, WIL_FW_NAME, true);
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = wil_request_firmware(wil, WIL_FW2_NAME);
|
||||
rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -1035,10 +1041,10 @@ int wil_up(struct wil6210_priv *wil)
|
||||
|
||||
int __wil_down(struct wil6210_priv *wil)
|
||||
{
|
||||
int rc;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&wil->mutex));
|
||||
|
||||
set_bit(wil_status_resetting, wil->status);
|
||||
|
||||
if (wil->platform_ops.bus_request)
|
||||
wil->platform_ops.bus_request(wil->platform_handle, 0);
|
||||
|
||||
@ -1050,8 +1056,9 @@ int __wil_down(struct wil6210_priv *wil)
|
||||
}
|
||||
wil_enable_irq(wil);
|
||||
|
||||
(void)wil_p2p_stop_discovery(wil);
|
||||
wil_p2p_stop_radio_operations(wil);
|
||||
|
||||
mutex_lock(&wil->p2p_wdev_mutex);
|
||||
if (wil->scan_request) {
|
||||
struct cfg80211_scan_info info = {
|
||||
.aborted = true,
|
||||
@ -1063,18 +1070,7 @@ int __wil_down(struct wil6210_priv *wil)
|
||||
cfg80211_scan_done(wil->scan_request, &info);
|
||||
wil->scan_request = NULL;
|
||||
}
|
||||
|
||||
if (test_bit(wil_status_fwconnected, wil->status) ||
|
||||
test_bit(wil_status_fwconnecting, wil->status)) {
|
||||
|
||||
mutex_unlock(&wil->mutex);
|
||||
rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
|
||||
WMI_DISCONNECT_EVENTID, NULL, 0,
|
||||
WIL6210_DISCONNECT_TO_MS);
|
||||
mutex_lock(&wil->mutex);
|
||||
if (rc)
|
||||
wil_err(wil, "timeout waiting for disconnect\n");
|
||||
}
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
|
||||
wil_reset(wil, false);
|
||||
|
||||
@ -1118,22 +1114,25 @@ void wil_halp_vote(struct wil6210_priv *wil)
|
||||
|
||||
mutex_lock(&wil->halp.lock);
|
||||
|
||||
wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
|
||||
wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
|
||||
wil->halp.ref_cnt);
|
||||
|
||||
if (++wil->halp.ref_cnt == 1) {
|
||||
wil6210_set_halp(wil);
|
||||
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
|
||||
if (!rc)
|
||||
if (!rc) {
|
||||
wil_err(wil, "%s: HALP vote timed out\n", __func__);
|
||||
else
|
||||
wil_dbg_misc(wil,
|
||||
/* Mask HALP as done in case the interrupt is raised */
|
||||
wil6210_mask_halp(wil);
|
||||
} else {
|
||||
wil_dbg_irq(wil,
|
||||
"%s: HALP vote completed after %d ms\n",
|
||||
__func__,
|
||||
jiffies_to_msecs(to_jiffies - rc));
|
||||
}
|
||||
}
|
||||
|
||||
wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
|
||||
wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
|
||||
wil->halp.ref_cnt);
|
||||
|
||||
mutex_unlock(&wil->halp.lock);
|
||||
@ -1145,15 +1144,15 @@ void wil_halp_unvote(struct wil6210_priv *wil)
|
||||
|
||||
mutex_lock(&wil->halp.lock);
|
||||
|
||||
wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
|
||||
wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
|
||||
wil->halp.ref_cnt);
|
||||
|
||||
if (--wil->halp.ref_cnt == 0) {
|
||||
wil6210_clear_halp(wil);
|
||||
wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
|
||||
wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
|
||||
}
|
||||
|
||||
wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
|
||||
wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
|
||||
wil->halp.ref_cnt);
|
||||
|
||||
mutex_unlock(&wil->halp.lock);
|
||||
|
@ -179,13 +179,6 @@ void *wil_if_alloc(struct device *dev)
|
||||
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
|
||||
wdev->netdev = ndev;
|
||||
|
||||
netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
|
||||
WIL6210_NAPI_BUDGET);
|
||||
netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
|
||||
WIL6210_NAPI_BUDGET);
|
||||
|
||||
netif_tx_stop_all_queues(ndev);
|
||||
|
||||
return wil;
|
||||
|
||||
out_priv:
|
||||
@ -216,25 +209,48 @@ void wil_if_free(struct wil6210_priv *wil)
|
||||
|
||||
int wil_if_add(struct wil6210_priv *wil)
|
||||
{
|
||||
struct wireless_dev *wdev = wil_to_wdev(wil);
|
||||
struct wiphy *wiphy = wdev->wiphy;
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
int rc;
|
||||
|
||||
wil_dbg_misc(wil, "%s()\n", __func__);
|
||||
wil_dbg_misc(wil, "entered");
|
||||
|
||||
strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
|
||||
|
||||
rc = wiphy_register(wiphy);
|
||||
if (rc < 0) {
|
||||
wil_err(wil, "failed to register wiphy, err %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
|
||||
WIL6210_NAPI_BUDGET);
|
||||
netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
|
||||
WIL6210_NAPI_BUDGET);
|
||||
|
||||
netif_tx_stop_all_queues(ndev);
|
||||
|
||||
rc = register_netdev(ndev);
|
||||
if (rc < 0) {
|
||||
dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
|
||||
return rc;
|
||||
goto out_wiphy;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_wiphy:
|
||||
wiphy_unregister(wdev->wiphy);
|
||||
return rc;
|
||||
}
|
||||
|
||||
void wil_if_remove(struct wil6210_priv *wil)
|
||||
{
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
struct wireless_dev *wdev = wil_to_wdev(wil);
|
||||
|
||||
wil_dbg_misc(wil, "%s()\n", __func__);
|
||||
|
||||
unregister_netdev(ndev);
|
||||
wiphy_unregister(wdev->wiphy);
|
||||
}
|
||||
|
@ -263,3 +263,49 @@ void wil_p2p_search_expired(struct work_struct *work)
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
|
||||
{
|
||||
struct wil_p2p_info *p2p = &wil->p2p;
|
||||
struct cfg80211_scan_info info = {
|
||||
.aborted = true,
|
||||
};
|
||||
|
||||
lockdep_assert_held(&wil->mutex);
|
||||
|
||||
mutex_lock(&wil->p2p_wdev_mutex);
|
||||
|
||||
if (wil->radio_wdev != wil->p2p_wdev)
|
||||
goto out;
|
||||
|
||||
if (!p2p->discovery_started) {
|
||||
/* Regular scan on the p2p device */
|
||||
if (wil->scan_request &&
|
||||
wil->scan_request->wdev == wil->p2p_wdev) {
|
||||
cfg80211_scan_done(wil->scan_request, &info);
|
||||
wil->scan_request = NULL;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Search or listen on p2p device */
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
wil_p2p_stop_discovery(wil);
|
||||
mutex_lock(&wil->p2p_wdev_mutex);
|
||||
|
||||
if (wil->scan_request) {
|
||||
/* search */
|
||||
cfg80211_scan_done(wil->scan_request, &info);
|
||||
wil->scan_request = NULL;
|
||||
} else {
|
||||
/* listen */
|
||||
cfg80211_remain_on_channel_expired(wil->radio_wdev,
|
||||
p2p->cookie,
|
||||
&p2p->listen_chan,
|
||||
GFP_KERNEL);
|
||||
}
|
||||
|
||||
out:
|
||||
wil->radio_wdev = wil->wdev;
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
}
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/suspend.h>
|
||||
#include "wil6210.h"
|
||||
#include <linux/rtnetlink.h>
|
||||
|
||||
static bool use_msi = true;
|
||||
module_param(use_msi, bool, S_IRUGO);
|
||||
@ -38,6 +39,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
|
||||
u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
|
||||
|
||||
bitmap_zero(wil->hw_capabilities, hw_capability_last);
|
||||
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
|
||||
|
||||
switch (rev_id) {
|
||||
case JTAG_DEV_ID_SPARROW_B0:
|
||||
@ -51,6 +53,9 @@ void wil_set_capabilities(struct wil6210_priv *wil)
|
||||
}
|
||||
|
||||
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
|
||||
|
||||
/* extract FW capabilities from file without loading the FW */
|
||||
wil_request_firmware(wil, WIL_FW_NAME, false);
|
||||
}
|
||||
|
||||
void wil_disable_irq(struct wil6210_priv *wil)
|
||||
@ -293,6 +298,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
wil6210_debugfs_remove(wil);
|
||||
rtnl_lock();
|
||||
wil_p2p_wdev_free(wil);
|
||||
rtnl_unlock();
|
||||
wil_if_remove(wil);
|
||||
wil_if_pcie_disable(wil);
|
||||
pci_iounmap(pdev, csr);
|
||||
@ -300,7 +308,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
|
||||
pci_disable_device(pdev);
|
||||
if (wil->platform_ops.uninit)
|
||||
wil->platform_ops.uninit(wil->platform_handle);
|
||||
wil_p2p_wdev_free(wil);
|
||||
wil_if_free(wil);
|
||||
}
|
||||
|
||||
|
@ -873,9 +873,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
|
||||
rc = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
|
||||
|
||||
spin_lock_bh(&txdata->lock);
|
||||
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
|
||||
txdata->enabled = 1;
|
||||
spin_unlock_bh(&txdata->lock);
|
||||
|
||||
if (txdata->dot1x_open && (agg_wsize >= 0))
|
||||
wil_addba_tx_request(wil, id, agg_wsize);
|
||||
|
||||
@ -950,9 +953,11 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
|
||||
rc = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
|
||||
|
||||
spin_lock_bh(&txdata->lock);
|
||||
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
|
||||
txdata->enabled = 1;
|
||||
spin_unlock_bh(&txdata->lock);
|
||||
|
||||
return 0;
|
||||
out_free:
|
||||
|
@ -17,6 +17,7 @@
|
||||
#ifndef __WIL6210_H__
|
||||
#define __WIL6210_H__
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/wireless.h>
|
||||
#include <net/cfg80211.h>
|
||||
@ -576,10 +577,11 @@ struct wil6210_priv {
|
||||
struct wireless_dev *wdev;
|
||||
void __iomem *csr;
|
||||
DECLARE_BITMAP(status, wil_status_last);
|
||||
u32 fw_version;
|
||||
u8 fw_version[ETHTOOL_FWVERS_LEN];
|
||||
u32 hw_version;
|
||||
const char *hw_name;
|
||||
DECLARE_BITMAP(hw_capabilities, hw_capability_last);
|
||||
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
|
||||
u8 n_mids; /* number of additional MIDs as reported by FW */
|
||||
u32 recovery_count; /* num of FW recovery attempts in a short time */
|
||||
u32 recovery_state; /* FW recovery state machine */
|
||||
@ -657,7 +659,7 @@ struct wil6210_priv {
|
||||
|
||||
/* P2P_DEVICE vif */
|
||||
struct wireless_dev *p2p_wdev;
|
||||
struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
|
||||
struct mutex p2p_wdev_mutex; /* protect @p2p_wdev and @scan_request */
|
||||
struct wireless_dev *radio_wdev;
|
||||
|
||||
/* High Access Latency Policy voting */
|
||||
@ -828,6 +830,7 @@ void wil_unmask_irq(struct wil6210_priv *wil);
|
||||
void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
|
||||
void wil_disable_irq(struct wil6210_priv *wil);
|
||||
void wil_enable_irq(struct wil6210_priv *wil);
|
||||
void wil6210_mask_halp(struct wil6210_priv *wil);
|
||||
|
||||
/* P2P */
|
||||
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
|
||||
@ -840,6 +843,7 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
|
||||
int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
|
||||
void wil_p2p_listen_expired(struct work_struct *work);
|
||||
void wil_p2p_search_expired(struct work_struct *work);
|
||||
void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
|
||||
|
||||
/* WMI for P2P */
|
||||
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
|
||||
@ -893,7 +897,8 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
|
||||
int wil_iftype_nl2wmi(enum nl80211_iftype type);
|
||||
|
||||
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
|
||||
int wil_request_firmware(struct wil6210_priv *wil, const char *name);
|
||||
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
|
||||
bool load);
|
||||
|
||||
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
|
||||
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
|
||||
|
@ -312,14 +312,14 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
|
||||
struct wireless_dev *wdev = wil->wdev;
|
||||
struct wmi_ready_event *evt = d;
|
||||
|
||||
wil->fw_version = le32_to_cpu(evt->sw_version);
|
||||
wil->n_mids = evt->numof_additional_mids;
|
||||
|
||||
wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
|
||||
wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
|
||||
wil->fw_version, le32_to_cpu(evt->sw_version),
|
||||
evt->mac, wil->n_mids);
|
||||
/* ignore MAC address, we already have it from the boot loader */
|
||||
snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
|
||||
"%d", wil->fw_version);
|
||||
strlcpy(wdev->wiphy->fw_version, wil->fw_version,
|
||||
sizeof(wdev->wiphy->fw_version));
|
||||
|
||||
wil_set_recovery_state(wil, fw_recovery_idle);
|
||||
set_bit(wil_status_fwready, wil->status);
|
||||
@ -424,6 +424,7 @@ static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
|
||||
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
|
||||
void *d, int len)
|
||||
{
|
||||
mutex_lock(&wil->p2p_wdev_mutex);
|
||||
if (wil->scan_request) {
|
||||
struct wmi_scan_complete_event *data = d;
|
||||
struct cfg80211_scan_info info = {
|
||||
@ -435,14 +436,13 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
|
||||
wil->scan_request, info.aborted);
|
||||
|
||||
del_timer_sync(&wil->scan_timer);
|
||||
mutex_lock(&wil->p2p_wdev_mutex);
|
||||
cfg80211_scan_done(wil->scan_request, &info);
|
||||
wil->radio_wdev = wil->wdev;
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
wil->scan_request = NULL;
|
||||
} else {
|
||||
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
|
||||
}
|
||||
mutex_unlock(&wil->p2p_wdev_mutex);
|
||||
}
|
||||
|
||||
static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
|
||||
|
@ -46,6 +46,16 @@ enum wmi_mid {
|
||||
MID_BROADCAST = 0xFF,
|
||||
};
|
||||
|
||||
/* FW capability IDs
 * Each ID maps to a bit in a 32-bit bitmask value provided by the FW to
 * the host
 */
enum wmi_fw_capability {
WMI_FW_CAPABILITY_FTM = 0,
WMI_FW_CAPABILITY_PS_CONFIG = 1,
WMI_FW_CAPABILITY_MAX,
};
|
||||
|
||||
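Editor's note: as a quick illustration (an assumption, not part of the patch), the capability IDs above index into the wil->fw_capabilities bitmap that fw_handle_capabilities() fills from the FW file, so a driver-side feature check would look roughly like this sketch; the helper name is hypothetical.

/* Illustrative only: checking an FW capability bit after the firmware
 * file has been parsed (wil->fw_capabilities is a DECLARE_BITMAP()).
 */
static bool example_fw_has_ftm(struct wil6210_priv *wil)
{
	return test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities);
}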
/* WMI_CMD_HDR */
|
||||
struct wmi_cmd_hdr {
|
||||
u8 mid;
|
||||
@ -120,6 +130,8 @@ enum wmi_command_id {
|
||||
WMI_BF_SM_MGMT_CMDID = 0x838,
|
||||
WMI_BF_RXSS_MGMT_CMDID = 0x839,
|
||||
WMI_BF_TRIG_CMDID = 0x83A,
|
||||
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
|
||||
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
|
||||
WMI_SET_SECTORS_CMDID = 0x849,
|
||||
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
|
||||
WMI_MAINTAIN_RESUME_CMDID = 0x851,
|
||||
@ -134,10 +146,15 @@ enum wmi_command_id {
|
||||
WMI_BF_CTRL_CMDID = 0x862,
|
||||
WMI_NOTIFY_REQ_CMDID = 0x863,
|
||||
WMI_GET_STATUS_CMDID = 0x864,
|
||||
WMI_GET_RF_STATUS_CMDID = 0x866,
|
||||
WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
|
||||
WMI_UNIT_TEST_CMDID = 0x900,
|
||||
WMI_HICCUP_CMDID = 0x901,
|
||||
WMI_FLASH_READ_CMDID = 0x902,
|
||||
WMI_FLASH_WRITE_CMDID = 0x903,
|
||||
/* Power management */
|
||||
WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
|
||||
WMI_TRAFFIC_RESUME_CMDID = 0x905,
|
||||
/* P2P */
|
||||
WMI_P2P_CFG_CMDID = 0x910,
|
||||
WMI_PORT_ALLOCATE_CMDID = 0x911,
|
||||
@ -150,6 +167,26 @@ enum wmi_command_id {
|
||||
WMI_PCP_START_CMDID = 0x918,
|
||||
WMI_PCP_STOP_CMDID = 0x919,
|
||||
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
|
||||
/* Power Save Configuration Commands */
|
||||
WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
|
||||
/* Not supported yet */
|
||||
WMI_PS_DEV_CFG_CMDID = 0x91D,
|
||||
/* Not supported yet */
|
||||
WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
|
||||
/* Per MAC Power Save Configuration commands
|
||||
* Not supported yet
|
||||
*/
|
||||
WMI_PS_MID_CFG_CMDID = 0x91F,
|
||||
/* Not supported yet */
|
||||
WMI_PS_MID_CFG_READ_CMDID = 0x920,
|
||||
WMI_RS_CFG_CMDID = 0x921,
|
||||
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
|
||||
WMI_AOA_MEAS_CMDID = 0x923,
|
||||
WMI_TOF_SESSION_START_CMDID = 0x991,
|
||||
WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
|
||||
WMI_TOF_SET_LCR_CMDID = 0x993,
|
||||
WMI_TOF_SET_LCI_CMDID = 0x994,
|
||||
WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
|
||||
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
|
||||
WMI_ABORT_SCAN_CMDID = 0xF007,
|
||||
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
|
||||
@ -291,9 +328,8 @@ enum wmi_scan_type {
|
||||
/* WMI_START_SCAN_CMDID */
|
||||
struct wmi_start_scan_cmd {
|
||||
u8 direct_scan_mac_addr[WMI_MAC_LEN];
|
||||
/* DMG Beacon frame is transmitted during active scanning */
|
||||
/* run scan with discovery beacon. Relevant for ACTIVE scan only. */
|
||||
u8 discovery_mode;
|
||||
/* reserved */
|
||||
u8 reserved;
|
||||
/* Max duration in the home channel(ms) */
|
||||
__le32 dwell_time;
|
||||
@ -453,6 +489,12 @@ struct wmi_port_delete_cmd {
|
||||
u8 reserved[3];
|
||||
} __packed;
|
||||
|
||||
/* WMI_TRAFFIC_DEFERRAL_CMDID */
|
||||
struct wmi_traffic_deferral_cmd {
|
||||
/* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
|
||||
u8 wakeup_trigger;
|
||||
} __packed;
|
||||
|
||||
/* WMI_P2P_CFG_CMDID */
|
||||
enum wmi_discovery_mode {
|
||||
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
|
||||
@ -818,6 +860,88 @@ struct wmi_pmc_cmd {
|
||||
__le64 mem_base;
|
||||
} __packed;
|
||||
|
||||
enum wmi_aoa_meas_type {
|
||||
WMI_AOA_PHASE_MEAS = 0x00,
|
||||
WMI_AOA_PHASE_AMP_MEAS = 0x01,
|
||||
};
|
||||
|
||||
/* WMI_AOA_MEAS_CMDID */
|
||||
struct wmi_aoa_meas_cmd {
|
||||
u8 mac_addr[WMI_MAC_LEN];
|
||||
/* channels IDs:
|
||||
* 0 - 58320 MHz
|
||||
* 1 - 60480 MHz
|
||||
* 2 - 62640 MHz
|
||||
*/
|
||||
u8 channel;
|
||||
/* enum wmi_aoa_meas_type */
|
||||
u8 aoa_meas_type;
|
||||
__le32 meas_rf_mask;
|
||||
} __packed;
|
||||
|
||||
enum wmi_tof_burst_duration {
|
||||
WMI_TOF_BURST_DURATION_250_USEC = 2,
|
||||
WMI_TOF_BURST_DURATION_500_USEC = 3,
|
||||
WMI_TOF_BURST_DURATION_1_MSEC = 4,
|
||||
WMI_TOF_BURST_DURATION_2_MSEC = 5,
|
||||
WMI_TOF_BURST_DURATION_4_MSEC = 6,
|
||||
WMI_TOF_BURST_DURATION_8_MSEC = 7,
|
||||
WMI_TOF_BURST_DURATION_16_MSEC = 8,
|
||||
WMI_TOF_BURST_DURATION_32_MSEC = 9,
|
||||
WMI_TOF_BURST_DURATION_64_MSEC = 10,
|
||||
WMI_TOF_BURST_DURATION_128_MSEC = 11,
|
||||
WMI_TOF_BURST_DURATION_NO_PREFERENCES = 15,
|
||||
};
|
||||
|
||||
enum wmi_tof_session_start_flags {
|
||||
WMI_TOF_SESSION_START_FLAG_SECURED = 0x1,
|
||||
WMI_TOF_SESSION_START_FLAG_ASAP = 0x2,
|
||||
WMI_TOF_SESSION_START_FLAG_LCI_REQ = 0x4,
|
||||
WMI_TOF_SESSION_START_FLAG_LCR_REQ = 0x8,
|
||||
};
|
||||
|
||||
/* WMI_TOF_SESSION_START_CMDID */
|
||||
struct wmi_ftm_dest_info {
|
||||
u8 channel;
|
||||
/* wmi_tof_session_start_flags_e */
|
||||
u8 flags;
|
||||
u8 initial_token;
|
||||
u8 num_of_ftm_per_burst;
|
||||
u8 num_of_bursts_exp;
|
||||
/* wmi_tof_burst_duration_e */
|
||||
u8 burst_duration;
|
||||
/* Burst Period indicate interval between two consecutive burst
|
||||
* instances, in units of 100 ms
|
||||
*/
|
||||
__le16 burst_period;
|
||||
u8 dst_mac[WMI_MAC_LEN];
|
||||
__le16 reserved;
|
||||
} __packed;
|
||||
|
||||
/* WMI_TOF_SESSION_START_CMDID */
|
||||
struct wmi_tof_session_start_cmd {
|
||||
__le32 session_id;
|
||||
u8 num_of_aoa_measures;
|
||||
u8 aoa_type;
|
||||
__le16 num_of_dest;
|
||||
u8 reserved[4];
|
||||
struct wmi_ftm_dest_info ftm_dest_info[0];
|
||||
} __packed;
|
||||
|
||||
enum wmi_tof_channel_info_report_type {
|
||||
WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1,
|
||||
WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2,
|
||||
WMI_TOF_CHANNEL_INFO_TYPE_SNR = 0x4,
|
||||
WMI_TOF_CHANNEL_INFO_TYPE_DEBUG_DATA = 0x8,
|
||||
WMI_TOF_CHANNEL_INFO_TYPE_VENDOR_SPECIFIC = 0x10,
|
||||
};
|
||||
|
||||
/* WMI_TOF_CHANNEL_INFO_CMDID */
|
||||
struct wmi_tof_channel_info_cmd {
|
||||
/* wmi_tof_channel_info_report_type_e */
|
||||
__le32 channel_info_report_request;
|
||||
} __packed;
|
||||
|
||||
/* WMI Events
|
||||
* List of Events (target to host)
|
||||
*/
|
||||
@ -868,6 +992,8 @@ enum wmi_event_id {
|
||||
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
|
||||
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
|
||||
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
|
||||
WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
|
||||
WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
|
||||
WMI_OTP_READ_RESULT_EVENTID = 0x1856,
|
||||
WMI_LED_CFG_DONE_EVENTID = 0x1858,
|
||||
/* Performance monitoring events */
|
||||
@ -877,9 +1003,14 @@ enum wmi_event_id {
|
||||
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
|
||||
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
|
||||
WMI_VRING_EN_EVENTID = 0x1865,
|
||||
WMI_GET_RF_STATUS_EVENTID = 0x1866,
|
||||
WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
|
||||
WMI_UNIT_TEST_EVENTID = 0x1900,
|
||||
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
|
||||
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
|
||||
/* Power management */
|
||||
WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
|
||||
WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
|
||||
/* P2P */
|
||||
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
|
||||
WMI_PORT_ALLOCATED_EVENTID = 0x1911,
|
||||
@ -891,6 +1022,25 @@ enum wmi_event_id {
|
||||
WMI_PCP_STARTED_EVENTID = 0x1918,
|
||||
WMI_PCP_STOPPED_EVENTID = 0x1919,
|
||||
WMI_PCP_FACTOR_EVENTID = 0x191A,
|
||||
/* Power Save Configuration Events */
|
||||
WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
|
||||
/* Not supported yet */
|
||||
WMI_PS_DEV_CFG_EVENTID = 0x191D,
|
||||
/* Not supported yet */
|
||||
WMI_PS_DEV_CFG_READ_EVENTID = 0x191E,
|
||||
/* Not supported yet */
|
||||
WMI_PS_MID_CFG_EVENTID = 0x191F,
|
||||
/* Not supported yet */
|
||||
WMI_PS_MID_CFG_READ_EVENTID = 0x1920,
|
||||
WMI_RS_CFG_DONE_EVENTID = 0x1921,
|
||||
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
|
||||
WMI_AOA_MEAS_EVENTID = 0x1923,
|
||||
WMI_TOF_SESSION_END_EVENTID = 0x1991,
|
||||
WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
|
||||
WMI_TOF_SET_LCR_EVENTID = 0x1993,
|
||||
WMI_TOF_SET_LCI_EVENTID = 0x1994,
|
||||
WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995,
|
||||
WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996,
|
||||
WMI_SET_CHANNEL_EVENTID = 0x9000,
|
||||
WMI_ASSOC_REQ_EVENTID = 0x9001,
|
||||
WMI_EAPOL_RX_EVENTID = 0x9002,
|
||||
@ -943,10 +1093,85 @@ struct wmi_get_status_done_event {
|
||||
|
||||
/* WMI_FW_VER_EVENTID */
struct wmi_fw_ver_event {
u8 major;
u8 minor;
__le16 subminor;
__le16 build;
/* FW image version */
__le32 fw_major;
__le32 fw_minor;
__le32 fw_subminor;
__le32 fw_build;
/* FW image build time stamp */
__le32 hour;
__le32 minute;
__le32 second;
__le32 day;
__le32 month;
__le32 year;
/* Boot Loader image version */
__le32 bl_major;
__le32 bl_minor;
__le32 bl_subminor;
__le32 bl_build;
/* The number of entries in the FW capabilies array */
u8 fw_capabilities_len;
u8 reserved[3];
/* FW capabilities info
 * Must be the last member of the struct
 */
__le32 fw_capabilities[0];
} __packed;
|
||||
|
||||
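Editor's note: a brief sketch (illustration only, helper name hypothetical) of how the variable-length capability array at the end of the event above would be walked; per the struct, fw_capabilities_len counts __le32 words and the array must be the last member.

/* Hypothetical debug helper: dump the FW capability words carried in a
 * WMI_FW_VER_EVENTID payload.  Assumes the event body has already been
 * length-checked against fw_capabilities_len.
 */
static void example_dump_fw_ver_caps(struct wil6210_priv *wil,
				     const struct wmi_fw_ver_event *evt)
{
	int i;

	for (i = 0; i < evt->fw_capabilities_len; i++)
		wil_info(wil, "FW capability word[%d] = 0x%08x\n",
			 i, le32_to_cpu(evt->fw_capabilities[i]));
}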
/* WMI_GET_RF_STATUS_EVENTID */
|
||||
enum rf_type {
|
||||
RF_UNKNOWN = 0x00,
|
||||
RF_MARLON = 0x01,
|
||||
RF_SPARROW = 0x02,
|
||||
};
|
||||
|
||||
/* WMI_GET_RF_STATUS_EVENTID */
|
||||
enum board_file_rf_type {
|
||||
BF_RF_MARLON = 0x00,
|
||||
BF_RF_SPARROW = 0x01,
|
||||
};
|
||||
|
||||
/* WMI_GET_RF_STATUS_EVENTID */
|
||||
enum rf_status {
|
||||
RF_OK = 0x00,
|
||||
RF_NO_COMM = 0x01,
|
||||
RF_WRONG_BOARD_FILE = 0x02,
|
||||
};
|
||||
|
||||
/* WMI_GET_RF_STATUS_EVENTID */
|
||||
struct wmi_get_rf_status_event {
|
||||
/* enum rf_type */
|
||||
__le32 rf_type;
|
||||
/* attached RFs bit vector */
|
||||
__le32 attached_rf_vector;
|
||||
/* enabled RFs bit vector */
|
||||
__le32 enabled_rf_vector;
|
||||
/* enum rf_status, refers to enabled RFs */
|
||||
u8 rf_status[32];
|
||||
/* enum board file RF type */
|
||||
__le32 board_file_rf_type;
|
||||
/* board file platform type */
|
||||
__le32 board_file_platform_type;
|
||||
/* board file version */
|
||||
__le32 board_file_version;
|
||||
__le32 reserved[2];
|
||||
} __packed;
|
||||
|
||||
/* WMI_GET_BASEBAND_TYPE_EVENTID */
|
||||
enum baseband_type {
|
||||
BASEBAND_UNKNOWN = 0x00,
|
||||
BASEBAND_SPARROW_M_A0 = 0x03,
|
||||
BASEBAND_SPARROW_M_A1 = 0x04,
|
||||
BASEBAND_SPARROW_M_B0 = 0x05,
|
||||
BASEBAND_SPARROW_M_C0 = 0x06,
|
||||
BASEBAND_SPARROW_M_D0 = 0x07,
|
||||
};
|
||||
|
||||
/* WMI_GET_BASEBAND_TYPE_EVENTID */
|
||||
struct wmi_get_baseband_type_event {
|
||||
/* enum baseband_type */
|
||||
__le32 baseband_type;
|
||||
} __packed;
|
||||
|
||||
/* WMI_MAC_ADDR_RESP_EVENTID */
|
||||
@ -1410,4 +1635,553 @@ struct wmi_led_cfg_done_event {
|
||||
__le32 status;
|
||||
} __packed;
|
||||
|
||||
#define WMI_NUM_MCS (13)
|
||||
|
||||
/* Rate search parameters configuration per connection */
|
||||
struct wmi_rs_cfg {
|
||||
/* The maximal allowed PER for each MCS
|
||||
* MCS will be considered as failed if PER during RS is higher
|
||||
*/
|
||||
u8 per_threshold[WMI_NUM_MCS];
|
||||
/* Number of MPDUs for each MCS
|
||||
* this is the minimal statistic required to make an educated
|
||||
* decision
|
||||
*/
|
||||
u8 min_frame_cnt[WMI_NUM_MCS];
|
||||
/* stop threshold [0-100] */
|
||||
u8 stop_th;
|
||||
/* MCS1 stop threshold [0-100] */
|
||||
u8 mcs1_fail_th;
|
||||
u8 max_back_failure_th;
|
||||
/* Debug feature for disabling internal RS trigger (which is
|
||||
* currently triggered by BF Done)
|
||||
*/
|
||||
u8 dbg_disable_internal_trigger;
|
||||
__le32 back_failure_mask;
|
||||
__le32 mcs_en_vec;
|
||||
} __packed;
|
||||
|
||||
/* WMI_RS_CFG_CMDID */
|
||||
struct wmi_rs_cfg_cmd {
|
||||
/* connection id */
|
||||
u8 cid;
|
||||
/* enable or disable rate search */
|
||||
u8 rs_enable;
|
||||
/* rate search configuration */
|
||||
struct wmi_rs_cfg rs_cfg;
|
||||
} __packed;
|
||||
|
||||
/* WMI_RS_CFG_DONE_EVENTID */
|
||||
struct wmi_rs_cfg_done_event {
|
||||
u8 cid;
|
||||
/* enum wmi_fw_status */
|
||||
u8 status;
|
||||
u8 reserved[2];
|
||||
} __packed;
|
||||
|
||||
/* WMI_GET_DETAILED_RS_RES_CMDID */
|
||||
struct wmi_get_detailed_rs_res_cmd {
|
||||
/* connection id */
|
||||
u8 cid;
|
||||
u8 reserved[3];
|
||||
} __packed;
|
||||
|
||||
/* RS results status */
|
||||
enum wmi_rs_results_status {
|
||||
WMI_RS_RES_VALID = 0x00,
|
||||
WMI_RS_RES_INVALID = 0x01,
|
||||
};
|
||||
|
||||
/* Rate search results */
|
||||
struct wmi_rs_results {
|
||||
/* number of sent MPDUs */
|
||||
u8 num_of_tx_pkt[WMI_NUM_MCS];
|
||||
/* number of non-acked MPDUs */
|
||||
u8 num_of_non_acked_pkt[WMI_NUM_MCS];
|
||||
/* RS timestamp */
|
||||
__le32 tsf;
|
||||
/* RS selected MCS */
|
||||
u8 mcs;
|
||||
} __packed;
|
||||
|
||||
/* WMI_GET_DETAILED_RS_RES_EVENTID */
|
||||
struct wmi_get_detailed_rs_res_event {
|
||||
u8 cid;
|
||||
/* enum wmi_rs_results_status */
|
||||
u8 status;
|
||||
/* detailed rs results */
|
||||
struct wmi_rs_results rs_results;
|
||||
u8 reserved[3];
|
||||
} __packed;
|
||||
|
||||
/* broadcast connection ID */
|
||||
#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
|
||||
|
||||
/* Types wmi_link_maintain_cfg presets for WMI_LINK_MAINTAIN_CFG_WRITE_CMD */
|
||||
enum wmi_link_maintain_cfg_type {
|
||||
/* AP/PCP default normal (non-FST) configuration settings */
|
||||
WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP = 0x00,
|
||||
/* AP/PCP default FST configuration settings */
|
||||
WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP = 0x01,
|
||||
/* STA default normal (non-FST) configuration settings */
|
||||
WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA = 0x02,
|
||||
/* STA default FST configuration settings */
|
||||
WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA = 0x03,
|
||||
/* custom configuration settings */
|
||||
WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM = 0x04,
|
||||
/* number of defined configuration types */
|
||||
WMI_LINK_MAINTAIN_CFG_TYPES_NUM = 0x05,
|
||||
};
|
||||
|
||||
/* Response status codes for WMI_LINK_MAINTAIN_CFG_WRITE/READ commands */
|
||||
enum wmi_link_maintain_cfg_response_status {
|
||||
/* WMI_LINK_MAINTAIN_CFG_WRITE/READ command successfully accomplished
|
||||
*/
|
||||
WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_OK = 0x00,
|
||||
/* ERROR due to bad argument in WMI_LINK_MAINTAIN_CFG_WRITE/READ
|
||||
* command request
|
||||
*/
|
||||
WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_BAD_ARGUMENT = 0x01,
|
||||
};
|
||||
|
||||
/* Link Loss and Keep Alive configuration */
|
||||
struct wmi_link_maintain_cfg {
|
||||
/* link_loss_enable_detectors_vec */
|
||||
__le32 link_loss_enable_detectors_vec;
|
||||
/* detectors check period usec */
|
||||
__le32 check_link_loss_period_usec;
|
||||
/* max allowed tx ageing */
|
||||
__le32 tx_ageing_threshold_usec;
|
||||
/* keep alive period for high SNR */
|
||||
__le32 keep_alive_period_usec_high_snr;
|
||||
/* keep alive period for low SNR */
|
||||
__le32 keep_alive_period_usec_low_snr;
|
||||
/* lower snr limit for keep alive period update */
|
||||
__le32 keep_alive_snr_threshold_low_db;
|
||||
/* upper snr limit for keep alive period update */
|
||||
__le32 keep_alive_snr_threshold_high_db;
|
||||
/* num of successive bad bcons causing link-loss */
|
||||
__le32 bad_beacons_num_threshold;
|
||||
/* SNR limit for bad_beacons_detector */
|
||||
__le32 bad_beacons_snr_threshold_db;
|
||||
} __packed;
|
||||
|
||||
/* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */
|
||||
struct wmi_link_maintain_cfg_write_cmd {
|
||||
/* enum wmi_link_maintain_cfg_type_e - type of requested default
|
||||
* configuration to be applied
|
||||
*/
|
||||
__le32 cfg_type;
|
||||
/* requested connection ID or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */
|
||||
__le32 cid;
|
||||
/* custom configuration settings to be applied (relevant only if
|
||||
* cfg_type==WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM)
|
||||
*/
|
||||
struct wmi_link_maintain_cfg lm_cfg;
|
||||
} __packed;
|
||||
|
||||
/* WMI_LINK_MAINTAIN_CFG_READ_CMDID */
|
||||
struct wmi_link_maintain_cfg_read_cmd {
|
||||
/* connection ID which configuration settings are requested */
|
||||
__le32 cid;
|
||||
} __packed;
|
||||
|
||||
/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
|
||||
struct wmi_link_maintain_cfg_write_done_event {
|
||||
/* requested connection ID */
|
||||
__le32 cid;
|
||||
/* wmi_link_maintain_cfg_response_status_e - write status */
|
||||
__le32 status;
|
||||
} __packed;
|
||||
|
||||
/* \WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENT */
|
||||
struct wmi_link_maintain_cfg_read_done_event {
|
||||
/* requested connection ID */
|
||||
__le32 cid;
|
||||
/* wmi_link_maintain_cfg_response_status_e - read status */
|
||||
__le32 status;
|
||||
/* Retrieved configuration settings */
|
||||
struct wmi_link_maintain_cfg lm_cfg;
|
||||
} __packed;
|
||||
|
||||
enum wmi_traffic_deferral_status {
|
||||
WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
|
||||
WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
|
||||
};
|
||||
|
||||
/* WMI_TRAFFIC_DEFERRAL_EVENTID */
|
||||
struct wmi_traffic_deferral_event {
|
||||
/* enum wmi_traffic_deferral_status_e */
|
||||
u8 status;
|
||||
} __packed;
|
||||
|
||||
enum wmi_traffic_resume_status {
|
||||
WMI_TRAFFIC_RESUME_SUCCESS = 0x0,
|
||||
WMI_TRAFFIC_RESUME_FAILED = 0x1,
|
||||
};
|
||||
|
||||
/* WMI_TRAFFIC_RESUME_EVENTID */
|
||||
struct wmi_traffic_resume_event {
|
||||
/* enum wmi_traffic_resume_status_e */
|
||||
u8 status;
|
||||
} __packed;
|
||||
|
||||
/* Power Save command completion status codes */
|
||||
enum wmi_ps_cfg_cmd_status {
|
||||
WMI_PS_CFG_CMD_STATUS_SUCCESS = 0x00,
|
||||
WMI_PS_CFG_CMD_STATUS_BAD_PARAM = 0x01,
|
||||
/* other error */
|
||||
WMI_PS_CFG_CMD_STATUS_ERROR = 0x02,
|
||||
};
|
||||
|
||||
/* Device Power Save Profiles */
|
||||
enum wmi_ps_profile_type {
|
||||
WMI_PS_PROFILE_TYPE_DEFAULT = 0x00,
|
||||
WMI_PS_PROFILE_TYPE_PS_DISABLED = 0x01,
|
||||
WMI_PS_PROFILE_TYPE_MAX_PS = 0x02,
|
||||
WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03,
|
||||
};
|
||||
|
||||
/* WMI_PS_DEV_PROFILE_CFG_CMDID
|
||||
*
|
||||
* Power save profile to be used by the device
|
||||
*
|
||||
* Returned event:
|
||||
* - WMI_PS_DEV_PROFILE_CFG_EVENTID
|
||||
*/
|
||||
struct wmi_ps_dev_profile_cfg_cmd {
|
||||
/* wmi_ps_profile_type_e */
|
||||
u8 ps_profile;
|
||||
u8 reserved[3];
|
||||
} __packed;
|
||||
|
||||
/* WMI_PS_DEV_PROFILE_CFG_EVENTID */
|
||||
struct wmi_ps_dev_profile_cfg_event {
|
||||
/* wmi_ps_cfg_cmd_status_e */
|
||||
__le32 status;
|
||||
} __packed;
|
||||
|
||||
enum wmi_ps_level {
|
||||
WMI_PS_LEVEL_DEEP_SLEEP = 0x00,
|
||||
WMI_PS_LEVEL_SHALLOW_SLEEP = 0x01,
|
||||
/* awake = all PS mechanisms are disabled */
|
||||
WMI_PS_LEVEL_AWAKE = 0x02,
|
||||
};
|
||||
|
||||
enum wmi_ps_deep_sleep_clk_level {
|
||||
/* 33k */
|
||||
WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC = 0x00,
|
||||
/* 10k */
|
||||
WMI_PS_DEEP_SLEEP_CLK_LEVEL_OSC = 0x01,
|
||||
/* @RTC Low latency */
|
||||
WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC_LT = 0x02,
|
||||
WMI_PS_DEEP_SLEEP_CLK_LEVEL_XTAL = 0x03,
|
||||
WMI_PS_DEEP_SLEEP_CLK_LEVEL_SYSCLK = 0x04,
|
||||
/* Not Applicable */
|
||||
WMI_PS_DEEP_SLEEP_CLK_LEVEL_N_A = 0xFF,
|
||||
};
|
||||
|
||||
/* Response by the FW to a D3 entry request */
|
||||
enum wmi_ps_d3_resp_policy {
|
||||
WMI_PS_D3_RESP_POLICY_DEFAULT = 0x00,
|
||||
/* debug -D3 req is always denied */
|
||||
WMI_PS_D3_RESP_POLICY_DENIED = 0x01,
|
||||
/* debug -D3 req is always approved */
|
||||
WMI_PS_D3_RESP_POLICY_APPROVED = 0x02,
|
||||
};
|
||||
|
||||
/* Device common power save configurations */
|
||||
struct wmi_ps_dev_cfg {
|
||||
/* lowest level of PS allowed while unassociated, enum wmi_ps_level_e
|
||||
*/
|
||||
u8 ps_unassoc_min_level;
|
||||
/* lowest deep sleep clock level while nonassoc, enum
|
||||
* wmi_ps_deep_sleep_clk_level_e
|
||||
*/
|
||||
u8 ps_unassoc_deep_sleep_min_level;
|
||||
/* lowest level of PS allowed while associated, enum wmi_ps_level_e */
|
||||
u8 ps_assoc_min_level;
|
||||
/* lowest deep sleep clock level while assoc, enum
|
||||
* wmi_ps_deep_sleep_clk_level_e
|
||||
*/
|
||||
u8 ps_assoc_deep_sleep_min_level;
|
||||
/* enum wmi_ps_deep_sleep_clk_level_e */
|
||||
u8 ps_assoc_low_latency_ds_min_level;
|
||||
/* enum wmi_ps_d3_resp_policy_e */
|
||||
u8 ps_D3_response_policy;
|
||||
/* BOOL */
|
||||
u8 ps_D3_pm_pme_enabled;
|
||||
/* BOOL */
|
||||
u8 ps_halp_enable;
|
||||
u8 ps_deep_sleep_enter_thresh_msec;
|
||||
/* BOOL */
|
||||
u8 ps_voltage_scaling_en;
|
||||
} __packed;
|
||||
|
||||
/* WMI_PS_DEV_CFG_CMDID
|
||||
*
|
||||
* Configure common Power Save parameters of the device and all MIDs.
|
||||
*
|
||||
* Returned event:
|
||||
* - WMI_PS_DEV_CFG_EVENTID
|
||||
*/
|
||||
struct wmi_ps_dev_cfg_cmd {
|
||||
/* Device Power Save configuration to be applied */
|
||||
struct wmi_ps_dev_cfg ps_dev_cfg;
|
||||
/* alignment to 32b */
|
||||
u8 reserved[2];
|
||||
} __packed;
|
||||
|
||||
/* WMI_PS_DEV_CFG_EVENTID */
|
||||
struct wmi_ps_dev_cfg_event {
|
||||
/* wmi_ps_cfg_cmd_status_e */
|
||||
__le32 status;
|
||||
} __packed;

/* WMI_PS_DEV_CFG_READ_CMDID
*
* request to retrieve device Power Save configuration
* (WMI_PS_DEV_CFG_CMD params)
*
* Returned event:
* - WMI_PS_DEV_CFG_READ_EVENTID
*/
struct wmi_ps_dev_cfg_read_cmd {
__le32 reserved;
} __packed;

/* WMI_PS_DEV_CFG_READ_EVENTID */
struct wmi_ps_dev_cfg_read_event {
/* wmi_ps_cfg_cmd_status_e */
__le32 status;
/* Retrieved device Power Save configuration (WMI_PS_DEV_CFG_CMD
* params)
*/
struct wmi_ps_dev_cfg dev_ps_cfg;
/* alignment to 32b */
u8 reserved[2];
} __packed;

/* Per Mac Power Save configurations */
struct wmi_ps_mid_cfg {
/* Low power RX in BTI is enabled, BOOL */
u8 beacon_lprx_enable;
/* Sync to sector ID enabled, BOOL */
u8 beacon_sync_to_sectorId_enable;
/* Low power RX in DTI is enabled, BOOL */
u8 frame_exchange_lprx_enable;
/* Sleep Cycle while in scheduled PS, 1-31 */
u8 scheduled_sleep_cycle_pow2;
/* Stay Awake for k BIs every (sleep_cycle - k) BIs, 1-31 */
u8 scheduled_num_of_awake_bis;
u8 am_to_traffic_load_thresh_mbp;
u8 traffic_to_am_load_thresh_mbps;
u8 traffic_to_am_num_of_no_traffic_bis;
/* BOOL */
u8 continuous_traffic_psm;
__le16 no_traffic_to_min_usec;
__le16 no_traffic_to_max_usec;
__le16 snoozing_sleep_interval_milisec;
u8 max_no_data_awake_events;
/* Trigger WEB after k failed beacons */
u8 num_of_failed_beacons_rx_to_trigger_web;
/* Trigger BF after k failed beacons */
u8 num_of_failed_beacons_rx_to_trigger_bf;
/* Trigger SOB after k successful beacons */
u8 num_of_successful_beacons_rx_to_trigger_sob;
} __packed;
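
A small worked example of the scheduled-PS fields above. It assumes, from the field's "pow2" suffix, that the sleep cycle length is 2^scheduled_sleep_cycle_pow2 beacon intervals, of which scheduled_num_of_awake_bis are spent awake; that encoding is an inference for illustration, not something this header states, and the helper name is made up.

/* Sketch only: approximate awake fraction per scheduled-PS cycle, assuming
 * the cycle is 2^scheduled_sleep_cycle_pow2 beacon intervals (an assumption).
 */
static u32 wil_ps_awake_permille(const struct wmi_ps_mid_cfg *cfg)
{
	u32 cycle_bis = 1U << cfg->scheduled_sleep_cycle_pow2; /* e.g. 5 -> 32 BIs */
	u32 awake_bis = cfg->scheduled_num_of_awake_bis;       /* e.g. 3 awake BIs */

	return awake_bis * 1000 / cycle_bis; /* 3 of 32 BIs -> 93 per mille awake */
}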

/* WMI_PS_MID_CFG_CMDID
*
* Configure Power Save parameters of a specific MID.
* These parameters are relevant for the specific BSS this MID belongs to.
*
* Returned event:
* - WMI_PS_MID_CFG_EVENTID
*/
struct wmi_ps_mid_cfg_cmd {
/* MAC ID */
u8 mid;
/* mid PS configuration to be applied */
struct wmi_ps_mid_cfg ps_mid_cfg;
} __packed;

/* WMI_PS_MID_CFG_EVENTID */
struct wmi_ps_mid_cfg_event {
/* MAC ID */
u8 mid;
/* alignment to 32b */
u8 reserved[3];
/* wmi_ps_cfg_cmd_status_e */
__le32 status;
} __packed;

/* WMI_PS_MID_CFG_READ_CMDID
*
* request to retrieve Power Save configuration of mid
* (WMI_PS_MID_CFG_CMD params)
*
* Returned event:
* - WMI_PS_MID_CFG_READ_EVENTID
*/
struct wmi_ps_mid_cfg_read_cmd {
/* MAC ID */
u8 mid;
/* alignment to 32b */
u8 reserved[3];
} __packed;

/* WMI_PS_MID_CFG_READ_EVENTID */
struct wmi_ps_mid_cfg_read_event {
/* MAC ID */
u8 mid;
/* Retrieved MID Power Save configuration (WMI_PS_MID_CFG_CMD params) */
struct wmi_ps_mid_cfg mid_ps_cfg;
/* wmi_ps_cfg_cmd_status_e */
__le32 status;
} __packed;

#define WMI_AOA_MAX_DATA_SIZE (128)

enum wmi_aoa_meas_status {
WMI_AOA_MEAS_SUCCESS = 0x00,
WMI_AOA_MEAS_PEER_INCAPABLE = 0x01,
WMI_AOA_MEAS_FAILURE = 0x02,
};

/* WMI_AOA_MEAS_EVENTID */
struct wmi_aoa_meas_event {
u8 mac_addr[WMI_MAC_LEN];
/* channels IDs:
* 0 - 58320 MHz
* 1 - 60480 MHz
* 2 - 62640 MHz
*/
u8 channel;
/* enum wmi_aoa_meas_type */
u8 aoa_meas_type;
/* Measurements are from RFs, defined by the mask */
__le32 meas_rf_mask;
/* enum wmi_aoa_meas_status */
u8 meas_status;
u8 reserved;
/* Length of meas_data in bytes */
__le16 length;
u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
} __packed;
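
A hedged parsing sketch for the AoA event above: check the status, bound the reported length by the fixed-size meas_data buffer, and translate the channel ID using the mapping given in the struct comment. The function and table names are illustrative, not part of the driver.

/* Sketch only: sanity-check a WMI_AOA_MEAS_EVENTID payload. Helper names
 * are made up; the channel mapping comes from the comment in the struct.
 */
static const u32 wil_aoa_chan_khz[] = { 58320000, 60480000, 62640000 };

static int wil_aoa_check_event(const struct wmi_aoa_meas_event *evt)
{
	u16 len = le16_to_cpu(evt->length);

	if (evt->meas_status != WMI_AOA_MEAS_SUCCESS)
		return -EIO;
	if (len > WMI_AOA_MAX_DATA_SIZE)	/* length counts valid bytes in meas_data */
		return -EINVAL;
	if (evt->channel >= ARRAY_SIZE(wil_aoa_chan_khz))
		return -EINVAL;
	pr_debug("AoA from %pM: %u kHz, RF mask 0x%x, %u bytes\n",
		 evt->mac_addr, wil_aoa_chan_khz[evt->channel],
		 le32_to_cpu(evt->meas_rf_mask), len);
	return 0;
}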

/* WMI_TOF_GET_CAPABILITIES_EVENTID */
struct wmi_tof_get_capabilities_event {
u8 ftm_capability;
/* maximum supported number of destinations to start TOF */
u8 max_num_of_dest;
/* maximum supported number of measurements per burst */
u8 max_num_of_meas_per_burst;
u8 reserved;
/* maximum supported multi bursts */
__le16 max_multi_bursts_sessions;
/* maximum supported FTM burst duration, wmi_tof_burst_duration_e */
__le16 max_ftm_burst_duration;
/* AOA supported types */
__le32 aoa_supported_types;
} __packed;

enum wmi_tof_session_end_status {
WMI_TOF_SESSION_END_NO_ERROR = 0x00,
WMI_TOF_SESSION_END_FAIL = 0x01,
WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02,
WMI_TOF_SESSION_END_ABORTED = 0x03,
};

/* WMI_TOF_SESSION_END_EVENTID */
struct wmi_tof_session_end_event {
/* FTM session ID */
__le32 session_id;
/* wmi_tof_session_end_status_e */
u8 status;
u8 reserved[3];
} __packed;

/* Responder FTM Results */
struct wmi_responder_ftm_res {
u8 t1[6];
u8 t2[6];
u8 t3[6];
u8 t4[6];
__le16 tod_err;
__le16 toa_err;
__le16 tod_err_initiator;
__le16 toa_err_initiator;
} __packed;
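
t1..t4 are the four timestamps of one FTM exchange (FTM frame departure and arrival, ACK departure and arrival), stored here as 6-byte values. A sketch of the standard round-trip computation RTT = (t4 - t1) - (t3 - t2) follows; the little-endian byte order and the tick unit are assumptions, since the header does not state them, and the helper names are made up.

/* Sketch only: standard FTM round-trip arithmetic on one record. The 48-bit
 * timestamps are assumed little-endian; the tick unit is device specific.
 */
static u64 wil_ftm_ts48(const u8 ts[6])
{
	u64 v = 0;
	int i;

	for (i = 5; i >= 0; i--)
		v = (v << 8) | ts[i];	/* assemble the 48-bit value */
	return v;
}

static s64 wil_ftm_rtt(const struct wmi_responder_ftm_res *res)
{
	/* RTT = (t4 - t1) - (t3 - t2) */
	return (s64)(wil_ftm_ts48(res->t4) - wil_ftm_ts48(res->t1)) -
	       (s64)(wil_ftm_ts48(res->t3) - wil_ftm_ts48(res->t2));
}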

enum wmi_tof_ftm_per_dest_res_status {
WMI_PER_DEST_RES_NO_ERROR = 0x00,
WMI_PER_DEST_RES_TX_RX_FAIL = 0x01,
WMI_PER_DEST_RES_PARAM_DONT_MATCH = 0x02,
};

enum wmi_tof_ftm_per_dest_res_flags {
WMI_PER_DEST_RES_REQ_START = 0x01,
WMI_PER_DEST_RES_BURST_REPORT_END = 0x02,
WMI_PER_DEST_RES_REQ_END = 0x04,
WMI_PER_DEST_RES_PARAM_UPDATE = 0x08,
};

/* WMI_TOF_FTM_PER_DEST_RES_EVENTID */
struct wmi_tof_ftm_per_dest_res_event {
/* FTM session ID */
__le32 session_id;
/* destination MAC address */
u8 dst_mac[WMI_MAC_LEN];
/* wmi_tof_ftm_per_dest_res_flags_e */
u8 flags;
/* wmi_tof_ftm_per_dest_res_status_e */
u8 status;
/* responder ASAP */
u8 responder_asap;
/* responder number of FTM per burst */
u8 responder_num_ftm_per_burst;
/* responder number of FTM burst exponent */
u8 responder_num_ftm_bursts_exp;
/* responder burst duration, wmi_tof_burst_duration_e */
u8 responder_burst_duration;
/* responder burst period, indicates the interval between two consecutive
* burst instances, in units of 100 ms
*/
__le16 responder_burst_period;
/* receive burst counter */
__le16 bursts_cnt;
/* tsf of responder start burst */
__le32 tsf_sync;
/* actual received ftm per burst */
u8 actual_ftm_per_burst;
u8 reserved0[7];
struct wmi_responder_ftm_res responder_ftm_res[0];
} __packed;
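
A sketch of walking the per-destination result above. The trailing responder_ftm_res[] records are assumed to be counted by actual_ftm_per_burst, and the burst period is converted from its 100 ms units; both points follow the field comments, but the record count is not spelled out explicitly, so treat it as an assumption. wil_ftm_rtt() refers to the sketch after wmi_responder_ftm_res, and the function name here is made up.

/* Sketch only: iterate the flexible responder_ftm_res[] array, assuming
 * actual_ftm_per_burst records follow the fixed part of the event.
 */
static void wil_ftm_dump_dest_res(const struct wmi_tof_ftm_per_dest_res_event *evt)
{
	u32 period_ms = le16_to_cpu(evt->responder_burst_period) * 100; /* 100 ms units */
	int i;

	pr_debug("dest %pM: status %u, burst period %u ms, %u FTM records\n",
		 evt->dst_mac, evt->status, period_ms, evt->actual_ftm_per_burst);

	for (i = 0; i < evt->actual_ftm_per_burst; i++)
		pr_debug("  ftm[%d]: rtt %lld ticks\n", i,
			 (long long)wil_ftm_rtt(&evt->responder_ftm_res[i]));
}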

enum wmi_tof_channel_info_type {
WMI_TOF_CHANNEL_INFO_AOA = 0x00,
WMI_TOF_CHANNEL_INFO_LCI = 0x01,
WMI_TOF_CHANNEL_INFO_LCR = 0x02,
WMI_TOF_CHANNEL_INFO_VENDOR_SPECIFIC = 0x03,
WMI_TOF_CHANNEL_INFO_CIR = 0x04,
WMI_TOF_CHANNEL_INFO_RSSI = 0x05,
WMI_TOF_CHANNEL_INFO_SNR = 0x06,
WMI_TOF_CHANNEL_INFO_DEBUG = 0x07,
};

/* WMI_TOF_CHANNEL_INFO_EVENTID */
struct wmi_tof_channel_info_event {
/* FTM session ID */
__le32 session_id;
/* destination MAC address */
u8 dst_mac[WMI_MAC_LEN];
/* wmi_tof_channel_info_type_e */
u8 type;
/* data report length */
u8 len;
/* data report payload */
u8 report[0];
} __packed;

#endif /* __WILOCITY_WMI_H__ */

@ -1101,6 +1101,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
|
||||
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
|
||||
|
@ -3884,11 +3884,11 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
|
||||
if (!check_vif_up(ifp->vif))
|
||||
return -EIO;
|
||||
|
||||
brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", &pmksa->bssid);
|
||||
brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid);
|
||||
|
||||
npmk = le32_to_cpu(cfg->pmk_list.npmk);
|
||||
for (i = 0; i < npmk; i++)
|
||||
if (!memcmp(&pmksa->bssid, &pmk[i].bssid, ETH_ALEN))
|
||||
if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN))
|
||||
break;
|
||||
|
||||
if ((npmk > 0) && (i < npmk)) {
|
||||
|
@ -313,6 +313,7 @@ struct rte_console {
|
||||
|
||||
#define KSO_WAIT_US 50
|
||||
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
|
||||
#define BRCMF_SDIO_MAX_ACCESS_ERRORS 5
|
||||
|
||||
/*
|
||||
* Conversion of 802.1D priority to precedence level
|
||||
@ -677,6 +678,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
|
||||
{
|
||||
u8 wr_val = 0, rd_val, cmp_val, bmask;
|
||||
int err = 0;
|
||||
int err_cnt = 0;
|
||||
int try_cnt = 0;
|
||||
|
||||
brcmf_dbg(TRACE, "Enter: on=%d\n", on);
|
||||
@ -712,9 +714,14 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
|
||||
*/
|
||||
rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
|
||||
&err);
|
||||
if (((rd_val & bmask) == cmp_val) && !err)
|
||||
if (!err) {
|
||||
if ((rd_val & bmask) == cmp_val)
|
||||
break;
|
||||
err_cnt = 0;
|
||||
}
|
||||
/* bail out upon subsequent access errors */
|
||||
if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
|
||||
break;
|
||||
|
||||
udelay(KSO_WAIT_US);
|
||||
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
|
||||
wr_val, &err);
|
||||
@ -3757,7 +3764,8 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
|
||||
u32 val, rev;
|
||||
|
||||
val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
|
||||
if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
|
||||
if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
|
||||
sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
|
||||
addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
|
||||
rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
|
||||
if (rev >= 2) {
|
||||
|
@ -19,6 +19,7 @@
|
||||
#ifndef __CHECKER__
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include "tracepoint.h"
|
||||
#include "debug.h"
|
||||
|
||||
void __brcmf_err(const char *func, const char *fmt, ...)
|
||||
{
|
||||
|
@ -1458,11 +1458,15 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf)
|
||||
#define BRCMF_USB_DEVICE(dev_id) \
|
||||
{ USB_DEVICE(BRCM_USB_VENDOR_ID_BROADCOM, dev_id) }
|
||||
|
||||
#define LINKSYS_USB_DEVICE(dev_id) \
|
||||
{ USB_DEVICE(BRCM_USB_VENDOR_ID_LINKSYS, dev_id) }
|
||||
|
||||
static struct usb_device_id brcmf_usb_devid_table[] = {
|
||||
BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID),
|
||||
BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID),
|
||||
BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID),
|
||||
BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID),
|
||||
LINKSYS_USB_DEVICE(BRCM_USB_43235_LINKSYS_DEVICE_ID),
|
||||
{ USB_DEVICE(BRCM_USB_VENDOR_ID_LG, BRCM_USB_43242_LG_DEVICE_ID) },
|
||||
/* special entry for device with firmware loaded and running */
|
||||
BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID),
|
||||
|
@ -22,6 +22,7 @@
|
||||
|
||||
#define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c
|
||||
#define BRCM_USB_VENDOR_ID_LG 0x043e
|
||||
#define BRCM_USB_VENDOR_ID_LINKSYS 0x13b1
|
||||
#define BRCM_PCIE_VENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
|
||||
|
||||
/* Chipcommon Core Chip IDs */
|
||||
@ -58,6 +59,7 @@
|
||||
|
||||
/* USB Device IDs */
|
||||
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
|
||||
#define BRCM_USB_43235_LINKSYS_DEVICE_ID 0x0039
|
||||
#define BRCM_USB_43236_DEVICE_ID 0xbd17
|
||||
#define BRCM_USB_43242_DEVICE_ID 0xbd1f
|
||||
#define BRCM_USB_43242_LG_DEVICE_ID 0x3101
|
||||
|
@ -2671,7 +2671,7 @@ const struct il_ops il3945_ops = {
|
||||
.send_led_cmd = il3945_send_led_cmd,
|
||||
};
|
||||
|
||||
static struct il_cfg il3945_bg_cfg = {
|
||||
static const struct il_cfg il3945_bg_cfg = {
|
||||
.name = "3945BG",
|
||||
.fw_name_pre = IL3945_FW_PRE,
|
||||
.ucode_api_max = IL3945_UCODE_API_MAX,
|
||||
@ -2700,7 +2700,7 @@ static struct il_cfg il3945_bg_cfg = {
|
||||
},
|
||||
};
|
||||
|
||||
static struct il_cfg il3945_abg_cfg = {
|
||||
static const struct il_cfg il3945_abg_cfg = {
|
||||
.name = "3945ABG",
|
||||
.fw_name_pre = IL3945_FW_PRE,
|
||||
.ucode_api_max = IL3945_UCODE_API_MAX,
|
||||
|
@ -72,15 +72,15 @@
|
||||
#define IWL9000_SMEM_OFFSET 0x400000
|
||||
#define IWL9000_SMEM_LEN 0x68000
|
||||
|
||||
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
|
||||
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
|
||||
#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
|
||||
#define IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-"
|
||||
#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
|
||||
#define IWL9000_MODULE_FIRMWARE(api) \
|
||||
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
|
||||
#define IWL9260_MODULE_FIRMWARE(api) \
|
||||
IWL9260_FW_PRE "-" __stringify(api) ".ucode"
|
||||
#define IWL9260LC_MODULE_FIRMWARE(api) \
|
||||
IWL9260LC_FW_PRE "-" __stringify(api) ".ucode"
|
||||
#define IWL9000LC_MODULE_FIRMWARE(api) \
|
||||
IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
|
||||
|
||||
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
|
||||
|
||||
@ -146,6 +146,16 @@ static const struct iwl_tt_params iwl9000_tt_params = {
|
||||
.mac_addr_from_csr = true, \
|
||||
.rf_id = true
|
||||
|
||||
const struct iwl_cfg iwl9160_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9160",
|
||||
.fw_name_pre = IWL9260_FW_PRE,
|
||||
IWL_DEVICE_9000,
|
||||
.ht_params = &iwl9000_ht_params,
|
||||
.nvm_ver = IWL9000_NVM_VERSION,
|
||||
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
|
||||
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwl9260_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9260",
|
||||
.fw_name_pre = IWL9260_FW_PRE,
|
||||
@ -156,13 +166,9 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
|
||||
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
|
||||
};
|
||||
|
||||
/*
|
||||
* TODO the struct below is for internal testing only this should be
|
||||
* removed by EO 2016~
|
||||
*/
|
||||
const struct iwl_cfg iwl9260lc_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9260",
|
||||
.fw_name_pre = IWL9260LC_FW_PRE,
|
||||
const struct iwl_cfg iwl9270_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9270",
|
||||
.fw_name_pre = IWL9260_FW_PRE,
|
||||
IWL_DEVICE_9000,
|
||||
.ht_params = &iwl9000_ht_params,
|
||||
.nvm_ver = IWL9000_NVM_VERSION,
|
||||
@ -170,8 +176,8 @@ const struct iwl_cfg iwl9260lc_2ac_cfg = {
|
||||
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwl5165_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 5165",
|
||||
const struct iwl_cfg iwl9460_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9460",
|
||||
.fw_name_pre = IWL9000_FW_PRE,
|
||||
IWL_DEVICE_9000,
|
||||
.ht_params = &iwl9000_ht_params,
|
||||
@ -181,6 +187,21 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
|
||||
.integrated = true,
|
||||
};
|
||||
|
||||
/*
|
||||
* TODO the struct below is for internal testing only this should be
|
||||
* removed by EO 2016~
|
||||
*/
|
||||
const struct iwl_cfg iwl9000lc_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9000",
|
||||
.fw_name_pre = IWL9000LC_FW_PRE,
|
||||
IWL_DEVICE_9000,
|
||||
.ht_params = &iwl9000_ht_params,
|
||||
.nvm_ver = IWL9000_NVM_VERSION,
|
||||
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
|
||||
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
|
||||
.integrated = true,
|
||||
};
|
||||
|
||||
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
|
||||
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
|
||||
MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
|
||||
MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
|
||||
|
@ -449,9 +449,11 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
|
||||
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
|
||||
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
|
||||
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
|
||||
extern const struct iwl_cfg iwl9000lc_2ac_cfg;
|
||||
extern const struct iwl_cfg iwl9160_2ac_cfg;
|
||||
extern const struct iwl_cfg iwl9260_2ac_cfg;
|
||||
extern const struct iwl_cfg iwl9260lc_2ac_cfg;
|
||||
extern const struct iwl_cfg iwl5165_2ac_cfg;
|
||||
extern const struct iwl_cfg iwl9270_2ac_cfg;
|
||||
extern const struct iwl_cfg iwl9460_2ac_cfg;
|
||||
extern const struct iwl_cfg iwla000_2ac_cfg;
|
||||
#endif /* CONFIG_IWLMVM */
|
||||
|
||||
|
@ -77,7 +77,6 @@
|
||||
*/
|
||||
#define FH_MEM_LOWER_BOUND (0x1000)
|
||||
#define FH_MEM_UPPER_BOUND (0x2000)
|
||||
#define TFH_MEM_LOWER_BOUND (0xA06000)
|
||||
|
||||
/**
|
||||
* Keep-Warm (KW) buffer base address.
|
||||
@ -120,7 +119,7 @@
|
||||
#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
|
||||
#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
|
||||
/* a000 TFD table address, 64 bit */
|
||||
#define TFH_TFDQ_CBB_TABLE (TFH_MEM_LOWER_BOUND + 0x1C00)
|
||||
#define TFH_TFDQ_CBB_TABLE (0x1C00)
|
||||
|
||||
/* Find TFD CB base pointer for given queue */
|
||||
static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
|
||||
@ -156,7 +155,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
|
||||
* In case of DRAM read address which is not aligned to 128B, the TFH will
|
||||
* enable transfer size which doesn't cross 64B DRAM address boundary.
|
||||
*/
|
||||
#define TFH_TRANSFER_MODE (TFH_MEM_LOWER_BOUND + 0x1F40)
|
||||
#define TFH_TRANSFER_MODE (0x1F40)
|
||||
#define TFH_TRANSFER_MAX_PENDING_REQ 0xc
|
||||
#define TFH_CHUNK_SIZE_128 BIT(8)
|
||||
#define TFH_CHUNK_SPLIT_MODE BIT(10)
|
||||
@ -167,7 +166,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
|
||||
* the start of the TFD first TB.
|
||||
* In case of a DRAM Tx CMD update the TFH will update PN and Key ID
|
||||
*/
|
||||
#define TFH_TXCMD_UPDATE_CFG (TFH_MEM_LOWER_BOUND + 0x1F48)
|
||||
#define TFH_TXCMD_UPDATE_CFG (0x1F48)
|
||||
/*
|
||||
* Controls TX DMA operation
|
||||
*
|
||||
@ -181,22 +180,22 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
|
||||
* set to 1 - interrupt is sent to the driver
|
||||
* Bit 0: Indicates the snoop configuration
|
||||
*/
|
||||
#define TFH_SRV_DMA_CHNL0_CTRL (TFH_MEM_LOWER_BOUND + 0x1F60)
|
||||
#define TFH_SRV_DMA_CHNL0_CTRL (0x1F60)
|
||||
#define TFH_SRV_DMA_SNOOP BIT(0)
|
||||
#define TFH_SRV_DMA_TO_DRIVER BIT(24)
|
||||
#define TFH_SRV_DMA_START BIT(31)
|
||||
|
||||
/* Defines the DMA SRAM write start address to transfer a data block */
|
||||
#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F64)
|
||||
#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64)
|
||||
|
||||
/* Defines the 64bits DRAM start address to read the DMA data block from */
|
||||
#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F68)
|
||||
#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68)
|
||||
|
||||
/*
|
||||
* Defines the number of bytes to transfer from DRAM to SRAM.
|
||||
* Note that this register may be configured with non-dword aligned size.
|
||||
*/
|
||||
#define TFH_SRV_DMA_CHNL0_BC (TFH_MEM_LOWER_BOUND + 0x1F70)
|
||||
#define TFH_SRV_DMA_CHNL0_BC (0x1F70)
|
||||
|
||||
/**
|
||||
* Rx SRAM Control and Status Registers (RSCSR)
|
||||
|
@ -302,22 +302,17 @@
|
||||
#define OSC_CLK_FORCE_CONTROL (0x8)
|
||||
|
||||
#define FH_UCODE_LOAD_STATUS (0x1AF0)
|
||||
#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
|
||||
enum secure_load_status_reg {
|
||||
LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
|
||||
LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
|
||||
LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
|
||||
LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
|
||||
LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
|
||||
};
|
||||
|
||||
#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
|
||||
#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
|
||||
/*
|
||||
* Replacing FH_UCODE_LOAD_STATUS
|
||||
* This register is written by the driver and is read by uCode during boot flow.
|
||||
* Note this address is cleared after MAC reset.
|
||||
*/
|
||||
#define UREG_UCODE_LOAD_STATUS (0xa05c40)
|
||||
|
||||
#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
|
||||
#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
|
||||
|
||||
#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
|
||||
#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
|
||||
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
|
||||
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
|
||||
|
||||
|
@ -432,26 +432,43 @@ struct iwl_mvm_rm_sta_cmd {
|
||||
u8 reserved[3];
|
||||
} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_mgmt_mcast_key_cmd_v1
|
||||
* ( MGMT_MCAST_KEY = 0x1f )
|
||||
* @ctrl_flags: %iwl_sta_key_flag
|
||||
* @igtk:
|
||||
* @k1: unused
|
||||
* @k2: unused
|
||||
* @sta_id: station ID that support IGTK
|
||||
* @key_id:
|
||||
* @receive_seq_cnt: initial RSC/PN needed for replay check
|
||||
*/
|
||||
struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
|
||||
__le32 ctrl_flags;
|
||||
u8 igtk[16];
|
||||
u8 k1[16];
|
||||
u8 k2[16];
|
||||
__le32 key_id;
|
||||
__le32 sta_id;
|
||||
__le64 receive_seq_cnt;
|
||||
} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_mgmt_mcast_key_cmd
|
||||
* ( MGMT_MCAST_KEY = 0x1f )
|
||||
* @ctrl_flags: %iwl_sta_key_flag
|
||||
* @IGTK:
|
||||
* @K1: unused
|
||||
* @K2: unused
|
||||
* @igtk: IGTK master key
|
||||
* @sta_id: station ID that support IGTK
|
||||
* @key_id:
|
||||
* @receive_seq_cnt: initial RSC/PN needed for replay check
|
||||
*/
|
||||
struct iwl_mvm_mgmt_mcast_key_cmd {
|
||||
__le32 ctrl_flags;
|
||||
u8 IGTK[16];
|
||||
u8 K1[16];
|
||||
u8 K2[16];
|
||||
u8 igtk[32];
|
||||
__le32 key_id;
|
||||
__le32 sta_id;
|
||||
__le64 receive_seq_cnt;
|
||||
} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
|
||||
} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
|
||||
|
||||
struct iwl_mvm_wep_key {
|
||||
u8 key_index;
|
||||
|
@ -675,13 +675,21 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
|
||||
tx_resp->frame_count) & 0xfff;
|
||||
}
|
||||
|
||||
/* Available options for the SCD_QUEUE_CFG HCMD */
|
||||
enum iwl_scd_cfg_actions {
|
||||
SCD_CFG_DISABLE_QUEUE = 0x0,
|
||||
SCD_CFG_ENABLE_QUEUE = 0x1,
|
||||
SCD_CFG_UPDATE_QUEUE_TID = 0x2,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
|
||||
* @token:
|
||||
* @sta_id: station id
|
||||
* @tid:
|
||||
* @scd_queue: scheduler queue to configure
|
||||
* @enable: 1 queue enable, 0 queue disable
|
||||
* @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
|
||||
* Value is one of %iwl_scd_cfg_actions options
|
||||
* @aggregate: 1 aggregated queue, 0 otherwise
|
||||
* @tx_fifo: %enum iwl_mvm_tx_fifo
|
||||
* @window: BA window size
|
||||
@ -692,7 +700,7 @@ struct iwl_scd_txq_cfg_cmd {
|
||||
u8 sta_id;
|
||||
u8 tid;
|
||||
u8 scd_queue;
|
||||
u8 enable;
|
||||
u8 action;
|
||||
u8 aggregate;
|
||||
u8 tx_fifo;
|
||||
u8 window;
|
||||
|
@ -482,13 +482,17 @@ struct iwl_nvm_access_cmd {
|
||||
* @block_size: the block size in powers of 2
|
||||
* @block_num: number of blocks specified in the command.
|
||||
* @device_phy_addr: virtual addresses from device side
|
||||
* 32 bit address for API version 1, 64 bit address for API version 2.
|
||||
*/
|
||||
struct iwl_fw_paging_cmd {
|
||||
__le32 flags;
|
||||
__le32 block_size;
|
||||
__le32 block_num;
|
||||
__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
|
||||
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
|
||||
union {
|
||||
__le32 addr32[NUM_OF_FW_PAGING_BLOCKS];
|
||||
__le64 addr64[NUM_OF_FW_PAGING_BLOCKS];
|
||||
} device_phy_addr;
|
||||
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
|
||||
|
||||
/*
|
||||
* Fw items ID's
|
||||
|
@ -385,9 +385,7 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm,
|
||||
/* send paging cmd to FW in case CPU2 has paging image */
|
||||
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
|
||||
{
|
||||
int blk_idx;
|
||||
__le32 dev_phy_addr;
|
||||
struct iwl_fw_paging_cmd fw_paging_cmd = {
|
||||
struct iwl_fw_paging_cmd paging_cmd = {
|
||||
.flags =
|
||||
cpu_to_le32(PAGING_CMD_IS_SECURED |
|
||||
PAGING_CMD_IS_ENABLED |
|
||||
@ -396,18 +394,32 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
|
||||
.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
|
||||
.block_num = cpu_to_le32(mvm->num_of_paging_blk),
|
||||
};
|
||||
int blk_idx, size = sizeof(paging_cmd);
|
||||
|
||||
/* A bit hard coded - but this is the old API and will be deprecated */
|
||||
if (!iwl_mvm_has_new_tx_api(mvm))
|
||||
size -= NUM_OF_FW_PAGING_BLOCKS * 4;
|
||||
|
||||
/* loop over all paging blocks + CSS block */
|
||||
for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
|
||||
dev_phy_addr =
|
||||
cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
|
||||
PAGE_2_EXP_SIZE);
|
||||
fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
|
||||
dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
|
||||
|
||||
addr = addr >> PAGE_2_EXP_SIZE;
|
||||
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
__le64 phy_addr = cpu_to_le64(addr);
|
||||
|
||||
paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
|
||||
} else {
|
||||
__le32 phy_addr = cpu_to_le32(addr);
|
||||
|
||||
paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
|
||||
}
|
||||
}
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
|
||||
IWL_ALWAYS_LONG_GROUP, 0),
|
||||
0, sizeof(fw_paging_cmd), &fw_paging_cmd);
|
||||
0, size, &paging_cmd);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -465,7 +465,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
|
||||
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
|
||||
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
|
||||
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
|
||||
hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
|
||||
hw->wiphy->cipher_suites = mvm->ciphers;
|
||||
@ -490,6 +490,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
mvm->ciphers[hw->wiphy->n_cipher_suites] =
|
||||
WLAN_CIPHER_SUITE_AES_CMAC;
|
||||
hw->wiphy->n_cipher_suites++;
|
||||
if (iwl_mvm_has_new_rx_api(mvm)) {
|
||||
mvm->ciphers[hw->wiphy->n_cipher_suites] =
|
||||
WLAN_CIPHER_SUITE_BIP_GMAC_128;
|
||||
hw->wiphy->n_cipher_suites++;
|
||||
mvm->ciphers[hw->wiphy->n_cipher_suites] =
|
||||
WLAN_CIPHER_SUITE_BIP_GMAC_256;
|
||||
hw->wiphy->n_cipher_suites++;
|
||||
}
|
||||
}
|
||||
|
||||
/* currently FW API supports only one optional cipher scheme */
|
||||
@ -2747,6 +2755,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
|
||||
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_AES_CMAC:
|
||||
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
|
||||
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
|
||||
WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_WEP40:
|
||||
@ -2780,9 +2790,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
|
||||
* GTK on AP interface is a TX-only key, return 0;
|
||||
* on IBSS they're per-station and because we're lazy
|
||||
* we don't support them for RX, so do the same.
|
||||
* CMAC in AP/IBSS modes must be done in software.
|
||||
* CMAC/GMAC in AP/IBSS modes must be done in software.
|
||||
*/
|
||||
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
|
||||
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
|
||||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
|
||||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
|
||||
ret = -EOPNOTSUPP;
|
||||
else
|
||||
ret = 0;
|
||||
|
@ -699,6 +699,10 @@ struct iwl_mvm_baid_data {
|
||||
* it. In this state, when a new queue is needed to be allocated but no
|
||||
* such free queue exists, an inactive queue might be freed and given to
|
||||
* the new RA/TID.
|
||||
* @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
|
||||
* This is the state of a queue that has had traffic pass through it, but
|
||||
* needs to be reconfigured for some reason, e.g. the queue needs to
|
||||
* become unshared and aggregations re-enabled on.
|
||||
*/
|
||||
enum iwl_mvm_queue_status {
|
||||
IWL_MVM_QUEUE_FREE,
|
||||
@ -706,10 +710,11 @@ enum iwl_mvm_queue_status {
|
||||
IWL_MVM_QUEUE_READY,
|
||||
IWL_MVM_QUEUE_SHARED,
|
||||
IWL_MVM_QUEUE_INACTIVE,
|
||||
IWL_MVM_QUEUE_RECONFIGURING,
|
||||
};
|
||||
|
||||
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
|
||||
#define IWL_MVM_NUM_CIPHERS 8
|
||||
#define IWL_MVM_NUM_CIPHERS 10
|
||||
|
||||
struct iwl_mvm {
|
||||
/* for logger access */
|
||||
@ -769,6 +774,7 @@ struct iwl_mvm {
|
||||
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
|
||||
bool reserved; /* Is this the TXQ reserved for a STA */
|
||||
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
|
||||
u8 txq_tid; /* The TID "owner" of this queue*/
|
||||
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
|
||||
/* Timestamp for inactivation per TID of this queue */
|
||||
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
|
||||
@ -1124,6 +1130,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
|
||||
(mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
|
||||
}
|
||||
|
||||
static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
|
||||
{
|
||||
return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
|
||||
(queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
|
||||
}
|
||||
|
||||
static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
|
||||
{
|
||||
return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
|
||||
(queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
|
||||
}
|
||||
|
||||
static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
|
||||
{
|
||||
bool nvm_lar = mvm->nvm_data->lar_enabled;
|
||||
@ -1194,6 +1212,12 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
|
||||
IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
|
||||
}
|
||||
|
||||
static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
|
||||
{
|
||||
/* TODO - replace with TLV once defined */
|
||||
return mvm->trans->cfg->use_tfh;
|
||||
}
|
||||
|
||||
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
|
||||
{
|
||||
#ifdef CONFIG_THERMAL
|
||||
|
@ -132,6 +132,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
IEEE80211_CCMP_PN_LEN) <= 0)
|
||||
return -1;
|
||||
|
||||
if (!(stats->flag & RX_FLAG_AMSDU_MORE))
|
||||
memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
|
||||
stats->flag |= RX_FLAG_PN_VALIDATED;
|
||||
|
||||
@ -883,6 +884,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
|
||||
u8 *qc = ieee80211_get_qos_ctl(hdr);
|
||||
|
||||
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
|
||||
if (!(desc->amsdu_info &
|
||||
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
|
||||
rx_status->flag |= RX_FLAG_AMSDU_MORE;
|
||||
}
|
||||
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
|
||||
iwl_mvm_agg_rx_received(mvm, baid);
|
||||
|
@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
|
||||
i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
|
||||
continue;
|
||||
|
||||
/* Don't try and take queues being reconfigured */
|
||||
if (mvm->queue_info[queue].status ==
|
||||
IWL_MVM_QUEUE_RECONFIGURING)
|
||||
continue;
|
||||
|
||||
ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
|
||||
}
|
||||
|
||||
@ -501,31 +506,37 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
|
||||
queue = ac_to_queue[IEEE80211_AC_VO];
|
||||
|
||||
/* Make sure queue found (or not) is legal */
|
||||
if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
|
||||
queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
|
||||
(queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
|
||||
queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
|
||||
(queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
|
||||
if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
|
||||
!iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
|
||||
(queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
|
||||
IWL_ERR(mvm, "No DATA queues available to share\n");
|
||||
queue = -ENOSPC;
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
/* Make sure the queue isn't in the middle of being reconfigured */
|
||||
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
|
||||
IWL_ERR(mvm,
|
||||
"TXQ %d is in the middle of re-config - try again\n",
|
||||
queue);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return queue;
|
||||
}
|
||||
|
||||
/*
|
||||
* If a given queue has a higher AC than the TID stream that is being added to
|
||||
* it, the queue needs to be redirected to the lower AC. This function does that
|
||||
* If a given queue has a higher AC than the TID stream that is being compared
|
||||
* to, the queue needs to be redirected to the lower AC. This function does that
|
||||
* in such a case, otherwise - if no redirection required - it does nothing,
|
||||
* unless the %force param is true.
|
||||
*/
|
||||
static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
|
||||
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
|
||||
int ac, int ssn, unsigned int wdg_timeout,
|
||||
bool force)
|
||||
{
|
||||
struct iwl_scd_txq_cfg_cmd cmd = {
|
||||
.scd_queue = queue,
|
||||
.enable = 0,
|
||||
.action = SCD_CFG_DISABLE_QUEUE,
|
||||
};
|
||||
bool shared_queue;
|
||||
unsigned long mq;
|
||||
@ -551,11 +562,12 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
|
||||
|
||||
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
|
||||
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
|
||||
cmd.tid = mvm->queue_info[queue].txq_tid;
|
||||
mq = mvm->queue_info[queue].hw_queue_to_mac80211;
|
||||
shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
|
||||
queue, iwl_mvm_ac_to_tx_fifo[ac]);
|
||||
|
||||
/* Stop MAC queues and wait for this queue to empty */
|
||||
@ -580,6 +592,11 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
|
||||
cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
|
||||
ssn, wdg_timeout);
|
||||
|
||||
/* Update the TID "owner" of the queue */
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
mvm->queue_info[queue].txq_tid = tid;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
|
||||
|
||||
/* Redirect to lower AC */
|
||||
@ -709,7 +726,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
||||
if (WARN_ON(queue <= 0)) {
|
||||
IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
|
||||
tid, cfg.sta_id);
|
||||
return -ENOSPC;
|
||||
return queue;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -728,7 +745,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
||||
if (using_inactive_queue) {
|
||||
struct iwl_scd_txq_cfg_cmd cmd = {
|
||||
.scd_queue = queue,
|
||||
.enable = 0,
|
||||
.action = SCD_CFG_DISABLE_QUEUE,
|
||||
};
|
||||
u8 ac;
|
||||
|
||||
@ -738,11 +755,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
||||
ac = mvm->queue_info[queue].mac80211_ac;
|
||||
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
|
||||
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
|
||||
cmd.tid = mvm->queue_info[queue].txq_tid;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
/* Disable the queue */
|
||||
iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
|
||||
true);
|
||||
if (disable_agg_tids)
|
||||
iwl_mvm_invalidate_sta_queue(mvm, queue,
|
||||
disable_agg_tids, false);
|
||||
iwl_trans_txq_disable(mvm->trans, queue, false);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
|
||||
&cmd);
|
||||
@ -758,6 +777,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* If TXQ is allocated to another STA, update removal in FW */
|
||||
if (cmd.sta_id != mvmsta->sta_id)
|
||||
iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
@ -827,6 +850,119 @@ out_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
|
||||
{
|
||||
struct iwl_scd_txq_cfg_cmd cmd = {
|
||||
.scd_queue = queue,
|
||||
.action = SCD_CFG_UPDATE_QUEUE_TID,
|
||||
};
|
||||
s8 sta_id;
|
||||
int tid;
|
||||
unsigned long tid_bitmap;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
sta_id = mvm->queue_info[queue].ra_sta_id;
|
||||
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
|
||||
return;
|
||||
|
||||
/* Find any TID for queue */
|
||||
tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
|
||||
cmd.tid = tid;
|
||||
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
|
||||
queue, ret);
|
||||
else
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
|
||||
queue, tid);
|
||||
}
|
||||
|
||||
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
|
||||
{
|
||||
struct ieee80211_sta *sta;
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
s8 sta_id;
|
||||
int tid = -1;
|
||||
unsigned long tid_bitmap;
|
||||
unsigned int wdg_timeout;
|
||||
int ssn;
|
||||
int ret = true;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
sta_id = mvm->queue_info[queue].ra_sta_id;
|
||||
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
/* Find TID for queue, and make sure it is the only one on the queue */
|
||||
tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
|
||||
if (tid_bitmap != BIT(tid)) {
|
||||
IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
|
||||
queue, tid_bitmap);
|
||||
return;
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
|
||||
tid);
|
||||
|
||||
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
|
||||
lockdep_is_held(&mvm->mutex));
|
||||
|
||||
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
|
||||
return;
|
||||
|
||||
mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
|
||||
|
||||
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
|
||||
|
||||
ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
|
||||
tid_to_mac80211_ac[tid], ssn,
|
||||
wdg_timeout, true);
|
||||
if (ret) {
|
||||
IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
|
||||
return;
|
||||
}
|
||||
|
||||
/* If aggs should be turned back on - do it */
|
||||
if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
|
||||
struct iwl_mvm_add_sta_cmd cmd;
|
||||
|
||||
mvmsta->tid_disable_agg &= ~BIT(tid);
|
||||
|
||||
cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
|
||||
cmd.sta_id = mvmsta->sta_id;
|
||||
cmd.add_modify = STA_MODE_MODIFY;
|
||||
cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
|
||||
cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
|
||||
cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
|
||||
iwl_mvm_add_sta_cmd_size(mvm), &cmd);
|
||||
if (!ret) {
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"TXQ #%d is now aggregated again\n",
|
||||
queue);
|
||||
|
||||
/* Mark queue internally as aggregating again */
|
||||
iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
}
|
||||
|
||||
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
|
||||
{
|
||||
if (tid == IWL_MAX_TID_COUNT)
|
||||
@ -894,13 +1030,42 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
|
||||
struct ieee80211_sta *sta;
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
unsigned long deferred_tid_traffic;
|
||||
int sta_id, tid;
|
||||
int queue, sta_id, tid;
|
||||
|
||||
/* Check inactivity of queues */
|
||||
iwl_mvm_inactivity_check(mvm);
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
/* Reconfigure queues requiring reconfiguration */
|
||||
for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
|
||||
bool reconfig;
|
||||
bool change_owner;
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
reconfig = (mvm->queue_info[queue].status ==
|
||||
IWL_MVM_QUEUE_RECONFIGURING);
|
||||
|
||||
/*
|
||||
* We need to take into account a situation in which a TXQ was
|
||||
* allocated to TID x, and then turned shared by adding TIDs y
|
||||
* and z. If TID x becomes inactive and is removed from the TXQ,
|
||||
* ownership must be given to one of the remaining TIDs.
|
||||
* This is mainly because if TID x continues - a new queue can't
|
||||
* be allocated for it as long as it is an owner of another TXQ.
|
||||
*/
|
||||
change_owner = !(mvm->queue_info[queue].tid_bitmap &
|
||||
BIT(mvm->queue_info[queue].txq_tid)) &&
|
||||
(mvm->queue_info[queue].status ==
|
||||
IWL_MVM_QUEUE_SHARED);
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
if (reconfig)
|
||||
iwl_mvm_unshare_queue(mvm, queue);
|
||||
else if (change_owner)
|
||||
iwl_mvm_change_queue_owner(mvm, queue);
|
||||
}
|
||||
|
||||
/* Go over all stations with deferred traffic */
|
||||
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
|
||||
IWL_MVM_STATION_COUNT) {
|
||||
@ -963,6 +1128,61 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* In DQA mode, after a HW restart the queues should be allocated as before, in
|
||||
* order to avoid race conditions when there are shared queues. This function
|
||||
* does the re-mapping and queue allocation.
|
||||
*
|
||||
* Note that re-enabling aggregations isn't done in this function.
|
||||
*/
|
||||
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_sta *mvm_sta)
|
||||
{
|
||||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
|
||||
int i;
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
.sta_id = mvm_sta->sta_id,
|
||||
.frame_limit = IWL_FRAME_LIMIT,
|
||||
};
|
||||
|
||||
/* Make sure reserved queue is still marked as such (or allocated) */
|
||||
mvm->queue_info[mvm_sta->reserved_queue].status =
|
||||
IWL_MVM_QUEUE_RESERVED;
|
||||
|
||||
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
|
||||
struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
|
||||
int txq_id = tid_data->txq_id;
|
||||
int ac;
|
||||
u8 mac_queue;
|
||||
|
||||
if (txq_id == IEEE80211_INVAL_HW_QUEUE)
|
||||
continue;
|
||||
|
||||
skb_queue_head_init(&tid_data->deferred_tx_frames);
|
||||
|
||||
ac = tid_to_mac80211_ac[i];
|
||||
mac_queue = mvm_sta->vif->hw_queue[ac];
|
||||
|
||||
cfg.tid = i;
|
||||
cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
|
||||
cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
|
||||
txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"Re-mapping sta %d tid %d to queue %d\n",
|
||||
mvm_sta->sta_id, i, txq_id);
|
||||
|
||||
iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
|
||||
IEEE80211_SEQ_TO_SN(tid_data->seq_number),
|
||||
&cfg, wdg_timeout);
|
||||
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
|
||||
}
|
||||
|
||||
atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
|
||||
}
|
||||
|
||||
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta)
|
||||
@ -985,6 +1205,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
|
||||
|
||||
spin_lock_init(&mvm_sta->lock);
|
||||
|
||||
/* In DQA mode, if this is a HW restart, re-alloc existing queues */
|
||||
if (iwl_mvm_is_dqa_supported(mvm) &&
|
||||
test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
|
||||
iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
|
||||
goto update_fw;
|
||||
}
|
||||
|
||||
mvm_sta->sta_id = sta_id;
|
||||
mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
|
||||
mvmvif->color);
|
||||
@ -1048,6 +1275,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
|
||||
goto err;
|
||||
}
|
||||
|
||||
update_fw:
|
||||
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -1956,7 +2184,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
spin_lock(&mvm->queue_info_lock);
|
||||
|
||||
/*
|
||||
* Note the possible cases:
|
||||
@ -1967,14 +2195,20 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
* non-DQA mode, since the TXQ hasn't yet been allocated
|
||||
*/
|
||||
txq_id = mvmsta->tid_data[tid].txq_id;
|
||||
if (!iwl_mvm_is_dqa_supported(mvm) ||
|
||||
if (iwl_mvm_is_dqa_supported(mvm) &&
|
||||
unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
|
||||
ret = -ENXIO;
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"Can't start tid %d agg on shared queue!\n",
|
||||
tid);
|
||||
goto release_locks;
|
||||
} else if (!iwl_mvm_is_dqa_supported(mvm) ||
|
||||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
|
||||
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
|
||||
mvm->first_agg_queue,
|
||||
mvm->last_agg_queue);
|
||||
if (txq_id < 0) {
|
||||
ret = txq_id;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
IWL_ERR(mvm, "Failed to allocate agg queue\n");
|
||||
goto release_locks;
|
||||
}
|
||||
@ -1982,7 +2216,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
/* TXQ hasn't yet been enabled, so mark it only as reserved */
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
|
||||
}
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
spin_unlock(&mvm->queue_info_lock);
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"AGG for tid %d will be on queue #%d\n",
|
||||
@ -2006,8 +2241,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
goto out;
|
||||
|
||||
release_locks:
|
||||
spin_unlock(&mvm->queue_info_lock);
|
||||
out:
|
||||
spin_unlock_bh(&mvmsta->lock);
|
||||
|
||||
return ret;
|
||||
@ -2023,6 +2261,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
|
||||
int queue, ret;
|
||||
bool alloc_queue = true;
|
||||
enum iwl_mvm_queue_status queue_status;
|
||||
u16 ssn;
|
||||
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
@ -2048,13 +2287,15 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
|
||||
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
queue_status = mvm->queue_info[queue].status;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
/* In DQA mode, the existing queue might need to be reconfigured */
|
||||
if (iwl_mvm_is_dqa_supported(mvm)) {
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
/* Maybe there is no need to even alloc a queue... */
|
||||
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
|
||||
alloc_queue = false;
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
/*
|
||||
* Only reconfig the SCD for the queue if the window size has
|
||||
@ -2089,9 +2330,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
|
||||
&cfg, wdg_timeout);
|
||||
|
||||
/* Send ADD_STA command to enable aggs only if the queue isn't shared */
|
||||
if (queue_status != IWL_MVM_QUEUE_SHARED) {
|
||||
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
|
||||
if (ret)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* No need to mark as reserved */
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
@ -2123,7 +2367,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
u16 txq_id;
|
||||
int err;
|
||||
|
||||
|
||||
/*
|
||||
* If mac80211 is cleaning its state, then say that we finished since
|
||||
* our state has been cleared anyway.
|
||||
@ -2152,6 +2395,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
*/
|
||||
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
|
||||
|
||||
spin_unlock_bh(&mvm->queue_info_lock);
|
||||
|
||||
switch (tid_data->state) {
|
||||
@ -2412,9 +2656,15 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
|
||||
|
||||
/* verify the key details match the required command's expectations */
|
||||
if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
|
||||
(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
|
||||
(keyconf->keyidx != 4 && keyconf->keyidx != 5)))
|
||||
if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
|
||||
(keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
|
||||
(keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
|
||||
keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
|
||||
keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
|
||||
keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
|
||||
return -EINVAL;
|
||||
|
||||
igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
|
||||
@ -2430,11 +2680,18 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
|
||||
case WLAN_CIPHER_SUITE_AES_CMAC:
|
||||
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
|
||||
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
|
||||
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
|
||||
memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
|
||||
if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
|
||||
igtk_cmd.ctrl_flags |=
|
||||
cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
|
||||
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
|
||||
pn = seq.aes_cmac.pn;
|
||||
igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
|
||||
@ -2449,6 +2706,19 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
|
||||
remove_key ? "removing" : "installing",
|
||||
igtk_cmd.sta_id);
|
||||
|
||||
if (!iwl_mvm_has_new_rx_api(mvm)) {
|
||||
struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
|
||||
.ctrl_flags = igtk_cmd.ctrl_flags,
|
||||
.key_id = igtk_cmd.key_id,
|
||||
.sta_id = igtk_cmd.sta_id,
|
||||
.receive_seq_cnt = igtk_cmd.receive_seq_cnt
|
||||
};
|
||||
|
||||
memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
|
||||
ARRAY_SIZE(igtk_cmd_v1.igtk));
|
||||
return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
|
||||
sizeof(igtk_cmd_v1), &igtk_cmd_v1);
|
||||
}
|
||||
return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
|
||||
sizeof(igtk_cmd), &igtk_cmd);
|
||||
}
|
||||
@ -2573,7 +2843,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
|
||||
}
|
||||
sta_id = mvm_sta->sta_id;
|
||||
|
||||
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
|
||||
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
|
||||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
|
||||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
|
||||
ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
|
||||
goto end;
|
||||
}
|
||||
@ -2659,7 +2931,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
|
||||
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
|
||||
keyconf->keyidx, sta_id);
|
||||
|
||||
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
|
||||
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
|
||||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
|
||||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
|
||||
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
|
||||
|
||||
if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
|
||||
|
@ -554,4 +554,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
|
||||
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
|
||||
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
|
||||
|
||||
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
|
||||
int ac, int ssn, unsigned int wdg_timeout,
|
||||
bool force);
|
||||
|
||||
#endif /* __sta_h__ */
|
||||
|
@ -838,6 +838,22 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
|
||||
}
|
||||
}
|
||||
|
||||
/* Check if there are any timed-out TIDs on a given shared TXQ */
|
||||
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
|
||||
{
|
||||
unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
|
||||
unsigned long now = jiffies;
|
||||
int tid;
|
||||
|
||||
for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
|
||||
if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
|
||||
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Sets the fields in the Tx cmd that are crypto related
|
||||
*/
|
||||
@ -940,7 +956,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
|
||||
spin_unlock(&mvmsta->lock);
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
/* If we are here - TXQ exists and needs to be re-activated */
|
||||
@ -953,9 +968,26 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
txq_id);
|
||||
}
|
||||
|
||||
if (iwl_mvm_is_dqa_supported(mvm)) {
|
||||
/* Keep track of the time of the last frame for this RA/TID */
|
||||
mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
|
||||
|
||||
/*
|
||||
* If we have timed-out TIDs - schedule the worker that will
|
||||
* reconfig the queues and update them
|
||||
*
|
||||
* Note that the mvm->queue_info_lock isn't being taken here in
|
||||
* order to not serialize the TX flow. This isn't dangerous
|
||||
* because scheduling mvm->add_stream_wk can't ruin the state,
|
||||
* and if we DON'T schedule it due to some race condition then
|
||||
* next TX we get here we will.
|
||||
*/
|
||||
if (unlikely(mvm->queue_info[txq_id].status ==
|
||||
IWL_MVM_QUEUE_SHARED &&
|
||||
iwl_mvm_txq_should_update(mvm, txq_id)))
|
||||
schedule_work(&mvm->add_stream_wk);
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
|
||||
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
|
||||
|
||||
|
@ -610,7 +610,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 1,
.action = SCD_CFG_ENABLE_QUEUE,
.window = frame_limit,
.sta_id = sta_id,
.ssn = cpu_to_le16(ssn),
@ -669,6 +669,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
tid_to_mac80211_ac[cfg->tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

mvm->queue_info[queue].txq_tid = cfg->tid;
}

IWL_DEBUG_TX_QUEUES(mvm,
@ -682,7 +684,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
if (enable_queue) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 1,
.action = SCD_CFG_ENABLE_QUEUE,
.window = cfg->frame_limit,
.sta_id = cfg->sta_id,
.ssn = cpu_to_le16(ssn),
@ -709,7 +711,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 0,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool remove_mac_queue = true;
int ret;
@ -744,8 +746,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
~BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount--;

cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
if (!cmd.enable)
cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
if (cmd.action == SCD_CFG_DISABLE_QUEUE)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

IWL_DEBUG_TX_QUEUES(mvm,
@ -755,12 +758,13 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_to_mac80211);

/* If the queue is still enabled - nothing left to do in this func */
if (cmd.enable) {
if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
spin_unlock_bh(&mvm->queue_info_lock);
return;
}

cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tid = mvm->queue_info[queue].txq_tid;

/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount ||
@ -1131,7 +1135,13 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}

/* TODO: if queue was shared - need to re-enable AGGs */
/* If the queue is marked as shared - "unshare" it */
if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
queue);
}
}

void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)

@ -502,20 +502,27 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
|
||||
|
||||
/* 9000 Series */
|
||||
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
|
||||
|
||||
/* a000 Series */
|
||||
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
|
||||
@ -608,7 +615,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
{
|
||||
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
|
||||
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
|
||||
const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
|
||||
struct iwl_trans *iwl_trans;
|
||||
int ret;
|
||||
|
||||
@ -637,11 +643,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
}
|
||||
|
||||
if (iwl_trans->cfg->rf_id) {
|
||||
if (cfg == &iwl9260_2ac_cfg)
|
||||
cfg_9260lc = &iwl9260lc_2ac_cfg;
|
||||
if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
|
||||
cfg = cfg_9260lc;
|
||||
iwl_trans->cfg = cfg_9260lc;
|
||||
if (cfg == &iwl9460_2ac_cfg &&
|
||||
iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
|
||||
cfg = &iwl9000lc_2ac_cfg;
|
||||
iwl_trans->cfg = cfg;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Notify the ucode of the loaded section number and status */
|
||||
/* Notify ucode of loaded section number and status */
|
||||
if (trans->cfg->use_tfh) {
|
||||
val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
|
||||
val = val | (sec_num << shift_param);
|
||||
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
|
||||
} else {
|
||||
val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
|
||||
val = val | (sec_num << shift_param);
|
||||
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
|
||||
}
|
||||
sec_num = (sec_num << 1) | 0x1;
|
||||
}
|
||||
|
||||
@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
|
||||
|
||||
iwl_enable_interrupts(trans);
|
||||
|
||||
if (trans->cfg->use_tfh) {
|
||||
if (cpu == 1)
|
||||
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
|
||||
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
|
||||
0xFFFF);
|
||||
else
|
||||
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
|
||||
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
|
||||
0xFFFFFFFF);
|
||||
} else {
|
||||
if (cpu == 1)
|
||||
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
|
||||
0xFFFF);
|
||||
else
|
||||
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
|
||||
0xFFFFFFFF);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
iwl_set_bits_prph(trans,
|
||||
CSR_UCODE_LOAD_STATUS_ADDR,
|
||||
(LMPM_CPU_UCODE_LOADING_COMPLETED |
|
||||
LMPM_CPU_HDRS_LOADING_COMPLETED |
|
||||
LMPM_CPU_UCODE_LOADING_STARTED) <<
|
||||
shift_param);
|
||||
|
||||
*first_ucode_section = last_read_idx;
|
||||
|
||||
return 0;
|
||||
@ -1960,6 +1969,10 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
|
||||
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
|
||||
txq->q.read_ptr, txq->q.write_ptr);
|
||||
|
||||
if (trans->cfg->use_tfh)
|
||||
/* TODO: access new SCD registers and dump them */
|
||||
return;
|
||||
|
||||
scd_sram_addr = trans_pcie->scd_base_addr +
|
||||
SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
|
||||
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
|
||||
|
@ -703,6 +703,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
|
||||
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
|
||||
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
|
||||
|
||||
if (trans->cfg->use_tfh)
|
||||
return;
|
||||
|
||||
trans_pcie->scd_base_addr =
|
||||
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
|
||||
|
||||
@ -970,11 +973,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
|
||||
}
|
||||
}
|
||||
|
||||
if (trans->cfg->use_tfh)
|
||||
if (trans->cfg->use_tfh) {
|
||||
iwl_write_direct32(trans, TFH_TRANSFER_MODE,
|
||||
TFH_TRANSFER_MAX_PENDING_REQ |
|
||||
TFH_CHUNK_SIZE_128 |
|
||||
TFH_CHUNK_SPLIT_MODE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
|
||||
if (trans->cfg->base_params->num_of_queues > 20)
|
||||
@ -1249,6 +1254,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
|
||||
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
|
||||
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
|
||||
|
||||
if (cfg && trans->cfg->use_tfh)
|
||||
WARN_ONCE(1, "Expected no calls to SCD configuration");
|
||||
|
||||
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
|
||||
|
||||
if (cfg) {
|
||||
@ -1366,6 +1374,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
|
||||
return;
|
||||
}
|
||||
|
||||
if (configure_scd && trans->cfg->use_tfh)
|
||||
WARN_ONCE(1, "Expected no calls to SCD configuration");
|
||||
|
||||
if (configure_scd) {
|
||||
iwl_scd_txq_set_inactive(trans, txq_id);
|
||||
|
||||
|
@ -3041,13 +3041,9 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
p->length > 1024 || !p->pointer)
return -EINVAL;

param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;

if (copy_from_user(param, p->pointer, p->length)) {
ret = -EFAULT;
goto out;
param = memdup_user(p->pointer, p->length);
if (IS_ERR(param)) {
return PTR_ERR(param);
}

if (p->length < sizeof(struct prism2_download_param) +
@ -3803,13 +3799,9 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;

param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;

if (copy_from_user(param, p->pointer, p->length)) {
ret = -EFAULT;
goto out;
param = memdup_user(p->pointer, p->length);
if (IS_ERR(param)) {
return PTR_ERR(param);
}

switch (param->cmd) {

@ -260,7 +260,6 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
|
||||
|
||||
rdr_event = (void *)(skb->data + sizeof(u32));
|
||||
|
||||
if (le32_to_cpu(rdr_event->passed)) {
|
||||
mwifiex_dbg(priv->adapter, MSG,
|
||||
"radar detected; indicating kernel\n");
|
||||
if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
|
||||
@ -272,10 +271,6 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
|
||||
rdr_event->reg_domain);
|
||||
mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
|
||||
rdr_event->det_type);
|
||||
} else {
|
||||
mwifiex_dbg(priv->adapter, MSG,
|
||||
"false radar detection event!\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -171,9 +171,10 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
|
||||
static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
|
||||
struct mwifiex_sta_node *node)
|
||||
{
|
||||
|
||||
if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
|
||||
!priv->ap_11n_enabled)
|
||||
if (!node || ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP) &&
|
||||
!priv->ap_11n_enabled) ||
|
||||
((priv->bss_mode == NL80211_IFTYPE_ADHOC) &&
|
||||
!priv->adapter->adhoc_11n_enabled))
|
||||
return 0;
|
||||
|
||||
return node->is_11n_enabled;
|
||||
|
@ -78,8 +78,15 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
|
||||
*/
|
||||
static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
|
||||
{
|
||||
int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
|
||||
|
||||
int ret;
|
||||
|
||||
if (!payload) {
|
||||
mwifiex_dbg(priv->adapter, INFO, "info: fw drop data\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
@ -921,3 +928,72 @@ void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter)
|
||||
else
|
||||
mwifiex_update_ampdu_rxwinsize(adapter, false);
|
||||
}
|
||||
|
||||
/* This function handles rxba_sync event
|
||||
*/
|
||||
void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
|
||||
u8 *event_buf, u16 len)
|
||||
{
|
||||
struct mwifiex_ie_types_rxba_sync *tlv_rxba = (void *)event_buf;
|
||||
u16 tlv_type, tlv_len;
|
||||
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
|
||||
u8 i, j;
|
||||
u16 seq_num, tlv_seq_num, tlv_bitmap_len;
|
||||
int tlv_buf_left = len;
|
||||
int ret;
|
||||
u8 *tmp;
|
||||
|
||||
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
|
||||
event_buf, len);
|
||||
while (tlv_buf_left >= sizeof(*tlv_rxba)) {
|
||||
tlv_type = le16_to_cpu(tlv_rxba->header.type);
|
||||
tlv_len = le16_to_cpu(tlv_rxba->header.len);
|
||||
if (tlv_type != TLV_TYPE_RXBA_SYNC) {
|
||||
mwifiex_dbg(priv->adapter, ERROR,
|
||||
"Wrong TLV id=0x%x\n", tlv_type);
|
||||
return;
|
||||
}
|
||||
|
||||
tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
|
||||
tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
|
||||
mwifiex_dbg(priv->adapter, INFO,
|
||||
"%pM tid=%d seq_num=%d bitmap_len=%d\n",
|
||||
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
|
||||
tlv_bitmap_len);
|
||||
|
||||
rx_reor_tbl_ptr =
|
||||
mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
|
||||
tlv_rxba->mac);
|
||||
if (!rx_reor_tbl_ptr) {
|
||||
mwifiex_dbg(priv->adapter, ERROR,
|
||||
"Can not find rx_reorder_tbl!");
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < tlv_bitmap_len; i++) {
|
||||
for (j = 0 ; j < 8; j++) {
|
||||
if (tlv_rxba->bitmap[i] & (1 << j)) {
|
||||
seq_num = (MAX_TID_VALUE - 1) &
|
||||
(tlv_seq_num + i * 8 + j);
|
||||
|
||||
mwifiex_dbg(priv->adapter, ERROR,
|
||||
"drop packet,seq=%d\n",
|
||||
seq_num);
|
||||
|
||||
ret = mwifiex_11n_rx_reorder_pkt
|
||||
(priv, seq_num, tlv_rxba->tid,
|
||||
tlv_rxba->mac, 0, NULL);
|
||||
|
||||
if (ret)
|
||||
mwifiex_dbg(priv->adapter,
|
||||
ERROR,
|
||||
"Fail to drop packet");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
|
||||
tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
|
||||
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
|
||||
}
|
||||
}
|
||||
|
@ -81,5 +81,6 @@ struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);

void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
u8 *event_buf, u16 len);
#endif /* _MWIFIEX_11N_RXREORDER_H_ */

@ -2012,10 +2012,6 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;

mwifiex_dbg(priv->adapter, MSG,
"info: successfully disconnected from %pM:\t"
"reason code %d\n", priv->cfg_bssid, reason_code);

eth_zero_addr(priv->cfg_bssid);
priv->hs2_enabled = false;

@ -2485,6 +2481,16 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,

priv->scan_request = request;

if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
ether_addr_copy(priv->random_mac, request->mac_addr);
for (i = 0; i < ETH_ALEN; i++) {
priv->random_mac[i] &= request->mac_addr_mask[i];
priv->random_mac[i] |= get_random_int() &
~(request->mac_addr_mask[i]);
}
}

ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
user_scan_cfg->num_ssids = request->n_ssids;
user_scan_cfg->ssid_list = request->ssids;

@ -2726,7 +2732,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;

if (adapter->user_dev_mcs_support == HT_STREAM_2X2)
ht_info->cap |= 3 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
ht_info->cap |= 2 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
else
ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;

@ -3913,6 +3919,88 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
return ret;
}

#ifdef CONFIG_NL80211_TESTMODE
|
||||
|
||||
enum mwifiex_tm_attr {
|
||||
__MWIFIEX_TM_ATTR_INVALID = 0,
|
||||
MWIFIEX_TM_ATTR_CMD = 1,
|
||||
MWIFIEX_TM_ATTR_DATA = 2,
|
||||
|
||||
/* keep last */
|
||||
__MWIFIEX_TM_ATTR_AFTER_LAST,
|
||||
MWIFIEX_TM_ATTR_MAX = __MWIFIEX_TM_ATTR_AFTER_LAST - 1,
|
||||
};
|
||||
|
||||
static const struct nla_policy mwifiex_tm_policy[MWIFIEX_TM_ATTR_MAX + 1] = {
|
||||
[MWIFIEX_TM_ATTR_CMD] = { .type = NLA_U32 },
|
||||
[MWIFIEX_TM_ATTR_DATA] = { .type = NLA_BINARY,
|
||||
.len = MWIFIEX_SIZE_OF_CMD_BUFFER },
|
||||
};
|
||||
|
||||
enum mwifiex_tm_command {
|
||||
MWIFIEX_TM_CMD_HOSTCMD = 0,
|
||||
};
|
||||
|
||||
static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
|
||||
void *data, int len)
|
||||
{
|
||||
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
|
||||
struct mwifiex_ds_misc_cmd *hostcmd;
|
||||
struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1];
|
||||
struct mwifiex_adapter *adapter;
|
||||
struct sk_buff *skb;
|
||||
int err;
|
||||
|
||||
if (!priv)
|
||||
return -EINVAL;
|
||||
adapter = priv->adapter;
|
||||
|
||||
err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len,
|
||||
mwifiex_tm_policy);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (!tb[MWIFIEX_TM_ATTR_CMD])
|
||||
return -EINVAL;
|
||||
|
||||
switch (nla_get_u32(tb[MWIFIEX_TM_ATTR_CMD])) {
|
||||
case MWIFIEX_TM_CMD_HOSTCMD:
|
||||
if (!tb[MWIFIEX_TM_ATTR_DATA])
|
||||
return -EINVAL;
|
||||
|
||||
hostcmd = kzalloc(sizeof(*hostcmd), GFP_KERNEL);
|
||||
if (!hostcmd)
|
||||
return -ENOMEM;
|
||||
|
||||
hostcmd->len = nla_len(tb[MWIFIEX_TM_ATTR_DATA]);
|
||||
memcpy(hostcmd->cmd, nla_data(tb[MWIFIEX_TM_ATTR_DATA]),
|
||||
hostcmd->len);
|
||||
|
||||
if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
|
||||
dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* process hostcmd response*/
|
||||
skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
|
||||
hostcmd->len, hostcmd->cmd);
|
||||
if (err) {
|
||||
kfree_skb(skb);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
err = cfg80211_testmode_reply(skb);
|
||||
kfree(hostcmd);
|
||||
return err;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static int
|
||||
mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
|
||||
struct net_device *dev,
|
||||
@ -4025,6 +4113,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
|
||||
.tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
|
||||
.add_station = mwifiex_cfg80211_add_station,
|
||||
.change_station = mwifiex_cfg80211_change_station,
|
||||
CFG80211_TESTMODE_CMD(mwifiex_tm_cmd)
|
||||
.get_channel = mwifiex_cfg80211_get_channel,
|
||||
.start_radar_detection = mwifiex_cfg80211_start_radar_detection,
|
||||
.channel_switch = mwifiex_cfg80211_channel_switch,
|
||||
@ -4135,9 +4224,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
|
||||
wiphy->cipher_suites = mwifiex_cipher_suites;
|
||||
wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
|
||||
|
||||
if (adapter->region_code)
|
||||
wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
|
||||
if (adapter->regd) {
|
||||
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
|
||||
REGULATORY_DISABLE_BEACON_HINTS |
|
||||
REGULATORY_COUNTRY_IE_IGNORE;
|
||||
wiphy_apply_custom_regulatory(wiphy, adapter->regd);
|
||||
}
|
||||
|
||||
ether_addr_copy(wiphy->perm_addr, adapter->perm_addr);
|
||||
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
|
||||
@ -4173,7 +4265,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
|
||||
wiphy->features |= NL80211_FEATURE_HT_IBSS |
|
||||
NL80211_FEATURE_INACTIVITY_TIMER |
|
||||
NL80211_FEATURE_LOW_PRIORITY_SCAN |
|
||||
NL80211_FEATURE_NEED_OBSS_SCAN;
|
||||
NL80211_FEATURE_NEED_OBSS_SCAN |
|
||||
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
|
||||
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
|
||||
NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
|
||||
|
||||
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
|
||||
wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
|
||||
@ -4200,19 +4295,27 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!adapter->regd) {
|
||||
if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
|
||||
mwifiex_dbg(adapter, INFO,
|
||||
"driver hint alpha2: %2.2s\n", reg_alpha2);
|
||||
regulatory_hint(wiphy, reg_alpha2);
|
||||
} else {
|
||||
if (adapter->region_code == 0x00) {
|
||||
mwifiex_dbg(adapter, WARN, "Ignore world regulatory domain\n");
|
||||
mwifiex_dbg(adapter, WARN,
|
||||
"Ignore world regulatory domain\n");
|
||||
} else {
|
||||
wiphy->regulatory_flags |=
|
||||
REGULATORY_DISABLE_BEACON_HINTS |
|
||||
REGULATORY_COUNTRY_IE_IGNORE;
|
||||
country_code =
|
||||
mwifiex_11d_code_2_region(adapter->region_code);
|
||||
mwifiex_11d_code_2_region(
|
||||
adapter->region_code);
|
||||
if (country_code &&
|
||||
regulatory_hint(wiphy, country_code))
|
||||
mwifiex_dbg(priv->adapter, ERROR, "regulatory_hint() failed\n");
|
||||
mwifiex_dbg(priv->adapter, ERROR,
|
||||
"regulatory_hint() failed\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -480,13 +480,27 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
*/
int mwifiex_process_event(struct mwifiex_adapter *adapter)
{
int ret;
int ret, i;
struct mwifiex_private *priv =
mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
struct sk_buff *skb = adapter->event_skb;
u32 eventcause = adapter->event_cause;
u32 eventcause;
struct mwifiex_rxinfo *rx_info;

if ((adapter->event_cause & EVENT_ID_MASK) == EVENT_RADAR_DETECTED) {
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (priv && mwifiex_is_11h_active(priv)) {
adapter->event_cause |=
((priv->bss_num & 0xff) << 16) |
((priv->bss_type & 0xff) << 24);
break;
}
}
}

eventcause = adapter->event_cause;

/* Save the last event to debug log */
adapter->dbg.last_event_index =
(adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
@ -581,6 +595,14 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
}
}
/* We don't expect commands in manufacturing mode. They are cooked
* in application and ready to download buffer is passed to the driver
*/
if (adapter->mfg_mode && cmd_no) {
dev_dbg(adapter->dev, "Ignoring commands in manufacturing mode\n");
return -1;
}

/* Get a new command node */
cmd_node = mwifiex_get_cmd_node(adapter);

@ -118,6 +118,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
p += sprintf(p, "region_code=\"0x%x\"\n",
priv->adapter->region_code);

netdev_for_each_mc_addr(ha, netdev)
p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",

@ -176,6 +176,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
|
||||
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
|
||||
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
|
||||
#define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148)
|
||||
#define TLV_TYPE_RXBA_SYNC (PROPRIETARY_TLV_BASE_ID + 153)
|
||||
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
|
||||
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
|
||||
#define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176)
|
||||
@ -188,6 +189,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
|
||||
#define TLV_BTCOEX_WL_AGGR_WINSIZE (PROPRIETARY_TLV_BASE_ID + 202)
|
||||
#define TLV_BTCOEX_WL_SCANTIME (PROPRIETARY_TLV_BASE_ID + 203)
|
||||
#define TLV_TYPE_BSS_MODE (PROPRIETARY_TLV_BASE_ID + 206)
|
||||
#define TLV_TYPE_RANDOM_MAC (PROPRIETARY_TLV_BASE_ID + 236)
|
||||
#define TLV_TYPE_CHAN_ATTR_CFG (PROPRIETARY_TLV_BASE_ID + 237)
|
||||
|
||||
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
|
||||
|
||||
@ -208,6 +211,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
|
||||
|
||||
#define MWIFIEX_TX_DATA_BUF_SIZE_4K 4096
|
||||
#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
|
||||
#define MWIFIEX_TX_DATA_BUF_SIZE_12K 12288
|
||||
|
||||
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
|
||||
#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
|
||||
@ -379,6 +383,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
|
||||
#define HostCmd_CMD_MC_POLICY 0x0121
|
||||
#define HostCmd_CMD_TDLS_OPER 0x0122
|
||||
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
|
||||
#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
|
||||
|
||||
#define PROTOCOL_NO_SECURITY 0x01
|
||||
#define PROTOCOL_STATIC_WEP 0x02
|
||||
@ -411,6 +416,14 @@ enum P2P_MODES {
|
||||
P2P_MODE_CLIENT = 3,
|
||||
};
|
||||
|
||||
enum mwifiex_channel_flags {
|
||||
MWIFIEX_CHANNEL_PASSIVE = BIT(0),
|
||||
MWIFIEX_CHANNEL_DFS = BIT(1),
|
||||
MWIFIEX_CHANNEL_NOHT40 = BIT(2),
|
||||
MWIFIEX_CHANNEL_NOHT80 = BIT(3),
|
||||
MWIFIEX_CHANNEL_DISABLED = BIT(7),
|
||||
};
|
||||
|
||||
#define HostCmd_RET_BIT 0x8000
|
||||
#define HostCmd_ACT_GEN_GET 0x0000
|
||||
#define HostCmd_ACT_GEN_SET 0x0001
|
||||
@ -504,6 +517,8 @@ enum P2P_MODES {
|
||||
#define EVENT_RSSI_HIGH 0x0000001c
|
||||
#define EVENT_SNR_HIGH 0x0000001d
|
||||
#define EVENT_IBSS_COALESCED 0x0000001e
|
||||
#define EVENT_IBSS_STA_CONNECT 0x00000020
|
||||
#define EVENT_IBSS_STA_DISCONNECT 0x00000021
|
||||
#define EVENT_DATA_RSSI_LOW 0x00000024
|
||||
#define EVENT_DATA_SNR_LOW 0x00000025
|
||||
#define EVENT_DATA_RSSI_HIGH 0x00000026
|
||||
@ -531,6 +546,7 @@ enum P2P_MODES {
|
||||
#define EVENT_CHANNEL_REPORT_RDY 0x00000054
|
||||
#define EVENT_TX_DATA_PAUSE 0x00000055
|
||||
#define EVENT_EXT_SCAN_REPORT 0x00000058
|
||||
#define EVENT_RXBA_SYNC 0x00000059
|
||||
#define EVENT_BG_SCAN_STOPPED 0x00000065
|
||||
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
|
||||
#define EVENT_MULTI_CHAN_INFO 0x0000006a
|
||||
@ -734,6 +750,16 @@ struct mwifiex_ie_types_chan_list_param_set {
|
||||
struct mwifiex_chan_scan_param_set chan_scan_param[1];
|
||||
} __packed;
|
||||
|
||||
struct mwifiex_ie_types_rxba_sync {
|
||||
struct mwifiex_ie_types_header header;
|
||||
u8 mac[ETH_ALEN];
|
||||
u8 tid;
|
||||
u8 reserved;
|
||||
__le16 seq_num;
|
||||
__le16 bitmap_len;
|
||||
u8 bitmap[1];
|
||||
} __packed;
|
||||
|
||||
struct chan_band_param_set {
|
||||
u8 radio_type;
|
||||
u8 chan_number;
|
||||
@ -780,6 +806,11 @@ struct mwifiex_ie_types_scan_chan_gap {
|
||||
__le16 chan_gap;
|
||||
} __packed;
|
||||
|
||||
struct mwifiex_ie_types_random_mac {
|
||||
struct mwifiex_ie_types_header header;
|
||||
u8 mac[ETH_ALEN];
|
||||
} __packed;
|
||||
|
||||
struct mwifiex_ietypes_chanstats {
|
||||
struct mwifiex_ie_types_header header;
|
||||
struct mwifiex_fw_chan_stats chanstats[0];
|
||||
@ -1464,6 +1495,7 @@ struct mwifiex_user_scan_cfg {
|
||||
/* Variable number (fixed maximum) of channels to scan up */
|
||||
struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
|
||||
u16 scan_chan_gap;
|
||||
u8 random_mac[ETH_ALEN];
|
||||
} __packed;
|
||||
|
||||
#define MWIFIEX_BG_SCAN_CHAN_MAX 38
|
||||
@ -1646,7 +1678,7 @@ struct mwifiex_ie_types_sta_info {
|
||||
};
|
||||
|
||||
struct host_cmd_ds_sta_list {
|
||||
u16 sta_count;
|
||||
__le16 sta_count;
|
||||
u8 tlv[0];
|
||||
} __packed;
|
||||
|
||||
@ -1667,6 +1699,12 @@ struct mwifiex_ie_types_wmm_param_set {
|
||||
u8 wmm_ie[1];
|
||||
};
|
||||
|
||||
struct mwifiex_ie_types_mgmt_frame {
|
||||
struct mwifiex_ie_types_header header;
|
||||
__le16 frame_control;
|
||||
u8 frame_contents[0];
|
||||
};
|
||||
|
||||
struct mwifiex_ie_types_wmm_queue_status {
|
||||
struct mwifiex_ie_types_header header;
|
||||
u8 queue_index;
|
||||
@ -2034,26 +2072,26 @@ struct host_cmd_ds_set_bss_mode {
|
||||
|
||||
struct host_cmd_ds_pcie_details {
|
||||
/* TX buffer descriptor ring address */
|
||||
u32 txbd_addr_lo;
|
||||
u32 txbd_addr_hi;
|
||||
__le32 txbd_addr_lo;
|
||||
__le32 txbd_addr_hi;
|
||||
/* TX buffer descriptor ring count */
|
||||
u32 txbd_count;
|
||||
__le32 txbd_count;
|
||||
|
||||
/* RX buffer descriptor ring address */
|
||||
u32 rxbd_addr_lo;
|
||||
u32 rxbd_addr_hi;
|
||||
__le32 rxbd_addr_lo;
|
||||
__le32 rxbd_addr_hi;
|
||||
/* RX buffer descriptor ring count */
|
||||
u32 rxbd_count;
|
||||
__le32 rxbd_count;
|
||||
|
||||
/* Event buffer descriptor ring address */
|
||||
u32 evtbd_addr_lo;
|
||||
u32 evtbd_addr_hi;
|
||||
__le32 evtbd_addr_lo;
|
||||
__le32 evtbd_addr_hi;
|
||||
/* Event buffer descriptor ring count */
|
||||
u32 evtbd_count;
|
||||
__le32 evtbd_count;
|
||||
|
||||
/* Sleep cookie buffer physical address */
|
||||
u32 sleep_cookie_addr_lo;
|
||||
u32 sleep_cookie_addr_hi;
|
||||
__le32 sleep_cookie_addr_lo;
|
||||
__le32 sleep_cookie_addr_hi;
|
||||
} __packed;
|
||||
|
||||
struct mwifiex_ie_types_rssi_threshold {
|
||||
@ -2093,8 +2131,8 @@ struct mwifiex_ie_types_mc_group_info {
|
||||
u8 chan_buf_weight;
|
||||
u8 band_config;
|
||||
u8 chan_num;
|
||||
u32 chan_time;
|
||||
u32 reserved;
|
||||
__le32 chan_time;
|
||||
__le32 reserved;
|
||||
union {
|
||||
u8 sdio_func_num;
|
||||
u8 usb_ep_num;
|
||||
@ -2185,7 +2223,7 @@ struct host_cmd_ds_robust_coex {
|
||||
} __packed;
|
||||
|
||||
struct host_cmd_ds_wakeup_reason {
|
||||
u16 wakeup_reason;
|
||||
__le16 wakeup_reason;
|
||||
} __packed;
|
||||
|
||||
struct host_cmd_ds_gtk_rekey_params {
|
||||
@ -2196,6 +2234,10 @@ struct host_cmd_ds_gtk_rekey_params {
|
||||
__le32 replay_ctr_high;
|
||||
} __packed;
|
||||
|
||||
struct host_cmd_ds_chan_region_cfg {
|
||||
__le16 action;
|
||||
} __packed;
|
||||
|
||||
struct host_cmd_ds_command {
|
||||
__le16 command;
|
||||
__le16 size;
|
||||
@ -2270,6 +2312,7 @@ struct host_cmd_ds_command {
|
||||
struct host_cmd_ds_robust_coex coex;
|
||||
struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
|
||||
struct host_cmd_ds_gtk_rekey_params rekey;
|
||||
struct host_cmd_ds_chan_region_cfg reg_cfg;
|
||||
} params;
|
||||
} __packed;
|
||||
|
||||
|
@ -298,6 +298,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
|
||||
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
|
||||
adapter->arp_filter_size = 0;
|
||||
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
|
||||
adapter->mfg_mode = mfg_mode;
|
||||
adapter->key_api_major_ver = 0;
|
||||
adapter->key_api_minor_ver = 0;
|
||||
eth_broadcast_addr(adapter->perm_addr);
|
||||
@ -553,16 +554,23 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (adapter->mfg_mode) {
|
||||
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
|
||||
ret = -EINPROGRESS;
|
||||
} else {
|
||||
for (i = 0; i < adapter->priv_num; i++) {
|
||||
if (adapter->priv[i]) {
|
||||
ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
|
||||
true);
|
||||
ret = mwifiex_sta_init_cmd(adapter->priv[i],
|
||||
first_sta, true);
|
||||
if (ret == -1)
|
||||
return -1;
|
||||
|
||||
first_sta = false;
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
|
||||
|
@ -669,9 +669,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
sizeof(priv->assoc_rsp_buf));

memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);

assoc_rsp->a_id = cpu_to_le16(aid);
memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);

if (status_code) {
priv->adapter->dbg.num_cmd_assoc_failure++;

@ -23,6 +23,7 @@
#include "11n.h"

#define VERSION "1.0"
#define MFG_FIRMWARE "mwifiex_mfg.bin"

static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
module_param(debug_mask, uint, 0);
@ -37,6 +38,10 @@ module_param(driver_mode, ushort, 0);
MODULE_PARM_DESC(driver_mode,
"station=0x1(default), ap-sta=0x3, station-p2p=0x5, ap-sta-p2p=0x7");

bool mfg_mode;
module_param(mfg_mode, bool, 0);
MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");

/*
* This function registers the device and performs all the necessary
* initializations.
@ -139,6 +144,8 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
adapter->nd_info = NULL;
}

kfree(adapter->regd);

vfree(adapter->chan_stats);
kfree(adapter);
return 0;
@ -486,9 +493,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
|
||||
*/
|
||||
static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
|
||||
{
|
||||
if (adapter->workqueue) {
|
||||
flush_workqueue(adapter->workqueue);
|
||||
destroy_workqueue(adapter->workqueue);
|
||||
adapter->workqueue = NULL;
|
||||
}
|
||||
|
||||
if (adapter->rx_workqueue) {
|
||||
flush_workqueue(adapter->rx_workqueue);
|
||||
@ -559,17 +568,22 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
|
||||
goto done;
|
||||
}
|
||||
/* Wait for mwifiex_init to complete */
|
||||
if (!adapter->mfg_mode) {
|
||||
wait_event_interruptible(adapter->init_wait_q,
|
||||
adapter->init_wait_q_woken);
|
||||
if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
|
||||
goto err_init_fw;
|
||||
}
|
||||
|
||||
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
|
||||
|
||||
if (!adapter->wiphy) {
|
||||
if (mwifiex_register_cfg80211(adapter)) {
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
"cannot register with cfg80211\n");
|
||||
goto err_init_fw;
|
||||
}
|
||||
}
|
||||
|
||||
if (mwifiex_init_channel_scan_gap(adapter)) {
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
@ -662,16 +676,41 @@ done:
|
||||
/*
|
||||
* This function initializes the hardware and gets firmware.
|
||||
*/
|
||||
static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
|
||||
static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter,
|
||||
bool req_fw_nowait)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Override default firmware with manufacturing one if
|
||||
* manufacturing mode is enabled
|
||||
*/
|
||||
if (mfg_mode) {
|
||||
if (strlcpy(adapter->fw_name, MFG_FIRMWARE,
|
||||
sizeof(adapter->fw_name)) >=
|
||||
sizeof(adapter->fw_name)) {
|
||||
pr_err("%s: fw_name too long!\n", __func__);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (req_fw_nowait) {
|
||||
ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
|
||||
adapter->dev, GFP_KERNEL, adapter,
|
||||
mwifiex_fw_dpc);
|
||||
if (ret < 0)
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
"request_firmware_nowait error %d\n", ret);
|
||||
} else {
|
||||
ret = request_firmware(&adapter->firmware,
|
||||
adapter->fw_name,
|
||||
adapter->dev);
|
||||
if (ret < 0)
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
"request_firmware error %d\n", ret);
|
||||
else
|
||||
mwifiex_fw_dpc(adapter->firmware, (void *)adapter);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1320,6 +1359,199 @@ static void mwifiex_main_work_queue(struct work_struct *work)
|
||||
mwifiex_main_process(adapter);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function gets called during PCIe function level reset. Required
|
||||
* code is extracted from mwifiex_remove_card()
|
||||
*/
|
||||
static int
|
||||
mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
|
||||
{
|
||||
struct mwifiex_private *priv;
|
||||
int i;
|
||||
|
||||
if (down_interruptible(sem))
|
||||
goto exit_sem_err;
|
||||
|
||||
if (!adapter)
|
||||
goto exit_remove;
|
||||
|
||||
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
|
||||
mwifiex_deauthenticate(priv, NULL);
|
||||
|
||||
/* We can no longer handle interrupts once we start doing the teardown
|
||||
* below.
|
||||
*/
|
||||
if (adapter->if_ops.disable_int)
|
||||
adapter->if_ops.disable_int(adapter);
|
||||
|
||||
adapter->surprise_removed = true;
|
||||
mwifiex_terminate_workqueue(adapter);
|
||||
|
||||
/* Stop data */
|
||||
for (i = 0; i < adapter->priv_num; i++) {
|
||||
priv = adapter->priv[i];
|
||||
if (priv && priv->netdev) {
|
||||
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
|
||||
if (netif_carrier_ok(priv->netdev))
|
||||
netif_carrier_off(priv->netdev);
|
||||
netif_device_detach(priv->netdev);
|
||||
}
|
||||
}
|
||||
|
||||
mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n");
|
||||
adapter->init_wait_q_woken = false;
|
||||
|
||||
if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
|
||||
wait_event_interruptible(adapter->init_wait_q,
|
||||
adapter->init_wait_q_woken);
|
||||
if (adapter->if_ops.down_dev)
|
||||
adapter->if_ops.down_dev(adapter);
|
||||
|
||||
mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n");
|
||||
if (atomic_read(&adapter->rx_pending) ||
|
||||
atomic_read(&adapter->tx_pending) ||
|
||||
atomic_read(&adapter->cmd_pending)) {
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
"rx_pending=%d, tx_pending=%d,\t"
|
||||
"cmd_pending=%d\n",
|
||||
atomic_read(&adapter->rx_pending),
|
||||
atomic_read(&adapter->tx_pending),
|
||||
atomic_read(&adapter->cmd_pending));
|
||||
}
|
||||
|
||||
for (i = 0; i < adapter->priv_num; i++) {
|
||||
priv = adapter->priv[i];
|
||||
if (!priv)
|
||||
continue;
|
||||
rtnl_lock();
|
||||
if (priv->netdev &&
|
||||
priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
|
||||
mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
exit_remove:
|
||||
up(sem);
|
||||
exit_sem_err:
|
||||
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This function gets called during PCIe function level reset. Required
|
||||
* code is extracted from mwifiex_add_card()
|
||||
*/
|
||||
static int
|
||||
mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem,
|
||||
struct mwifiex_if_ops *if_ops, u8 iface_type)
|
||||
{
|
||||
char fw_name[32];
|
||||
struct pcie_service_card *card = adapter->card;
|
||||
|
||||
if (down_interruptible(sem))
|
||||
goto exit_sem_err;
|
||||
|
||||
mwifiex_init_lock_list(adapter);
|
||||
if (adapter->if_ops.up_dev)
|
||||
adapter->if_ops.up_dev(adapter);
|
||||
|
||||
adapter->iface_type = iface_type;
|
||||
adapter->card_sem = sem;
|
||||
|
||||
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
|
||||
adapter->surprise_removed = false;
|
||||
init_waitqueue_head(&adapter->init_wait_q);
|
||||
adapter->is_suspended = false;
|
||||
adapter->hs_activated = false;
|
||||
init_waitqueue_head(&adapter->hs_activate_wait_q);
|
||||
init_waitqueue_head(&adapter->cmd_wait_q.wait);
|
||||
adapter->cmd_wait_q.status = 0;
|
||||
adapter->scan_wait_q_woken = false;
|
||||
|
||||
if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
|
||||
adapter->rx_work_enabled = true;
|
||||
|
||||
adapter->workqueue =
|
||||
alloc_workqueue("MWIFIEX_WORK_QUEUE",
|
||||
WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
|
||||
if (!adapter->workqueue)
|
||||
goto err_kmalloc;
|
||||
|
||||
INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
|
||||
|
||||
if (adapter->rx_work_enabled) {
|
||||
adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE",
|
||||
WQ_HIGHPRI |
|
||||
WQ_MEM_RECLAIM |
|
||||
WQ_UNBOUND, 1);
|
||||
if (!adapter->rx_workqueue)
|
||||
goto err_kmalloc;
|
||||
INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
|
||||
}
|
||||
|
||||
/* Register the device. Fill up the private data structure with
|
||||
* relevant information from the card. Some code extracted from
|
||||
* mwifiex_register_dev()
|
||||
*/
|
||||
mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__);
|
||||
strcpy(fw_name, adapter->fw_name);
|
||||
strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
|
||||
|
||||
adapter->tx_buf_size = card->pcie.tx_buf_size;
|
||||
adapter->ext_scan = card->pcie.can_ext_scan;
|
||||
if (mwifiex_init_hw_fw(adapter, false)) {
|
||||
strcpy(adapter->fw_name, fw_name);
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
"%s: firmware init failed\n", __func__);
|
||||
goto err_init_fw;
|
||||
}
|
||||
strcpy(adapter->fw_name, fw_name);
|
||||
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
|
||||
up(sem);
|
||||
return 0;
|
||||
|
||||
err_init_fw:
|
||||
mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__);
|
||||
if (adapter->if_ops.unregister_dev)
|
||||
adapter->if_ops.unregister_dev(adapter);
|
||||
if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
"info: %s: shutdown mwifiex\n", __func__);
|
||||
adapter->init_wait_q_woken = false;
|
||||
|
||||
if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
|
||||
wait_event_interruptible(adapter->init_wait_q,
|
||||
adapter->init_wait_q_woken);
|
||||
}
|
||||
|
||||
err_kmalloc:
|
||||
mwifiex_terminate_workqueue(adapter);
|
||||
adapter->surprise_removed = true;
|
||||
up(sem);
|
||||
exit_sem_err:
|
||||
mwifiex_dbg(adapter, INFO, "%s, error\n", __func__);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* This function processes pre and post PCIe function level resets.
|
||||
* It performs software cleanup without touching PCIe specific code.
|
||||
* Also, during initialization PCIe stuff is skipped.
|
||||
*/
|
||||
void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare)
|
||||
{
|
||||
struct mwifiex_if_ops if_ops;
|
||||
|
||||
if (!prepare) {
|
||||
mwifiex_reinit_sw(adapter, adapter->card_sem, &if_ops,
|
||||
adapter->iface_type);
|
||||
} else {
|
||||
memcpy(&if_ops, &adapter->if_ops,
|
||||
sizeof(struct mwifiex_if_ops));
|
||||
mwifiex_shutdown_sw(adapter, adapter->card_sem);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mwifiex_do_flr);
|
||||
|
||||
/*
|
||||
* This function adds the card.
|
||||
*
|
||||
@ -1391,7 +1623,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
|
||||
goto err_registerdev;
|
||||
}
|
||||
|
||||
if (mwifiex_init_hw_fw(adapter)) {
|
||||
if (mwifiex_init_hw_fw(adapter, true)) {
|
||||
pr_err("%s: firmware init failed\n", __func__);
|
||||
goto err_init_fw;
|
||||
}
|
||||
|
@ -58,6 +58,7 @@
|
||||
#include "sdio.h"
|
||||
|
||||
extern const char driver_version[];
|
||||
extern bool mfg_mode;
|
||||
|
||||
struct mwifiex_adapter;
|
||||
struct mwifiex_private;
|
||||
@ -675,6 +676,7 @@ struct mwifiex_private {
|
||||
struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
|
||||
u8 assoc_resp_ht_param;
|
||||
bool ht_param_present;
|
||||
u8 random_mac[ETH_ALEN];
|
||||
};
|
||||
|
||||
|
||||
@ -827,6 +829,8 @@ struct mwifiex_if_ops {
|
||||
void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
|
||||
void (*multi_port_resync)(struct mwifiex_adapter *);
|
||||
bool (*is_port_ready)(struct mwifiex_private *);
|
||||
void (*down_dev)(struct mwifiex_adapter *);
|
||||
void (*up_dev)(struct mwifiex_adapter *);
|
||||
};
|
||||
|
||||
struct mwifiex_adapter {
|
||||
@ -989,6 +993,7 @@ struct mwifiex_adapter {
|
||||
u32 drv_info_size;
|
||||
bool scan_chan_gap_enabled;
|
||||
struct sk_buff_head rx_data_q;
|
||||
bool mfg_mode;
|
||||
struct mwifiex_chan_stats *chan_stats;
|
||||
u32 num_in_chan_stats;
|
||||
int survey_idx;
|
||||
@ -1004,6 +1009,7 @@ struct mwifiex_adapter {
|
||||
bool usb_mc_status;
|
||||
bool usb_mc_setup;
|
||||
struct cfg80211_wowlan_nd_info *nd_info;
|
||||
struct ieee80211_regdomain *regd;
|
||||
};
|
||||
|
||||
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
|
||||
@ -1625,4 +1631,5 @@ void mwifiex_debugfs_remove(void);
|
||||
void mwifiex_dev_debugfs_init(struct mwifiex_private *priv);
|
||||
void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv);
|
||||
#endif
|
||||
void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare);
|
||||
#endif /* !_MWIFIEX_MAIN_H_ */
|
||||
|
@ -225,7 +225,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
|
||||
if (!adapter || !adapter->priv_num)
|
||||
return;
|
||||
|
||||
if (user_rmmod) {
|
||||
if (user_rmmod && !adapter->mfg_mode) {
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
if (adapter->is_suspended)
|
||||
mwifiex_pcie_resume(&pdev->dev);
|
||||
@ -277,6 +277,52 @@ static const struct pci_device_id mwifiex_ids[] = {
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, mwifiex_ids);
|
||||
|
||||
static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
|
||||
{
|
||||
struct mwifiex_adapter *adapter;
|
||||
struct pcie_service_card *card;
|
||||
|
||||
if (!pdev) {
|
||||
pr_err("%s: PCIe device is not specified\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
card = (struct pcie_service_card *)pci_get_drvdata(pdev);
|
||||
if (!card || !card->adapter) {
|
||||
pr_err("%s: Card or adapter structure is not valid (%ld)\n",
|
||||
__func__, (long)card);
|
||||
return;
|
||||
}
|
||||
|
||||
adapter = card->adapter;
|
||||
mwifiex_dbg(adapter, INFO,
|
||||
"%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n",
|
||||
__func__, pdev->vendor, pdev->device,
|
||||
pdev->revision,
|
||||
prepare ? "Pre-FLR" : "Post-FLR");
|
||||
|
||||
if (prepare) {
|
||||
/* Kernel would be performing FLR after this notification.
|
||||
* Cleanup all software without cleaning anything related to
|
||||
* PCIe and HW.
|
||||
*/
|
||||
mwifiex_do_flr(adapter, prepare);
|
||||
adapter->surprise_removed = true;
|
||||
} else {
|
||||
/* Kernel stores and restores PCIe function context before and
|
||||
* after performing FLR respectively. Reconfigure the software
|
||||
* and firmware including firmware redownload
|
||||
*/
|
||||
adapter->surprise_removed = false;
|
||||
mwifiex_do_flr(adapter, prepare);
|
||||
}
|
||||
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
|
||||
}
|
||||
|
||||
static const struct pci_error_handlers mwifiex_pcie_err_handler[] = {
|
||||
{ .reset_notify = mwifiex_pcie_reset_notify, },
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
/* Power Management Hooks */
|
||||
static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
|
||||
@ -295,6 +341,7 @@ static struct pci_driver __refdata mwifiex_pcie = {
|
||||
},
|
||||
#endif
|
||||
.shutdown = mwifiex_pcie_shutdown,
|
||||
.err_handler = mwifiex_pcie_err_handler,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -1956,8 +2003,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
|
||||
if (firmware_len - offset < txlen)
|
||||
txlen = firmware_len - offset;
|
||||
|
||||
mwifiex_dbg(adapter, INFO, ".");
|
||||
|
||||
tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
|
||||
card->pcie.blksz_fw_dl;
|
||||
|
||||
@ -2043,6 +2088,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
|
||||
ret = -1;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
mwifiex_dbg(adapter, INFO, "Try %d if FW is ready <%d,%#x>",
|
||||
tries, ret, firmware_stat);
|
||||
|
||||
if (ret)
|
||||
continue;
|
||||
if (firmware_stat == FIRMWARE_READY_PCIE) {
|
||||
@ -2074,8 +2123,7 @@ mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
|
||||
adapter->winner = 1;
|
||||
} else {
|
||||
mwifiex_dbg(adapter, ERROR,
|
||||
"PCI-E is not the winner <%#x,%d>, exit dnld\n",
|
||||
ret, adapter->winner);
|
||||
"PCI-E is not the winner <%#x>", winner);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -2863,7 +2911,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
|
||||
static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
|
||||
{
|
||||
int revision_id = 0;
|
||||
int version;
|
||||
int version, magic;
|
||||
struct pcie_service_card *card = adapter->card;
|
||||
|
||||
switch (card->dev->device) {
|
||||
@ -2888,30 +2936,19 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
|
||||
}
|
||||
break;
|
||||
case PCIE_DEVICE_ID_MARVELL_88W8997:
|
||||
mwifiex_read_reg(adapter, 0x0c48, &revision_id);
|
||||
mwifiex_read_reg(adapter, 0x8, &revision_id);
|
||||
mwifiex_read_reg(adapter, 0x0cd0, &version);
|
||||
mwifiex_read_reg(adapter, 0x0cd4, &magic);
|
||||
revision_id &= 0xff;
|
||||
version &= 0x7;
|
||||
switch (revision_id) {
|
||||
case PCIE8997_V2:
|
||||
if (version == CHIP_VER_PCIEUART)
|
||||
strcpy(adapter->fw_name,
|
||||
PCIEUART8997_FW_NAME_V2);
|
||||
magic &= 0xff;
|
||||
if (revision_id == PCIE8997_A1 &&
|
||||
magic == CHIP_MAGIC_VALUE &&
|
||||
version == CHIP_VER_PCIEUART)
|
||||
strcpy(adapter->fw_name, PCIEUART8997_FW_NAME_V4);
|
||||
else
|
||||
strcpy(adapter->fw_name,
|
||||
PCIEUSB8997_FW_NAME_V2);
|
||||
strcpy(adapter->fw_name, PCIEUSB8997_FW_NAME_V4);
|
||||
break;
|
||||
case PCIE8997_Z:
|
||||
if (version == CHIP_VER_PCIEUART)
|
||||
strcpy(adapter->fw_name,
|
||||
PCIEUART8997_FW_NAME_Z);
|
||||
else
|
||||
strcpy(adapter->fw_name,
|
||||
PCIEUSB8997_FW_NAME_Z);
|
||||
break;
|
||||
default:
|
||||
strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -2952,7 +2989,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
|
||||
static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
|
||||
{
|
||||
struct pcie_service_card *card = adapter->card;
|
||||
const struct mwifiex_pcie_card_reg *reg;
|
||||
struct pci_dev *pdev;
|
||||
int i;
|
||||
|
||||
@ -2976,8 +3012,90 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
|
||||
if (card->msi_enable)
|
||||
pci_disable_msi(pdev);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
reg = card->pcie.reg;
|
||||
/* This function initializes the PCI-E host memory space, WCB rings, etc.
|
||||
*
|
||||
* The following initializations steps are followed -
|
||||
* - Allocate TXBD ring buffers
|
||||
* - Allocate RXBD ring buffers
|
||||
* - Allocate event BD ring buffers
|
||||
* - Allocate command response ring buffer
|
||||
* - Allocate sleep cookie buffer
|
||||
* Part of mwifiex_pcie_init(), not reset the PCIE registers
|
||||
*/
|
||||
static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
|
||||
{
|
||||
struct pcie_service_card *card = adapter->card;
|
||||
int ret;
|
||||
struct pci_dev *pdev = card->dev;
|
||||
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
|
||||
|
||||
card->cmdrsp_buf = NULL;
|
||||
ret = mwifiex_pcie_create_txbd_ring(adapter);
|
||||
if (ret) {
|
||||
mwifiex_dbg(adapter, ERROR, "Failed to create txbd ring\n");
|
||||
goto err_cre_txbd;
|
||||
}
|
||||
|
||||
ret = mwifiex_pcie_create_rxbd_ring(adapter);
|
||||
if (ret) {
|
||||
mwifiex_dbg(adapter, ERROR, "Failed to create rxbd ring\n");
|
||||
goto err_cre_rxbd;
|
||||
}
|
||||
|
||||
ret = mwifiex_pcie_create_evtbd_ring(adapter);
|
||||
if (ret) {
|
||||
mwifiex_dbg(adapter, ERROR, "Failed to create evtbd ring\n");
|
||||
goto err_cre_evtbd;
|
||||
}
|
||||
|
||||
ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
|
||||
if (ret) {
|
||||
mwifiex_dbg(adapter, ERROR, "Failed to allocate cmdbuf buffer\n");
|
||||
goto err_alloc_cmdbuf;
|
||||
}
|
||||
|
||||
if (reg->sleep_cookie) {
|
||||
ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
|
||||
if (ret) {
|
||||
mwifiex_dbg(adapter, ERROR, "Failed to allocate sleep_cookie buffer\n");
|
||||
goto err_alloc_cookie;
|
||||
}
|
||||
} else {
|
||||
card->sleep_cookie_vbase = NULL;
|
||||
}
|
||||
return;
|
||||
|
||||
err_alloc_cookie:
|
||||
mwifiex_pcie_delete_cmdrsp_buf(adapter);
|
||||
err_alloc_cmdbuf:
|
||||
mwifiex_pcie_delete_evtbd_ring(adapter);
|
||||
err_cre_evtbd:
|
||||
mwifiex_pcie_delete_rxbd_ring(adapter);
|
||||
err_cre_rxbd:
|
||||
mwifiex_pcie_delete_txbd_ring(adapter);
|
||||
err_cre_txbd:
|
||||
pci_iounmap(pdev, card->pci_mmap1);
|
||||
}
|
||||
|
||||
/* This function cleans up the PCI-E host memory space.
|
||||
* Some code is extracted from mwifiex_unregister_dev()
|
||||
*
|
||||
*/
|
||||
static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
|
||||
{
|
||||
struct pcie_service_card *card = adapter->card;
|
||||
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
|
||||
|
||||
if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
|
||||
mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n");
|
||||
|
||||
adapter->seq_num = 0;
|
||||
adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
|
||||
|
||||
if (card) {
|
||||
if (reg->sleep_cookie)
|
||||
mwifiex_pcie_delete_sleep_cookie_buf(adapter);
|
||||
|
||||
@ -2987,6 +3105,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
|
||||
mwifiex_pcie_delete_txbd_ring(adapter);
|
||||
card->cmdrsp_buf = NULL;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static struct mwifiex_if_ops pcie_ops = {
|
||||
@ -3013,6 +3133,8 @@ static struct mwifiex_if_ops pcie_ops = {
|
||||
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
|
||||
.reg_dump = mwifiex_pcie_reg_dump,
|
||||
.device_dump = mwifiex_pcie_device_dump,
|
||||
.down_dev = mwifiex_pcie_down_dev,
|
||||
.up_dev = mwifiex_pcie_up_dev,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -32,11 +32,9 @@
#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
#define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin"
#define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcieusb8997_combo_v2.bin"
#define PCIEUART8997_FW_NAME_Z "mrvl/pcieuart8997_combo.bin"
#define PCIEUART8997_FW_NAME_V2 "mrvl/pcieuart8997_combo_v2.bin"
#define PCIEUSB8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin"
#define PCIEUSB8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin"
#define PCIEUART8997_FW_NAME_V4 "mrvl/pcieuart8997_combo_v4.bin"
#define PCIEUSB8997_FW_NAME_V4 "mrvl/pcieusb8997_combo_v4.bin"
#define PCIE8997_DEFAULT_WIFIFW_NAME "mrvl/pcie8997_wlan_v4.bin"

#define PCIE_VENDOR_ID_MARVELL (0x11ab)
#define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b)
@ -46,9 +44,10 @@

#define PCIE8897_A0 0x1100
#define PCIE8897_B0 0x1200
#define PCIE8997_Z 0x0
#define PCIE8997_V2 0x471
#define PCIE8997_A0 0x10
#define PCIE8997_A1 0x11
#define CHIP_VER_PCIEUART 0x3
#define CHIP_MAGIC_VALUE 0x24

/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20

@ -820,6 +820,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
|
||||
struct mwifiex_adapter *adapter = priv->adapter;
|
||||
struct mwifiex_ie_types_num_probes *num_probes_tlv;
|
||||
struct mwifiex_ie_types_scan_chan_gap *chan_gap_tlv;
|
||||
struct mwifiex_ie_types_random_mac *random_mac_tlv;
|
||||
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
|
||||
struct mwifiex_ie_types_bssid_list *bssid_tlv;
|
||||
u8 *tlv_pos;
|
||||
@ -835,6 +836,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
|
||||
u8 ssid_filter;
|
||||
struct mwifiex_ie_types_htcap *ht_cap;
|
||||
struct mwifiex_ie_types_bss_mode *bss_mode;
|
||||
const u8 zero_mac[6] = {0, 0, 0, 0, 0, 0};
|
||||
|
||||
/* The tlv_buf_len is calculated for each scan command. The TLVs added
|
||||
in this routine will be preserved since the routine that sends the
|
||||
@ -967,6 +969,18 @@ mwifiex_config_scan(struct mwifiex_private *priv,
|
||||
tlv_pos +=
|
||||
sizeof(struct mwifiex_ie_types_scan_chan_gap);
|
||||
}
|
||||
|
||||
if (!ether_addr_equal(user_scan_in->random_mac, zero_mac)) {
|
||||
random_mac_tlv = (void *)tlv_pos;
|
||||
random_mac_tlv->header.type =
|
||||
cpu_to_le16(TLV_TYPE_RANDOM_MAC);
|
||||
random_mac_tlv->header.len =
|
||||
cpu_to_le16(sizeof(random_mac_tlv->mac));
|
||||
ether_addr_copy(random_mac_tlv->mac,
|
||||
user_scan_in->random_mac);
|
||||
tlv_pos +=
|
||||
sizeof(struct mwifiex_ie_types_random_mac);
|
||||
}
|
||||
} else {
|
||||
scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
|
||||
num_probes = adapter->scan_probes;
|
||||
@ -1922,6 +1936,7 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
|
||||
}
|
||||
|
||||
adapter->active_scan_triggered = true;
|
||||
ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
|
||||
user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
|
||||
user_scan_cfg->ssid_list = priv->scan_request->ssids;
|
||||
|
||||
@ -2179,18 +2194,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
|
||||
|
||||
if (chan_band_tlv && adapter->nd_info) {
|
||||
adapter->nd_info->matches[idx] =
|
||||
kzalloc(sizeof(*pmatch) +
|
||||
sizeof(u32), GFP_ATOMIC);
|
||||
kzalloc(sizeof(*pmatch) + sizeof(u32),
|
||||
GFP_ATOMIC);
|
||||
|
||||
pmatch = adapter->nd_info->matches[idx];
|
||||
|
||||
if (pmatch) {
|
||||
memset(pmatch, 0, sizeof(*pmatch));
|
||||
if (chan_band_tlv) {
|
||||
pmatch->n_channels = 1;
|
||||
pmatch->channels[0] =
|
||||
chan_band->chan_number;
|
||||
}
|
||||
pmatch->channels[0] = chan_band->chan_number;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2761,6 +2772,7 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
|
||||
if (!scan_cfg)
|
||||
return -ENOMEM;
|
||||
|
||||
ether_addr_copy(scan_cfg->random_mac, priv->random_mac);
|
||||
scan_cfg->ssid_list = req_ssid;
|
||||
scan_cfg->num_ssids = 1;
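
The random-MAC support in this scan.c hunk is added by appending one more little-endian TLV to the scan command buffer whenever the caller supplied a non-zero MAC. A rough standalone sketch of that append pattern follows; the struct layout, TLV id and function names below are illustrative only, not the driver's actual definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical TLV mirroring the type/len header used above. */
struct tlv_random_mac {
	uint16_t type;   /* written little-endian on the wire */
	uint16_t len;
	uint8_t  mac[6];
} __attribute__((packed));

static uint16_t to_le16(uint16_t v)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };
	uint16_t out;

	memcpy(&out, b, 2);
	return out;
}

/* Append the TLV only for a non-zero MAC, mirroring the
 * ether_addr_equal(random_mac, zero_mac) check in the hunk. */
static uint8_t *append_random_mac_tlv(uint8_t *pos, const uint8_t mac[6])
{
	static const uint8_t zero_mac[6];
	struct tlv_random_mac tlv;

	if (!memcmp(mac, zero_mac, 6))
		return pos;

	tlv.type = to_le16(0x01cf);              /* placeholder TLV id */
	tlv.len = to_le16(sizeof(tlv.mac));
	memcpy(tlv.mac, mac, 6);
	memcpy(pos, &tlv, sizeof(tlv));
	return pos + sizeof(tlv);
}

int main(void)
{
	uint8_t buf[64], mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t *end = append_random_mac_tlv(buf, mac);

	printf("appended %zu bytes\n", (size_t)(end - buf));
	return 0;
}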

@@ -122,9 +122,11 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
IRQF_TRIGGER_LOW,
"wifi_wake", cfg);
if (ret) {
dev_err(dev,
dev_dbg(dev,
"Failed to request irq_wifi %d (%d)\n",
cfg->irq_wifi, ret);
card->plt_wake_cfg = NULL;
return 0;
}
disable_irq(cfg->irq_wifi);
}
@@ -289,7 +291,7 @@ mwifiex_sdio_remove(struct sdio_func *func)

mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);

if (user_rmmod) {
if (user_rmmod && !adapter->mfg_mode) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);

@@ -706,15 +706,10 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
(priv->wep_key_curr_index & KEY_INDEX_MASK))
key_info |= KEY_DEFAULT;
} else {
if (mac) {
if (is_broadcast_ether_addr(mac))
key_info |= KEY_MCAST;
else
key_info |= KEY_UNICAST |
KEY_DEFAULT;
} else {
key_info |= KEY_MCAST;
}
key_info |= KEY_UNICAST | KEY_DEFAULT;
}
}
km->key_param_set.key_info = cpu_to_le16(key_info);
@@ -1244,20 +1239,23 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
return 0;

/* Send the ring base addresses and count to firmware */
host_spec->txbd_addr_lo = (u32)(card->txbd_ring_pbase);
host_spec->txbd_addr_hi = (u32)(((u64)card->txbd_ring_pbase)>>32);
host_spec->txbd_count = MWIFIEX_MAX_TXRX_BD;
host_spec->rxbd_addr_lo = (u32)(card->rxbd_ring_pbase);
host_spec->rxbd_addr_hi = (u32)(((u64)card->rxbd_ring_pbase)>>32);
host_spec->rxbd_count = MWIFIEX_MAX_TXRX_BD;
host_spec->evtbd_addr_lo = (u32)(card->evtbd_ring_pbase);
host_spec->evtbd_addr_hi = (u32)(((u64)card->evtbd_ring_pbase)>>32);
host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD;
host_spec->txbd_addr_lo = cpu_to_le32((u32)(card->txbd_ring_pbase));
host_spec->txbd_addr_hi =
cpu_to_le32((u32)(((u64)card->txbd_ring_pbase) >> 32));
host_spec->txbd_count = cpu_to_le32(MWIFIEX_MAX_TXRX_BD);
host_spec->rxbd_addr_lo = cpu_to_le32((u32)(card->rxbd_ring_pbase));
host_spec->rxbd_addr_hi =
cpu_to_le32((u32)(((u64)card->rxbd_ring_pbase) >> 32));
host_spec->rxbd_count = cpu_to_le32(MWIFIEX_MAX_TXRX_BD);
host_spec->evtbd_addr_lo = cpu_to_le32((u32)(card->evtbd_ring_pbase));
host_spec->evtbd_addr_hi =
cpu_to_le32((u32)(((u64)card->evtbd_ring_pbase) >> 32));
host_spec->evtbd_count = cpu_to_le32(MWIFIEX_MAX_EVT_BD);
if (card->sleep_cookie_vbase) {
host_spec->sleep_cookie_addr_lo =
(u32)(card->sleep_cookie_pbase);
host_spec->sleep_cookie_addr_hi =
(u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
cpu_to_le32((u32)(card->sleep_cookie_pbase));
host_spec->sleep_cookie_addr_hi = cpu_to_le32((u32)(((u64)
(card->sleep_cookie_pbase)) >> 32));
mwifiex_dbg(priv->adapter, INFO,
"sleep_cook_lo phy addr: 0x%x\n",
host_spec->sleep_cookie_addr_lo);
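
The rewritten assignments above make the byte order explicit: each 64-bit ring base address is split into low and high 32-bit halves, and every field handed to the firmware goes through cpu_to_le32(). A minimal userspace sketch of the same split, using htole32()/le32toh() from glibc's endian.h in place of the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit DMA/bus address into little-endian lo/hi words,
 * the way the host_spec fields above are filled in. */
struct ring_addr_le {
	uint32_t lo;
	uint32_t hi;
};

static struct ring_addr_le split_addr(uint64_t pbase)
{
	struct ring_addr_le a;

	a.lo = htole32((uint32_t)pbase);
	a.hi = htole32((uint32_t)(pbase >> 32));
	return a;
}

int main(void)
{
	struct ring_addr_le a = split_addr(0x0000001234567890ULL);

	/* Convert back to host order just for printing. */
	printf("lo=0x%08x hi=0x%08x\n", le32toh(a.lo), le32toh(a.hi));
	return 0;
}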

@@ -1482,7 +1480,7 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
continue;

/* property header is 6 bytes, data must fit in cmd buffer */
if (prop && prop->value && prop->length > 6 &&
if (prop->value && prop->length > 6 &&
prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
HostCmd_ACT_GEN_SET, 0,
@@ -1596,6 +1594,21 @@ static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
return 0;
}

static int mwifiex_cmd_chan_region_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
u16 cmd_action)
{
struct host_cmd_ds_chan_region_cfg *reg = &cmd->params.reg_cfg;

cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REGION_CFG);
cmd->size = cpu_to_le16(sizeof(*reg) + S_DS_GEN);

if (cmd_action == HostCmd_ACT_GEN_GET)
reg->action = cpu_to_le16(cmd_action);

return 0;
}

static int
mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
@@ -2136,6 +2149,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
data_buf);
break;
case HostCmd_CMD_CHAN_REGION_CFG:
ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action);
break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2273,6 +2289,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
}

mwifiex_send_cmd(priv, HostCmd_CMD_CHAN_REGION_CFG,
HostCmd_ACT_GEN_GET, 0, NULL, true);
}

/* get tx rate */

@@ -962,7 +962,7 @@ static int mwifiex_ret_uap_sta_list(struct mwifiex_private *priv,
int i;
struct mwifiex_sta_node *sta_node;

for (i = 0; i < sta_list->sta_count; i++) {
for (i = 0; i < (le16_to_cpu(sta_list->sta_count)); i++) {
sta_node = mwifiex_get_sta_entry(priv, sta_info->mac);
if (unlikely(!sta_node))
continue;
@@ -1022,6 +1022,135 @@ static int mwifiex_ret_robust_coex(struct mwifiex_private *priv,
return 0;
}

static struct ieee80211_regdomain *
mwifiex_create_custom_regdomain(struct mwifiex_private *priv,
u8 *buf, u16 buf_len)
{
u16 num_chan = buf_len / 2;
struct ieee80211_regdomain *regd;
struct ieee80211_reg_rule *rule;
bool new_rule;
int regd_size, idx, freq, prev_freq = 0;
u32 bw, prev_bw = 0;
u8 chflags, prev_chflags = 0, valid_rules = 0;

if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);

regd_size = sizeof(struct ieee80211_regdomain) +
num_chan * sizeof(struct ieee80211_reg_rule);

regd = kzalloc(regd_size, GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);

for (idx = 0; idx < num_chan; idx++) {
u8 chan;
enum nl80211_band band;

chan = *buf++;
if (!chan)
return NULL;
chflags = *buf++;
band = (chan <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
freq = ieee80211_channel_to_frequency(chan, band);
new_rule = false;

if (chflags & MWIFIEX_CHANNEL_DISABLED)
continue;

if (band == NL80211_BAND_5GHZ) {
if (!(chflags & MWIFIEX_CHANNEL_NOHT80))
bw = MHZ_TO_KHZ(80);
else if (!(chflags & MWIFIEX_CHANNEL_NOHT40))
bw = MHZ_TO_KHZ(40);
else
bw = MHZ_TO_KHZ(20);
} else {
if (!(chflags & MWIFIEX_CHANNEL_NOHT40))
bw = MHZ_TO_KHZ(40);
else
bw = MHZ_TO_KHZ(20);
}

if (idx == 0 || prev_chflags != chflags || prev_bw != bw ||
freq - prev_freq > 20) {
valid_rules++;
new_rule = true;
}

rule = &regd->reg_rules[valid_rules - 1];

rule->freq_range.end_freq_khz = MHZ_TO_KHZ(freq + 10);

prev_chflags = chflags;
prev_freq = freq;
prev_bw = bw;

if (!new_rule)
continue;

rule->freq_range.start_freq_khz = MHZ_TO_KHZ(freq - 10);
rule->power_rule.max_eirp = DBM_TO_MBM(19);

if (chflags & MWIFIEX_CHANNEL_PASSIVE)
rule->flags = NL80211_RRF_NO_IR;

if (chflags & MWIFIEX_CHANNEL_DFS)
rule->flags = NL80211_RRF_DFS;

rule->freq_range.max_bandwidth_khz = bw;
}

regd->n_reg_rules = valid_rules;
regd->alpha2[0] = '9';
regd->alpha2[1] = '9';

return regd;
}
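
mwifiex_create_custom_regdomain() walks the firmware's (channel, flags) byte pairs and folds runs of adjacent channels with identical flags and bandwidth into single regulatory rules spanning from freq - 10 to freq + 10 MHz. A rough standalone sketch of that coalescing step, with the kernel helper replaced by the usual channel-to-centre-frequency arithmetic and a simplified rule struct (both assumptions, not the driver's types):

#include <stdint.h>
#include <stdio.h>

/* Conventional channel-to-centre-frequency mapping (MHz). */
static int chan_to_freq(uint8_t chan)
{
	if (chan <= 14)
		return chan == 14 ? 2484 : 2407 + chan * 5;
	return 5000 + chan * 5;
}

struct rule { int start_mhz, end_mhz; uint8_t flags; };

/* Coalesce (chan, flags) pairs into rules, like the loop above:
 * start a new rule when the flags change or the channels are not
 * adjacent, otherwise just extend the current rule's end frequency. */
static int build_rules(const uint8_t *pairs, int n, struct rule *out)
{
	int i, nrules = 0, prev_freq = 0;
	uint8_t prev_flags = 0;

	for (i = 0; i < n; i++) {
		int freq = chan_to_freq(pairs[2 * i]);
		uint8_t flags = pairs[2 * i + 1];

		if (i == 0 || flags != prev_flags || freq - prev_freq > 20) {
			out[nrules].start_mhz = freq - 10;
			out[nrules].flags = flags;
			nrules++;
		}
		out[nrules - 1].end_mhz = freq + 10;
		prev_freq = freq;
		prev_flags = flags;
	}
	return nrules;
}

int main(void)
{
	/* channels 1..3 share flags, channel 36 differs */
	const uint8_t pairs[] = { 1, 0, 2, 0, 3, 0, 36, 0x02 };
	struct rule rules[4];
	int i, n = build_rules(pairs, 4, rules);

	for (i = 0; i < n; i++)
		printf("rule %d: %d-%d MHz flags=0x%02x\n", i,
		       rules[i].start_mhz, rules[i].end_mhz, rules[i].flags);
	return 0;
}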

static int mwifiex_ret_chan_region_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_chan_region_cfg *reg = &resp->params.reg_cfg;
u16 action = le16_to_cpu(reg->action);
u16 tlv, tlv_buf_len, tlv_buf_left;
struct mwifiex_ie_types_header *head;
u8 *tlv_buf;

if (action != HostCmd_ACT_GEN_GET)
return 0;

tlv_buf = (u8 *)reg + sizeof(*reg);
tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*reg);

while (tlv_buf_left >= sizeof(*head)) {
head = (struct mwifiex_ie_types_header *)tlv_buf;
tlv = le16_to_cpu(head->type);
tlv_buf_len = le16_to_cpu(head->len);

if (tlv_buf_left < (sizeof(*head) + tlv_buf_len))
break;

switch (tlv) {
case TLV_TYPE_CHAN_ATTR_CFG:
mwifiex_dbg_dump(priv->adapter, CMD_D, "CHAN:",
(u8 *)head + sizeof(*head),
tlv_buf_len);
priv->adapter->regd =
mwifiex_create_custom_regdomain(priv,
(u8 *)head +
sizeof(*head), tlv_buf_len);
break;
}

tlv_buf += (sizeof(*head) + tlv_buf_len);
tlv_buf_left -= (sizeof(*head) + tlv_buf_len);
}

return 0;
}
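
The response handler above follows the usual mwifiex TLV walk: read a little-endian type/length header, check that the advertised length still fits in the remaining buffer, dispatch on the type, then advance past header plus payload. A compact standalone sketch of that loop with generic types rather than the driver's structs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr { uint16_t type; uint16_t len; } __attribute__((packed));

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Walk a buffer of little-endian TLVs, stopping on truncation. */
static void walk_tlvs(const uint8_t *buf, size_t left)
{
	while (left >= sizeof(struct tlv_hdr)) {
		uint16_t type = get_le16(buf);
		uint16_t len = get_le16(buf + 2);

		if (left < sizeof(struct tlv_hdr) + len)
			break;          /* malformed or truncated TLV */

		printf("tlv type=0x%04x len=%u\n", type, len);

		buf += sizeof(struct tlv_hdr) + len;
		left -= sizeof(struct tlv_hdr) + len;
	}
}

int main(void)
{
	/* two TLVs: type 0x0100 len 2, type 0x0200 len 1 */
	const uint8_t buf[] = { 0x00, 0x01, 0x02, 0x00, 0xaa, 0xbb,
				0x00, 0x02, 0x01, 0x00, 0xcc };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}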

/*
* This function handles the command responses.
*
@@ -1239,6 +1368,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
break;
case HostCmd_CMD_CHAN_REGION_CFG:
ret = mwifiex_ret_chan_region_cfg(priv, resp);
break;
default:
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: unknown cmd response %#x\n",

@@ -25,6 +25,99 @@
#include "wmm.h"
#include "11n.h"

#define MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE 12

static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
struct mwifiex_sta_node *sta_ptr,
struct sk_buff *event)
{
int evt_len, ele_len;
u8 *curr;
struct ieee_types_header *ele_hdr;
struct mwifiex_ie_types_mgmt_frame *tlv_mgmt_frame;
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;

skb_pull(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
evt_len = event->len;
curr = event->data;

mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilties:",
event->data, event->len);

skb_push(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);

tlv_mgmt_frame = (void *)curr;
if (evt_len >= sizeof(*tlv_mgmt_frame) &&
le16_to_cpu(tlv_mgmt_frame->header.type) ==
TLV_TYPE_UAP_MGMT_FRAME) {
/* Locate curr pointer to the start of beacon tlv,
* timestamp 8 bytes, beacon intervel 2 bytes,
* capability info 2 bytes, totally 12 byte beacon header
*/
evt_len = le16_to_cpu(tlv_mgmt_frame->header.len);
curr += (sizeof(*tlv_mgmt_frame) + 12);
} else {
mwifiex_dbg(priv->adapter, MSG,
"management frame tlv not found!\n");
return 0;
}

while (evt_len >= sizeof(*ele_hdr)) {
ele_hdr = (struct ieee_types_header *)curr;
ele_len = ele_hdr->len;

if (evt_len < ele_len + sizeof(*ele_hdr))
break;

switch (ele_hdr->element_id) {
case WLAN_EID_HT_CAPABILITY:
sta_ptr->is_11n_enabled = true;
ht_cap = (void *)(ele_hdr + 2);
sta_ptr->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
IEEE80211_HT_CAP_MAX_AMSDU ?
MWIFIEX_TX_DATA_BUF_SIZE_8K :
MWIFIEX_TX_DATA_BUF_SIZE_4K;
mwifiex_dbg(priv->adapter, INFO,
"11n enabled!, max_amsdu : %d\n",
sta_ptr->max_amsdu);
break;

case WLAN_EID_VHT_CAPABILITY:
sta_ptr->is_11ac_enabled = true;
vht_cap = (void *)(ele_hdr + 2);
/* check VHT MAXMPDU capability */
switch (le32_to_cpu(vht_cap->vht_cap_info) & 0x3) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
sta_ptr->max_amsdu =
MWIFIEX_TX_DATA_BUF_SIZE_12K;
break;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
sta_ptr->max_amsdu =
MWIFIEX_TX_DATA_BUF_SIZE_8K;
break;
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
sta_ptr->max_amsdu =
MWIFIEX_TX_DATA_BUF_SIZE_4K;
default:
break;
}

mwifiex_dbg(priv->adapter, INFO,
"11ac enabled!, max_amsdu : %d\n",
sta_ptr->max_amsdu);
break;
default:
break;
}

curr += (ele_len + sizeof(*ele_hdr));
evt_len -= (ele_len + sizeof(*ele_hdr));
}

return 0;
}
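
In the VHT branch above, the peer's maximum MPDU size is carried in the two lowest bits of vht_cap_info and mapped onto the driver's TX buffer sizes. A small sketch of the same 2-bit decode; the numeric encodings follow the 802.11ac definitions, while the returned buffer sizes are placeholders standing in for the MWIFIEX_TX_DATA_BUF_SIZE_* constants:

#include <stdint.h>
#include <stdio.h>

/* 802.11ac: bits 0..1 of the VHT capabilities info field. */
enum { MAX_MPDU_3895 = 0, MAX_MPDU_7991 = 1, MAX_MPDU_11454 = 2 };

static int max_amsdu_from_vht_cap(uint32_t vht_cap_info)
{
	switch (vht_cap_info & 0x3) {
	case MAX_MPDU_11454:
		return 12288;   /* ~12K buffer */
	case MAX_MPDU_7991:
		return 8192;    /* ~8K buffer */
	case MAX_MPDU_3895:
	default:
		return 4096;    /* ~4K buffer */
	}
}

int main(void)
{
	printf("%d\n", max_amsdu_from_vht_cap(0x00000002)); /* prints 12288 */
	return 0;
}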

/*
* This function resets the connection state.
*
@@ -519,6 +612,8 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
* - EVENT_LINK_QUALITY
* - EVENT_PRE_BEACON_LOST
* - EVENT_IBSS_COALESCED
* - EVENT_IBSS_STA_CONNECT
* - EVENT_IBSS_STA_DISCONNECT
* - EVENT_WEP_ICV_ERR
* - EVENT_BW_CHANGE
* - EVENT_HOSTWAKE_STAIE
@@ -547,9 +642,11 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
int mwifiex_process_sta_event(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
int ret = 0, i;
u32 eventcause = adapter->event_cause;
u16 ctrl, reason_code;
u8 ibss_sta_addr[ETH_ALEN];
struct mwifiex_sta_node *sta_ptr;

switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -708,7 +805,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)

case EVENT_EXT_SCAN_REPORT:
mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
if (adapter->ext_scan && !priv->scan_aborting)
/* We intend to skip this event during suspend, but handle
* it in interface disabled case
*/
if (adapter->ext_scan && (!priv->scan_aborting ||
!netif_running(priv->netdev)))
ret = mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);

@@ -771,6 +872,39 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_IBSS_STA_CONNECT:
ether_addr_copy(ibss_sta_addr, adapter->event_body + 2);
mwifiex_dbg(adapter, EVENT, "event: IBSS_STA_CONNECT %pM\n",
ibss_sta_addr);
sta_ptr = mwifiex_add_sta_entry(priv, ibss_sta_addr);
if (sta_ptr && adapter->adhoc_11n_enabled) {
mwifiex_check_ibss_peer_capabilties(priv, sta_ptr,
adapter->event_skb);
if (sta_ptr->is_11n_enabled)
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] =
priv->aggr_prio_tbl[i].ampdu_user;
else
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] =
BA_STREAM_NOT_ALLOWED;
memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
}

break;
case EVENT_IBSS_STA_DISCONNECT:
ether_addr_copy(ibss_sta_addr, adapter->event_body + 2);
mwifiex_dbg(adapter, EVENT, "event: IBSS_STA_DISCONNECT %pM\n",
ibss_sta_addr);
sta_ptr = mwifiex_get_sta_entry(priv, ibss_sta_addr);
if (sta_ptr && sta_ptr->is_11n_enabled) {
mwifiex_11n_del_rx_reorder_tbl_by_ta(priv,
ibss_sta_addr);
mwifiex_del_tx_ba_stream_tbl_by_ra(priv, ibss_sta_addr);
}
mwifiex_wmm_del_peer_ra_list(priv, ibss_sta_addr);
mwifiex_del_sta_entry(priv, ibss_sta_addr);
break;
case EVENT_ADDBA:
mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
@@ -869,6 +1003,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
case EVENT_RXBA_SYNC:
dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
adapter->event_skb->len -
sizeof(eventcause));
break;
default:
mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
eventcause);

@@ -574,7 +574,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)

adapter->hs_activate_wait_q_woken = false;

memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
memset(&hscfg, 0, sizeof(hscfg));
hscfg.is_invoke_hostcmd = true;

adapter->hs_enabling = true;
@@ -1138,7 +1138,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
{
struct mwifiex_ds_encrypt_key encrypt_key;

memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
memset(&encrypt_key, 0, sizeof(encrypt_key));
encrypt_key.key_len = key_len;
encrypt_key.key_index = key_index;

@@ -1180,7 +1180,7 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel)
{
struct mwifiex_ver_ext ver_ext;

memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
memset(&ver_ext, 0, sizeof(ver_ext));
ver_ext.version_str_sel = version_str_sel;
if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
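
The three memset() changes in these sta_ioctl.c hunks are worth a note: mwifiex_get_ver_ext() was zeroing sizeof(struct host_cmd_ds_version_ext) bytes over a struct mwifiex_ver_ext variable, and switching all three call sites to sizeof(variable) removes that mismatch and makes them robust against future type renames. A tiny illustration of the bug class, using generic structs rather than the driver's:

#include <stdio.h>
#include <string.h>

struct small { char buf[16]; };
struct large { char buf[64]; };

int main(void)
{
	struct small s;

	/* Risky: the type named in sizeof() can drift from the variable.
	 * Writing sizeof(struct large) here would zero 48 bytes past 's'
	 * and corrupt the stack. */
	memset(&s, 0, sizeof(struct small));

	/* Safer: tie the length to the object itself. */
	memset(&s, 0, sizeof(s));

	printf("zeroed %zu bytes\n", sizeof(s));
	return 0;
}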

@@ -306,7 +306,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
mwifiex_process_multi_chan_event(priv, adapter->event_skb);
break;

case EVENT_RXBA_SYNC:
dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
adapter->event_skb->len -
sizeof(eventcause));
break;
default:
mwifiex_dbg(adapter, EVENT,
"event: unknown event id: %#x\n", eventcause);

@@ -611,7 +611,7 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
if (!adapter->priv_num)
return;

if (user_rmmod) {
if (user_rmmod && !adapter->mfg_mode) {
#ifdef CONFIG_PM
if (adapter->is_suspended)
mwifiex_usb_resume(intf);
@@ -1026,6 +1026,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
tlen += sizeof(struct fw_header);

/* Command 7 doesn't have data length field */
if (dnld_cmd == FW_CMD_7)
dlen = 0;

memcpy(fwdata->data, &firmware[tlen], dlen);

fwdata->seq_num = cpu_to_le32(fw_seqnum);

@@ -46,11 +46,12 @@
#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin"
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
#define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin"
#define USB8997_DEFAULT_FW_NAME "mrvl/usb8997_uapsta.bin"
#define USB8997_DEFAULT_FW_NAME "mrvl/usbusb8997_combo_v4.bin"

#define FW_DNLD_TX_BUF_SIZE 620
#define FW_DNLD_RX_BUF_SIZE 2048
#define FW_HAS_LAST_BLOCK 0x00000004
#define FW_CMD_7 0x00000007

#define FW_DATA_XMIT_SIZE \
(sizeof(struct fw_header) + dlen + sizeof(u32))

@@ -386,6 +386,7 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
"unknown public action frame category %d\n",
category);
}
break;
default:
mwifiex_dbg(priv->adapter, INFO,
"unknown mgmt frame subtype %#x\n", stype);

@@ -103,7 +103,7 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,

if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

trace_mt_rx(dev, rxwi, fce_info);

@@ -18,8 +18,6 @@
#include <asm/unaligned.h>
#include <linux/skbuff.h>

#include "util.h"

#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
#define MT_FCE_INFO_LEN 4
@@ -79,9 +77,9 @@ static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
*/

info = flags |
MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
MT76_SET(MT_TXD_INFO_TYPE, type);
FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
FIELD_PREP(MT_TXD_INFO_TYPE, type);

put_unaligned_le32(info, skb_push(skb, sizeof(info)));
return skb_put_padto(skb, round_up(skb->len, 4) + 4);
@@ -90,7 +88,7 @@ mt7601u_dma_skb_wrap(struct sk_buff *skb,
static inline int
mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
{
flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
}