Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next

John W. Linville says:

====================
I would guess that this is the last big wireless pull request before
the 3.11 merge window...

Regarding the mac80211 bits, Johannes says:

"I have a number of mesh fixes and improvements from Colleen, Jacob,
Ashok and Thomas, powersave fixes in mac80211 from Alex, improved
management-TX from Antonio, and a few various things, including locking
fixes, from others and myself. Overall though, nothing really stands
out."

As for the iwlwifi bits, Johannes says:

"Emmanuel contributed two AP mode fixes, removed an unused field, fixed a
comment and added a warning for something that shouldn't happen in
practice, and I removed the declaration of a function that doesn't even
exist and cleaned up a small include."

"This time I have a number of cleanups, a small fix from Emmanuel and two
performance improvements that combined reduce our driver's CPU
utilisation as much as 75% in high TX-throughput scenarios."

"These two patches fix two issues with using rfkill randomly during
traffic, which would then cause our driver to stop working and not be
able to recover at all."

Regarding the ath6kl bits, Kalle says:

"Here are few simple patches for ath6kl. We have a suspend crash fix for
USB from Shafi, use of mac_pton(), a compiler warning fix and a fix for
module initialisation error path."

Kalle also sends the biggest single item of note, the new ath10k
driver for Qualcomm Atheros 802.11ac QCA98xx devices.

Included is an NFC pull, of which Samuel says:

"These are the pending NFC patches for the 3.11 merge window.

It contains the pending fixes that were on nfc-fixes (nfc-fixes-3.10-2),
along with a few more for the pn544 and pn533 drivers, the LLCP
disconnection path and an LLCP memory leak.

Highlights for this one are:

- An initial secure element API. NFC chipsets can carry an embedded
  secure element or get access to the SIM one. In both cases they
  control the secure elements and this API provides a way to discover,
  enable and disable the available SEs. It also exports that to
  userspace in order for SE focused middleware to actually do something
  with them (e.g. payments).

- NCI over SPI support. SPI is the most complex NCI-specified transport
  layer, and we now have support for it in the kernel. The next step will
  be to implement drivers for NCI chipsets using this transport, e.g.
  the bcm2079x.

- NFC p2p hardware simulation driver. We now have an nfcsim driver that
  is mostly a loopback device between two NFC interfaces. It also
  implements the rest of the NFC core API like polling and target
  detection. This driver, with neard running on top of it, allows us to
  completely test the LLCP, SNEP and Handover implementation without
  physical hardware.

- A firmware update netlink API. Most (all?) HCI chipsets have a
  special firmware update mode where applications can push a new
  firmware that will be flashed. We now have a netlink API for providing
  that mode to e.g. nfctool."

On top of all that, there are a variety of updates to brcmfmac,
iwlegacy, rtlwifi, wil6210, and the TI wl12xx drivers.  As usual,
the bcma and ssb busses get a little love as well, as do a handful
of others here and there.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller <davem@davemloft.net>
Date:      2013-06-24 00:31:02 -07:00
commit 7e2f934dc5
210 changed files with 27591 additions and 2856 deletions


@@ -127,12 +127,11 @@
!Finclude/net/cfg80211.h cfg80211_ibss_params
!Finclude/net/cfg80211.h cfg80211_connect_params
!Finclude/net/cfg80211.h cfg80211_pmksa
!Finclude/net/cfg80211.h cfg80211_send_rx_auth
!Finclude/net/cfg80211.h cfg80211_send_auth_timeout
!Finclude/net/cfg80211.h cfg80211_send_rx_assoc
!Finclude/net/cfg80211.h cfg80211_send_assoc_timeout
!Finclude/net/cfg80211.h cfg80211_send_deauth
!Finclude/net/cfg80211.h cfg80211_send_disassoc
!Finclude/net/cfg80211.h cfg80211_rx_mlme_mgmt
!Finclude/net/cfg80211.h cfg80211_auth_timeout
!Finclude/net/cfg80211.h cfg80211_rx_assoc_resp
!Finclude/net/cfg80211.h cfg80211_assoc_timeout
!Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt
!Finclude/net/cfg80211.h cfg80211_ibss_joined
!Finclude/net/cfg80211.h cfg80211_connect_result
!Finclude/net/cfg80211.h cfg80211_roamed


@@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE
config BCMA_HOST_PCI
	bool "Support for BCMA on PCI-host bus"
	depends on BCMA_HOST_PCI_POSSIBLE
	default y

config BCMA_DRIVER_PCI_HOSTMODE
	bool "Driver for PCI core working in hostmode"


@@ -9,6 +9,25 @@
#include <linux/export.h>
#include <linux/bcma/bcma.h>
static bool bcma_core_wait_value(struct bcma_device *core, u16 reg, u32 mask,
u32 value, int timeout)
{
unsigned long deadline = jiffies + timeout;
u32 val;
do {
val = bcma_aread32(core, reg);
if ((val & mask) == value)
return true;
cpu_relax();
udelay(10);
} while (!time_after_eq(jiffies, deadline));
bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
return false;
}
bool bcma_core_is_enabled(struct bcma_device *core)
{
if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC))
@@ -25,13 +44,15 @@ void bcma_core_disable(struct bcma_device *core, u32 flags)
if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
return;
bcma_awrite32(core, BCMA_IOCTL, flags);
bcma_aread32(core, BCMA_IOCTL);
udelay(10);
bcma_core_wait_value(core, BCMA_RESET_ST, ~0, 0, 300);
bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
bcma_aread32(core, BCMA_RESET_CTL);
udelay(1);
bcma_awrite32(core, BCMA_IOCTL, flags);
bcma_aread32(core, BCMA_IOCTL);
udelay(10);
}
EXPORT_SYMBOL_GPL(bcma_core_disable);
@@ -43,6 +64,7 @@ int bcma_core_enable(struct bcma_device *core, u32 flags)
bcma_aread32(core, BCMA_IOCTL);
bcma_awrite32(core, BCMA_RESET_CTL, 0);
bcma_aread32(core, BCMA_RESET_CTL);
udelay(1);
bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));


@@ -30,7 +30,7 @@ struct bcma_sflash_tbl_e {
u16 numblocks;
};
static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
{ "M25P20", 0x11, 0x10000, 4, },
{ "M25P40", 0x12, 0x10000, 8, },
@@ -41,7 +41,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
{ 0 },
};
static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
{ "SST25WF512", 1, 0x1000, 16, },
{ "SST25VF512", 0x48, 0x1000, 16, },
{ "SST25WF010", 2, 0x1000, 32, },
@@ -59,7 +59,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
{ 0 },
};
static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
{ "AT45DB011", 0xc, 256, 512, },
{ "AT45DB021", 0x14, 256, 1024, },
{ "AT45DB041", 0x1c, 256, 2048, },
@@ -89,7 +89,7 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
struct bcma_sflash *sflash = &cc->sflash;
struct bcma_sflash_tbl_e *e;
const struct bcma_sflash_tbl_e *e;
u32 id, id2;
switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {


@@ -554,6 +554,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
ret = -ENOMEM;
goto exit;
}


@@ -57,6 +57,9 @@ static struct usb_device_id btusb_table[] = {
/* Apple-specific (Broadcom) devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
/* MediaTek MT76x0E */
{ USB_DEVICE(0x0e8d, 0x763f) },
/* Broadcom SoftSailing reporting vendor specific */
{ USB_DEVICE(0x0a5c, 0x21e1) },


@@ -31,5 +31,6 @@ source "drivers/net/wireless/ath/carl9170/Kconfig"
source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
endif


@@ -4,6 +4,7 @@ obj-$(CONFIG_CARL9170) += carl9170/
obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_ATH_COMMON) += ath.o


@@ -239,13 +239,12 @@ enum ATH_DEBUG {
ATH_DBG_CONFIG = 0x00000200,
ATH_DBG_FATAL = 0x00000400,
ATH_DBG_PS = 0x00000800,
ATH_DBG_HWTIMER = 0x00001000,
ATH_DBG_BTCOEX = 0x00002000,
ATH_DBG_WMI = 0x00004000,
ATH_DBG_BSTUCK = 0x00008000,
ATH_DBG_MCI = 0x00010000,
ATH_DBG_DFS = 0x00020000,
ATH_DBG_WOW = 0x00040000,
ATH_DBG_BTCOEX = 0x00001000,
ATH_DBG_WMI = 0x00002000,
ATH_DBG_BSTUCK = 0x00004000,
ATH_DBG_MCI = 0x00008000,
ATH_DBG_DFS = 0x00010000,
ATH_DBG_WOW = 0x00020000,
ATH_DBG_ANY = 0xffffffff
};


@@ -0,0 +1,39 @@
config ATH10K
	tristate "Atheros 802.11ac wireless cards support"
	depends on MAC80211
	select ATH_COMMON
	---help---
	  This module adds support for wireless adapters based on the
	  Atheros IEEE 802.11ac family of chipsets.

	  If you choose to build a module, it'll be called ath10k.

config ATH10K_PCI
	tristate "Atheros ath10k PCI support"
	depends on ATH10K && PCI
	---help---
	  This module adds support for the PCIe bus.

config ATH10K_DEBUG
	bool "Atheros ath10k debugging"
	depends on ATH10K
	---help---
	  Enables debug support.

	  If unsure, say Y to make it easier to debug problems.

config ATH10K_DEBUGFS
	bool "Atheros ath10k debugfs support"
	depends on ATH10K
	---help---
	  Enables debugfs support.

	  If unsure, say Y to make it easier to debug problems.

config ATH10K_TRACING
	bool "Atheros ath10k tracing support"
	depends on ATH10K
	depends on EVENT_TRACING
	---help---
	  Select this to use the tracing infrastructure in ath10k.
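
For reference, a minimal configuration fragment that builds the new driver
as modules (assuming the MAC80211 and PCI prerequisites are already
enabled) would look like:

CONFIG_ATH10K=m
CONFIG_ATH10K_PCI=m
CONFIG_ATH10K_DEBUG=y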


@@ -0,0 +1,20 @@
obj-$(CONFIG_ATH10K) += ath10k_core.o
ath10k_core-y += mac.o \
debug.o \
core.o \
htc.o \
htt.o \
htt_rx.o \
htt_tx.o \
txrx.o \
wmi.o \
bmi.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
ce.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)


@@ -0,0 +1,295 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
int ath10k_bmi_done(struct ath10k *ar)
{
struct bmi_cmd cmd;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
if (ar->bmi.done_sent) {
ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
return 0;
}
ar->bmi.done_sent = true;
cmd.id = __cpu_to_le32(BMI_DONE);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn("unable to write to the device: %d\n", ret);
return ret;
}
ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
return 0;
}
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
u32 resplen = sizeof(resp.get_target_info);
int ret;
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
ath10k_warn("unable to get target info from device\n");
return ret;
}
if (resplen < sizeof(resp.get_target_info)) {
ath10k_warn("invalid get_target_info response length (%d)\n",
resplen);
return -EIO;
}
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
return 0;
}
int ath10k_bmi_read_memory(struct ath10k *ar,
u32 address, void *buffer, u32 length)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
u32 rxlen;
int ret;
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, length: %d)\n",
__func__, ar, address, length);
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
cmd.read_mem.addr = __cpu_to_le32(address);
cmd.read_mem.len = __cpu_to_le32(rxlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
&resp, &rxlen);
if (ret) {
ath10k_warn("unable to read from the device\n");
return ret;
}
memcpy(buffer, resp.read_mem.payload, rxlen);
address += rxlen;
buffer += rxlen;
length -= rxlen;
}
return 0;
}
int ath10k_bmi_write_memory(struct ath10k *ar,
u32 address, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
u32 txlen;
int ret;
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, length: %d)\n",
__func__, ar, address, length);
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
/* copy before roundup to avoid reading beyond buffer */
memcpy(cmd.write_mem.payload, buffer, txlen);
txlen = roundup(txlen, 4);
cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
cmd.write_mem.addr = __cpu_to_le32(address);
cmd.write_mem.len = __cpu_to_le32(txlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn("unable to write to the device\n");
return ret;
}
/* fixup roundup() so `length` zeroes out for last chunk */
txlen = min(txlen, length);
address += txlen;
buffer += txlen;
length -= txlen;
}
return 0;
}
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
u32 resplen = sizeof(resp.execute);
int ret;
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, param: %d)\n",
__func__, ar, address, *param);
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
ath10k_warn("unable to read from the device\n");
return ret;
}
if (resplen < sizeof(resp.execute)) {
ath10k_warn("invalid execute response length (%d)\n",
resplen);
return -EIO;
}
*param = __le32_to_cpu(resp.execute.result);
return 0;
}
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
u32 txlen;
int ret;
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
WARN_ON_ONCE(txlen & 3);
cmd.id = __cpu_to_le32(BMI_LZ_DATA);
cmd.lz_data.len = __cpu_to_le32(txlen);
memcpy(cmd.lz_data.payload, buffer, txlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn("unable to write to the device\n");
return ret;
}
buffer += txlen;
length -= txlen;
}
return 0;
}
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
struct bmi_cmd cmd;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
cmd.lz_start.addr = __cpu_to_le32(address);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn("unable to Start LZ Stream to the device\n");
return ret;
}
return 0;
}
int ath10k_bmi_fast_download(struct ath10k *ar,
u32 address, const void *buffer, u32 length)
{
u8 trailer[4] = {};
u32 head_len = rounddown(length, 4);
u32 trailer_len = length - head_len;
int ret;
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
/* copy the last word into a zero padded buffer */
if (trailer_len > 0)
memcpy(trailer, buffer + head_len, trailer_len);
ret = ath10k_bmi_lz_data(ar, buffer, head_len);
if (ret)
return ret;
if (trailer_len > 0)
ret = ath10k_bmi_lz_data(ar, trailer, 4);
if (ret != 0)
return ret;
/*
* Close compressed stream and open a new (fake) one.
* This serves mainly to flush Target caches.
*/
ret = ath10k_bmi_lz_stream_start(ar, 0x00);
return ret;
}
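
Taken together, these primitives form the host side of the one-shot BMI
boot conversation. As a rough sketch (hedged: error handling is trimmed,
and the function name and image arguments are hypothetical), a typical
download-and-boot sequence built on this API would be:

static int example_bmi_boot(struct ath10k *ar, u32 load_addr,
			    const void *image, u32 image_len)
{
	struct bmi_target_info info;
	u32 exec_param = 0;
	int ret;

	/* Query the target first; disallowed once BMI_DONE is sent. */
	ret = ath10k_bmi_get_target_info(ar, &info);
	if (ret)
		return ret;

	/* Push the (optionally LZ-compressed) image in chunks. */
	ret = ath10k_bmi_fast_download(ar, load_addr, image, image_len);
	if (ret)
		return ret;

	/* Run it, then close the one-shot BMI phase for good. */
	ret = ath10k_bmi_execute(ar, load_addr, &exec_param);
	if (ret)
		return ret;

	return ath10k_bmi_done(ar);
}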


@@ -0,0 +1,224 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BMI_H_
#define _BMI_H_
#include "core.h"
/*
* Bootloader Messaging Interface (BMI)
*
* BMI is a very simple messaging interface used during initialization
* to read memory, write memory, execute code, and to define an
* application entry PC.
*
* It is used to download an application to QCA988x, to provide
* patches to code that is already resident on QCA988x, and generally
* to examine and modify state. The Host has an opportunity to use
* BMI only once during bootup. Once the Host issues a BMI_DONE
* command, this opportunity ends.
*
* The Host writes BMI requests to mailbox0, and reads BMI responses
* from mailbox0. BMI requests all begin with a command
* (see below for specific commands), and are followed by
* command-specific data.
*
* Flow control:
* The Host can only issue a command once the Target gives it a
* "BMI Command Credit", using AR8K Counter #4. As soon as the
* Target has completed a command, it issues another BMI Command
* Credit (so the Host can issue the next command).
*
* BMI handles all required Target-side cache flushing.
*/
/* Maximum data size used for BMI transfers */
#define BMI_MAX_DATA_SIZE 256
/* len = cmd + addr + length */
#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
sizeof(u32) + \
sizeof(u32) + \
sizeof(u32))
/* BMI Commands */
enum bmi_cmd_id {
BMI_NO_COMMAND = 0,
BMI_DONE = 1,
BMI_READ_MEMORY = 2,
BMI_WRITE_MEMORY = 3,
BMI_EXECUTE = 4,
BMI_SET_APP_START = 5,
BMI_READ_SOC_REGISTER = 6,
BMI_READ_SOC_WORD = 6,
BMI_WRITE_SOC_REGISTER = 7,
BMI_WRITE_SOC_WORD = 7,
BMI_GET_TARGET_ID = 8,
BMI_GET_TARGET_INFO = 8,
BMI_ROMPATCH_INSTALL = 9,
BMI_ROMPATCH_UNINSTALL = 10,
BMI_ROMPATCH_ACTIVATE = 11,
BMI_ROMPATCH_DEACTIVATE = 12,
BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */
BMI_LZ_DATA = 14,
BMI_NVRAM_PROCESS = 15,
};
#define BMI_NVRAM_SEG_NAME_SZ 16
struct bmi_cmd {
__le32 id; /* enum bmi_cmd_id */
union {
struct {
} done;
struct {
__le32 addr;
__le32 len;
} read_mem;
struct {
__le32 addr;
__le32 len;
u8 payload[0];
} write_mem;
struct {
__le32 addr;
__le32 param;
} execute;
struct {
__le32 addr;
} set_app_start;
struct {
__le32 addr;
} read_soc_reg;
struct {
__le32 addr;
__le32 value;
} write_soc_reg;
struct {
} get_target_info;
struct {
__le32 rom_addr;
__le32 ram_addr; /* or value */
__le32 size;
__le32 activate; /* 0=install, but don't activate */
} rompatch_install;
struct {
__le32 patch_id;
} rompatch_uninstall;
struct {
__le32 count;
__le32 patch_ids[0]; /* length of @count */
} rompatch_activate;
struct {
__le32 count;
__le32 patch_ids[0]; /* length of @count */
} rompatch_deactivate;
struct {
__le32 addr;
} lz_start;
struct {
__le32 len; /* max BMI_MAX_DATA_SIZE */
u8 payload[0]; /* length of @len */
} lz_data;
struct {
u8 name[BMI_NVRAM_SEG_NAME_SZ];
} nvram_process;
u8 payload[BMI_MAX_CMDBUF_SIZE];
};
} __packed;
union bmi_resp {
struct {
u8 payload[0];
} read_mem;
struct {
__le32 result;
} execute;
struct {
__le32 value;
} read_soc_reg;
struct {
__le32 len;
__le32 version;
__le32 type;
} get_target_info;
struct {
__le32 patch_id;
} rompatch_install;
struct {
__le32 patch_id;
} rompatch_uninstall;
struct {
/* 0 = nothing executed
* otherwise = NVRAM segment return value */
__le32 result;
} nvram_process;
u8 payload[BMI_MAX_CMDBUF_SIZE];
} __packed;
struct bmi_target_info {
u32 version;
u32 type;
};
/* in msec */
#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1
int ath10k_bmi_done(struct ath10k *ar);
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info);
int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
void *buffer, u32 length);
int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
#define ath10k_bmi_read32(ar, item, val) \
({ \
int ret; \
u32 addr; \
__le32 tmp; \
\
addr = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
*val = __le32_to_cpu(tmp); \
ret; \
})
#define ath10k_bmi_write32(ar, item, val) \
({ \
int ret; \
u32 address; \
__le32 v = __cpu_to_le32(val); \
\
address = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_write_memory(ar, address, \
(u8 *)&v, sizeof(v)); \
ret; \
})
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
#endif /* _BMI_H_ */
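
The ath10k_bmi_read32()/ath10k_bmi_write32() macros wrap BMI memory access
for 32-bit host_interest fields. A minimal usage sketch (hedged: it mirrors
how core.c configures the firmware mode later in this series; hi_option_flag
and the HI_OPTION_* constants come from targaddrs.h):

	u32 flags;
	int ret;

	/* Read-modify-write one 32-bit host_interest word over BMI. */
	ret = ath10k_bmi_read32(ar, hi_option_flag, &flags);
	if (ret)
		return ret;

	flags |= HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT;

	ret = ath10k_bmi_write32(ar, hi_option_flag, flags);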

File diff suppressed because it is too large.


@@ -0,0 +1,516 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _CE_H_
#define _CE_H_
#include "hif.h"
/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 8
#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
* Copy Engine support: low-level Target-side Copy Engine API.
* This is a hardware access layer used by code that understands
* how to use copy engines.
*/
struct ce_state;
/* Copy Engine operational state */
enum ce_op_state {
CE_UNUSED,
CE_PAUSED,
CE_RUNNING,
};
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
#define CE_DESC_FLAGS_META_DATA_LSB 3
struct ce_desc {
__le32 addr;
__le16 nbytes;
__le16 flags; /* %CE_DESC_FLAGS_ */
};
/* Copy Engine Ring internal state */
struct ce_ring_state {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
/*
* For dest ring, this is the next index to be processed
* by software after it was/is received into.
*
* For src ring, this is the last descriptor that was sent
* and completion processed by software.
*
* Regardless of src or dest ring, this is an invariant
* (modulo ring size):
* write index >= read index >= sw_index
*/
unsigned int sw_index;
/* cached copy */
unsigned int write_index;
/*
* For src ring, this is the next index not yet processed by HW.
* This is a cached copy of the real HW index (read index), used
* for avoiding reading the HW index register more often than
* necessary.
* This extends the invariant:
* write index >= read index >= hw_index >= sw_index
*
* For dest ring, this is currently unused.
*/
/* cached copy */
unsigned int hw_index;
/* Start of DMA-coherent area reserved for descriptors */
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
u32 base_addr_ce_space_unaligned;
/*
* Actual start of descriptors.
* Aligned to descriptor-size boundary.
* Points into reserved DMA-coherent area, above.
*/
/* Host address space */
void *base_addr_owner_space;
/* CE address space */
u32 base_addr_ce_space;
/*
* Start of shadow copy of descriptors, within regular memory.
* Aligned to descriptor-size boundary.
*/
void *shadow_base_unaligned;
struct ce_desc *shadow_base;
void **per_transfer_context;
};
/* Copy Engine internal state */
struct ce_state {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
enum ce_op_state state;
void (*send_cb) (struct ce_state *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id);
void (*recv_cb) (struct ce_state *ce_state,
void *per_transfer_recv_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
unsigned int src_sz_max;
struct ce_ring_state *src_ring;
struct ce_ring_state *dest_ring;
};
struct ce_sendlist_item {
/* e.g. buffer or desc list */
dma_addr_t data;
union {
/* simple buffer */
unsigned int nbytes;
/* Rx descriptor list */
unsigned int ndesc;
} u;
/* externally-specified flags; OR-ed with internal flags */
u32 flags;
};
struct ce_sendlist {
unsigned int num_items;
struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
};
/* Copy Engine settable attributes */
struct ce_attr;
/*==================Send====================*/
/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1
/*
* Queue a source buffer to be sent to an anonymous destination buffer.
* ce - which copy engine to use
* buffer - address of buffer
* nbytes - number of bytes to send
* transfer_id - arbitrary ID; reflected to destination
* flags - CE_SEND_FLAG_* values
* Returns 0 on success; otherwise an error status.
*
* Note: If no flags are specified, use CE's default data swap mode.
*
* Implementation note: pushes 1 buffer to Source ring
*/
int ath10k_ce_send(struct ce_state *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
/* 14 bits */
unsigned int transfer_id,
unsigned int flags);
void ath10k_ce_send_cb_register(struct ce_state *ce_state,
void (*send_cb) (struct ce_state *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id),
int disable_interrupts);
/* Append a simple buffer (address/length) to a sendlist. */
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
u32 buffer,
unsigned int nbytes,
/* OR-ed with internal flags */
u32 flags);
/*
* Queue a "sendlist" of buffers to be sent using gather to a single
* anonymous destination buffer
* ce - which copy engine to use
* sendlist - list of simple buffers to send using gather
* transfer_id - arbitrary ID; reflected to destination
* Returns 0 on success; otherwise an error status.
*
* Implementation note: Pushes multiple buffers with Gather to Source ring.
*/
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
void *per_transfer_send_context,
struct ce_sendlist *sendlist,
/* 14 bits */
unsigned int transfer_id);
/*==================Recv=======================*/
/*
* Make a buffer available to receive. The buffer must be at least of a
* minimal size appropriate for this copy engine (src_sz_max attribute).
* ce - which copy engine to use
* per_transfer_recv_context - context passed back to caller's recv_cb
* buffer - address of buffer in CE space
* Returns 0 on success; otherwise an error status.
*
* Implementation note: Pushes a buffer to Dest ring.
*/
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
void *per_transfer_recv_context,
u32 buffer);
void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
void (*recv_cb) (struct ce_state *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags));
/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1
/*
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp);
/*
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
int ath10k_ce_completed_send_next(struct ce_state *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
/*==================CE Engine Shutdown=======================*/
/*
* Support clean shutdown by allowing the caller to revoke
* receive buffers. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
/*
* Support clean shutdown by allowing the caller to cancel
* pending sends. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
void ath10k_ce_deinit(struct ce_state *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
void ath10k_ce_disable_interrupts(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP 1
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2
/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR 8
/* Attributes of an instance of a Copy Engine */
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
/* currently not in use */
unsigned int priority;
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
/*
* Max source send size for this CE.
* This is also the minimum size of a destination buffer.
*/
unsigned int src_sz_max;
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
/* Future use */
void *reserved;
};
/*
* When using sendlist_send to transfer multiple buffer fragments, the
* transfer context of each fragment, except last one, will be filled
* with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
* each fragment done with send and the transfer context would be
* CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
* status of a send completion.
*/
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
#define DR_SIZE_ADDRESS 0x000c
#define CE_CMD_ADDRESS 0x0018
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_DMAX_LENGTH_MSB 15
#define CE_CTRL1_DMAX_LENGTH_LSB 0
#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
#define CE_CTRL1_ADDRESS 0x0010
#define CE_CTRL1_HW_MASK 0x0007ffff
#define CE_CTRL1_SW_MASK 0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK 0x00000000
#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
#define CE_CTRL1_RSTMASK 0xffffffff
#define CE_CTRL1_RESET 0x00000080
#define CE_CMD_HALT_STATUS_MSB 3
#define CE_CMD_HALT_STATUS_LSB 3
#define CE_CMD_HALT_STATUS_MASK 0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET 0
#define CE_CMD_HALT_MSB 0
#define CE_CMD_HALT_MASK 0x00000001
#define HOST_IE_COPY_COMPLETE_MSB 0
#define HOST_IE_COPY_COMPLETE_LSB 0
#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET 0
#define HOST_IE_ADDRESS 0x002c
#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
#define HOST_IS_ADDRESS 0x0030
#define MISC_IE_ADDRESS 0x0034
#define MISC_IS_AXI_ERR_MASK 0x00000400
#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
#define MISC_IS_ADDRESS 0x0038
#define SR_WR_INDEX_ADDRESS 0x003c
#define DST_WR_INDEX_ADDRESS 0x0040
#define CURRENT_SRRI_ADDRESS 0x0044
#define CURRENT_DRRI_ADDRESS 0x0048
#define SRC_WATERMARK_LOW_MSB 31
#define SRC_WATERMARK_LOW_LSB 16
#define SRC_WATERMARK_LOW_MASK 0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET 0
#define SRC_WATERMARK_HIGH_MSB 15
#define SRC_WATERMARK_HIGH_LSB 0
#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET 0
#define SRC_WATERMARK_ADDRESS 0x004c
#define DST_WATERMARK_LOW_LSB 16
#define DST_WATERMARK_LOW_MASK 0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET 0
#define DST_WATERMARK_HIGH_MSB 15
#define DST_WATERMARK_HIGH_LSB 0
#define DST_WATERMARK_HIGH_MASK 0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET 0
#define DST_WATERMARK_ADDRESS 0x0050
static inline u32 ath10k_ce_base_address(unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
MISC_IS_DST_ADDR_ERR_MASK | \
MISC_IS_SRC_LEN_ERR_MASK | \
MISC_IS_DST_MAX_LEN_VIO_MASK | \
MISC_IS_DST_RING_OVERFLOW_MASK | \
MISC_IS_SRC_RING_OVERFLOW_MASK)
#define CE_SRC_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
#define CE_DEST_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
(((int)(toidx)-(int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
#define CE_INTERRUPT_SUMMARY(ar) \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
#endif /* _CE_H_ */
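
The ring macros above implement modular index arithmetic over a
power-of-two ring. A small worked example (hedged: the values are
illustrative only):

	/* nentries = 8, so nentries_mask = 7 */
	unsigned int nentries_mask = 7;
	unsigned int sw_index = 6;
	unsigned int write_index = 1;	/* has wrapped past the end */

	/* outstanding entries: (1 - 6) & 7 == 3 (slots 6, 7 and 0) */
	unsigned int delta = CE_RING_DELTA(nentries_mask, sw_index, write_index);

	while (delta--)	/* advance past them: 6 -> 7 -> 0 -> 1 */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);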


@@ -0,0 +1,665 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/firmware.h>
#include "core.h"
#include "mac.h"
#include "htc.h"
#include "hif.h"
#include "wmi.h"
#include "bmi.h"
#include "debug.h"
#include "htt.h"
unsigned int ath10k_debug_mask;
static bool uart_print;
static unsigned int ath10k_p2p;
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param(uart_print, bool, 0644);
module_param_named(p2p, ath10k_p2p, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
.id = QCA988X_HW_1_0_VERSION,
.name = "qca988x hw1.0",
.patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
.fw = {
.dir = QCA988X_HW_1_0_FW_DIR,
.fw = QCA988X_HW_1_0_FW_FILE,
.otp = QCA988X_HW_1_0_OTP_FILE,
.board = QCA988X_HW_1_0_BOARD_DATA_FILE,
},
},
{
.id = QCA988X_HW_2_0_VERSION,
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
.fw = QCA988X_HW_2_0_FW_FILE,
.otp = QCA988X_HW_2_0_OTP_FILE,
.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
},
},
};
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
ar->is_target_paused = true;
wake_up(&ar->event_queue);
}
static int ath10k_check_fw_version(struct ath10k *ar)
{
char version[32];
if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
ar->fw_version_build >= SUPPORTED_FW_BUILD)
return 0;
snprintf(version, sizeof(version), "%u.%u.%u.%u",
SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
ar->hw->wiphy->fw_version);
ath10k_warn("Please upgrade to version %s (or newer)\n", version);
return 0;
}
static int ath10k_init_connect_htc(struct ath10k *ar)
{
int status;
status = ath10k_wmi_connect_htc_service(ar);
if (status)
goto conn_fail;
/* Start HTC */
status = ath10k_htc_start(ar->htc);
if (status)
goto conn_fail;
/* Wait for WMI event to be ready */
status = ath10k_wmi_wait_for_service_ready(ar);
if (status <= 0) {
ath10k_warn("wmi service ready event not received");
status = -ETIMEDOUT;
goto timeout;
}
ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
return 0;
timeout:
ath10k_htc_stop(ar->htc);
conn_fail:
return status;
}
static int ath10k_init_configure_target(struct ath10k *ar)
{
u32 param_host;
int ret;
/* tell the target which HTC version is used */
ret = ath10k_bmi_write32(ar, hi_app_host_interest,
HTC_PROTOCOL_VERSION);
if (ret) {
ath10k_err("settings HTC version failed\n");
return ret;
}
/* set the firmware mode to STA/IBSS/AP */
ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
if (ret) {
ath10k_err("setting firmware mode (1/2) failed\n");
return ret;
}
/* TODO: the following parameters need to be revisited. */
/* num_device */
param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
/* Firmware mode */
/* FIXME: why FW_MODE_AP? */
param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
/* mac_addr_method */
param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
/* firmware_bridge */
param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
/* fwsubmode */
param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
if (ret) {
ath10k_err("setting firmware mode (2/2) failed\n");
return ret;
}
/* We do all byte-swapping on the host */
ret = ath10k_bmi_write32(ar, hi_be, 0);
if (ret) {
ath10k_err("setting host CPU BE mode failed\n");
return ret;
}
/* FW descriptor/Data swap flags */
ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
if (ret) {
ath10k_err("setting FW data/desc swap flags failed\n");
return ret;
}
return 0;
}
static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
const char *dir,
const char *file)
{
char filename[100];
const struct firmware *fw;
int ret;
if (file == NULL)
return ERR_PTR(-ENOENT);
if (dir == NULL)
dir = ".";
snprintf(filename, sizeof(filename), "%s/%s", dir, file);
ret = request_firmware(&fw, filename, ar->dev);
if (ret)
return ERR_PTR(ret);
return fw;
}
static int ath10k_push_board_ext_data(struct ath10k *ar,
const struct firmware *fw)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
u32 board_ext_data_addr;
int ret;
ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
if (ret) {
ath10k_err("could not read board ext data addr (%d)\n", ret);
return ret;
}
ath10k_dbg(ATH10K_DBG_CORE,
"ath10k: Board extended Data download addr: 0x%x\n",
board_ext_data_addr);
if (board_ext_data_addr == 0)
return 0;
if (fw->size != (board_data_size + board_ext_data_size)) {
ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
fw->size, board_data_size, board_ext_data_size);
return -EINVAL;
}
ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
fw->data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err("could not write board ext data (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
(board_ext_data_size << 16) | 1);
if (ret) {
ath10k_err("could not write board ext data bit (%d)\n", ret);
return ret;
}
return 0;
}
static int ath10k_download_board_data(struct ath10k *ar)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
const struct firmware *fw;
int ret;
fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
ar->hw_params.fw.board);
if (IS_ERR(fw)) {
ath10k_err("could not fetch board data fw file (%ld)\n",
PTR_ERR(fw));
return PTR_ERR(fw);
}
ret = ath10k_push_board_ext_data(ar, fw);
if (ret) {
ath10k_err("could not push board ext data (%d)\n", ret);
goto exit;
}
ret = ath10k_bmi_read32(ar, hi_board_data, &address);
if (ret) {
ath10k_err("could not read board data addr (%d)\n", ret);
goto exit;
}
ret = ath10k_bmi_write_memory(ar, address, fw->data,
min_t(u32, board_data_size, fw->size));
if (ret) {
ath10k_err("could not write board data (%d)\n", ret);
goto exit;
}
ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
if (ret) {
ath10k_err("could not write board data bit (%d)\n", ret);
goto exit;
}
exit:
release_firmware(fw);
return ret;
}
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
const struct firmware *fw;
u32 address;
u32 exec_param;
int ret;
/* OTP is optional */
if (ar->hw_params.fw.otp == NULL) {
ath10k_info("otp file not defined\n");
return 0;
}
address = ar->hw_params.patch_load_addr;
fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
ar->hw_params.fw.otp);
if (IS_ERR(fw)) {
ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
return 0;
}
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
goto exit;
}
exec_param = 0;
ret = ath10k_bmi_execute(ar, address, &exec_param);
if (ret) {
ath10k_err("could not execute otp (%d)\n", ret);
goto exit;
}
exit:
release_firmware(fw);
return ret;
}
static int ath10k_download_fw(struct ath10k *ar)
{
const struct firmware *fw;
u32 address;
int ret;
if (ar->hw_params.fw.fw == NULL)
return -EINVAL;
address = ar->hw_params.patch_load_addr;
fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
ar->hw_params.fw.fw);
if (IS_ERR(fw)) {
ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
return PTR_ERR(fw);
}
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
if (ret) {
ath10k_err("could not write fw (%d)\n", ret);
goto exit;
}
exit:
release_firmware(fw);
return ret;
}
static int ath10k_init_download_firmware(struct ath10k *ar)
{
int ret;
ret = ath10k_download_board_data(ar);
if (ret)
return ret;
ret = ath10k_download_and_run_otp(ar);
if (ret)
return ret;
ret = ath10k_download_fw(ar);
if (ret)
return ret;
return ret;
}
static int ath10k_init_uart(struct ath10k *ar)
{
int ret;
/*
* Explicitly setting UART prints to zero as target turns it on
* based on scratch registers.
*/
ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
if (ret) {
ath10k_warn("could not disable UART prints (%d)\n", ret);
return ret;
}
if (!uart_print) {
ath10k_info("UART prints disabled\n");
return 0;
}
ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
if (ret) {
ath10k_warn("could not enable UART prints (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
if (ret) {
ath10k_warn("could not enable UART prints (%d)\n", ret);
return ret;
}
ath10k_info("UART prints enabled\n");
return 0;
}
static int ath10k_init_hw_params(struct ath10k *ar)
{
const struct ath10k_hw_params *uninitialized_var(hw_params);
int i;
for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
hw_params = &ath10k_hw_params_list[i];
if (hw_params->id == ar->target_version)
break;
}
if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
ath10k_err("Unsupported hardware version: 0x%x\n",
ar->target_version);
return -EINVAL;
}
ar->hw_params = *hw_params;
ath10k_info("Hardware name %s version 0x%x\n",
ar->hw_params.name, ar->target_version);
return 0;
}
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops)
{
struct ath10k *ar;
ar = ath10k_mac_create();
if (!ar)
return NULL;
ar->ath_common.priv = ar;
ar->ath_common.hw = ar->hw;
ar->p2p = !!ath10k_p2p;
ar->dev = dev;
ar->hif.priv = hif_priv;
ar->hif.ops = hif_ops;
ar->hif.bus = bus;
ar->free_vdev_map = 0xFF; /* 8 vdevs */
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
ar->workqueue = create_singlethread_workqueue("ath10k_wq");
if (!ar->workqueue)
goto err_wq;
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
init_completion(&ar->offchan_tx_completed);
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);
init_waitqueue_head(&ar->event_queue);
return ar;
err_wq:
ath10k_mac_destroy(ar);
return NULL;
}
EXPORT_SYMBOL(ath10k_core_create);
void ath10k_core_destroy(struct ath10k *ar)
{
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
ath10k_mac_destroy(ar);
}
EXPORT_SYMBOL(ath10k_core_destroy);
int ath10k_core_register(struct ath10k *ar)
{
struct ath10k_htc_ops htc_ops;
struct bmi_target_info target_info;
int status;
memset(&target_info, 0, sizeof(target_info));
status = ath10k_bmi_get_target_info(ar, &target_info);
if (status)
goto err;
ar->target_version = target_info.version;
ar->hw->wiphy->hw_version = target_info.version;
status = ath10k_init_hw_params(ar);
if (status)
goto err;
if (ath10k_init_configure_target(ar)) {
status = -EINVAL;
goto err;
}
status = ath10k_init_download_firmware(ar);
if (status)
goto err;
status = ath10k_init_uart(ar);
if (status)
goto err;
htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;
ar->htc = ath10k_htc_create(ar, &htc_ops);
if (IS_ERR(ar->htc)) {
status = PTR_ERR(ar->htc);
ath10k_err("could not create HTC (%d)\n", status);
goto err;
}
status = ath10k_bmi_done(ar);
if (status)
goto err_htc_destroy;
status = ath10k_wmi_attach(ar);
if (status) {
ath10k_err("WMI attach failed: %d\n", status);
goto err_htc_destroy;
}
status = ath10k_htc_wait_target(ar->htc);
if (status)
goto err_wmi_detach;
ar->htt = ath10k_htt_attach(ar);
if (!ar->htt) {
status = -ENOMEM;
goto err_wmi_detach;
}
status = ath10k_init_connect_htc(ar);
if (status)
goto err_htt_detach;
ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
status = ath10k_check_fw_version(ar);
if (status)
goto err_disconnect_htc;
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
goto err_disconnect_htc;
}
status = ath10k_wmi_wait_for_unified_ready(ar);
if (status <= 0) {
ath10k_err("wmi unified ready event not received\n");
status = -ETIMEDOUT;
goto err_disconnect_htc;
}
status = ath10k_htt_attach_target(ar->htt);
if (status)
goto err_disconnect_htc;
status = ath10k_mac_register(ar);
if (status)
goto err_disconnect_htc;
status = ath10k_debug_create(ar);
if (status) {
ath10k_err("unable to initialize debugfs\n");
goto err_unregister_mac;
}
return 0;
err_unregister_mac:
ath10k_mac_unregister(ar);
err_disconnect_htc:
ath10k_htc_stop(ar->htc);
err_htt_detach:
ath10k_htt_detach(ar->htt);
err_wmi_detach:
ath10k_wmi_detach(ar);
err_htc_destroy:
ath10k_htc_destroy(ar->htc);
err:
return status;
}
EXPORT_SYMBOL(ath10k_core_register);
void ath10k_core_unregister(struct ath10k *ar)
{
/* We must unregister from mac80211 before we stop HTC and HIF.
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
ath10k_mac_unregister(ar);
ath10k_htc_stop(ar->htc);
ath10k_htt_detach(ar->htt);
ath10k_wmi_detach(ar);
ath10k_htc_destroy(ar->htc);
}
EXPORT_SYMBOL(ath10k_core_unregister);
int ath10k_core_target_suspend(struct ath10k *ar)
{
int ret;
ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
ret = ath10k_wmi_pdev_suspend_target(ar);
if (ret)
ath10k_warn("could not suspend target (%d)\n", ret);
return ret;
}
EXPORT_SYMBOL(ath10k_core_target_suspend);
int ath10k_core_target_resume(struct ath10k *ar)
{
int ret;
ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret)
ath10k_warn("could not resume target (%d)\n", ret);
return ret;
}
EXPORT_SYMBOL(ath10k_core_target_resume);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
MODULE_LICENSE("Dual BSD/GPL");
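
Since the module parameters above are registered with mode 0644, they can
be set at load time or changed at runtime through sysfs. A brief usage
sketch (hedged: the mask value is illustrative, and the module name
ath10k_core comes from the Makefile earlier in this series):

modprobe ath10k_core debug_mask=0x2 uart_print=1
echo 0xffffffff > /sys/module/ath10k_core/parameters/debug_mask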


@@ -0,0 +1,369 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _CORE_H_
#define _CORE_H_
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "htc.h"
#include "hw.h"
#include "targaddrs.h"
#include "wmi.h"
#include "../ath.h"
#include "../regd.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#define WO(_f) ((_f##_OFFSET) >> 2)
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
struct ath10k;
enum ath10k_bus {
ATH10K_BUS_PCI,
};
struct ath10k_skb_cb {
dma_addr_t paddr;
bool is_mapped;
bool is_aborted;
struct {
u8 vdev_id;
u16 msdu_id;
u8 tid;
bool is_offchan;
bool is_conf;
bool discard;
bool no_ack;
u8 refcount;
struct sk_buff *txfrag;
struct sk_buff *msdu;
} __packed htt;
/* 4 bytes left on 64bit arch */
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
{
if (ATH10K_SKB_CB(skb)->is_mapped)
return -EINVAL;
ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
return -EIO;
ATH10K_SKB_CB(skb)->is_mapped = true;
return 0;
}
static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
{
if (!ATH10K_SKB_CB(skb)->is_mapped)
return -EINVAL;
dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
DMA_TO_DEVICE);
ATH10K_SKB_CB(skb)->is_mapped = false;
return 0;
}
static inline u32 host_interest_item_address(u32 item_offset)
{
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}
struct ath10k_bmi {
bool done_sent;
};
struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
atomic_t pending_tx_count;
wait_queue_head_t wq;
struct sk_buff_head wmi_event_list;
struct work_struct wmi_event_work;
};
struct ath10k_peer_stat {
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
};
struct ath10k_target_stats {
/* PDEV stats */
s32 ch_noise_floor;
u32 tx_frame_count;
u32 rx_frame_count;
u32 rx_clear_count;
u32 cycle_count;
u32 phy_err_count;
u32 chan_tx_power;
/* PDEV TX stats */
s32 comp_queued;
s32 comp_delivered;
s32 msdu_enqued;
s32 mpdu_enqued;
s32 wmm_drop;
s32 local_enqued;
s32 local_freed;
s32 hw_queued;
s32 hw_reaped;
s32 underrun;
s32 tx_abort;
s32 mpdus_requed;
u32 tx_ko;
u32 data_rc;
u32 self_triggers;
u32 sw_retry_failure;
u32 illgl_rate_phy_err;
u32 pdev_cont_xretry;
u32 pdev_tx_timeout;
u32 pdev_resets;
u32 phy_underrun;
u32 txop_ovf;
/* PDEV RX stats */
s32 mid_ppdu_route_change;
s32 status_rcvd;
s32 r0_frags;
s32 r1_frags;
s32 r2_frags;
s32 r3_frags;
s32 htt_msdus;
s32 htt_mpdus;
s32 loc_msdus;
s32 loc_mpdus;
s32 oversize_amsdu;
s32 phy_errs;
s32 phy_err_drop;
s32 mpdu_errs;
/* VDEV STATS */
/* PEER STATS */
u8 peers;
struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
/* TODO: Beacon filter stats */
};
#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
struct ath10k_peer {
struct list_head list;
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
struct ath10k *ar;
struct ieee80211_vif *vif;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
u8 def_wep_key_index;
u16 tx_seq_no;
union {
struct {
u8 bssid[ETH_ALEN];
u32 uapsd;
} sta;
struct {
/* 127 stations; wmi limit */
u8 tim_bitmap[16];
u8 tim_len;
u32 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
bool hidden_ssid;
/* P2P_IE with NoA attribute for P2P_GO case */
u32 noa_len;
u8 *noa_data;
} ap;
struct {
u8 bssid[ETH_ALEN];
} ibss;
} u;
};
struct ath10k_vif_iter {
u32 vdev_id;
struct ath10k_vif *arvif;
};
struct ath10k_debug {
struct dentry *debugfs_phy;
struct ath10k_target_stats target_stats;
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
struct completion event_stats_compl;
};
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
u16 fw_version_release;
u16 fw_version_build;
u32 phy_capability;
u32 hw_min_tx_power;
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
struct targetdef *targetdef;
struct hostdef *hostdef;
bool p2p;
struct {
void *priv;
enum ath10k_bus bus;
const struct ath10k_hif_ops *ops;
} hif;
struct ath10k_wmi wmi;
wait_queue_head_t event_queue;
bool is_target_paused;
struct ath10k_bmi bmi;
struct ath10k_htc *htc;
struct ath10k_htt *htt;
struct ath10k_hw_params {
u32 id;
const char *name;
u32 patch_load_addr;
struct ath10k_hw_params_fw {
const char *dir;
const char *fw;
const char *otp;
const char *board;
} fw;
} hw_params;
struct {
struct completion started;
struct completion completed;
struct completion on_channel;
struct timer_list timeout;
bool is_roc;
bool in_progress;
bool aborting;
int vdev_id;
int roc_freq;
} scan;
struct {
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
} mac;
/* should never be NULL; needed for regular htt rx */
struct ieee80211_channel *rx_channel;
/* valid during scan; needed for mgmt rx during scan */
struct ieee80211_channel *scan_channel;
int free_vdev_map;
int monitor_vdev_id;
bool monitor_enabled;
bool monitor_present;
unsigned int filter_flags;
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
/* protects shared structure data */
spinlock_t data_lock;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
};
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar);
void ath10k_core_unregister(struct ath10k *ar);
int ath10k_core_target_suspend(struct ath10k *ar);
int ath10k_core_target_resume(struct ath10k *ar);
#endif /* _CORE_H_ */

View File

@ -0,0 +1,503 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/debugfs.h>
#include "core.h"
#include "debug.h"
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
int rtn;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
rtn = printk("%sath10k: %pV", level, &vaf);
va_end(args);
return rtn;
}
int ath10k_info(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret;
va_start(args, fmt);
vaf.va = &args;
ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
trace_ath10k_log_info(&vaf);
va_end(args);
return ret;
}
EXPORT_SYMBOL(ath10k_info);
int ath10k_err(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret;
va_start(args, fmt);
vaf.va = &args;
ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
trace_ath10k_log_err(&vaf);
va_end(args);
return ret;
}
EXPORT_SYMBOL(ath10k_err);
int ath10k_warn(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret = 0;
va_start(args, fmt);
vaf.va = &args;
if (net_ratelimit())
ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
trace_ath10k_log_warn(&vaf);
va_end(args);
return ret;
}
EXPORT_SYMBOL(ath10k_warn);
#ifdef CONFIG_ATH10K_DEBUGFS
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size)
{
memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
}
static ssize_t ath10k_read_wmi_services(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char *buf;
unsigned int len = 0, buf_len = 1500;
const char *status;
ssize_t ret_cnt;
int i;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
if (len > buf_len)
len = buf_len;
for (i = 0; i < WMI_SERVICE_LAST; i++) {
if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
status = "enabled";
else
status = "disabled";
len += scnprintf(buf + len, buf_len - len,
"0x%02x - %20s - %s\n",
i, wmi_service_name(i), status);
}
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
mutex_unlock(&ar->conf_mutex);
kfree(buf);
return ret_cnt;
}
static const struct file_operations fops_wmi_services = {
.read = ath10k_read_wmi_services,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev)
{
u8 *tmp = ev->data;
struct ath10k_target_stats *stats;
int num_pdev_stats, num_vdev_stats, num_peer_stats;
struct wmi_pdev_stats *ps;
int i;
mutex_lock(&ar->conf_mutex);
stats = &ar->debug.target_stats;
num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
if (num_pdev_stats) {
ps = (struct wmi_pdev_stats *)tmp;
stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
stats->cycle_count = __le32_to_cpu(ps->cycle_count);
stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
stats->comp_delivered =
__le32_to_cpu(ps->wal.tx.comp_delivered);
stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
stats->sw_retry_failure =
__le32_to_cpu(ps->wal.tx.sw_retry_failure);
stats->illgl_rate_phy_err =
__le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
stats->pdev_cont_xretry =
__le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
stats->pdev_tx_timeout =
__le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
stats->mid_ppdu_route_change =
__le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
stats->oversize_amsdu =
__le32_to_cpu(ps->wal.rx.oversize_amsdu);
stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
tmp += sizeof(struct wmi_pdev_stats);
}
/* 0 or max vdevs */
/* Currently firmware does not support VDEV stats */
if (num_vdev_stats) {
struct wmi_vdev_stats *vdev_stats;
for (i = 0; i < num_vdev_stats; i++) {
vdev_stats = (struct wmi_vdev_stats *)tmp;
tmp += sizeof(struct wmi_vdev_stats);
}
}
if (num_peer_stats) {
struct wmi_peer_stats *peer_stats;
struct ath10k_peer_stat *s;
stats->peers = num_peer_stats;
for (i = 0; i < num_peer_stats; i++) {
peer_stats = (struct wmi_peer_stats *)tmp;
s = &stats->peer_stat[i];
WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
s->peer_macaddr);
s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
s->peer_tx_rate =
__le32_to_cpu(peer_stats->peer_tx_rate);
tmp += sizeof(struct wmi_peer_stats);
}
}
mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats;
char *buf;
unsigned int len = 0, buf_len = 2500;
ssize_t ret_cnt;
long left;
int i;
int ret;
fw_stats = &ar->debug.target_stats;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
if (ret) {
ath10k_warn("could not request stats (%d)\n", ret);
kfree(buf);
return -EIO;
}
left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
if (left <= 0) {
kfree(buf);
return -ETIMEDOUT;
}
mutex_lock(&ar->conf_mutex);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PDEV stats");
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Channel noise floor", fw_stats->ch_noise_floor);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Channel TX power", fw_stats->chan_tx_power);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"TX frame count", fw_stats->tx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RX frame count", fw_stats->rx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RX clear count", fw_stats->rx_clear_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Cycle count", fw_stats->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY error count", fw_stats->phy_err_count);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PDEV TX stats");
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HTT cookies queued", fw_stats->comp_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HTT cookies disp.", fw_stats->comp_delivered);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDU queued", fw_stats->msdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU queued", fw_stats->mpdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs dropped", fw_stats->wmm_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Local enqued", fw_stats->local_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Local freed", fw_stats->local_freed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HW queued", fw_stats->hw_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs reaped", fw_stats->hw_reaped);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Num underruns", fw_stats->underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", fw_stats->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs requed", fw_stats->mpdus_requed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Excessive retries", fw_stats->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HW rate", fw_stats->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Sched self tiggers", fw_stats->self_triggers);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Dropped due to SW retries",
fw_stats->sw_retry_failure);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Illegal rate phy errors",
fw_stats->illgl_rate_phy_err);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Pdev continous xretry", fw_stats->pdev_cont_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"TX timeout", fw_stats->pdev_tx_timeout);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PDEV resets", fw_stats->pdev_resets);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY underrun", fw_stats->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU is more than txop limit", fw_stats->txop_ovf);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PDEV RX stats");
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Mid PPDU route change",
fw_stats->mid_ppdu_route_change);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Tot. number of statuses", fw_stats->status_rcvd);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 0", fw_stats->r0_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 1", fw_stats->r1_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 2", fw_stats->r2_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 3", fw_stats->r3_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs delivered to HTT", fw_stats->htt_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to HTT", fw_stats->htt_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs delivered to stack", fw_stats->loc_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to stack", fw_stats->loc_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Oversized AMSUs", fw_stats->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors", fw_stats->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors drops", fw_stats->phy_err_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PEER stats");
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
for (i = 0; i < fw_stats->peers; i++) {
len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
"Peer MAC address",
fw_stats->peer_stat[i].peer_macaddr);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer TX rate",
fw_stats->peer_stat[i].peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
if (len > buf_len)
len = buf_len;
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
mutex_unlock(&ar->conf_mutex);
kfree(buf);
return ret_cnt;
}
static const struct file_operations fops_fw_stats = {
.read = ath10k_read_fw_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
ar->hw->wiphy->debugfsdir);
if (!ar->debug.debugfs_phy)
return -ENOMEM;
init_completion(&ar->debug.event_stats_compl);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_fw_stats);
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
return 0;
}
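/*
 * Usage note (assuming debugfs is mounted at /sys/kernel/debug): the
 * directory is created under the wiphy's debugfs dir, so the files land
 * at e.g. /sys/kernel/debug/ieee80211/phy0/ath10k/ and can simply be
 * read:
 *
 *	# cat /sys/kernel/debug/ieee80211/phy0/ath10k/wmi_services
 *	# cat /sys/kernel/debug/ieee80211/phy0/ath10k/fw_stats
 */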
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (ath10k_debug_mask & mask)
ath10k_printk(KERN_DEBUG, "%pV", &vaf);
trace_ath10k_log_dbg(mask, &vaf);
va_end(args);
}
EXPORT_SYMBOL(ath10k_dbg);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
if (ath10k_debug_mask & mask) {
if (msg)
ath10k_dbg(mask, "%s\n", msg);
print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
}
/* tracing code doesn't like null strings :/ */
trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
buf, len);
}
EXPORT_SYMBOL(ath10k_dbg_dump);
#endif /* CONFIG_ATH10K_DEBUG */

View File

@ -0,0 +1,90 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _DEBUG_H_
#define _DEBUG_H_
#include <linux/types.h>
#include "trace.h"
enum ath10k_debug_mask {
ATH10K_DBG_PCI = 0x00000001,
ATH10K_DBG_WMI = 0x00000002,
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
ATH10K_DBG_CORE = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
ATH10K_DBG_ANY = 0xffffffff,
};
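/*
 * The mask bits above may be OR'ed together. For example, enabling WMI
 * and HTT debugging would use 0x2 | 0x8 = 0xa; assuming debug_mask is the
 * module parameter backing ath10k_debug_mask, that could be set with:
 *
 *	# modprobe ath10k_core debug_mask=0xa
 */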
extern unsigned int ath10k_debug_mask;
extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size);
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
#else
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size)
{
}
static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev)
{
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
const char *fmt, ...);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
#else /* CONFIG_ATH10K_DEBUG */
static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask,
const char *fmt, ...)
{
return 0;
}
static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
}
#endif /* CONFIG_ATH10K_DEBUG */
#endif /* _DEBUG_H_ */

View File

@ -0,0 +1,137 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HIF_H_
#define _HIF_H_
#include <linux/kernel.h>
#include "core.h"
struct ath10k_hif_cb {
int (*tx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
unsigned transfer_id);
int (*rx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
u8 pipe_id);
};
struct ath10k_hif_ops {
/* Send the head of a buffer to HIF for transmission to the target. */
int (*send_head)(struct ath10k *ar, u8 pipe_id,
unsigned int transfer_id,
unsigned int nbytes,
struct sk_buff *buf);
	/*
	 * API to handle HIF-specific BMI message exchanges. This API is
	 * synchronous and may only be called from a context that can
	 * block (sleep).
	 */
int (*exchange_bmi_msg)(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len);
int (*start)(struct ath10k *ar);
void (*stop)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe,
int *ul_is_polled, int *dl_is_polled);
void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
/*
* Check if prior sends have completed.
*
* Check whether the pipe in question has any completed
* sends that have not yet been processed.
* This function is only relevant for HIF pipes that are configured
* to be polled rather than interrupt-driven.
*/
void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
void (*init)(struct ath10k *ar,
struct ath10k_hif_cb *callbacks);
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
};
static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
unsigned int transfer_id,
unsigned int nbytes,
struct sk_buff *buf)
{
return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
}
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len)
{
return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
response, response_len);
}
static inline int ath10k_hif_start(struct ath10k *ar)
{
return ar->hif.ops->start(ar);
}
static inline void ath10k_hif_stop(struct ath10k *ar)
{
return ar->hif.ops->stop(ar);
}
static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id,
u8 *ul_pipe, u8 *dl_pipe,
int *ul_is_polled,
int *dl_is_polled)
{
return ar->hif.ops->map_service_to_pipe(ar, service_id,
ul_pipe, dl_pipe,
ul_is_polled, dl_is_polled);
}
static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
u8 *ul_pipe, u8 *dl_pipe)
{
ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
}
static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
u8 pipe_id, int force)
{
ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
static inline void ath10k_hif_init(struct ath10k *ar,
struct ath10k_hif_cb *callbacks)
{
ar->hif.ops->init(ar, callbacks);
}
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
u8 pipe_id)
{
return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}
#endif /* _HIF_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,368 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HTC_H_
#define _HTC_H_
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/semaphore.h>
#include <linux/timer.h>
struct ath10k;
/****************/
/* HTC protocol */
/****************/
/*
 * HTC - host-target control protocol
 *
 * tx packets are generally <htc_hdr><payload>
 * rx packets are more complex: <htc_hdr><payload><trailer>
 *
 * The payload + trailer length is stored in len.
 * To get the payload-only length, subtract trailer_len from len.
 *
 * The trailer contains (possibly) multiple <htc_record> entries.
 * Each record is an id-len-value tuple.
 *
 * The HTC header flags, control_byte0 and control_byte1
 * have different meanings depending on whether the frame
 * is tx or rx.
 *
 * Alignment: htc_hdr, payload and trailer are
 * 4-byte aligned.
 */
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
};
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
struct ath10k_htc_hdr {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
__le16 len;
union {
u8 trailer_len; /* for rx */
u8 control_byte0;
} __packed;
union {
u8 seq_no; /* for tx */
u8 control_byte1;
} __packed;
u8 pad0;
u8 pad1;
} __packed __aligned(4);
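/*
 * Minimal sketch (illustrative only) of the len/trailer_len relationship
 * described in the protocol comment above, for a received frame:
 *
 *	struct ath10k_htc_hdr *hdr = (struct ath10k_htc_hdr *)skb->data;
 *	u16 total_len = __le16_to_cpu(hdr->len);  // payload + trailer
 *	u8 trailer_len = hdr->trailer_len;        // rx only
 *	u16 payload_len = total_len - trailer_len;
 */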
enum ath10k_ath10k_htc_msg_id {
ATH10K_HTC_MSG_READY_ID = 1,
ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2,
ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4,
ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
};
enum ath10k_htc_version {
ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
};
enum ath10k_htc_conn_flags {
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
};
enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2,
ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
struct ath10k_ath10k_htc_msg_hdr {
__le16 message_id; /* @enum htc_message_id */
} __packed;
struct ath10k_htc_unknown {
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_ready {
__le16 credit_count;
__le16 credit_size;
u8 max_endpoints;
u8 pad0;
} __packed;
struct ath10k_htc_ready_extended {
struct ath10k_htc_ready base;
u8 htc_version; /* @enum ath10k_htc_version */
u8 max_msgs_per_htc_bundle;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_conn_svc {
__le16 service_id;
__le16 flags; /* @enum ath10k_htc_conn_flags */
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_conn_svc_response {
__le16 service_id;
u8 status; /* @enum ath10k_htc_conn_svc_status */
u8 eid;
__le16 max_msg_size;
} __packed;
struct ath10k_htc_setup_complete_extended {
u8 pad0;
u8 pad1;
__le32 flags; /* @enum htc_setup_complete_flags */
u8 max_msgs_per_bundled_recv;
u8 pad2;
u8 pad3;
u8 pad4;
} __packed;
struct ath10k_htc_msg {
struct ath10k_ath10k_htc_msg_hdr hdr;
union {
/* host-to-target */
struct ath10k_htc_conn_svc connect_service;
struct ath10k_htc_ready ready;
struct ath10k_htc_ready_extended ready_ext;
struct ath10k_htc_unknown unknown;
struct ath10k_htc_setup_complete_extended setup_complete_ext;
/* target-to-host */
struct ath10k_htc_conn_svc_response connect_service_response;
};
} __packed __aligned(4);
enum ath10k_ath10k_htc_record_id {
ATH10K_HTC_RECORD_NULL = 0,
ATH10K_HTC_RECORD_CREDITS = 1
};
struct ath10k_ath10k_htc_record_hdr {
u8 id; /* @enum ath10k_ath10k_htc_record_id */
u8 len;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_credit_report {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 credits;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_record {
struct ath10k_ath10k_htc_record_hdr hdr;
union {
struct ath10k_htc_credit_report credit_report[0];
		u8 payload[0];
};
} __packed __aligned(4);
/*
* note: the trailer offset is dynamic depending
* on payload length. this is only a struct layout draft
*/
struct ath10k_htc_frame {
struct ath10k_htc_hdr hdr;
union {
struct ath10k_htc_msg msg;
u8 payload[0];
};
struct ath10k_htc_record trailer[0];
} __packed __aligned(4);
/*******************/
/* Host-side stuff */
/*******************/
enum ath10k_htc_svc_gid {
ATH10K_HTC_SVC_GRP_RSVD = 0,
ATH10K_HTC_SVC_GRP_WMI = 1,
ATH10K_HTC_SVC_GRP_NMI = 2,
ATH10K_HTC_SVC_GRP_HTT = 3,
ATH10K_HTC_SVC_GRP_TEST = 254,
ATH10K_HTC_SVC_GRP_LAST = 255,
};
#define SVC(group, idx) \
(int)(((int)(group) << 8) | (int)(idx))
enum ath10k_htc_svc_id {
/* NOTE: service ID of 0x0000 is reserved and should never be used */
ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,
ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
/* raw stream service (i.e. flash, tcmd, calibration apps) */
ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
};
#undef SVC
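/*
 * A service id packs the group into the upper byte and the index into the
 * lower byte, so it can be decomposed again (sketch; the helper names are
 * made up):
 *
 *	#define SVC_GROUP(id)	(((id) >> 8) & 0xff)
 *	#define SVC_INDEX(id)	((id) & 0xff)
 *
 * e.g. ATH10K_HTC_SVC_ID_WMI_DATA_BE == (1 << 8) | 1 == 0x0101.
 */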
enum ath10k_htc_ep_id {
ATH10K_HTC_EP_UNUSED = -1,
ATH10K_HTC_EP_0 = 0,
ATH10K_HTC_EP_1 = 1,
ATH10K_HTC_EP_2,
ATH10K_HTC_EP_3,
ATH10K_HTC_EP_4,
ATH10K_HTC_EP_5,
ATH10K_HTC_EP_6,
ATH10K_HTC_EP_7,
ATH10K_HTC_EP_8,
ATH10K_HTC_EP_COUNT,
};
struct ath10k_htc_ops {
void (*target_send_suspend_complete)(struct ath10k *ar);
};
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
};
/* service connection information */
struct ath10k_htc_svc_conn_req {
u16 service_id;
struct ath10k_htc_ep_ops ep_ops;
int max_send_queue_depth;
};
/* service connection response information */
struct ath10k_htc_svc_conn_resp {
u8 buffer_len;
u8 actual_len;
enum ath10k_htc_ep_id eid;
unsigned int max_msg_len;
u8 connect_resp_code;
};
#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
#define ATH10K_HTC_MAX_LEN 4096
#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
sizeof(struct ath10k_htc_hdr))
#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
struct ath10k_htc_ep {
struct ath10k_htc *htc;
enum ath10k_htc_ep_id eid;
enum ath10k_htc_svc_id service_id;
struct ath10k_htc_ep_ops ep_ops;
int max_tx_queue_depth;
int max_ep_message_len;
u8 ul_pipe_id;
u8 dl_pipe_id;
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
struct sk_buff_head tx_queue;
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
struct work_struct send_work;
};
struct ath10k_htc_svc_tx_credits {
u16 service_id;
u8 credit_allocation;
};
struct ath10k_htc {
struct ath10k *ar;
struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
/* protects endpoint and stopping fields */
spinlock_t tx_lock;
struct ath10k_htc_ops htc_ops;
u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
int control_resp_len;
struct completion ctl_resp;
int total_transmit_credits;
struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
int target_credit_size;
bool stopping;
};
struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
struct ath10k_htc_ops *htc_ops);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
void ath10k_htc_stop(struct ath10k_htc *htc);
void ath10k_htc_destroy(struct ath10k_htc *htc);
struct sk_buff *ath10k_htc_alloc_skb(int size);
#endif

View File

@ -0,0 +1,152 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include "htt.h"
#include "core.h"
#include "debug.h"
static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
int status;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
&conn_resp);
if (status)
return status;
htt->eid = conn_resp.eid;
return 0;
}
struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
{
struct ath10k_htt *htt;
int ret;
htt = kzalloc(sizeof(*htt), GFP_KERNEL);
if (!htt)
return NULL;
htt->ar = ar;
htt->max_throughput_mbps = 800;
/*
* Connect to HTC service.
* This has to be done before calling ath10k_htt_rx_attach,
* since ath10k_htt_rx_attach involves sending a rx ring configure
* message to the target.
*/
if (ath10k_htt_htc_attach(htt))
goto err_htc_attach;
ret = ath10k_htt_tx_attach(htt);
if (ret) {
ath10k_err("could not attach htt tx (%d)\n", ret);
goto err_htc_attach;
}
if (ath10k_htt_rx_attach(htt))
goto err_rx_attach;
/*
* Prefetch enough data to satisfy target
* classification engine.
	 * This is for LL chips. HL chips will probably
	 * transfer the whole frame in the tx fragment.
*/
htt->prefetch_len =
36 + /* 802.11 + qos + ht */
4 + /* 802.1q */
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
return htt;
err_rx_attach:
ath10k_htt_tx_detach(htt);
err_htc_attach:
kfree(htt);
return NULL;
}
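/*
 * Bring-up order sketch (pieced together from the comments above as an
 * assumption, not a verbatim sequence from core.c):
 *
 *	ar->htc = ath10k_htc_create(ar, &htc_ops);	// HTC first
 *	ar->htt = ath10k_htt_attach(ar);		// HTT service + tx/rx
 *	ret = ath10k_htt_attach_target(ar->htt);	// version handshake,
 *							// rx ring config
 */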
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
ath10k_dbg(ATH10K_DBG_HTT,
"htt target version %d.%d; host version %d.%d\n",
htt->target_version_major,
htt->target_version_minor,
HTT_CURRENT_VERSION_MAJOR,
HTT_CURRENT_VERSION_MINOR);
if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
ath10k_err("htt major versions are incompatible!\n");
return -ENOTSUPP;
}
if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
ath10k_warn("htt minor version differ but still compatible\n");
return 0;
}
int ath10k_htt_attach_target(struct ath10k_htt *htt)
{
int status;
init_completion(&htt->target_version_received);
status = ath10k_htt_h2t_ver_req_msg(htt);
if (status)
return status;
status = wait_for_completion_timeout(&htt->target_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
if (status <= 0) {
ath10k_warn("htt version request timed out\n");
return -ETIMEDOUT;
}
status = ath10k_htt_verify_version(htt);
if (status)
return status;
return ath10k_htt_send_rx_ring_cfg_ll(htt);
}
void ath10k_htt_detach(struct ath10k_htt *htt)
{
ath10k_htt_rx_detach(htt);
ath10k_htt_tx_detach(htt);
kfree(htt);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,510 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ieee80211_wake_queues(htt->ar->hw);
}
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
spin_lock_bh(&htt->tx_lock);
__ath10k_htt_tx_dec_pending(htt);
spin_unlock_bh(&htt->tx_lock);
}
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
int ret = 0;
spin_lock_bh(&htt->tx_lock);
if (htt->num_pending_tx >= htt->max_num_pending_tx) {
ret = -EBUSY;
goto exit;
}
htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx)
ieee80211_stop_queues(htt->ar->hw);
exit:
spin_unlock_bh(&htt->tx_lock);
return ret;
}
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
int msdu_id;
lockdep_assert_held(&htt->tx_lock);
msdu_id = find_first_zero_bit(htt->used_msdu_ids,
htt->max_num_pending_tx);
if (msdu_id == htt->max_num_pending_tx)
return -ENOBUFS;
ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
__set_bit(msdu_id, htt->used_msdu_ids);
return msdu_id;
}
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
lockdep_assert_held(&htt->tx_lock);
if (!test_bit(msdu_id, htt->used_msdu_ids))
ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
__clear_bit(msdu_id, htt->used_msdu_ids);
}
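/*
 * Typical caller pattern for the two helpers above (sketch): both must be
 * called under tx_lock, matching the lockdep assertions:
 *
 *	spin_lock_bh(&htt->tx_lock);
 *	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
 *	if (msdu_id >= 0)
 *		htt->pending_tx[msdu_id] = txdesc;
 *	spin_unlock_bh(&htt->tx_lock);
 */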
int ath10k_htt_tx_attach(struct ath10k_htt *htt)
{
u8 pipe;
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
	/* At this point the free queue number should hint at the maximum
	 * queue length */
pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
htt->max_num_pending_tx, GFP_KERNEL);
if (!htt->pending_tx)
return -ENOMEM;
htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(htt->max_num_pending_tx),
GFP_KERNEL);
if (!htt->used_msdu_ids) {
kfree(htt->pending_tx);
return -ENOMEM;
}
return 0;
}
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
struct sk_buff *txdesc;
int msdu_id;
/* No locks needed. Called after communication with the device has
* been stopped. */
for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
txdesc = htt->pending_tx[msdu_id];
if (!txdesc)
continue;
ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
ATH10K_SKB_CB(txdesc)->htt.discard = true;
ath10k_txrx_tx_unref(htt, txdesc);
}
}
void ath10k_htt_tx_detach(struct ath10k_htt *htt)
{
ath10k_htt_tx_cleanup_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_htt *htt = ar->htt;
if (skb_cb->htt.is_conf) {
dev_kfree_skb_any(skb);
return;
}
if (skb_cb->is_aborted) {
skb_cb->htt.discard = true;
		/* If the skbuff is aborted we need to make sure the tx
		 * resources are freed. We can't simply run tx_unref() twice,
		 * because if the htt tx completion came in earlier we'd
		 * access already-freed memory. */
if (skb_cb->htt.refcount > 1)
skb_cb->htt.refcount = 1;
}
ath10k_txrx_tx_unref(htt, skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0;
int ret;
len += sizeof(cmd->hdr);
len += sizeof(cmd->ver_req);
skb = ath10k_htc_alloc_skb(len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
ATH10K_SKB_CB(skb)->htt.is_conf = true;
ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring *ring;
const int num_rx_ring = 1;
u16 flags;
u32 fw_idx;
int len;
int ret;
/*
* the HW expects the buffer to be an integral number of 4-byte
* "words"
*/
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
ring = &cmd->rx_setup.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
cmd->rx_setup.hdr.num_rings = 1;
/* FIXME: do we need all of this? */
flags = 0;
flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
flags |= HTT_RX_RING_FLAGS_PPDU_START;
flags |= HTT_RX_RING_FLAGS_PPDU_END;
flags |= HTT_RX_RING_FLAGS_MPDU_START;
flags |= HTT_RX_RING_FLAGS_MPDU_END;
flags |= HTT_RX_RING_FLAGS_MSDU_START;
flags |= HTT_RX_RING_FLAGS_MSDU_END;
flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
flags |= HTT_RX_RING_FLAGS_CTRL_RX;
flags |= HTT_RX_RING_FLAGS_MGMT_RX;
flags |= HTT_RX_RING_FLAGS_NULL_RX;
flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
ring->fw_idx_shadow_reg_paddr =
__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
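	/* desc_offset() above converts a byte offset within struct
	 * htt_rx_desc into the 4-byte word offset the firmware expects;
	 * e.g. a field at byte offset 0x40 is reported as word 0x10. */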
ATH10K_SKB_CB(skb)->htt.is_conf = true;
ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
int len = 0;
int msdu_id = -1;
int res;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
return res;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
txdesc = ath10k_htc_alloc_skb(len);
if (!txdesc) {
res = -ENOMEM;
goto err;
}
spin_lock_bh(&htt->tx_lock);
msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
if (msdu_id < 0) {
spin_unlock_bh(&htt->tx_lock);
res = msdu_id;
goto err;
}
htt->pending_tx[msdu_id] = txdesc;
spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
goto err;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
/* refcount is decremented by HTC and HTT completions until it reaches
* zero and is freed */
skb_cb = ATH10K_SKB_CB(txdesc);
skb_cb->htt.msdu_id = msdu_id;
skb_cb->htt.refcount = 2;
skb_cb->htt.msdu = msdu;
res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
return 0;
err:
ath10k_skb_unmap(dev, msdu);
if (txdesc)
dev_kfree_skb_any(txdesc);
if (msdu_id >= 0) {
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
}
ath10k_htt_tx_dec_pending(htt);
return res;
}
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
struct htt_cmd *cmd;
struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *txdesc = NULL;
struct sk_buff *txfrag = NULL;
u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
u8 tid;
int prefetch_len, desc_len, frag_len;
dma_addr_t frags_paddr;
int msdu_id = -1;
int res;
u8 flags0;
u16 flags1;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
return res;
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
frag_len = sizeof(*tx_frags) * 2;
txdesc = ath10k_htc_alloc_skb(desc_len);
if (!txdesc) {
res = -ENOMEM;
goto err;
}
txfrag = dev_alloc_skb(frag_len);
if (!txfrag) {
res = -ENOMEM;
goto err;
}
if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
ath10k_warn("htt alignment check failed. dropping packet.\n");
res = -EIO;
goto err;
}
spin_lock_bh(&htt->tx_lock);
msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
if (msdu_id < 0) {
spin_unlock_bh(&htt->tx_lock);
res = msdu_id;
goto err;
}
htt->pending_tx[msdu_id] = txdesc;
spin_unlock_bh(&htt->tx_lock);
res = ath10k_skb_map(dev, msdu);
if (res)
goto err;
	/* the tx fragment list must be terminated with a zero entry */
skb_put(txfrag, frag_len);
tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
tx_frags[0].len = __cpu_to_le32(msdu->len);
tx_frags[1].paddr = __cpu_to_le32(0);
tx_frags[1].len = __cpu_to_le32(0);
res = ath10k_skb_map(dev, txfrag);
if (res)
goto err;
ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
(unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
(unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
txfrag->data, frag_len);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
msdu->data, msdu->len);
skb_put(txdesc, desc_len);
cmd = (struct htt_cmd *)txdesc->data;
memset(cmd, 0, desc_len);
tid = ATH10K_SKB_CB(msdu)->htt.tid;
ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
flags0 = 0;
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
cmd->data_tx.flags0 = flags0;
cmd->data_tx.flags1 = __cpu_to_le16(flags1);
cmd->data_tx.len = __cpu_to_le16(msdu->len);
cmd->data_tx.id = __cpu_to_le16(msdu_id);
cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
/* refcount is decremented by HTC and HTT completions until it reaches
* zero and is freed */
skb_cb = ATH10K_SKB_CB(txdesc);
skb_cb->htt.msdu_id = msdu_id;
skb_cb->htt.refcount = 2;
skb_cb->htt.txfrag = txfrag;
skb_cb->htt.msdu = msdu;
res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
return 0;
err:
if (txfrag)
ath10k_skb_unmap(dev, txfrag);
if (txdesc)
dev_kfree_skb_any(txdesc);
if (txfrag)
dev_kfree_skb_any(txfrag);
if (msdu_id >= 0) {
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
}
ath10k_htt_tx_dec_pending(htt);
ath10k_skb_unmap(dev, msdu);
return res;
}

View File

@ -0,0 +1,304 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HW_H_
#define _HW_H_
#include "targaddrs.h"
/* Supported FW version */
#define SUPPORTED_FW_MAJOR 1
#define SUPPORTED_FW_MINOR 0
#define SUPPORTED_FW_RELEASE 0
#define SUPPORTED_FW_BUILD 629
/* QCA988X 1.0 definitions */
#define QCA988X_HW_1_0_VERSION 0x4000002c
#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* Known peculiarities:
 * - current FW doesn't support raw rx mode (last tested v599)
 * - current FW crash-dumps when raw tx mode is used (last tested v599)
 * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
 * - raw frames have an FCS, nwifi frames don't
 * - ethernet frames have the 802.11 header decapped and its parts (base hdr,
 *   cipher param, llc/snap) are each aligned to 4-byte boundaries */
enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
};
enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_DISABLED = 0,
ATH10K_MCAST2UCAST_ENABLED = 1,
};
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 0
#define TARGET_MAC_AGGR_DELIM 0
#define TARGET_AST_SKID_LIMIT 16
#define TARGET_NUM_PEERS 16
#define TARGET_NUM_OFFLOAD_PEERS 0
#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_NUM_PEER_KEYS 2
#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
#define TARGET_NUM_MCAST_GROUPS 0
#define TARGET_NUM_MCAST_TABLE_ELEMS 0
#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
#define TARGET_TX_DBG_LOG_SIZE 1024
#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
#define TARGET_VOW_CONFIG 0
#define TARGET_NUM_MSDU_DESC (1024 + 400)
#define TARGET_MAX_FRAG_ENTRIES 0
/* Number of Copy Engines supported */
#define CE_COUNT 8
/*
 * Total number of PCIe MSI interrupts requested for all interrupt sources.
 * The PCIe standard forces this to be a power of 2.
 * Some host OSes limit the number of MSI requests that can be granted
 * to 8, so for now we abide by this limit and avoid requesting more
 * than that.
 */
#define MSI_NUM_REQUEST_LOG2 3
#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2)
/*
 * Granted MSIs are assigned as follows:
 * firmware uses the first; the remaining MSIs, if any, are used by
 * the Copy Engines.
 * This mapping is known to both the Target firmware and Host software.
 * It may be changed as long as Host and Target are kept in sync.
 */
/* MSI for firmware (errors, etc.) */
#define MSI_ASSIGN_FW 0
/* MSIs for Copy Engines */
#define MSI_ASSIGN_CE_INITIAL 1
#define MSI_ASSIGN_CE_MAX 7
/* as of IP3.7.1 */
#define RTC_STATE_V_ON 3
#define RTC_STATE_COLD_RESET_MASK 0x00000400
#define RTC_STATE_V_LSB 0
#define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_ADDRESS 0x0000
#define PCIE_SOC_WAKE_V_MASK 0x00000001
#define PCIE_SOC_WAKE_ADDRESS 0x0004
#define PCIE_SOC_WAKE_RESET 0x00000000
#define SOC_GLOBAL_RESET_ADDRESS 0x0008
#define RTC_SOC_BASE_ADDRESS 0x00004000
#define RTC_WMAC_BASE_ADDRESS 0x00005000
#define MAC_COEX_BASE_ADDRESS 0x00006000
#define BT_COEX_BASE_ADDRESS 0x00007000
#define SOC_PCIE_BASE_ADDRESS 0x00008000
#define SOC_CORE_BASE_ADDRESS 0x00009000
#define WLAN_UART_BASE_ADDRESS 0x0000c000
#define WLAN_SI_BASE_ADDRESS 0x00010000
#define WLAN_GPIO_BASE_ADDRESS 0x00014000
#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
#define WLAN_MAC_BASE_ADDRESS 0x00020000
#define EFUSE_BASE_ADDRESS 0x00030000
#define FPGA_REG_BASE_ADDRESS 0x00039000
#define WLAN_UART2_BASE_ADDRESS 0x00054c00
#define CE_WRAPPER_BASE_ADDRESS 0x00057000
#define CE0_BASE_ADDRESS 0x00057400
#define CE1_BASE_ADDRESS 0x00057800
#define CE2_BASE_ADDRESS 0x00057c00
#define CE3_BASE_ADDRESS 0x00058000
#define CE4_BASE_ADDRESS 0x00058400
#define CE5_BASE_ADDRESS 0x00058800
#define CE6_BASE_ADDRESS 0x00058c00
#define CE7_BASE_ADDRESS 0x00059000
#define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
#define SOC_CPU_CLOCK_OFFSET 0x00000020
#define SOC_CPU_CLOCK_STANDARD_LSB 0
#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
#define SOC_CLOCK_CONTROL_OFFSET 0x00000028
#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
#define SOC_LPO_CAL_OFFSET 0x000000e0
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
#define CLOCK_GPIO_OFFSET 0xffffffff
#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define SI_CONFIG_OFFSET 0x00000000
#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define SI_CONFIG_I2C_LSB 16
#define SI_CONFIG_I2C_MASK 0x00010000
#define SI_CONFIG_POS_SAMPLE_LSB 7
#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
#define SI_CONFIG_INACTIVE_DATA_LSB 5
#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
#define SI_CONFIG_INACTIVE_CLK_LSB 4
#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
#define SI_CONFIG_DIVIDER_LSB 0
#define SI_CONFIG_DIVIDER_MASK 0x0000000f
#define SI_CS_OFFSET 0x00000004
#define SI_CS_DONE_ERR_MASK 0x00000400
#define SI_CS_DONE_INT_MASK 0x00000200
#define SI_CS_START_LSB 8
#define SI_CS_START_MASK 0x00000100
#define SI_CS_RX_CNT_LSB 4
#define SI_CS_RX_CNT_MASK 0x000000f0
#define SI_CS_TX_CNT_LSB 0
#define SI_CS_TX_CNT_MASK 0x0000000f
#define SI_TX_DATA0_OFFSET 0x00000008
#define SI_TX_DATA1_OFFSET 0x0000000c
#define SI_RX_DATA0_OFFSET 0x00000010
#define SI_RX_DATA1_OFFSET 0x00000014
#define CORE_CTRL_CPU_INTR_MASK 0x00002000
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CLR_ADDRESS 0x0014
#define SCRATCH_3_ADDRESS 0x0030
/* Firmware indications to the Host via SCRATCH_3 register. */
#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
#define FW_IND_EVENT_PENDING 1
#define FW_IND_INITIALIZED 2
/* HOST_REG interrupt from firmware */
#define PCIE_INTR_FIRMWARE_MASK 0x00000400
#define PCIE_INTR_CE_MASK_ALL 0x0007f800
#define DRAM_BASE_ADDRESS 0x00400000
#define MISSING 0
#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET
#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET
#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK
#define RESET_CONTROL_MBOX_RST_MASK MISSING
#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK
#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
#define LOCAL_SCRATCH_OFFSET 0x18
#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET
#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET
#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS
#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS
#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS
#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS
#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB
#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK
#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB
#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK
#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
#define MBOX_BASE_ADDRESS MISSING
#define INT_STATUS_ENABLE_ERROR_LSB MISSING
#define INT_STATUS_ENABLE_ERROR_MASK MISSING
#define INT_STATUS_ENABLE_CPU_LSB MISSING
#define INT_STATUS_ENABLE_CPU_MASK MISSING
#define INT_STATUS_ENABLE_COUNTER_LSB MISSING
#define INT_STATUS_ENABLE_COUNTER_MASK MISSING
#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
#define INT_STATUS_ENABLE_ADDRESS MISSING
#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
#define HOST_INT_STATUS_ADDRESS MISSING
#define CPU_INT_STATUS_ADDRESS MISSING
#define ERROR_INT_STATUS_ADDRESS MISSING
#define ERROR_INT_STATUS_WAKEUP_MASK MISSING
#define ERROR_INT_STATUS_WAKEUP_LSB MISSING
#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
#define COUNT_DEC_ADDRESS MISSING
#define HOST_INT_STATUS_CPU_MASK MISSING
#define HOST_INT_STATUS_CPU_LSB MISSING
#define HOST_INT_STATUS_ERROR_MASK MISSING
#define HOST_INT_STATUS_ERROR_LSB MISSING
#define HOST_INT_STATUS_COUNTER_MASK MISSING
#define HOST_INT_STATUS_COUNTER_LSB MISSING
#define RX_LOOKAHEAD_VALID_ADDRESS MISSING
#define WINDOW_DATA_ADDRESS MISSING
#define WINDOW_READ_ADDR_ADDRESS MISSING
#define WINDOW_WRITE_ADDR_ADDRESS MISSING
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
#endif /* _HW_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _MAC_H_
#define _MAC_H_
#include <net/mac80211.h>
#include "core.h"
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
};
struct ath10k *ath10k_mac_create(void);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
void ath10k_mac_unregister(struct ath10k *ar);
struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
return (struct ath10k_vif *)vif->drv_priv;
}
static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
if (arvif->tx_seq_no == 0)
arvif->tx_seq_no = 0x1000;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
arvif->tx_seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
}
}
#endif /* _MAC_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,355 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _PCI_H_
#define _PCI_H_
#include <linux/interrupt.h>
#include "hw.h"
#include "ce.h"
/* FW dump area */
#define REG_DUMP_COUNT_QCA988X 60
/*
* maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
*/
#define DIAG_TRANSFER_LIMIT 2048
/*
* maximum number of bytes that can be
* handled atomically by DiagRead/DiagWrite
*/
#define DIAG_TRANSFER_LIMIT 2048
struct bmi_xfer {
struct completion done;
bool wait_for_resp;
u32 resp_len;
};
struct ath10k_pci_compl {
struct list_head list;
int send_or_recv;
struct ce_state *ce_state;
struct hif_ce_pipe_info *pipe_info;
void *transfer_context;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
};
/* compl_state.send_or_recv */
#define HIF_CE_COMPLETE_FREE 0
#define HIF_CE_COMPLETE_SEND 1
#define HIF_CE_COMPLETE_RECV 2
/*
* PCI-specific Target state
*
* NOTE: Structure is shared between Host software and Target firmware!
*
* Much of this may be of interest to the Host so
* HOST_INTEREST->hi_interconnect_state points here
* (and all members are 32-bit quantities in order to
* facilitate Host access). In particular, Host software is
* required to initialize pipe_cfg_addr and svc_to_pipe_map.
*/
struct pcie_state {
/* Pipe configuration Target address */
/* NB: ce_pipe_config[CE_COUNT] */
u32 pipe_cfg_addr;
/* Service to pipe map Target address */
/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
u32 svc_to_pipe_map;
/* number of MSI interrupts requested */
u32 msi_requested;
/* number of MSI interrupts granted */
u32 msi_granted;
/* Message Signalled Interrupt address */
u32 msi_addr;
/* Base data */
u32 msi_data;
/*
* Data for firmware interrupt;
* MSI data for other interrupts are
* in various SoC registers
*/
u32 msi_fw_intr_data;
/* PCIE_PWR_METHOD_* */
u32 power_mgmt_method;
/* PCIE_CONFIG_FLAG_* */
u32 config_flags;
};
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0
/*
* Configuration information for a Copy Engine pipe.
* Passed from Host to Target during startup (one per CE).
*
* NOTE: Structure is shared between Host software and Target firmware!
*/
struct ce_pipe_config {
u32 pipenum;
u32 pipedir;
u32 nentries;
u32 nbytes_max;
u32 flags;
u32 reserved;
};
/*
* Directions for interconnect pipe configuration.
* These definitions may be used during configuration and are shared
* between Host and Target.
*
* Pipe Directions are relative to the Host, so PIPEDIR_IN means
* "coming IN over air through Target to Host" as with a WiFi Rx operation.
* Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
* as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
* Target since things that are "PIPEDIR_OUT" are coming IN to the Target
* over the interconnect.
*/
#define PIPEDIR_NONE 0
#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT 3 /* bidirectional */
/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
u32 service_id;
u32 pipedir;
u32 pipenum;
};
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
struct hif_ce_pipe_info {
/* Handle of underlying Copy Engine */
struct ce_state *ce_hdl;
/* Our pipe number; facilitiates use of pipe_info ptrs. */
u8 pipe_num;
/* Convenience back pointer to hif_ce_state. */
struct ath10k *hif_ce_state;
size_t buf_sz;
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
/* List of free CE completion slots */
struct list_head compl_free;
/* Limit the number of outstanding send requests. */
int num_sends_allowed;
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
struct ath10k_pci {
struct pci_dev *pdev;
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
int cacheline_sz;
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
/*
* Number of MSI interrupts granted, 0 --> using legacy PCI line
* interrupts.
*/
int num_msi_intrs;
struct tasklet_struct intr_tq;
struct tasklet_struct msi_fw_err;
/* Number of Copy Engines supported */
unsigned int ce_count;
int started;
atomic_t keep_awake_count;
bool verified_awake;
/* List of CE completions to be processed */
struct list_head compl_process;
/* protects compl_processing and compl_process */
spinlock_t compl_lock;
bool compl_processing;
struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
/* Target address used to signal a pending firmware event */
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
struct ce_state *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
struct ce_state *ce_id_to_state[CE_COUNT_MAX];
/* makes sure that dummy reads are atomic */
spinlock_t hw_v1_workaround_lock;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
return ar->hif.priv;
}
static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
{
return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
{
iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
#define BAR_NUM 0
#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4
/*
* TODO: Should be a function call specific to each Target-type.
* This convoluted macro converts from Target CPU Virtual Address Space to CE
* Address Space. As part of this process, we conservatively fetch the current
* PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
* for this device; but that's not guaranteed.
*/
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
0x100000 | ((addr) & 0xfffff))
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/*
* This API allows the Host to access Target registers directly
* and relatively efficiently over PCIe.
* This allows the Host to avoid extra overhead associated with
* sending a message to firmware and waiting for a response message
* from firmware, as is done on other interconnects.
*
* Yet there is some complexity with direct accesses because the
* Target's power state is not known a priori. The Host must issue
* special PCIe reads/writes in order to explicitly wake the Target
* and to verify that it is awake and will remain awake.
*
* Usage:
*
* Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
* These calls must be bracketed by ath10k_pci_wake and
* ath10k_pci_sleep. A single BEGIN/END pair is adequate for
* multiple READ/WRITE operations.
*
* Use ath10k_pci_wake to put the Target in a state in
* which it is legal for the Host to directly access it. This
* may involve waking the Target from a low power state, which
* may take up to 2Ms!
*
* Use ath10k_pci_sleep to tell the Target that as far as
* this code path is concerned, it no longer needs to remain
* directly accessible. BEGIN/END is under a reference counter;
* multiple code paths may issue BEGIN/END on a single targid.
*/
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *addr = ar_pci->mem;
if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
unsigned long irq_flags;
spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
ioread32(addr+offset+4); /* 3rd read prior to write */
ioread32(addr+offset+4); /* 2nd read prior to write */
ioread32(addr+offset+4); /* 1st read prior to write */
iowrite32(value, addr+offset);
spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
irq_flags);
} else {
iowrite32(value, addr+offset);
}
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + offset);
}
extern unsigned int ath10k_target_ps;
void ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
static inline void ath10k_pci_wake(struct ath10k *ar)
{
if (ath10k_target_ps)
ath10k_do_pci_wake(ar);
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
{
if (ath10k_target_ps)
ath10k_do_pci_sleep(ar);
}
#endif /* _PCI_H_ */

View File

@ -0,0 +1,990 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _RX_DESC_H_
#define _RX_DESC_H_
enum rx_attention_flags {
RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0,
RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1,
RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2,
RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3,
RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4,
RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5,
RX_ATTENTION_FLAGS_NON_QOS = 1 << 6,
RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7,
RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8,
RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9,
RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10,
RX_ATTENTION_FLAGS_EOSP = 1 << 11,
RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12,
RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13,
RX_ATTENTION_FLAGS_ORDER = 1 << 14,
RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15,
RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16,
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17,
RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18,
RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19,
RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20,
RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21,
RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22,
RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23,
RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24,
RX_ATTENTION_FLAGS_DIRECTED = 1 << 25,
RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26,
RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27,
RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28,
RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29,
RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30,
RX_ATTENTION_FLAGS_MSDU_DONE = 1 << 31,
};
struct rx_attention {
__le32 flags; /* %RX_ATTENTION_FLAGS_ */
} __packed;
/*
* first_mpdu
* Indicates the first MSDU of the PPDU. If both first_mpdu
* and last_mpdu are set in the MSDU then this is a not an
* A-MPDU frame but a stand alone MPDU. Interior MPDU in an
* A-MPDU shall have both first_mpdu and last_mpdu bits set to
* 0. The PPDU start status will only be valid when this bit
* is set.
*
* last_mpdu
* Indicates the last MSDU of the last MPDU of the PPDU. The
* PPDU end status will only be valid when this bit is set.
*
* mcast_bcast
* Multicast / broadcast indicator. Only set when the MAC
* address 1 bit 0 is set indicating mcast/bcast and the BSSID
* matches one of the 4 BSSID registers. Only set when
* first_msdu is set.
*
* peer_idx_invalid
* Indicates no matching entries within the the max search
* count. Only set when first_msdu is set.
*
* peer_idx_timeout
* Indicates an unsuccessful search for the peer index due to
* timeout. Only set when first_msdu is set.
*
* power_mgmt
* Power management bit set in the 802.11 header. Only set
* when first_msdu is set.
*
* non_qos
* Set if packet is not a non-QoS data frame. Only set when
* first_msdu is set.
*
* null_data
* Set if frame type indicates either null data or QoS null
* data format. Only set when first_msdu is set.
*
* mgmt_type
* Set if packet is a management packet. Only set when
* first_msdu is set.
*
* ctrl_type
* Set if packet is a control packet. Only set when first_msdu
* is set.
*
* more_data
* Set if more bit in frame control is set. Only set when
* first_msdu is set.
*
* eosp
* Set if the EOSP (end of service period) bit in the QoS
* control field is set. Only set when first_msdu is set.
*
* u_apsd_trigger
* Set if packet is U-APSD trigger. Key table will have bits
* per TID to indicate U-APSD trigger.
*
* fragment
* Indicates that this is an 802.11 fragment frame. This is
* set when either the more_frag bit is set in the frame
* control or the fragment number is not zero. Only set when
* first_msdu is set.
*
* order
* Set if the order bit in the frame control is set. Only set
* when first_msdu is set.
*
* classification
* Indicates that this status has a corresponding MSDU that
* requires FW processing. The OLE will have classification
* ring mask registers which will indicate the ring(s) for
* packets and descriptors which need FW attention.
*
* overflow_err
* PCU Receive FIFO does not have enough space to store the
* full receive packet. Enough space is reserved in the
* receive FIFO for the status is written. This MPDU remaining
* packets in the PPDU will be filtered and no Ack response
* will be transmitted.
*
* msdu_length_err
* Indicates that the MSDU length from the 802.3 encapsulated
* length field extends beyond the MPDU boundary.
*
* tcp_udp_chksum_fail
* Indicates that the computed checksum (tcp_udp_chksum) did
* not match the checksum in the TCP/UDP header.
*
* ip_chksum_fail
* Indicates that the computed checksum did not match the
* checksum in the IP header.
*
* sa_idx_invalid
* Indicates no matching entry was found in the address search
* table for the source MAC address.
*
* da_idx_invalid
* Indicates no matching entry was found in the address search
* table for the destination MAC address.
*
* sa_idx_timeout
* Indicates an unsuccessful search for the source MAC address
* due to the expiring of the search timer.
*
* da_idx_timeout
* Indicates an unsuccessful search for the destination MAC
* address due to the expiring of the search timer.
*
* encrypt_required
* Indicates that this data type frame is not encrypted even if
* the policy for this MPDU requires encryption as indicated in
* the peer table key type.
*
* directed
* MPDU is a directed packet which means that the RA matched
* our STA addresses. In proxySTA it means that the TA matched
* an entry in our address search table with the corresponding
* 'no_ack' bit is the address search entry cleared.
*
* buffer_fragment
* Indicates that at least one of the rx buffers has been
* fragmented. If set the FW should look at the rx_frag_info
* descriptor described below.
*
* mpdu_length_err
* Indicates that the MPDU was pre-maturely terminated
* resulting in a truncated MPDU. Don't trust the MPDU length
* field.
*
* tkip_mic_err
* Indicates that the MPDU Michael integrity check failed
*
* decrypt_err
* Indicates that the MPDU decrypt integrity check failed
*
* fcs_err
* Indicates that the MPDU FCS check failed
*
* msdu_done
* If set indicates that the RX packet data, RX header data, RX
* PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
* start/end descriptors and RX Attention descriptor are all
* valid. This bit must be in the last octet of the
* descriptor.
*/
struct rx_frag_info {
u8 ring0_more_count;
u8 ring1_more_count;
u8 ring2_more_count;
u8 ring3_more_count;
} __packed;
/*
* ring0_more_count
* Indicates the number of more buffers associated with RX DMA
* ring 0. Field is filled in by the RX_DMA.
*
* ring1_more_count
* Indicates the number of more buffers associated with RX DMA
* ring 1. Field is filled in by the RX_DMA.
*
* ring2_more_count
* Indicates the number of more buffers associated with RX DMA
* ring 2. Field is filled in by the RX_DMA.
*
* ring3_more_count
* Indicates the number of more buffers associated with RX DMA
* ring 3. Field is filled in by the RX_DMA.
*/
enum htt_rx_mpdu_encrypt_type {
HTT_RX_MPDU_ENCRYPT_WEP40 = 0,
HTT_RX_MPDU_ENCRYPT_WEP104 = 1,
HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
HTT_RX_MPDU_ENCRYPT_WEP128 = 3,
HTT_RX_MPDU_ENCRYPT_TKIP_WPA = 4,
HTT_RX_MPDU_ENCRYPT_WAPI = 5,
HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
HTT_RX_MPDU_ENCRYPT_NONE = 7,
};
#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
#define RX_MPDU_START_INFO0_PEER_IDX_LSB 0
#define RX_MPDU_START_INFO0_SEQ_NUM_MASK 0x0fff0000
#define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16
#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28
#define RX_MPDU_START_INFO0_FROM_DS (1 << 11)
#define RX_MPDU_START_INFO0_TO_DS (1 << 12)
#define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13)
#define RX_MPDU_START_INFO0_RETRY (1 << 14)
#define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15)
#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
#define RX_MPDU_START_INFO1_TID_LSB 28
#define RX_MPDU_START_INFO1_DIRECTED (1 << 16)
struct rx_mpdu_start {
__le32 info0;
union {
struct {
__le32 pn31_0;
__le32 info1; /* %RX_MPDU_START_INFO1_ */
} __packed;
struct {
u8 pn[6];
} __packed;
} __packed;
} __packed;
/*
* peer_idx
* The index of the address search table which associated with
* the peer table entry corresponding to this MPDU. Only valid
* when first_msdu is set.
*
* fr_ds
* Set if the from DS bit is set in the frame control. Only
* valid when first_msdu is set.
*
* to_ds
* Set if the to DS bit is set in the frame control. Only
* valid when first_msdu is set.
*
* encrypted
* Protected bit from the frame control. Only valid when
* first_msdu is set.
*
* retry
* Retry bit from the frame control. Only valid when
* first_msdu is set.
*
* txbf_h_info
* The MPDU data will contain H information. Primarily used
* for debug.
*
* seq_num
* The sequence number from the 802.11 header. Only valid when
* first_msdu is set.
*
* encrypt_type
* Indicates type of decrypt cipher used (as defined in the
* peer table)
* 0: WEP40
* 1: WEP104
* 2: TKIP without MIC
* 3: WEP128
* 4: TKIP (WPA)
* 5: WAPI
* 6: AES-CCM (WPA2)
* 7: No cipher
* Only valid when first_msdu_is set
*
* pn_31_0
* Bits [31:0] of the PN number extracted from the IV field
* WEP: IV = {key_id_octet, pn2, pn1, pn0}. Only pn[23:0] is
* valid.
* TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
* WEPSeed[1], pn1}. Only pn[47:0] is valid.
* AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
* pn0}. Only pn[47:0] is valid.
* WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
* pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
* The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
* pn[47:0] are valid.
* Only valid when first_msdu is set.
*
* pn_47_32
* Bits [47:32] of the PN number. See description for
* pn_31_0. The remaining PN fields are in the rx_msdu_end
* descriptor
*
* pn
* Use this field to access the pn without worrying about
* byte-order and bitmasking/bitshifting.
*
* directed
* See definition in RX attention descriptor
*
* reserved_2
* Reserved: HW should fill with zero. FW should ignore.
*
* tid
* The TID field in the QoS control field
*/
#define RX_MPDU_END_INFO0_RESERVED_0_MASK 0x00001fff
#define RX_MPDU_END_INFO0_RESERVED_0_LSB 0
#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16
#define RX_MPDU_END_INFO0_OVERFLOW_ERR (1 << 13)
#define RX_MPDU_END_INFO0_LAST_MPDU (1 << 14)
#define RX_MPDU_END_INFO0_POST_DELIM_ERR (1 << 15)
#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR (1 << 28)
#define RX_MPDU_END_INFO0_TKIP_MIC_ERR (1 << 29)
#define RX_MPDU_END_INFO0_DECRYPT_ERR (1 << 30)
#define RX_MPDU_END_INFO0_FCS_ERR (1 << 31)
struct rx_mpdu_end {
__le32 info0;
} __packed;
/*
* reserved_0
* Reserved
*
* overflow_err
* PCU Receive FIFO does not have enough space to store the
* full receive packet. Enough space is reserved in the
* receive FIFO for the status is written. This MPDU remaining
* packets in the PPDU will be filtered and no Ack response
* will be transmitted.
*
* last_mpdu
* Indicates that this is the last MPDU of a PPDU.
*
* post_delim_err
* Indicates that a delimiter FCS error occurred after this
* MPDU before the next MPDU. Only valid when last_msdu is
* set.
*
* post_delim_cnt
* Count of the delimiters after this MPDU. This requires the
* last MPDU to be held until all the EOF descriptors have been
* received. This may be inefficient in the future when
* ML-MIMO is used. Only valid when last_mpdu is set.
*
* mpdu_length_err
* See definition in RX attention descriptor
*
* tkip_mic_err
* See definition in RX attention descriptor
*
* decrypt_err
* See definition in RX attention descriptor
*
* fcs_err
* See definition in RX attention descriptor
*/
#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK 0x00003fff
#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB 0
#define RX_MSDU_START_INFO0_IP_OFFSET_MASK 0x000fc000
#define RX_MSDU_START_INFO0_IP_OFFSET_LSB 14
#define RX_MSDU_START_INFO0_RING_MASK_MASK 0x00f00000
#define RX_MSDU_START_INFO0_RING_MASK_LSB 20
#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB 24
#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK 0x000000ff
#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB 0
#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK 0x00000300
#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8
#define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000
#define RX_MSDU_START_INFO1_SA_IDX_LSB 16
#define RX_MSDU_START_INFO1_IPV4_PROTO (1 << 10)
#define RX_MSDU_START_INFO1_IPV6_PROTO (1 << 11)
#define RX_MSDU_START_INFO1_TCP_PROTO (1 << 12)
#define RX_MSDU_START_INFO1_UDP_PROTO (1 << 13)
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
enum rx_msdu_decap_format {
RX_MSDU_DECAP_RAW = 0,
RX_MSDU_DECAP_NATIVE_WIFI = 1,
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
struct rx_msdu_start {
__le32 info0; /* %RX_MSDU_START_INFO0_ */
__le32 flow_id_crc;
__le32 info1; /* %RX_MSDU_START_INFO1_ */
} __packed;
/*
* msdu_length
* MSDU length in bytes after decapsulation. This field is
* still valid for MPDU frames without A-MSDU. It still
* represents MSDU length after decapsulation
*
* ip_offset
* Indicates the IP offset in bytes from the start of the
* packet after decapsulation. Only valid if ipv4_proto or
* ipv6_proto is set.
*
* ring_mask
* Indicates the destination RX rings for this MSDU.
*
* tcp_udp_offset
* Indicates the offset in bytes to the start of TCP or UDP
* header from the start of the IP header after decapsulation.
* Only valid if tcp_prot or udp_prot is set. The value 0
* indicates that the offset is longer than 127 bytes.
*
* reserved_0c
* Reserved: HW should fill with zero. FW should ignore.
*
* flow_id_crc
* The flow_id_crc runs CRC32 on the following information:
* IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
* protocol[7:0]}.
* IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
* next_header[7:0]}
* UDP case: sort_port[15:0], dest_port[15:0]
* TCP case: sort_port[15:0], dest_port[15:0],
* {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
* {16'b0, urgent_ptr[15:0]}, all options except 32-bit
* timestamp.
*
* msdu_number
* Indicates the MSDU number within a MPDU. This value is
* reset to zero at the start of each MPDU. If the number of
* MSDU exceeds 255 this number will wrap using modulo 256.
*
* decap_format
* Indicates the format after decapsulation:
* 0: RAW: No decapsulation
* 1: Native WiFi
* 2: Ethernet 2 (DIX)
* 3: 802.3 (SNAP/LLC)
*
* ipv4_proto
* Set if L2 layer indicates IPv4 protocol.
*
* ipv6_proto
* Set if L2 layer indicates IPv6 protocol.
*
* tcp_proto
* Set if the ipv4_proto or ipv6_proto are set and the IP
* protocol indicates TCP.
*
* udp_proto
* Set if the ipv4_proto or ipv6_proto are set and the IP
* protocol indicates UDP.
*
* ip_frag
* Indicates that either the IP More frag bit is set or IP frag
* number is non-zero. If set indicates that this is a
* fragmented IP packet.
*
* tcp_only_ack
* Set if only the TCP Ack bit is set in the TCP flags and if
* the TCP payload is 0.
*
* sa_idx
* The offset in the address table which matches the MAC source
* address.
*
* reserved_2b
* Reserved: HW should fill with zero. FW should ignore.
*/
#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0
#define RX_MSDU_END_INFO0_FIRST_MSDU (1 << 14)
#define RX_MSDU_END_INFO0_LAST_MSDU (1 << 15)
#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30)
#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31)
struct rx_msdu_end {
__le16 ip_hdr_cksum;
__le16 tcp_hdr_cksum;
u8 key_id_octet;
u8 classification_filter;
u8 wapi_pn[10];
__le32 info0;
} __packed;
/*
*ip_hdr_chksum
* This can include the IP header checksum or the pseudo header
* checksum used by TCP/UDP checksum.
*
*tcp_udp_chksum
* The value of the computed TCP/UDP checksum. A mode bit
* selects whether this checksum is the full checksum or the
* partial checksum which does not include the pseudo header.
*
*key_id_octet
* The key ID octet from the IV. Only valid when first_msdu is
* set.
*
*classification_filter
* Indicates the number classification filter rule
*
*ext_wapi_pn_63_48
* Extension PN (packet number) which is only used by WAPI.
* This corresponds to WAPI PN bits [63:48] (pn6 and pn7). The
* WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
* descriptor.
*
*ext_wapi_pn_95_64
* Extension PN (packet number) which is only used by WAPI.
* This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
* pn11).
*
*ext_wapi_pn_127_96
* Extension PN (packet number) which is only used by WAPI.
* This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
* pn15).
*
*reported_mpdu_length
* MPDU length before decapsulation. Only valid when
* first_msdu is set. This field is taken directly from the
* length field of the A-MPDU delimiter or the preamble length
* field for non-A-MPDU frames.
*
*first_msdu
* Indicates the first MSDU of A-MSDU. If both first_msdu and
* last_msdu are set in the MSDU then this is a non-aggregated
* MSDU frame: normal MPDU. Interior MSDU in an A-MSDU shall
* have both first_mpdu and last_mpdu bits set to 0.
*
*last_msdu
* Indicates the last MSDU of the A-MSDU. MPDU end status is
* only valid when last_msdu is set.
*
*reserved_3a
* Reserved: HW should fill with zero. FW should ignore.
*
*pre_delim_err
* Indicates that the first delimiter had a FCS failure. Only
* valid when first_mpdu and first_msdu are set.
*
*reserved_3b
* Reserved: HW should fill with zero. FW should ignore.
*/
#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1
#define RX_PPDU_START_SIG_RATE_OFDM_48 0
#define RX_PPDU_START_SIG_RATE_OFDM_24 1
#define RX_PPDU_START_SIG_RATE_OFDM_12 2
#define RX_PPDU_START_SIG_RATE_OFDM_6 3
#define RX_PPDU_START_SIG_RATE_OFDM_54 4
#define RX_PPDU_START_SIG_RATE_OFDM_36 5
#define RX_PPDU_START_SIG_RATE_OFDM_18 6
#define RX_PPDU_START_SIG_RATE_OFDM_9 7
#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0
#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2
#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3
#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4
#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6
#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
#define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C
#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D
#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0)
#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f
#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0
#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK 0x0001ffe0
#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB 5
#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK 0x00fc0000
#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18
#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24
#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4)
#define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17)
#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0
#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0
#define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24)
#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0
#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
#define RX_PPDU_START_INFO5_SERVICE_LSB 0
struct rx_ppdu_start {
struct {
u8 pri20_mhz;
u8 ext20_mhz;
u8 ext40_mhz;
u8 ext80_mhz;
} rssi_chains[4];
u8 rssi_comb;
__le16 rsvd0;
u8 info0; /* %RX_PPDU_START_INFO0_ */
__le32 info1; /* %RX_PPDU_START_INFO1_ */
__le32 info2; /* %RX_PPDU_START_INFO2_ */
__le32 info3; /* %RX_PPDU_START_INFO3_ */
__le32 info4; /* %RX_PPDU_START_INFO4_ */
__le32 info5; /* %RX_PPDU_START_INFO5_ */
} __packed;
/*
* rssi_chain0_pri20
* RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain0_sec20
* RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain0_sec40
* RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain0_sec80
* RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain1_pri20
* RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain1_sec20
* RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain1_sec40
* RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain1_sec80
* RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain2_pri20
* RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain2_sec20
* RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain2_sec40
* RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain2_sec80
* RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain3_pri20
* RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain3_sec20
* RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain3_sec40
* RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_chain3_sec80
* RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
* Value of 0x80 indicates invalid.
*
* rssi_comb
* The combined RSSI of RX PPDU of all active chains and
* bandwidths. Value of 0x80 indicates invalid.
*
* reserved_4a
* Reserved: HW should fill with 0, FW should ignore.
*
* is_greenfield
* Do we really support this?
*
* reserved_4b
* Reserved: HW should fill with 0, FW should ignore.
*
* l_sig_rate
* If l_sig_rate_select is 0:
* 0x8: OFDM 48 Mbps
* 0x9: OFDM 24 Mbps
* 0xA: OFDM 12 Mbps
* 0xB: OFDM 6 Mbps
* 0xC: OFDM 54 Mbps
* 0xD: OFDM 36 Mbps
* 0xE: OFDM 18 Mbps
* 0xF: OFDM 9 Mbps
* If l_sig_rate_select is 1:
* 0x8: CCK 11 Mbps long preamble
* 0x9: CCK 5.5 Mbps long preamble
* 0xA: CCK 2 Mbps long preamble
* 0xB: CCK 1 Mbps long preamble
* 0xC: CCK 11 Mbps short preamble
* 0xD: CCK 5.5 Mbps short preamble
* 0xE: CCK 2 Mbps short preamble
*
* l_sig_rate_select
* Legacy signal rate select. If set then l_sig_rate indicates
* CCK rates. If clear then l_sig_rate indicates OFDM rates.
*
* l_sig_length
* Length of legacy frame in octets.
*
* l_sig_parity
* Odd parity over l_sig_rate and l_sig_length
*
* l_sig_tail
* Tail bits for Viterbi decoder
*
* preamble_type
* Indicates the type of preamble ahead:
* 0x4: Legacy (OFDM/CCK)
* 0x8: HT
* 0x9: HT with TxBF
* 0xC: VHT
* 0xD: VHT with TxBF
* 0x80 - 0xFF: Reserved for special baseband data types such
* as radar and spectral scan.
*
* ht_sig_vht_sig_a_1
* If preamble_type == 0x8 or 0x9
* HT-SIG (first 24 bits)
* If preamble_type == 0xC or 0xD
* VHT-SIG A (first 24 bits)
* Else
* Reserved
*
* reserved_6
* Reserved: HW should fill with 0, FW should ignore.
*
* ht_sig_vht_sig_a_2
* If preamble_type == 0x8 or 0x9
* HT-SIG (last 24 bits)
* If preamble_type == 0xC or 0xD
* VHT-SIG A (last 24 bits)
* Else
* Reserved
*
* txbf_h_info
* Indicates that the packet data carries H information which
* is used for TxBF debug.
*
* reserved_7
* Reserved: HW should fill with 0, FW should ignore.
*
* vht_sig_b
* WiFi 1.0 and WiFi 2.0 will likely have this field to be all
* 0s since the BB does not plan on decoding VHT SIG-B.
*
* reserved_8
* Reserved: HW should fill with 0, FW should ignore.
*
* service
* Service field from BB for OFDM, HT and VHT packets. CCK
* packets will have service field of 0.
*
* reserved_9
* Reserved: HW should fill with 0, FW should ignore.
*/
#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0)
#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1)
#define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2)
#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff
#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0
#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
struct rx_ppdu_end {
__le32 evm_p0;
__le32 evm_p1;
__le32 evm_p2;
__le32 evm_p3;
__le32 evm_p4;
__le32 evm_p5;
__le32 evm_p6;
__le32 evm_p7;
__le32 evm_p8;
__le32 evm_p9;
__le32 evm_p10;
__le32 evm_p11;
__le32 evm_p12;
__le32 evm_p13;
__le32 evm_p14;
__le32 evm_p15;
__le32 tsf_timestamp;
__le32 wb_timestamp;
u8 locationing_timestamp;
u8 phy_err_code;
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
__le32 info0; /* %RX_PPDU_END_INFO0_ */
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
/*
* evm_p0
* EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p1
* EVM for pilot 1. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p2
* EVM for pilot 2. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p3
* EVM for pilot 3. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p4
* EVM for pilot 4. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p5
* EVM for pilot 5. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p6
* EVM for pilot 6. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p7
* EVM for pilot 7. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p8
* EVM for pilot 8. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p9
* EVM for pilot 9. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p10
* EVM for pilot 10. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p11
* EVM for pilot 11. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p12
* EVM for pilot 12. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p13
* EVM for pilot 13. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p14
* EVM for pilot 14. Contain EVM for streams: 0, 1, 2 and 3.
*
* evm_p15
* EVM for pilot 15. Contain EVM for streams: 0, 1, 2 and 3.
*
* tsf_timestamp
* Receive TSF timestamp sampled on the rising edge of
* rx_clear. For PHY errors this may be the current TSF when
* phy_error is asserted if the rx_clear does not assert before
* the end of the PHY error.
*
* wb_timestamp
* WLAN/BT timestamp is a 1 usec resolution timestamp which
* does not get updated based on receive beacon like TSF. The
* same rules for capturing tsf_timestamp are used to capture
* the wb_timestamp.
*
* locationing_timestamp
* Timestamp used for locationing. This timestamp is used to
* indicate fractions of usec. For example if the MAC clock is
* running at 80 MHz, the timestamp will increment every 12.5
* nsec. The value starts at 0 and increments to 79 and
* returns to 0 and repeats. This information is valid for
* every PPDU. This information can be used in conjunction
* with wb_timestamp to capture large delta times.
*
* phy_err_code
* See the 1.10.8.1.2 for the list of the PHY error codes.
*
* phy_err
* Indicates a PHY error was detected for this PPDU.
*
* rx_location
* Indicates that location information was requested.
*
* txbf_h_info
* Indicates that the packet data carries H information which
* is used for TxBF debug.
*
* reserved_18
* Reserved: HW should fill with 0, FW should ignore.
*
* rx_antenna
* Receive antenna value
*
* tx_ht_vht_ack
* Indicates that a HT or VHT Ack/BA frame was transmitted in
* response to this receive packet.
*
* bb_captured_channel
* Indicates that the BB has captured a channel dump. FW can
* then read the channel dump memory. This may indicate that
* the channel was captured either based on PCU setting the
* capture_channel bit BB descriptor or FW setting the
* capture_channel mode bit.
*
* reserved_19
* Reserved: HW should fill with 0, FW should ignore.
*
* bb_length
* Indicates the number of bytes of baseband information for
* PPDUs where the BB descriptor preamble type is 0x80 to 0xFF
* which indicates that this is not a normal PPDU but rather
* contains baseband debug information.
*
* reserved_20
* Reserved: HW should fill with 0, FW should ignore.
*
* ppdu_done
* PPDU end status is only valid when ppdu_done bit is set.
* Every time HW sets this bit in memory FW/SW must clear this
* bit in memory. FW will initialize all the ppdu_done dword
* to 0.
*/
#define FW_RX_DESC_INFO0_DISCARD (1 << 0)
#define FW_RX_DESC_INFO0_FORWARD (1 << 1)
#define FW_RX_DESC_INFO0_INSPECT (1 << 5)
#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
#define FW_RX_DESC_INFO0_EXT_LSB 6
struct fw_rx_desc_base {
u8 info0;
} __packed;
#endif /* _RX_DESC_H_ */

View File

@ -0,0 +1,449 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __TARGADDRS_H__
#define __TARGADDRS_H__
/*
* xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
* host_interest structure. It must match the address of the _host_interest
* symbol (see linker script).
*
* Host Interest is shared between Host and Target in order to coordinate
* between the two, and is intended to remain constant (with additions only
* at the end) across software releases.
*
* All addresses are available here so that it's possible to
* write a single binary that works with all Target Types.
* May be used in assembler code as well as C.
*/
#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800
#define HOST_INTEREST_MAX_SIZE 0x200
/*
* These are items that the Host may need to access via BMI or via the
* Diagnostic Window. The position of items in this structure must remain
* constant across firmware revisions! Types for each item must be fixed
* size across target and host platforms. More items may be added at the end.
*/
struct host_interest {
/*
* Pointer to application-defined area, if any.
* Set by Target application during startup.
*/
u32 hi_app_host_interest; /* 0x00 */
/* Pointer to register dump area, valid after Target crash. */
u32 hi_failure_state; /* 0x04 */
/* Pointer to debug logging header */
u32 hi_dbglog_hdr; /* 0x08 */
u32 hi_unused0c; /* 0x0c */
/*
* General-purpose flag bits, similar to SOC_OPTION_* flags.
* Can be used by application rather than by OS.
*/
u32 hi_option_flag; /* 0x10 */
/*
* Boolean that determines whether or not to
* display messages on the serial port.
*/
u32 hi_serial_enable; /* 0x14 */
/* Start address of DataSet index, if any */
u32 hi_dset_list_head; /* 0x18 */
/* Override Target application start address */
u32 hi_app_start; /* 0x1c */
/* Clock and voltage tuning */
u32 hi_skip_clock_init; /* 0x20 */
u32 hi_core_clock_setting; /* 0x24 */
u32 hi_cpu_clock_setting; /* 0x28 */
u32 hi_system_sleep_setting; /* 0x2c */
u32 hi_xtal_control_setting; /* 0x30 */
u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
u32 hi_ref_voltage_trim_setting; /* 0x3c */
u32 hi_clock_info; /* 0x40 */
/* Host uses BE CPU or not */
u32 hi_be; /* 0x44 */
u32 hi_stack; /* normal stack */ /* 0x48 */
u32 hi_err_stack; /* error stack */ /* 0x4c */
u32 hi_desired_cpu_speed_hz; /* 0x50 */
/* Pointer to Board Data */
u32 hi_board_data; /* 0x54 */
/*
* Indication of Board Data state:
* 0: board data is not yet initialized.
* 1: board data is initialized; unknown size
* >1: number of bytes of initialized board data
*/
u32 hi_board_data_initialized; /* 0x58 */
u32 hi_dset_ram_index_table; /* 0x5c */
u32 hi_desired_baud_rate; /* 0x60 */
u32 hi_dbglog_config; /* 0x64 */
u32 hi_end_ram_reserve_sz; /* 0x68 */
u32 hi_mbox_io_block_sz; /* 0x6c */
u32 hi_num_bpatch_streams; /* 0x70 -- unused */
u32 hi_mbox_isr_yield_limit; /* 0x74 */
u32 hi_refclk_hz; /* 0x78 */
u32 hi_ext_clk_detected; /* 0x7c */
u32 hi_dbg_uart_txpin; /* 0x80 */
u32 hi_dbg_uart_rxpin; /* 0x84 */
u32 hi_hci_uart_baud; /* 0x88 */
u32 hi_hci_uart_pin_assignments; /* 0x8C */
u32 hi_hci_uart_baud_scale_val; /* 0x90 */
u32 hi_hci_uart_baud_step_val; /* 0x94 */
u32 hi_allocram_start; /* 0x98 */
u32 hi_allocram_sz; /* 0x9c */
u32 hi_hci_bridge_flags; /* 0xa0 */
u32 hi_hci_uart_support_pins; /* 0xa4 */
u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
/*
* 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
* [31:16]: wakeup timeout in ms
*/
/* Pointer to extended board Data */
u32 hi_board_ext_data; /* 0xac */
u32 hi_board_ext_data_config; /* 0xb0 */
/*
* Bit [0] : valid
* Bit[31:16: size
*/
/*
* hi_reset_flag is used to do some stuff when target reset.
* such as restore app_start after warm reset or
* preserve host Interest area, or preserve ROM data, literals etc.
*/
u32 hi_reset_flag; /* 0xb4 */
/* indicate hi_reset_flag is valid */
u32 hi_reset_flag_valid; /* 0xb8 */
u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
/* 0xbc - [31:0]: idle timeout in ms */
/* ACS flags */
u32 hi_acs_flags; /* 0xc0 */
u32 hi_console_flags; /* 0xc4 */
u32 hi_nvram_state; /* 0xc8 */
u32 hi_option_flag2; /* 0xcc */
/* If non-zero, override values sent to Host in WMI_READY event. */
u32 hi_sw_version_override; /* 0xd0 */
u32 hi_abi_version_override; /* 0xd4 */
/*
* Percentage of high priority RX traffic to total expected RX traffic
* applicable only to ar6004
*/
u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
/* test applications flags */
u32 hi_test_apps_related; /* 0xdc */
/* location of test script */
u32 hi_ota_testscript; /* 0xe0 */
/* location of CAL data */
u32 hi_cal_data; /* 0xe4 */
/* Number of packet log buffers */
u32 hi_pktlog_num_buffers; /* 0xe8 */
/* wow extension configuration */
u32 hi_wow_ext_config; /* 0xec */
u32 hi_pwr_save_flags; /* 0xf0 */
/* Spatial Multiplexing Power Save (SMPS) options */
u32 hi_smps_options; /* 0xf4 */
/* Interconnect-specific state */
u32 hi_interconnect_state; /* 0xf8 */
/* Coex configuration flags */
u32 hi_coex_config; /* 0xfc */
/* Early allocation support */
u32 hi_early_alloc; /* 0x100 */
/* FW swap field */
/*
* Bits of this 32bit word will be used to pass specific swap
* instruction to FW
*/
/*
* Bit 0 -- AP Nart descriptor no swap. When this bit is set
* FW will not swap TX descriptor. Meaning packets are formed
* on the target processor.
*/
/* Bit 1 - unused */
u32 hi_fw_swap; /* 0x104 */
} __packed;
#define HI_ITEM(item) offsetof(struct host_interest, item)
/* Bits defined in hi_option_flag */
/* Enable timer workaround */
#define HI_OPTION_TIMER_WAR 0x01
/* Limit BMI command credits */
#define HI_OPTION_BMI_CRED_LIMIT 0x02
/* Relay Dot11 hdr to/from host */
#define HI_OPTION_RELAY_DOT11_HDR 0x04
/* MAC addr method 0-locally administred 1-globally unique addrs */
#define HI_OPTION_MAC_ADDR_METHOD 0x08
/* Firmware Bridging */
#define HI_OPTION_FW_BRIDGE 0x10
/* Enable CPU profiling */
#define HI_OPTION_ENABLE_PROFILE 0x20
/* Disable debug logging */
#define HI_OPTION_DISABLE_DBGLOG 0x40
/* Skip Era Tracking */
#define HI_OPTION_SKIP_ERA_TRACKING 0x80
/* Disable PAPRD (debug) */
#define HI_OPTION_PAPRD_DISABLE 0x100
#define HI_OPTION_NUM_DEV_LSB 0x200
#define HI_OPTION_NUM_DEV_MSB 0x800
#define HI_OPTION_DEV_MODE_LSB 0x1000
#define HI_OPTION_DEV_MODE_MSB 0x8000000
/* Disable LowFreq Timer Stabilization */
#define HI_OPTION_NO_LFT_STBL 0x10000000
/* Skip regulatory scan */
#define HI_OPTION_SKIP_REG_SCAN 0x20000000
/*
* Do regulatory scan during init before
* sending WMI ready event to host
*/
#define HI_OPTION_INIT_REG_SCAN 0x40000000
/* REV6: Do not adjust memory map */
#define HI_OPTION_SKIP_MEMMAP 0x80000000
#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
/* 2 bits of hi_option_flag are used to represent 3 modes */
#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
/* 2 bits of hi_option flag are usedto represent 4 submodes */
#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
/* Num dev Mask */
#define HI_OPTION_NUM_DEV_MASK 0x7
#define HI_OPTION_NUM_DEV_SHIFT 0x9
/* firmware bridging */
#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
/*
Fw Mode/SubMode Mask
|-----------------------------------------------------------------------------|
| SUB | SUB | SUB | SUB | | | | |
|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |
|-----------------------------------------------------------------------------|
*/
#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_MASK 0x3
#define HI_OPTION_FW_MODE_SHIFT 0xC
#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
#define HI_OPTION_FW_SUBMODE_BITS 0x2
#define HI_OPTION_FW_SUBMODE_MASK 0x3
#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
/* hi_option_flag2 options */
#define HI_OPTION_OFFLOAD_AMSDU 0x01
#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
#define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill Enable Feature*/
#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
#define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */
#define HI_OPTION_RF_KILL_SHIFT 0x2
#define HI_OPTION_RF_KILL_MASK 0x1
/* hi_reset_flag */
/* preserve App Start address */
#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
/* preserve host interest */
#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
/* preserve ROM data */
#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04
#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
#define HI_RESET_FLAG_WARM_RESET 0x20
/* define hi_fw_swap bits */
#define HI_DESC_IN_FW_BIT 0x01
/* indicate the reset flag is valid */
#define HI_RESET_FLAG_IS_VALID 0x12345678
/* ACS is enabled */
#define HI_ACS_FLAGS_ENABLED (1 << 0)
/* Use physical WWAN device */
#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
/* Use test VAP */
#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
/*
* CONSOLE FLAGS
*
* Bit Range Meaning
* --------- --------------------------------
* 2..0 UART ID (0 = Default)
* 3 Baud Select (0 = 9600, 1 = 115200)
* 30..4 Reserved
* 31 Enable Console
*
*/
#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
#define HI_CONSOLE_FLAGS_UART_SHIFT 0
#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
/* SM power save options */
#define HI_SMPS_ALLOW_MASK (0x00000001)
#define HI_SMPS_MODE_MASK (0x00000002)
#define HI_SMPS_MODE_STATIC (0x00000000)
#define HI_SMPS_MODE_DYNAMIC (0x00000002)
#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004)
#define HI_SMPS_DATA_THRESH_MASK (0x000007f8)
#define HI_SMPS_DATA_THRESH_SHIFT (3)
#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800)
#define HI_SMPS_RSSI_THRESH_SHIFT (11)
#define HI_SMPS_LOWPWR_CM_MASK (0x00380000)
#define HI_SMPS_LOWPWR_CM_SHIFT (15)
#define HI_SMPS_HIPWR_CM_MASK (0x03c00000)
#define HI_SMPS_HIPWR_CM_SHIFT (19)
/*
* WOW Extension configuration
*
* Bit Range Meaning
* --------- --------------------------------
* 8..0 Size of each WOW pattern (max 511)
* 15..9 Number of patterns per list (max 127)
* 17..16 Number of lists (max 4)
* 30..18 Reserved
* 31 Enabled
*
* set values (except enable) to zeros for default settings
*/
#define HI_WOW_EXT_ENABLED_MASK (1 << 31)
#define HI_WOW_EXT_NUM_LIST_SHIFT 16
#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9
#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0
#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
HI_WOW_EXT_NUM_LIST_MASK) | \
(((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
HI_WOW_EXT_NUM_PATTERNS_MASK) | \
(((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
HI_WOW_EXT_PATTERN_SIZE_MASK))
#define HI_WOW_EXT_GET_NUM_LISTS(config) \
(((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
(((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
HI_WOW_EXT_NUM_PATTERNS_SHIFT)
#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
(((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
HI_WOW_EXT_PATTERN_SIZE_SHIFT)
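/*
 * Illustrative round trip, not part of the original header: pack two
 * pattern lists of 16 patterns, 64 bytes each, then unpack the fields
 * again. The function name is hypothetical.
 */
static inline void hi_wow_ext_selftest(void)
{
	u32 cfg = HI_WOW_EXT_MAKE_CONFIG(2, 16, 64) | HI_WOW_EXT_ENABLED_MASK;

	WARN_ON(HI_WOW_EXT_GET_NUM_LISTS(cfg) != 2);
	WARN_ON(HI_WOW_EXT_GET_NUM_PATTERNS(cfg) != 16);
	WARN_ON(HI_WOW_EXT_GET_PATTERN_SIZE(cfg) != 64);
}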
/*
* Early allocation configuration
* Supports RAM bank configuration before BMI is done; this eases memory
* allocation at a very early stage
* Bit Range Meaning
* --------- ----------------------------------
* [0:3] number of bank assigned to be IRAM
* [4:15] reserved
* [16:31] magic number
*
* Note:
* 1. target firmware checks the magic number and, on a match, considers
*    bits [0:15] valid and uses them to calculate the end of DRAM. Early
*    allocation is located in that area and may be reclaimed when
*    necessary
* 2. if no magic number is found, early allocation happens at the "_end"
*    symbol of ROM, which is located before the app-data and might NOT be
*    reclaimable. If this is adopted, the link script should keep this in
*    mind to avoid data corruption.
*/
#define HI_EARLY_ALLOC_MAGIC 0x6d8a
#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000
#define HI_EARLY_ALLOC_MAGIC_SHIFT 16
#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0
#define HI_EARLY_ALLOC_VALID() \
((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
(((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
>> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
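/*
 * Illustrative sketch, not part of the original header: build a
 * hi_early_alloc word with the magic in the top half and 3 IRAM banks
 * in the low nibble, then unpack it by hand. The function name is
 * hypothetical.
 */
static inline u32 hi_early_alloc_pack_example(void)
{
	u32 ea = (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) | 3;

	WARN_ON(((ea & HI_EARLY_ALLOC_MAGIC_MASK) >>
		 HI_EARLY_ALLOC_MAGIC_SHIFT) != HI_EARLY_ALLOC_MAGIC);

	return (ea & HI_EARLY_ALLOC_IRAM_BANKS_MASK) >>
	       HI_EARLY_ALLOC_IRAM_BANKS_SHIFT;	/* 3 */
}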
/* power save flag bit definitions */
#define HI_PWR_SAVE_LPL_ENABLED 0x1
/* b1-b3 reserved */
/* b4-b5: dev0 LPL type: 0 - none
 *                       1 - Reduce Pwr Search
 *                       2 - Reduce Pwr Listen
 */
/* b6-b7: dev1 LPL type and so on for max 8 devices */
#define HI_PWR_SAVE_LPL_DEV0_LSB 4
#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
/* power save related utility macros */
#define HI_LPL_ENABLED() \
((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
#define HI_DEV_LPL_TYPE_GET(_devix) \
(HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
(HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
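/*
 * Note, illustrative only: HI_DEV_LPL_TYPE_GET() returns the two-bit
 * field still in place, so shift it down to obtain the 0..2 type value
 * for a hypothetical device index 'devix':
 *
 *	type = HI_DEV_LPL_TYPE_GET(devix) >>
 *	       (HI_PWR_SAVE_LPL_DEV0_LSB + (devix) * 2);
 */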
#define HOST_INTEREST_SMPS_IS_ALLOWED() \
((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
/* Reserve 1024 bytes for extended board data */
#define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0
#endif /* __TARGADDRS_H__ */

View File

@ -0,0 +1,20 @@
/*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace.h"

View File

@ -0,0 +1,170 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#include <linux/tracepoint.h>
#define _TRACE_H_
/* create empty functions when tracing is disabled */
#if !defined(CONFIG_ATH10K_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(...)
#undef DEFINE_EVENT
#define DEFINE_EVENT(evt_class, name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_ATH10K_TRACING || __CHECKER__ */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath10k
#define ATH10K_MSG_MAX 200
DECLARE_EVENT_CLASS(ath10k_log_event,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf),
TP_STRUCT__entry(
__dynamic_array(char, msg, ATH10K_MSG_MAX)
),
TP_fast_assign(
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH10K_MSG_MAX,
vaf->fmt,
*vaf->va) >= ATH10K_MSG_MAX);
),
TP_printk("%s", __get_str(msg))
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
TP_PROTO(struct va_format *vaf),
TP_ARGS(vaf)
);
TRACE_EVENT(ath10k_log_dbg,
TP_PROTO(unsigned int level, struct va_format *vaf),
TP_ARGS(level, vaf),
TP_STRUCT__entry(
__field(unsigned int, level)
__dynamic_array(char, msg, ATH10K_MSG_MAX)
),
TP_fast_assign(
__entry->level = level;
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH10K_MSG_MAX,
vaf->fmt,
*vaf->va) >= ATH10K_MSG_MAX);
),
TP_printk("%s", __get_str(msg))
);
TRACE_EVENT(ath10k_log_dbg_dump,
TP_PROTO(const char *msg, const char *prefix,
const void *buf, size_t buf_len),
TP_ARGS(msg, prefix, buf, buf_len),
TP_STRUCT__entry(
__string(msg, msg)
__string(prefix, prefix)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(msg, msg);
__assign_str(prefix, prefix);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s/%s\n", __get_str(prefix), __get_str(msg)
)
);
TRACE_EVENT(ath10k_wmi_cmd,
TP_PROTO(int id, void *buf, size_t buf_len),
TP_ARGS(id, buf, buf_len),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"id %d len %zu",
__entry->id,
__entry->buf_len
)
);
TRACE_EVENT(ath10k_wmi_event,
TP_PROTO(int id, void *buf, size_t buf_len),
TP_ARGS(id, buf, buf_len),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"id %d len %zu",
__entry->id,
__entry->buf_len
)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
/* we don't want to use include/trace/events */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
/* This part must be outside protection */
#include <trace/define_trace.h>
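/*
 * Illustrative call sites, not part of this file: the tracepoints above
 * are invoked as trace_<name>() from driver code, e.g. with a
 * hypothetical skb-backed WMI buffer:
 *
 *	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
 *	trace_ath10k_wmi_event(event_id, skb->data, skb->len);
 */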

View File

@ -0,0 +1,417 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
return;
/* If the original wait_for_completion() timed out before
* {data,mgmt}_tx_completed() was called then we could complete
* offchan_tx_completed for a different skb. Prevent this by using
* offchan_tx_skb. */
spin_lock_bh(&ar->data_lock);
if (ar->offchan_tx_skb != skb) {
ath10k_warn("completed old offchannel frame\n");
goto out;
}
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
{
struct device *dev = htt->ar->dev;
struct ieee80211_tx_info *info;
struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
int ret;
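/* The descriptor can be unref'ed from more than one completion path;
 * only the caller dropping the last reference performs the teardown
 * below. */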
if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
return;
ATH10K_SKB_CB(txdesc)->htt.refcount--;
if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
return;
if (txfrag) {
ret = ath10k_skb_unmap(dev, txfrag);
if (ret)
ath10k_warn("txfrag unmap failed (%d)\n", ret);
dev_kfree_skb_any(txfrag);
}
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
if (ATH10K_SKB_CB(txdesc)->htt.discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
exit:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
dev_kfree_skb_any(txdesc);
}
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct sk_buff *txdesc;
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn("warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
}
txdesc = htt->pending_tx[tx_done->msdu_id];
ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
ath10k_txrx_tx_unref(htt, txdesc);
}
static const u8 rx_legacy_rate_idx[] = {
3, /* 0x00 - 11Mbps */
2, /* 0x01 - 5.5Mbps */
1, /* 0x02 - 2Mbps */
0, /* 0x03 - 1Mbps */
3, /* 0x04 - 11Mbps */
2, /* 0x05 - 5.5Mbps */
1, /* 0x06 - 2Mbps */
0, /* 0x07 - 1Mbps */
10, /* 0x08 - 48Mbps */
8, /* 0x09 - 24Mbps */
6, /* 0x0A - 12Mbps */
4, /* 0x0B - 6Mbps */
11, /* 0x0C - 54Mbps */
9, /* 0x0D - 36Mbps */
7, /* 0x0E - 18Mbps */
5, /* 0x0F - 9Mbps */
};
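/* Worked example: hw value 0x0C (54Mbps) maps to index 11 in the
 * registered rate table; in the 5GHz band the four CCK entries are not
 * registered, so process_rx_rates() below subtracts 4, yielding 7. */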
static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
enum ieee80211_band band,
struct ieee80211_rx_status *status)
{
u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
u8 info0 = info->rate.info0;
u32 info1 = info->rate.info1;
u32 info2 = info->rate.info2;
u8 preamble = 0;
/* Check if valid fields */
if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
return;
preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
switch (preamble) {
case HTT_RX_LEGACY:
cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
rate_idx = 0;
if (rate < 0x08 || rate > 0x0F)
break;
switch (band) {
case IEEE80211_BAND_2GHZ:
if (cck)
rate &= ~BIT(3);
rate_idx = rx_legacy_rate_idx[rate];
break;
case IEEE80211_BAND_5GHZ:
rate_idx = rx_legacy_rate_idx[rate];
/* We are using the same rate table that we
   registered with the HW - ath10k_rates[]. In
   case of 5GHz, skip the CCK rates; hence -4 here */
rate_idx -= 4;
break;
default:
break;
}
status->rate_idx = rate_idx;
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
/* HT-SIG - Table 20-11 in info1 and info2 */
mcs = info1 & 0x1F;
nss = mcs >> 3;
bw = (info1 >> 7) & 1;
sgi = (info2 >> 7) & 1;
status->rate_idx = mcs;
status->flag |= RX_FLAG_HT;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
if (bw)
status->flag |= RX_FLAG_40MHZ;
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2;
   TODO: check this */
mcs = (info2 >> 4) & 0x0F;
nss = (info1 >> 10) & 0x07;
bw = info1 & 3;
sgi = info2 & 1;
status->rate_idx = mcs;
status->vht_nss = nss;
if (sgi)
status->flag |= RX_FLAG_SHORT_GI;
switch (bw) {
/* 20MHZ */
case 0:
break;
/* 40MHZ */
case 1:
status->flag |= RX_FLAG_40MHZ;
break;
/* 80MHZ */
case 2:
status->flag |= RX_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
break;
default:
break;
}
}
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
{
struct ieee80211_rx_status *status;
struct ieee80211_channel *ch;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
status = IEEE80211_SKB_RXCB(info->skb);
memset(status, 0, sizeof(*status));
if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(
__le16_to_cpu(hdr->frame_control) &
~IEEE80211_FCTL_PROTECTED);
}
if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
status->flag |= RX_FLAG_MMIC_ERROR;
if (info->fcs_err)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
status->signal = info->signal;
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
spin_unlock_bh(&ar->data_lock);
if (!ch) {
ath10k_warn("no channel configured; ignoring frame!\n");
dev_kfree_skb_any(info->skb);
return;
}
process_rx_rates(ar, info, ch->band, status);
status->band = ch->band;
status->freq = ch->center_freq;
ath10k_dbg(ATH10K_DBG_DATA,
"rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
info->skb,
info->skb->len,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
status->flag & RX_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
status->freq,
status->band);
ieee80211_rx(ar->hw, info->skb);
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr)
{
struct ath10k_peer *peer;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
if (memcmp(peer->addr, addr, ETH_ALEN))
continue;
return peer;
}
return NULL;
}
static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
int peer_id)
{
struct ath10k_peer *peer;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list)
if (test_bit(peer_id, peer->peer_ids))
return peer;
return NULL;
}
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
const u8 *addr, bool expect_mapped)
{
int ret;
ret = wait_event_timeout(ar->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ar->data_lock);
mapped = !!ath10k_peer_find(ar, vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
mapped == expect_mapped;
}), 3*HZ);
if (ret <= 0)
return -ETIMEDOUT;
return 0;
}
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}
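/*
 * Illustrative usage, with hypothetical vdev_id and address: block for
 * up to 3 seconds until the firmware acknowledges the peer via an HTT
 * peer map event.
 *
 *	ret = ath10k_wait_for_peer_created(ar, vdev_id, sta->addr);
 *	if (ret == -ETIMEDOUT)
 *		goto err;	(firmware never delivered the map event)
 */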
void ath10k_peer_map_event(struct ath10k_htt *htt,
struct htt_peer_map_event *ev)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
if (!peer) {
peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer)
goto exit;
peer->vdev_id = ev->vdev_id;
memcpy(peer->addr, ev->addr, ETH_ALEN);
list_add(&peer->list, &ar->peers);
wake_up(&ar->peer_mapping_wq);
}
ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
set_bit(ev->peer_id, peer->peer_ids);
exit:
spin_unlock_bh(&ar->data_lock);
}
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct htt_peer_unmap_event *ev)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
ath10k_warn("unknown peer id %d\n", ev->peer_id);
goto exit;
}
ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
clear_bit(ev->peer_id, peer->peer_ids);
if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
list_del(&peer->list);
kfree(peer);
wake_up(&ar->peer_mapping_wq);
}
exit:
spin_unlock_bh(&ar->data_lock);
}

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _TXRX_H_
#define _TXRX_H_
#include "htt.h"
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
const u8 *addr);
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
const u8 *addr);
void ath10k_peer_map_event(struct ath10k_htt *htt,
struct htt_peer_map_event *ev);
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct htt_peer_unmap_event *ev);
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -60,6 +60,7 @@
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include "base.h"
#include "reg.h"
#include "debug.h"
@ -666,9 +667,46 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
return htype;
}
static struct ieee80211_rate *
ath5k_get_rate(const struct ieee80211_hw *hw,
const struct ieee80211_tx_info *info,
struct ath5k_buf *bf, int idx)
{
/*
* convert an ieee80211_tx_rate RC-table entry to
* the respective ieee80211_rate struct
*/
if (bf->rates[idx].idx < 0)
return NULL;
return &hw->wiphy->bands[info->band]->bitrates[bf->rates[idx].idx];
}
static u16
ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
const struct ieee80211_tx_info *info,
struct ath5k_buf *bf, int idx)
{
struct ieee80211_rate *rate;
u16 hw_rate;
u8 rc_flags;
rate = ath5k_get_rate(hw, info, bf, idx);
if (!rate)
return 0;
rc_flags = bf->rates[idx].flags;
hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
rate->hw_value_short : rate->hw_value;
return hw_rate;
}
static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
struct ath5k_txq *txq, int padsize)
struct ath5k_txq *txq, int padsize,
struct ieee80211_tx_control *control)
{
struct ath5k_desc *ds = bf->desc;
struct sk_buff *skb = bf->skb;
@ -688,7 +726,11 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
rate = ieee80211_get_tx_rate(ah->hw, info);
ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
ARRAY_SIZE(bf->rates));
rate = ath5k_get_rate(ah->hw, info, bf, 0);
if (!rate) {
ret = -EINVAL;
goto err_unmap;
@ -698,8 +740,8 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
flags |= AR5K_TXDESC_NOACK;
rc_flags = info->control.rates[0].flags;
hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
rate->hw_value_short : rate->hw_value;
hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0);
pktlen = skb->len;
@ -722,12 +764,13 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
info->control.vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
(ah->ah_txpower.txp_requested * 2),
hw_rate,
info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
bf->rates[0].count, keyidx, ah->ah_tx_ant, flags,
cts_rate, duration);
if (ret)
goto err_unmap;
@ -736,13 +779,15 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
if (ah->ah_capabilities.cap_has_mrr_support) {
memset(mrr_rate, 0, sizeof(mrr_rate));
memset(mrr_tries, 0, sizeof(mrr_tries));
for (i = 0; i < 3; i++) {
rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
rate = ath5k_get_rate(ah->hw, info, bf, i);
if (!rate)
break;
mrr_rate[i] = rate->hw_value;
mrr_tries[i] = info->control.rates[i + 1].count;
mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i);
mrr_tries[i] = bf->rates[i].count;
}
ath5k_hw_setup_mrr_tx_desc(ah, ds,
@ -1515,7 +1560,7 @@ unlock:
void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_txq *txq)
struct ath5k_txq *txq, struct ieee80211_tx_control *control)
{
struct ath5k_hw *ah = hw->priv;
struct ath5k_buf *bf;
@ -1555,7 +1600,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
bf->skb = skb;
if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) {
bf->skb = NULL;
spin_lock_irqsave(&ah->txbuflock, flags);
list_add_tail(&bf->list, &ah->txbuf);
@ -1571,11 +1616,13 @@ drop_packet:
static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_txq *txq, struct ath5k_tx_status *ts)
struct ath5k_txq *txq, struct ath5k_tx_status *ts,
struct ath5k_buf *bf)
{
struct ieee80211_tx_info *info;
u8 tries[3];
int i;
int size = 0;
ah->stats.tx_all_count++;
ah->stats.tx_bytes_count += skb->len;
@ -1587,6 +1634,9 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
ieee80211_tx_info_clear_status(info);
size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
memcpy(info->status.rates, bf->rates, size);
for (i = 0; i < ts->ts_final_idx; i++) {
struct ieee80211_tx_rate *r =
&info->status.rates[i];
@ -1663,7 +1713,7 @@ ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
DMA_TO_DEVICE);
ath5k_tx_frame_completed(ah, skb, txq, &ts);
ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
}
/*
@ -1917,7 +1967,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
skb = ieee80211_get_buffered_bc(ah->hw, vif);
while (skb) {
ath5k_tx_queue(ah->hw, skb, ah->cabq);
ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);
if (ah->cabq->txq_len >= ah->cabq->txq_max)
break;
@ -2442,7 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_REPORTS_TX_ACK_STATUS;
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |

View File

@ -47,6 +47,7 @@ struct ath5k_hw;
struct ath5k_txq;
struct ieee80211_channel;
struct ath_bus_ops;
struct ieee80211_tx_control;
enum nl80211_iftype;
enum ath5k_srev_type {
@ -66,6 +67,7 @@ struct ath5k_buf {
dma_addr_t daddr; /* physical addr of desc */
struct sk_buff *skb; /* skbuff for buf */
dma_addr_t skbaddr; /* physical addr of skb data */
struct ieee80211_tx_rate rates[4]; /* number of multi-rate stages */
};
struct ath5k_vif {
@ -103,7 +105,7 @@ int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_txq *txq);
struct ath5k_txq *txq, struct ieee80211_tx_control *control);
const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);

View File

@ -66,7 +66,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
return;
}
ath5k_tx_queue(hw, skb, &ah->txqs[qnum]);
ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control);
}

View File

@ -3175,10 +3175,21 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
{
struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
struct ath6kl *ar = ath6kl_priv(vif->ndev);
u32 id;
u32 id, freq;
const struct ieee80211_mgmt *mgmt;
bool more_data, queued;
/* default to the current channel, but use the one specified as argument
* if any
*/
freq = vif->ch_hint;
if (chan)
freq = chan->center_freq;
/* never send freq zero to the firmware */
if (WARN_ON(freq == 0))
return -EINVAL;
mgmt = (const struct ieee80211_mgmt *) buf;
if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
ieee80211_is_probe_resp(mgmt->frame_control) &&
@ -3188,8 +3199,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
* command to allow the target to fill in the generic IEs.
*/
*cookie = 0; /* TX status not supported */
return ath6kl_send_go_probe_resp(vif, buf, len,
chan->center_freq);
return ath6kl_send_go_probe_resp(vif, buf, len, freq);
}
id = vif->send_action_id++;
@ -3205,17 +3215,14 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
/* AP mode Power saving processing */
if (vif->nw_type == AP_NETWORK) {
queued = ath6kl_mgmt_powersave_ap(vif,
id, chan->center_freq,
wait, buf,
len, &more_data, no_cck);
queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len,
&more_data, no_cck);
if (queued)
return 0;
}
return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
chan->center_freq, wait,
buf, len, no_cck);
return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq,
wait, buf, len, no_cck);
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
@ -3679,6 +3686,20 @@ err:
return NULL;
}
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support ath6kl_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
WIPHY_WOWLAN_4WAY_HANDSHAKE,
.n_patterns = WOW_MAX_FILTERS_PER_LIST,
.pattern_min_len = 1,
.pattern_max_len = WOW_PATTERN_SIZE,
};
#endif
int ath6kl_cfg80211_init(struct ath6kl *ar)
{
struct wiphy *wiphy = ar->wiphy;
@ -3772,15 +3793,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
#ifdef CONFIG_PM
wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
WIPHY_WOWLAN_4WAY_HANDSHAKE;
wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
wiphy->wowlan.pattern_min_len = 1;
wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
wiphy->wowlan = &ath6kl_wowlan_support;
#endif
wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;

View File

@ -1240,20 +1240,14 @@ static ssize_t ath6kl_force_roam_write(struct file *file,
char buf[20];
size_t len;
u8 bssid[ETH_ALEN];
int i;
int addr[ETH_ALEN];
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
&addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
!= ETH_ALEN)
if (!mac_pton(buf, bssid))
return -EINVAL;
for (i = 0; i < ETH_ALEN; i++)
bssid[i] = addr[i];
ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
if (ret)

View File

@ -1696,10 +1696,16 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
test_bit(WMI_READY,
&ar->flag),
WMI_TIMEOUT);
if (timeleft <= 0) {
clear_bit(WMI_READY, &ar->flag);
ath6kl_err("wmi is not ready or wait was interrupted: %ld\n",
timeleft);
ret = -EIO;
goto err_htc_stop;
}
ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
ath6kl_info("%s %s fw %s api %d%s\n",
ar->hw.name,
@ -1718,12 +1724,6 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
goto err_htc_stop;
}
if (!timeleft || signal_pending(current)) {
ath6kl_err("wmi is not ready or wait was interrupted\n");
ret = -EIO;
goto err_htc_stop;
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
/* communicate the wmi protocol version to the target */

View File

@ -345,16 +345,16 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
{
struct hif_scatter_req *s_req;
struct bus_request *bus_req;
int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
int i, scat_req_sz, scat_list_sz, size;
u8 *virt_buf;
scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
scat_req_sz = sizeof(*s_req) + scat_list_sz;
if (!virt_scat)
sg_sz = sizeof(struct scatterlist) * n_scat_entry;
size = sizeof(struct scatterlist) * n_scat_entry;
else
buf_sz = 2 * L1_CACHE_BYTES +
size = 2 * L1_CACHE_BYTES +
ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
for (i = 0; i < n_scat_req; i++) {
@ -364,7 +364,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
return -ENOMEM;
if (virt_scat) {
virt_buf = kzalloc(buf_sz, GFP_KERNEL);
virt_buf = kzalloc(size, GFP_KERNEL);
if (!virt_buf) {
kfree(s_req);
return -ENOMEM;
@ -374,7 +374,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
} else {
/* allocate sglist */
s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
s_req->sgentries = kzalloc(size, GFP_KERNEL);
if (!s_req->sgentries) {
kfree(s_req);

View File

@ -1061,6 +1061,22 @@ static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar)
return;
}
static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
/*
* cfg80211 suspend/WOW currently not supported for USB.
*/
return 0;
}
static int ath6kl_usb_resume(struct ath6kl *ar)
{
/*
* cfg80211 resume currently not supported for USB.
*/
return 0;
}
static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.diag_read32 = ath6kl_usb_diag_read32,
.diag_write32 = ath6kl_usb_diag_write32,
@ -1074,6 +1090,8 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.pipe_map_service = ath6kl_usb_map_service_pipe,
.pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
.cleanup_scatter = ath6kl_usb_cleanup_scatter,
.suspend = ath6kl_usb_suspend,
.resume = ath6kl_usb_resume,
};
/* ath6kl usb driver registered functions */
@ -1152,7 +1170,7 @@ static void ath6kl_usb_remove(struct usb_interface *interface)
#ifdef CONFIG_PM
static int ath6kl_usb_suspend(struct usb_interface *interface,
static int ath6kl_usb_pm_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct ath6kl_usb *device;
@ -1162,7 +1180,7 @@ static int ath6kl_usb_suspend(struct usb_interface *interface,
return 0;
}
static int ath6kl_usb_resume(struct usb_interface *interface)
static int ath6kl_usb_pm_resume(struct usb_interface *interface)
{
struct ath6kl_usb *device;
device = usb_get_intfdata(interface);
@ -1175,7 +1193,7 @@ static int ath6kl_usb_resume(struct usb_interface *interface)
return 0;
}
static int ath6kl_usb_reset_resume(struct usb_interface *intf)
static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf)
{
if (usb_get_intfdata(intf))
ath6kl_usb_remove(intf);
@ -1184,9 +1202,9 @@ static int ath6kl_usb_reset_resume(struct usb_interface *intf)
#else
#define ath6kl_usb_suspend NULL
#define ath6kl_usb_resume NULL
#define ath6kl_usb_reset_resume NULL
#define ath6kl_usb_pm_suspend NULL
#define ath6kl_usb_pm_resume NULL
#define ath6kl_usb_pm_reset_resume NULL
#endif
@ -1201,9 +1219,9 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
static struct usb_driver ath6kl_usb_driver = {
.name = "ath6kl_usb",
.probe = ath6kl_usb_probe,
.suspend = ath6kl_usb_suspend,
.resume = ath6kl_usb_resume,
.reset_resume = ath6kl_usb_reset_resume,
.suspend = ath6kl_usb_pm_suspend,
.resume = ath6kl_usb_pm_resume,
.reset_resume = ath6kl_usb_pm_reset_resume,
.disconnect = ath6kl_usb_remove,
.id_table = ath6kl_usb_ids,
.supports_autosuspend = true,

View File

@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
{ 5, 4, 1 }, /* lvl 5 */
{ 6, 5, 1 }, /* lvl 6 */
{ 7, 6, 1 }, /* lvl 7 */
{ 7, 6, 0 }, /* lvl 8 */
{ 7, 7, 0 } /* lvl 9 */
{ 7, 7, 1 }, /* lvl 8 */
{ 7, 8, 0 } /* lvl 9 */
};
#define ATH9K_ANI_OFDM_NUM_LEVEL \
ARRAY_SIZE(ofdm_level_table)
@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = {
{ 4, 0 }, /* lvl 4 */
{ 5, 0 }, /* lvl 5 */
{ 6, 0 }, /* lvl 6 */
{ 6, 0 }, /* lvl 7 (only for high rssi) */
{ 7, 0 } /* lvl 8 (only for high rssi) */
{ 7, 0 }, /* lvl 7 (only for high rssi) */
{ 8, 0 } /* lvl 8 (only for high rssi) */
};
#define ATH9K_ANI_CCK_NUM_LEVEL \
@ -177,10 +177,15 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
weak_sig = true;
if (aniState->ofdmWeakSigDetect != weak_sig)
/*
* OFDM Weak signal detection is always enabled for AP mode.
*/
if (ah->opmode != NL80211_IFTYPE_AP &&
aniState->ofdmWeakSigDetect != weak_sig) {
ath9k_hw_ani_control(ah,
ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
entry_ofdm->ofdm_weak_signal_on);
}
if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
@ -363,18 +368,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
/*
* enable phy counters if hw supports or if not, enable phy
* interrupts (so we can count each one)
*/
ath9k_ani_restart(ah);
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
REGWRITE_BUFFER_FLUSH(ah);
}
static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)

View File

@ -23,17 +23,12 @@
#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
/* units are errors per second */
#define ATH9K_ANI_OFDM_TRIG_LOW 400
#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
/* units are errors per second */
#define ATH9K_ANI_CCK_TRIG_HIGH 600
/* units are errors per second */
#define ATH9K_ANI_CCK_TRIG_LOW 300
#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
#define ATH9K_ANI_FIRSTEP_LVL 2
@ -45,10 +40,6 @@
/* in ms */
#define ATH9K_ANI_POLLINTERVAL 1000
#define HAL_NOISE_IMMUNE_MAX 4
#define HAL_SPUR_IMMUNE_MAX 7
#define HAL_FIRST_STEP_MAX 2
#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0
#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20
#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0

View File

@ -3563,14 +3563,24 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
int chain;
u32 regval;
u32 regval, value, gpio;
static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
AR_PHY_SWITCH_CHAIN_0,
AR_PHY_SWITCH_CHAIN_1,
AR_PHY_SWITCH_CHAIN_2,
};
u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) {
if (ah->config.xlna_gpio)
gpio = ah->config.xlna_gpio;
else
gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
ath9k_hw_cfg_output(ah, gpio,
AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
}
value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
@ -3796,7 +3806,13 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
REG_RMW_FIELD(ah, ext_atten_reg[i],
AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
if (AR_SREV_9485(ah) &&
(ar9003_hw_get_rx_gain_idx(ah) == 0) &&
ah->config.xatten_margin_cfg)
value = 5;
else
value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
REG_RMW_FIELD(ah, ext_atten_reg[i],
AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
value);
@ -4546,7 +4562,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
is2GHz);
for (i = 0; i < ar9300RateSize; i++) {
ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
i, targetPowerValT2[i]);
}
}
@ -5272,7 +5288,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
return;
for (i = 0; i < ar9300RateSize; i++) {
ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
i, targetPowerValT2[i]);
}

View File

@ -627,9 +627,26 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
{
if (AR_SREV_9462_20(ah))
if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_mixed_rx_gain_table_2p0);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
ar9462_2p0_baseband_core_mix_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
ar9462_2p0_baseband_postamble_mix_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
ar9462_2p0_baseband_postamble_5g_xlna);
}
}
static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
{
if (AR_SREV_9462_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_2p0_5g_xlna_only_rxgain);
INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
ar9462_2p0_baseband_postamble_5g_xlna);
}
}
static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
@ -645,6 +662,9 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
case 2:
ar9003_rx_gain_table_mode2(ah);
break;
case 3:
ar9003_rx_gain_table_mode3(ah);
break;
}
}

View File

@ -735,6 +735,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
return -EINVAL;
}
/*
* SOC, MAC, BB, RADIO initvals.
*/
for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
@ -746,11 +749,39 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
modesIndex);
}
/*
* RXGAIN initvals.
*/
REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
if (AR_SREV_9462_20(ah)) {
/*
* CUS217 mix LNA mode.
*/
if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
1, regWrites);
REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
modesIndex, regWrites);
}
/*
* 5G-XLNA
*/
if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
(ar9003_hw_get_rx_gain_idx(ah) == 3)) {
REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
modesIndex, regWrites);
}
}
if (AR_SREV_9550(ah))
REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
regWrites);
/*
* TXGAIN initvals.
*/
if (AR_SREV_9550(ah)) {
int modes_txgain_index;
@ -772,8 +803,14 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesFastClock,
modesIndex, regWrites);
/*
* Clock frequency initvals.
*/
REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
/*
* JAPAN regulatory.
*/
if (chan->channel == 2484)
ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
@ -906,6 +943,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &ah->ani;
int m1ThreshLow, m2ThreshLow;
int m1Thresh, m2Thresh;
int m2CountThr, m2CountThrLow;
int m1ThreshLowExt, m2ThreshLowExt;
int m1ThreshExt, m2ThreshExt;
s32 value, value2;
switch (cmd & ah->ani_function) {
@ -919,6 +961,61 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
*/
u32 on = param ? 1 : 0;
if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
goto skip_ws_det;
m1ThreshLow = on ?
aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
m2ThreshLow = on ?
aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
m1Thresh = on ?
aniState->iniDef.m1Thresh : m1Thresh_off;
m2Thresh = on ?
aniState->iniDef.m2Thresh : m2Thresh_off;
m2CountThr = on ?
aniState->iniDef.m2CountThr : m2CountThr_off;
m2CountThrLow = on ?
aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
m1ThreshLowExt = on ?
aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
m2ThreshLowExt = on ?
aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
m1ThreshExt = on ?
aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
m2ThreshExt = on ?
aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
m1ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
m2ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M1_THRESH,
m1Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2_THRESH,
m2Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2COUNT_THR,
m2CountThr);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
m2CountThrLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
m1ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
m2ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH,
m1ThreshExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH,
m2ThreshExt);
skip_ws_det:
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);

View File

@ -351,6 +351,8 @@
#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
/*
* AGC Field Definitions
*/

View File

@ -78,7 +78,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@ -1449,4 +1449,284 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
{0x0000b1fc, 0x00000196},
};
static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
};
static const u32 ar9462_2p0_5g_xlna_only_rxgain[][2] = {
/* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
{0x0000a00c, 0x00810080},
{0x0000a010, 0x00830082},
{0x0000a014, 0x01810180},
{0x0000a018, 0x01830182},
{0x0000a01c, 0x01850184},
{0x0000a020, 0x01890188},
{0x0000a024, 0x018b018a},
{0x0000a028, 0x018d018c},
{0x0000a02c, 0x03820190},
{0x0000a030, 0x03840383},
{0x0000a034, 0x03880385},
{0x0000a038, 0x038a0389},
{0x0000a03c, 0x038c038b},
{0x0000a040, 0x0390038d},
{0x0000a044, 0x03920391},
{0x0000a048, 0x03940393},
{0x0000a04c, 0x03960395},
{0x0000a050, 0x00000000},
{0x0000a054, 0x00000000},
{0x0000a058, 0x00000000},
{0x0000a05c, 0x00000000},
{0x0000a060, 0x00000000},
{0x0000a064, 0x00000000},
{0x0000a068, 0x00000000},
{0x0000a06c, 0x00000000},
{0x0000a070, 0x00000000},
{0x0000a074, 0x00000000},
{0x0000a078, 0x00000000},
{0x0000a07c, 0x00000000},
{0x0000a080, 0x29292929},
{0x0000a084, 0x29292929},
{0x0000a088, 0x29292929},
{0x0000a08c, 0x29292929},
{0x0000a090, 0x22292929},
{0x0000a094, 0x1d1d2222},
{0x0000a098, 0x0c111117},
{0x0000a09c, 0x00030303},
{0x0000a0a0, 0x00000000},
{0x0000a0a4, 0x00000000},
{0x0000a0a8, 0x00000000},
{0x0000a0ac, 0x00000000},
{0x0000a0b0, 0x00000000},
{0x0000a0b4, 0x00000000},
{0x0000a0b8, 0x00000000},
{0x0000a0bc, 0x00000000},
{0x0000a0c0, 0x001f0000},
{0x0000a0c4, 0x01000101},
{0x0000a0c8, 0x011e011f},
{0x0000a0cc, 0x011c011d},
{0x0000a0d0, 0x02030204},
{0x0000a0d4, 0x02010202},
{0x0000a0d8, 0x021f0200},
{0x0000a0dc, 0x0302021e},
{0x0000a0e0, 0x03000301},
{0x0000a0e4, 0x031e031f},
{0x0000a0e8, 0x0402031d},
{0x0000a0ec, 0x04000401},
{0x0000a0f0, 0x041e041f},
{0x0000a0f4, 0x0502041d},
{0x0000a0f8, 0x05000501},
{0x0000a0fc, 0x051e051f},
{0x0000a100, 0x06010602},
{0x0000a104, 0x061f0600},
{0x0000a108, 0x061d061e},
{0x0000a10c, 0x07020703},
{0x0000a110, 0x07000701},
{0x0000a114, 0x00000000},
{0x0000a118, 0x00000000},
{0x0000a11c, 0x00000000},
{0x0000a120, 0x00000000},
{0x0000a124, 0x00000000},
{0x0000a128, 0x00000000},
{0x0000a12c, 0x00000000},
{0x0000a130, 0x00000000},
{0x0000a134, 0x00000000},
{0x0000a138, 0x00000000},
{0x0000a13c, 0x00000000},
{0x0000a140, 0x001f0000},
{0x0000a144, 0x01000101},
{0x0000a148, 0x011e011f},
{0x0000a14c, 0x011c011d},
{0x0000a150, 0x02030204},
{0x0000a154, 0x02010202},
{0x0000a158, 0x021f0200},
{0x0000a15c, 0x0302021e},
{0x0000a160, 0x03000301},
{0x0000a164, 0x031e031f},
{0x0000a168, 0x0402031d},
{0x0000a16c, 0x04000401},
{0x0000a170, 0x041e041f},
{0x0000a174, 0x0502041d},
{0x0000a178, 0x05000501},
{0x0000a17c, 0x051e051f},
{0x0000a180, 0x06010602},
{0x0000a184, 0x061f0600},
{0x0000a188, 0x061d061e},
{0x0000a18c, 0x07020703},
{0x0000a190, 0x07000701},
{0x0000a194, 0x00000000},
{0x0000a198, 0x00000000},
{0x0000a19c, 0x00000000},
{0x0000a1a0, 0x00000000},
{0x0000a1a4, 0x00000000},
{0x0000a1a8, 0x00000000},
{0x0000a1ac, 0x00000000},
{0x0000a1b0, 0x00000000},
{0x0000a1b4, 0x00000000},
{0x0000a1b8, 0x00000000},
{0x0000a1bc, 0x00000000},
{0x0000a1c0, 0x00000000},
{0x0000a1c4, 0x00000000},
{0x0000a1c8, 0x00000000},
{0x0000a1cc, 0x00000000},
{0x0000a1d0, 0x00000000},
{0x0000a1d4, 0x00000000},
{0x0000a1d8, 0x00000000},
{0x0000a1dc, 0x00000000},
{0x0000a1e0, 0x00000000},
{0x0000a1e4, 0x00000000},
{0x0000a1e8, 0x00000000},
{0x0000a1ec, 0x00000000},
{0x0000a1f0, 0x00000396},
{0x0000a1f4, 0x00000396},
{0x0000a1f8, 0x00000396},
{0x0000a1fc, 0x00000196},
{0x0000b000, 0x00010000},
{0x0000b004, 0x00030002},
{0x0000b008, 0x00050004},
{0x0000b00c, 0x00810080},
{0x0000b010, 0x00830082},
{0x0000b014, 0x01810180},
{0x0000b018, 0x01830182},
{0x0000b01c, 0x01850184},
{0x0000b020, 0x02810280},
{0x0000b024, 0x02830282},
{0x0000b028, 0x02850284},
{0x0000b02c, 0x02890288},
{0x0000b030, 0x028b028a},
{0x0000b034, 0x0388028c},
{0x0000b038, 0x038a0389},
{0x0000b03c, 0x038c038b},
{0x0000b040, 0x0390038d},
{0x0000b044, 0x03920391},
{0x0000b048, 0x03940393},
{0x0000b04c, 0x03960395},
{0x0000b050, 0x00000000},
{0x0000b054, 0x00000000},
{0x0000b058, 0x00000000},
{0x0000b05c, 0x00000000},
{0x0000b060, 0x00000000},
{0x0000b064, 0x00000000},
{0x0000b068, 0x00000000},
{0x0000b06c, 0x00000000},
{0x0000b070, 0x00000000},
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
{0x0000b080, 0x2a2d2f32},
{0x0000b084, 0x21232328},
{0x0000b088, 0x19191c1e},
{0x0000b08c, 0x12141417},
{0x0000b090, 0x07070e0e},
{0x0000b094, 0x03030305},
{0x0000b098, 0x00000003},
{0x0000b09c, 0x00000000},
{0x0000b0a0, 0x00000000},
{0x0000b0a4, 0x00000000},
{0x0000b0a8, 0x00000000},
{0x0000b0ac, 0x00000000},
{0x0000b0b0, 0x00000000},
{0x0000b0b4, 0x00000000},
{0x0000b0b8, 0x00000000},
{0x0000b0bc, 0x00000000},
{0x0000b0c0, 0x003f0020},
{0x0000b0c4, 0x00400041},
{0x0000b0c8, 0x0140005f},
{0x0000b0cc, 0x0160015f},
{0x0000b0d0, 0x017e017f},
{0x0000b0d4, 0x02410242},
{0x0000b0d8, 0x025f0240},
{0x0000b0dc, 0x027f0260},
{0x0000b0e0, 0x0341027e},
{0x0000b0e4, 0x035f0340},
{0x0000b0e8, 0x037f0360},
{0x0000b0ec, 0x04400441},
{0x0000b0f0, 0x0460045f},
{0x0000b0f4, 0x0541047f},
{0x0000b0f8, 0x055f0540},
{0x0000b0fc, 0x057f0560},
{0x0000b100, 0x06400641},
{0x0000b104, 0x0660065f},
{0x0000b108, 0x067e067f},
{0x0000b10c, 0x07410742},
{0x0000b110, 0x075f0740},
{0x0000b114, 0x077f0760},
{0x0000b118, 0x07800781},
{0x0000b11c, 0x07a0079f},
{0x0000b120, 0x07c107bf},
{0x0000b124, 0x000007c0},
{0x0000b128, 0x00000000},
{0x0000b12c, 0x00000000},
{0x0000b130, 0x00000000},
{0x0000b134, 0x00000000},
{0x0000b138, 0x00000000},
{0x0000b13c, 0x00000000},
{0x0000b140, 0x003f0020},
{0x0000b144, 0x00400041},
{0x0000b148, 0x0140005f},
{0x0000b14c, 0x0160015f},
{0x0000b150, 0x017e017f},
{0x0000b154, 0x02410242},
{0x0000b158, 0x025f0240},
{0x0000b15c, 0x027f0260},
{0x0000b160, 0x0341027e},
{0x0000b164, 0x035f0340},
{0x0000b168, 0x037f0360},
{0x0000b16c, 0x04400441},
{0x0000b170, 0x0460045f},
{0x0000b174, 0x0541047f},
{0x0000b178, 0x055f0540},
{0x0000b17c, 0x057f0560},
{0x0000b180, 0x06400641},
{0x0000b184, 0x0660065f},
{0x0000b188, 0x067e067f},
{0x0000b18c, 0x07410742},
{0x0000b190, 0x075f0740},
{0x0000b194, 0x077f0760},
{0x0000b198, 0x07800781},
{0x0000b19c, 0x07a0079f},
{0x0000b1a0, 0x07c107bf},
{0x0000b1a4, 0x000007c0},
{0x0000b1a8, 0x00000000},
{0x0000b1ac, 0x00000000},
{0x0000b1b0, 0x00000000},
{0x0000b1b4, 0x00000000},
{0x0000b1b8, 0x00000000},
{0x0000b1bc, 0x00000000},
{0x0000b1c0, 0x00000000},
{0x0000b1c4, 0x00000000},
{0x0000b1c8, 0x00000000},
{0x0000b1cc, 0x00000000},
{0x0000b1d0, 0x00000000},
{0x0000b1d4, 0x00000000},
{0x0000b1d8, 0x00000000},
{0x0000b1dc, 0x00000000},
{0x0000b1e0, 0x00000000},
{0x0000b1e4, 0x00000000},
{0x0000b1e8, 0x00000000},
{0x0000b1ec, 0x00000000},
{0x0000b1f0, 0x00000396},
{0x0000b1f4, 0x00000396},
{0x0000b1f8, 0x00000396},
{0x0000b1fc, 0x00000196},
};
static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = {
/* Addr allmodes */
{0x00009fd0, 0x0a2d6b93},
};
static const u32 ar9462_2p0_baseband_postamble_mix_rxgain[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
{0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
{0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
{0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
};
#endif /* INITVALS_9462_2P0_H */

View File

@ -296,6 +296,7 @@ struct ath_tx {
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
struct ath_txq *txq_map[IEEE80211_NUM_ACS];
struct ath_txq *uapsdq;
u32 txq_max_pending[IEEE80211_NUM_ACS];
u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
};
@ -343,6 +344,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct sk_buff *skb);
void ath_tx_tasklet(struct ath_softc *sc);
void ath_tx_edma_tasklet(struct ath_softc *sc);
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@ -353,6 +356,11 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_node *an);
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
/********/
/* VIFs */
@ -623,6 +631,10 @@ void ath_ant_comb_update(struct ath_softc *sc);
/* Main driver core */
/********************/
#define ATH9K_PCI_CUS198 0x0001
#define ATH9K_PCI_CUS230 0x0002
#define ATH9K_PCI_CUS217 0x0004
/*
* Default cache line size, in bytes.
* Used when PCI device not fully initialized by bootrom/BIOS
@ -707,6 +719,7 @@ struct ath_softc {
unsigned int hw_busy_count;
unsigned long sc_flags;
unsigned long driver_data;
u32 intrstatus;
u16 ps_flags; /* PS_* */

View File

@ -108,23 +108,6 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
}
static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_tx_control txctl;
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = sc->beacon.cabq;
ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb);
if (ath_tx_start(hw, skb, &txctl) != 0) {
ath_dbg(common, XMIT, "CABQ TX failed\n");
ieee80211_free_txskb(hw, skb);
}
}
static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@ -206,10 +189,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
while (skb) {
ath9k_tx_cabq(hw, skb);
skb = ieee80211_get_buffered_bc(hw, vif);
}
if (skb)
ath_tx_cabq(hw, vif, skb);
return bf;
}

View File

@ -387,7 +387,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (!caldata) {
chan->noisefloor = nf;
ah->noise = ath9k_hw_getchan_noise(ah, chan);
return false;
}

View File

@ -142,6 +142,7 @@ struct ath9k_htc_target_aggr {
#define WLAN_RC_40_FLAG 0x02
#define WLAN_RC_SGI_FLAG 0x04
#define WLAN_RC_HT_FLAG 0x08
#define ATH_RC_TX_STBC_FLAG 0x20
struct ath9k_htc_rateset {
u8 rs_nrates;

View File

@ -517,6 +517,9 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
tx_streams, rx_streams);
if (tx_streams >= 2)
ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
if (tx_streams != rx_streams) {
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
ht_info->mcs.tx_params |= ((tx_streams - 1) <<

View File

@ -627,6 +627,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
trate->rates.ht_rates.rs_nrates = j;
caps = WLAN_RC_HT_FLAG;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
caps |= ATH_RC_TX_STBC_FLAG;
if (sta->ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&

View File

@ -1870,7 +1870,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->caldata = caldata;
if (caldata && (chan->channel != caldata->channel ||
chan->channelFlags != caldata->channelFlags)) {
chan->channelFlags != caldata->channelFlags ||
chan->chanmode != caldata->chanmode)) {
/* Operating channel changed, reset channel calibration data */
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
@ -3041,7 +3042,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
timer_next = tsf + trig_timeout;
ath_dbg(ath9k_hw_common(ah), HWTIMER,
ath_dbg(ath9k_hw_common(ah), BTCOEX,
"current tsf %x period %x timer_next %x\n",
tsf, timer_period, timer_next);
@ -3140,7 +3141,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &thresh_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n",
ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n",
index);
timer->overflow(timer->arg);
}
@ -3149,7 +3150,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &trigger_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
ath_dbg(common, HWTIMER,
ath_dbg(common, BTCOEX,
"Gen timer[%d] trigger\n", index);
timer->trigger(timer->arg);
}

View File

@ -307,6 +307,10 @@ struct ath9k_ops_config {
u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
u16 ani_poll_interval; /* ANI poll interval in ms */
/* Platform specific config */
u32 xlna_gpio;
bool xatten_margin_cfg;
};
enum ath9k_int {
@ -888,6 +892,9 @@ struct ath_hw {
struct ar5416IniArray iniCckfirJapan2484;
struct ar5416IniArray iniModes_9271_ANI_reg;
struct ar5416IniArray ini_radio_post_sys2ant;
struct ar5416IniArray ini_modes_rxgain_5g_xlna;
struct ar5416IniArray ini_modes_rxgain_bb_core;
struct ar5416IniArray ini_modes_rxgain_bb_postamble;
struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];

View File

@ -432,6 +432,8 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
sc->tx.txq_map[i]->mac80211_qnum = i;
@ -511,6 +513,27 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->spec_config.fft_period = 0xF;
}
static void ath9k_init_platform(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
if (common->bus_ops->ath_bus_type != ATH_PCI)
return;
if (sc->driver_data & (ATH9K_PCI_CUS198 |
ATH9K_PCI_CUS230)) {
ah->config.xlna_gpio = 9;
ah->config.xatten_margin_cfg = true;
ath_info(common, "Set parameters for %s\n",
(sc->driver_data & ATH9K_PCI_CUS198) ?
"CUS198" : "CUS230");
} else if (sc->driver_data & ATH9K_PCI_CUS217) {
ath_info(common, "CUS217 card detected\n");
}
}
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
void *ctx)
{
@ -602,6 +625,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
common->btcoex_enabled = ath9k_btcoex_enable == 1;
common->disable_ani = false;
/*
* Platform quirks.
*/
ath9k_init_platform(sc);
/*
* Enable Antenna diversity only when BTCOEX is disabled
* and the user manually requests the feature.
@ -753,6 +781,15 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support ath9k_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
.n_patterns = MAX_NUM_USER_PATTERN,
.pattern_min_len = 1,
.pattern_max_len = MAX_PATTERN_SIZE,
};
#endif
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
@ -800,13 +837,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
#ifdef CONFIG_PM_SLEEP
if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
device_can_wakeup(sc->dev)) {
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT;
hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
hw->wiphy->wowlan.pattern_min_len = 1;
hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;
}
device_can_wakeup(sc->dev))
hw->wiphy->wowlan = &ath9k_wowlan_support;
atomic_set(&sc->wow_sleep_proc_intr, -1);
atomic_set(&sc->wow_got_bmiss_intr, -1);

View File

@ -1210,13 +1210,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_update_survey_stats(sc);
spin_unlock_irqrestore(&common->cc_lock, flags);
/*
* Preserve the current channel values, before updating
* the same channel
*/
if (ah->curchan && (old_pos == pos))
ath9k_hw_getnf(ah, ah->curchan);
ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
curchan, channel_type);
@ -2347,6 +2340,7 @@ struct ieee80211_ops ath9k_ops = {
.flush = ath9k_flush,
.tx_frames_pending = ath9k_tx_frames_pending,
.tx_last_beacon = ath9k_tx_last_beacon,
.release_buffered_frames = ath9k_release_buffered_frames,
.get_stats = ath9k_get_stats,
.set_antenna = ath9k_set_antenna,
.get_antenna = ath9k_get_antenna,

View File

@ -34,8 +34,51 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
/* PCI-E CUS198 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2086),
.driver_data = ATH9K_PCI_CUS198 },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x1237),
.driver_data = ATH9K_PCI_CUS198 },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2126),
.driver_data = ATH9K_PCI_CUS198 },
/* PCI-E CUS230 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2152),
.driver_data = ATH9K_PCI_CUS230 },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_FOXCONN,
0xE075),
.driver_data = ATH9K_PCI_CUS230 },
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
/* PCI-E CUS217 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0034,
PCI_VENDOR_ID_AZWAVE,
0x2116),
.driver_data = ATH9K_PCI_CUS217 },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0034,
0x11AD, /* LITEON */
0x6661),
.driver_data = ATH9K_PCI_CUS217 },
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
@ -221,6 +264,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = pcim_iomap_table(pdev)[0];
sc->driver_data = id->driver_data;
/* Will be cleared in ath9k_start() */
set_bit(SC_OP_INVALID, &sc->sc_flags);
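For reference, the subsystem-specific entries above must precede the generic PCI_VDEVICE() entries for the same chip: the PCI core takes the first table entry that matches, which is how one device ID can carry different quirks per OEM build. A minimal sketch of the pattern, with a hypothetical quirk flag (ath9k's real flags travel in sc->driver_data, as the probe hunk shows):

#include <linux/pci.h>

/* Hypothetical quirk flag, for illustration only. */
#define MY_PCI_QUIRK_XLNA	0x0001

static const struct pci_device_id my_pci_ids[] = {
	/* Specific OEM build: vendor/device plus subsystem IDs. */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 0x0032,
			 PCI_VENDOR_ID_AZWAVE, 0x2086),
	  .driver_data = MY_PCI_QUIRK_XLNA },
	/* Generic entry for the same chip: matches everything else. */
	{ PCI_VDEVICE(ATHEROS, 0x0032) },
	{ }
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (id->driver_data & MY_PCI_QUIRK_XLNA) {
		/* apply the platform tweak, as ath9k_init_platform()
		 * does for CUS198/CUS230/CUS217 above
		 */
	}
	return 0;
}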

View File

@ -518,6 +518,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
!txfail);
} else {
if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
ieee80211_sta_eosp(sta);
}
/* retry the un-acked ones */
if (bf->bf_next == NULL && bf_last->bf_stale) {
struct ath_buf *tbf;
@ -786,25 +790,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
return ndelim;
}
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
struct list_head *bf_q,
int *aggr_len)
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
int rl = 0, nframes = 0, ndelim, prev_al = 0;
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
struct ath_buf *bf;
u16 seqno;
do {
while (1) {
skb = skb_peek(&tid->buf_q);
if (!skb)
break;
fi = get_frame_info(skb);
bf = fi->bf;
if (!fi->bf)
@ -820,10 +819,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
seqno = bf->bf_state.seqno;
/* do not step over block-ack window */
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
status = ATH_AGGR_BAW_CLOSED;
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
break;
}
if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
struct ath_tx_status ts = {};
@ -837,6 +834,40 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
continue;
}
bf->bf_next = NULL;
bf->bf_lastbf = bf;
return bf;
}
return NULL;
}
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
struct list_head *bf_q,
int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
int rl = 0, nframes = 0, ndelim, prev_al = 0;
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
do {
bf = ath_tx_get_tid_subframe(sc, txq, tid);
if (!bf) {
status = ATH_AGGR_BAW_CLOSED;
break;
}
skb = bf->bf_mpdu;
fi = get_frame_info(skb);
if (!bf_first)
bf_first = bf;
@ -882,7 +913,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
/* link buffers of this frame to the aggregate */
if (!fi->retries)
ath_tx_addto_baw(sc, tid, seqno);
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
bf->bf_state.ndelim = ndelim;
__skb_unlink(skb, &tid->buf_q);
@ -1090,10 +1121,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, int len)
{
struct ath_hw *ah = sc->sc_ah;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
struct ath_buf *bf_first = bf;
struct ath_buf *bf_first = NULL;
struct ath_tx_info info;
bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
memset(&info, 0, sizeof(info));
info.is_first = true;
@ -1101,24 +1130,11 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
info.txpower = MAX_RATE_POWER;
info.qcu = txq->axq_qnum;
info.flags = ATH9K_TXDESC_INTREQ;
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
info.flags |= ATH9K_TXDESC_NOACK;
if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
info.flags |= ATH9K_TXDESC_LDPC;
ath_buf_set_rate(sc, bf, &info, len);
if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
info.flags |= ATH9K_TXDESC_CLRDMASK;
if (bf->bf_state.bfs_paprd)
info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;
while (bf) {
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_frame_info *fi = get_frame_info(skb);
bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
info.type = get_hw_packet_type(skb);
if (bf->bf_next)
@ -1126,6 +1142,26 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
else
info.link = 0;
if (!bf_first) {
bf_first = bf;
info.flags = ATH9K_TXDESC_INTREQ;
if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
txq == sc->tx.uapsdq)
info.flags |= ATH9K_TXDESC_CLRDMASK;
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
info.flags |= ATH9K_TXDESC_NOACK;
if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
info.flags |= ATH9K_TXDESC_LDPC;
if (bf->bf_state.bfs_paprd)
info.flags |= (u32) bf->bf_state.bfs_paprd <<
ATH9K_TXDESC_PAPRD_S;
ath_buf_set_rate(sc, bf, &info, len);
}
info.buf_addr[0] = bf->bf_buf_addr;
info.buf_len[0] = skb->len;
info.pkt_len = fi->framelen;
@ -1135,7 +1171,7 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
if (aggr) {
if (bf == bf_first)
info.aggr = AGGR_BUF_FIRST;
else if (!bf->bf_next)
else if (bf == bf_first->bf_lastbf)
info.aggr = AGGR_BUF_LAST;
else
info.aggr = AGGR_BUF_MIDDLE;
@ -1144,6 +1180,9 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
info.aggr_len = len;
}
if (bf == bf_first->bf_lastbf)
bf_first = NULL;
ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
bf = bf->bf_next;
}
@ -1328,6 +1367,70 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
ath_txq_unlock_complete(sc, txq);
}
void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data)
{
struct ath_softc *sc = hw->priv;
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_txq *txq = sc->tx.uapsdq;
struct ieee80211_tx_info *info;
struct list_head bf_q;
struct ath_buf *bf_tail = NULL, *bf;
int sent = 0;
int i;
INIT_LIST_HEAD(&bf_q);
for (i = 0; tids && nframes; i++, tids >>= 1) {
struct ath_atx_tid *tid;
if (!(tids & 1))
continue;
tid = ATH_AN_2_TID(an, i);
if (tid->paused)
continue;
ath_txq_lock(sc, tid->ac->txq);
while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
if (!bf)
break;
__skb_unlink(bf->bf_mpdu, &tid->buf_q);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf);
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
bf->bf_state.bf_type &= ~BUF_AGGR;
if (bf_tail)
bf_tail->bf_next = bf;
bf_tail = bf;
nframes--;
sent++;
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
if (skb_queue_empty(&tid->buf_q))
ieee80211_sta_set_buffered(an->sta, i, false);
}
ath_txq_unlock_complete(sc, tid->ac->txq);
}
if (list_empty(&bf_q))
return;
info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
info->flags |= IEEE80211_TX_STATUS_EOSP;
bf = list_first_entry(&bf_q, struct ath_buf, list);
ath_txq_lock(sc, txq);
ath_tx_fill_desc(sc, bf, txq, 0);
ath_tx_txqaddbuf(sc, txq, &bf_q, false);
ath_txq_unlock(sc, txq);
}
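The two sides of the uAPSD contract are visible in the hunks above: ath9k_release_buffered_frames() tags the last frame released for a service period with IEEE80211_TX_STATUS_EOSP, and the aggregation completion path calls ieee80211_sta_eosp() when that frame is reported back. A minimal sketch of the completion side, with hypothetical driver names:

#include <net/mac80211.h>

static void my_tx_complete(struct ieee80211_hw *hw,
			   struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* Last frame of a uAPSD service period: let mac80211 close
	 * the SP towards the station.
	 */
	if (sta && (info->flags & IEEE80211_TX_STATUS_EOSP)) {
		info->flags &= ~IEEE80211_TX_STATUS_EOSP;
		ieee80211_sta_eosp(sta);
	}

	ieee80211_tx_status(hw, skb);	/* normal status reporting */
}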
/********************/
/* Queue Management */
/********************/
@ -1681,8 +1784,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
struct sk_buff *skb, struct ath_tx_control *txctl)
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
struct ath_frame_info *fi = get_frame_info(skb);
struct list_head bf_head;
@ -1695,21 +1799,22 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
* - seqno is not within block-ack window
* - h/w queue depth exceeds low water mark
*/
if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
!BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
txq != sc->tx.uapsdq) {
/*
* Add this frame to software queue for scheduling later
* for aggregation.
*/
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
TX_STAT_INC(txq->axq_qnum, a_queued_sw);
__skb_queue_tail(&tid->buf_q, skb);
if (!txctl->an || !txctl->an->sleeping)
ath_tx_queue_tid(txctl->txq, tid);
ath_tx_queue_tid(txq, tid);
return;
}
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
ieee80211_free_txskb(sc->hw, skb);
return;
@ -1724,10 +1829,10 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
/* Queue to h/w without aggregation */
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
}
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
@ -1865,8 +1970,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
return bf;
}
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@ -1874,13 +1978,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
int padpos, padsize;
int frmlen = skb->len + FCS_LEN;
u8 tidno;
int q;
int padpos, padsize;
/* NOTE: sta can be NULL according to net/mac80211.h */
if (sta)
@ -1901,6 +2000,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
}
if ((vif && vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_AP_VLAN) ||
!ieee80211_is_data(hdr->frame_control))
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
/* Add the padding after the header if this is not already done */
padpos = ieee80211_hdrlen(hdr->frame_control);
padsize = padpos & 3;
@ -1910,16 +2014,34 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
hdr = (struct ieee80211_hdr *) skb->data;
}
if ((vif && vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_AP_VLAN) ||
!ieee80211_is_data(hdr->frame_control))
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
setup_frame_info(hw, sta, skb, frmlen);
return 0;
}
/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
u8 tidno;
int q;
int ret;
ret = ath_tx_prepare(hw, skb, txctl);
if (ret)
return ret;
hdr = (struct ieee80211_hdr *) skb->data;
/*
* At this point, the vif, hw_key and sta pointers in the tx control
* info are no longer valid (overwritten by the ath_frame_info data).
@ -1935,6 +2057,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
txq->stopped = true;
}
if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
}
if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
IEEE80211_QOS_CTL_TID_MASK;
@ -1948,11 +2076,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
*/
ath_tx_send_ampdu(sc, tid, skb, txctl);
ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
goto out;
}
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
if (txctl->paprd)
dev_kfree_skb_any(skb);
@ -1967,7 +2095,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
bf->bf_state.bfs_paprd_timestamp = jiffies;
ath_set_rates(vif, sta, bf);
ath_tx_send_normal(sc, txctl->txq, tid, skb);
ath_tx_send_normal(sc, txq, tid, skb);
out:
ath_txq_unlock(sc, txq);
@ -1975,6 +2103,74 @@ out:
return 0;
}
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct ath_softc *sc = hw->priv;
struct ath_tx_control txctl = {
.txq = sc->beacon.cabq
};
struct ath_tx_info info = {};
struct ieee80211_hdr *hdr;
struct ath_buf *bf_tail = NULL;
struct ath_buf *bf;
LIST_HEAD(bf_q);
int duration = 0;
int max_duration;
max_duration =
sc->cur_beacon_conf.beacon_interval * 1000 *
sc->cur_beacon_conf.dtim_period / ATH_BCBUF;
do {
struct ath_frame_info *fi = get_frame_info(skb);
if (ath_tx_prepare(hw, skb, &txctl))
break;
bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
if (!bf)
break;
bf->bf_lastbf = bf;
ath_set_rates(vif, NULL, bf);
ath_buf_set_rate(sc, bf, &info, fi->framelen);
duration += info.rates[0].PktDuration;
if (bf_tail)
bf_tail->bf_next = bf;
list_add_tail(&bf->list, &bf_q);
bf_tail = bf;
skb = NULL;
if (duration > max_duration)
break;
skb = ieee80211_get_buffered_bc(hw, vif);
} while (skb);
if (skb)
ieee80211_free_txskb(hw, skb);
if (list_empty(&bf_q))
return;
bf = list_first_entry(&bf_q, struct ath_buf, list);
hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
sizeof(*hdr), DMA_TO_DEVICE);
}
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
TX_STAT_INC(txctl.txq->axq_qnum, queued);
ath_txq_unlock(sc, txctl.txq);
}
/*****************/
/* TX Completion */
/*****************/
@ -2020,7 +2216,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
__skb_queue_tail(&txq->complete_q, skb);
q = skb_get_queue_mapping(skb);
if (txq == sc->tx.uapsdq)
txq = sc->tx.txq_map[q];
if (txq == sc->tx.txq_map[q]) {
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
@ -2031,8 +2232,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
txq->stopped = false;
}
}
__skb_queue_tail(&txq->complete_q, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,

View File

@ -133,6 +133,9 @@ struct carl9170_sta_tid {
/* Preaggregation reorder queue */
struct sk_buff_head queue;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
};
#define CARL9170_QUEUE_TIMEOUT 256

View File

@ -1448,6 +1448,8 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
tid_info->state = CARL9170_TID_STATE_PROGRESS;
tid_info->tid = tid;
tid_info->max = sta_info->ampdu_max_len;
tid_info->sta = sta;
tid_info->vif = vif;
INIT_LIST_HEAD(&tid_info->list);
INIT_LIST_HEAD(&tid_info->tmp_list);
@ -1857,6 +1859,7 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SIGNAL_DBM;
if (!modparam_noht) {

View File

@ -625,7 +625,7 @@ static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
goto unlock;
sta = __carl9170_get_tx_sta(ar, skb);
sta = iter->sta;
if (WARN_ON(!sta))
goto unlock;
@ -866,6 +866,93 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
return false;
}
static void carl9170_tx_get_rates(struct ar9170 *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE);
info = IEEE80211_SKB_CB(skb);
ieee80211_get_tx_rates(vif, sta, skb,
info->control.rates,
IEEE80211_TX_MAX_RATES);
}
static void carl9170_tx_apply_rateset(struct ar9170 *ar,
struct ieee80211_tx_info *sinfo,
struct sk_buff *skb)
{
struct ieee80211_tx_rate *txrate;
struct ieee80211_tx_info *info;
struct _carl9170_tx_superframe *txc = (void *) skb->data;
int i;
bool ampdu;
bool no_ack;
info = IEEE80211_SKB_CB(skb);
ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
/* Set the rate control probe flag for all (sub-) frames.
* This is because the TX_STATS_AMPDU flag is only set on
* the last frame, so it has to be inherited.
*/
info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
/* NOTE: For the first rate, the ERP & AMPDU flags are directly
* taken from mac_control. For all fallback rates, the firmware
* updates the mac_control flags from the rate info field.
*/
for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
__le32 phy_set;
txrate = &sinfo->control.rates[i];
if (txrate->idx < 0)
break;
phy_set = carl9170_tx_physet(ar, info, txrate);
if (i == 0) {
__le16 mac_tmp = cpu_to_le16(0);
/* first rate - part of the hw's frame header */
txc->f.phy_control = phy_set;
if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
else if (carl9170_tx_cts_check(ar, txrate))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
txc->f.mac_control |= mac_tmp;
} else {
/* fallback rates are stored in the firmware's
* retry rate set array.
*/
txc->s.rr[i - 1] = phy_set;
}
SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
txrate->count);
if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
CARL9170_TX_SUPER_RI_ERP_PROT_S);
else if (carl9170_tx_cts_check(ar, txrate))
txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
CARL9170_TX_SUPER_RI_ERP_PROT_S);
if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
}
}
static int carl9170_tx_prepare(struct ar9170 *ar,
struct ieee80211_sta *sta,
struct sk_buff *skb)
@ -874,13 +961,10 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
struct _carl9170_tx_superframe *txc;
struct carl9170_vif_info *cvif;
struct ieee80211_tx_info *info;
struct ieee80211_tx_rate *txrate;
struct carl9170_tx_info *arinfo;
unsigned int hw_queue;
int i;
__le16 mac_tmp;
u16 len;
bool ampdu, no_ack;
BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
@ -889,8 +973,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
AR9170_TX_HWDESC_LEN);
BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
((CARL9170_TX_SUPER_MISC_VIF_ID >>
CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
@ -932,8 +1014,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
AR9170_TX_MAC_QOS);
no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
if (unlikely(no_ack))
if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
if (info->control.hw_key) {
@ -954,8 +1035,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
}
}
ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
if (ampdu) {
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
unsigned int density, factor;
if (unlikely(!sta || !cvif))
@ -982,50 +1062,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
txc->s.ampdu_settings, factor);
}
/*
* NOTE: For the first rate, the ERP & AMPDU flags are directly
* taken from mac_control. For all fallback rates, the firmware
* updates the mac_control flags from the rate info field.
*/
for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
__le32 phy_set;
txrate = &info->control.rates[i];
if (txrate->idx < 0)
break;
phy_set = carl9170_tx_physet(ar, info, txrate);
if (i == 0) {
/* first rate - part of the hw's frame header */
txc->f.phy_control = phy_set;
if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
else if (carl9170_tx_cts_check(ar, txrate))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
} else {
/* fallback rates are stored in the firmware's
* retry rate set array.
*/
txc->s.rr[i - 1] = phy_set;
}
SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
txrate->count);
if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
CARL9170_TX_SUPER_RI_ERP_PROT_S);
else if (carl9170_tx_cts_check(ar, txrate))
txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
CARL9170_TX_SUPER_RI_ERP_PROT_S);
if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
}
txc->s.len = cpu_to_le16(skb->len);
txc->f.length = cpu_to_le16(len + FCS_LEN);
txc->f.mac_control = mac_tmp;
@ -1086,31 +1122,12 @@ static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
}
}
static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
struct sk_buff *_src)
{
struct _carl9170_tx_superframe *dest, *src;
dest = (void *) _dest->data;
src = (void *) _src->data;
/*
* The mac80211 rate control algorithm expects that all MPDUs in
* an AMPDU share the same tx vectors.
* This is not really obvious right now, because the hardware
* does the AMPDU setup according to its own rulebook.
* Our nicely assembled, strictly monotonic increasing mpdu
* chains will be broken up, mashed back together...
*/
return (dest->f.phy_control == src->f.phy_control);
}
static void carl9170_tx_ampdu(struct ar9170 *ar)
{
struct sk_buff_head agg;
struct carl9170_sta_tid *tid_info;
struct sk_buff *skb, *first;
struct ieee80211_tx_info *tx_info_first;
unsigned int i = 0, done_ampdus = 0;
u16 seq, queue, tmpssn;
@ -1156,6 +1173,7 @@ retry:
goto processed;
}
tx_info_first = NULL;
while ((skb = skb_peek(&tid_info->queue))) {
/* strict 0, 1, ..., n - 1, n frame sequence order */
if (unlikely(carl9170_get_seq(skb) != seq))
@ -1166,8 +1184,13 @@ retry:
(tid_info->max - 1)))
break;
if (!carl9170_tx_rate_check(ar, skb, first))
break;
if (!tx_info_first) {
carl9170_tx_get_rates(ar, tid_info->vif,
tid_info->sta, first);
tx_info_first = IEEE80211_SKB_CB(first);
}
carl9170_tx_apply_rateset(ar, tx_info_first, skb);
atomic_inc(&ar->tx_ampdu_upload);
tid_info->snx = seq = SEQ_NEXT(seq);
@ -1182,8 +1205,7 @@ retry:
if (skb_queue_empty(&tid_info->queue) ||
carl9170_get_seq(skb_peek(&tid_info->queue)) !=
tid_info->snx) {
/*
* stop TID, if A-MPDU frames are still missing,
/* stop TID, if A-MPDU frames are still missing,
* or whenever the queue is empty.
*/
@ -1450,12 +1472,14 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
struct ieee80211_sta *sta = control->sta;
struct ieee80211_vif *vif;
bool run;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
info = IEEE80211_SKB_CB(skb);
vif = info->control.vif;
if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
goto err_free;
@ -1486,6 +1510,8 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
} else {
unsigned int queue = skb_get_queue_mapping(skb);
carl9170_tx_get_rates(ar, vif, sta, skb);
carl9170_tx_apply_rateset(ar, info, skb);
skb_queue_tail(&ar->tx_pending[queue], skb);
}
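Both call sites follow the sequence that IEEE80211_HW_SUPPORTS_RC_TABLE (advertised in the main.c hunk) enables: fetch the current rate set from mac80211 at dequeue time, then translate it into the hardware's descriptors. A minimal sketch of the fetch step:

#include <net/mac80211.h>

static void my_tx_fetch_rates(struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* Fill info->control.rates from the rate-control table now,
	 * instead of relying on the snapshot taken at tx() time.
	 */
	ieee80211_get_tx_rates(vif, sta, skb, info->control.rates,
			       IEEE80211_TX_MAX_RATES);
}

This is what lets carl9170, which queues A-MPDU subframes internally, drop the old carl9170_tx_rate_check() heuristic: the whole aggregate now shares the rate set fetched for its first frame (carl9170_tx_apply_rateset above).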

View File

@ -42,11 +42,11 @@ static int __ath_regd_init(struct ath_regulatory *reg);
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
/* We allow IBSS on these on a case-by-case basis by regulatory domain */
#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 40, 0, 30,\
#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 40, 0, 30,\
#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 40, 0, 30,\
#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\
NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \
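The only change in these three rules is the third REG_RULE() argument, the maximum channel bandwidth in MHz; raising it from 40 to 80 is what permits 802.11ac (VHT80) operation in these ranges. For reference, the argument order as used above:

/* REG_RULE(start_mhz, end_mhz, max_bw_mhz, max_antenna_gain_dbi,
 *          max_eirp_dbm, flags)
 * e.g. the first rule: 5140..5360 MHz, channels up to 80 MHz wide,
 * 30 dBm EIRP, passive scan and no IBSS.
 */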

View File

@ -28,7 +28,7 @@ config WIL6210_ISR_COR
such monitoring impossible.
Say y unless you are debugging interrupts.
config ATH6KL_TRACING
config WIL6210_TRACING
bool "wil6210 tracing support"
depends on WIL6210
depends on EVENT_TRACING

View File

@ -402,6 +402,30 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
return 0;
}
static int wil_fix_bcon(struct wil6210_priv *wil,
struct cfg80211_beacon_data *bcon)
{
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
int rc = 0;
if (bcon->probe_resp_len <= hlen)
return 0;
if (!bcon->proberesp_ies) {
bcon->proberesp_ies = f->u.probe_resp.variable;
bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
rc = 1;
}
if (!bcon->assocresp_ies) {
bcon->assocresp_ies = f->u.probe_resp.variable;
bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
rc = 1;
}
return rc;
}
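wil_fix_bcon() treats everything after the probe-response fixed fields as the IE blob, so a single offsetof() is enough to find it. The skipped header length works out as follows (a reference sketch; values follow the standard 802.11 management frame layout):

#include <linux/ieee80211.h>
#include <linux/stddef.h>

/* 24-byte management header + 8 (timestamp) + 2 (beacon interval)
 * + 2 (capability info) = 36 bytes; the IEs start right after.
 */
static const size_t probe_resp_ie_offset =
	offsetof(struct ieee80211_mgmt, u.probe_resp.variable);	/* 36 */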
static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_ap_settings *info)
@ -423,10 +447,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
info->ssid, info->ssid_len);
if (wil_fix_bcon(wil, bcon))
wil_dbg_misc(wil, "Fixed bcon\n");
rc = wil_reset(wil);
if (rc)
return rc;
/* Rx VRING. */
rc = wil_rx_init(wil);
if (rc)
return rc;
rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
if (rc)
return rc;
@ -455,8 +487,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
if (rc)
return rc;
/* Rx VRING. After MAC and beacon */
rc = wil_rx_init(wil);
netif_carrier_on(ndev);

View File

@ -286,41 +286,36 @@ static int __wil_up(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
struct ieee80211_channel *channel = wdev->preset_chandef.chan;
int rc;
int bi;
u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
rc = wil_reset(wil);
if (rc)
return rc;
/* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */
wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
/* Rx VRING. After MAC and beacon */
rc = wil_rx_init(wil);
if (rc)
return rc;
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
wil_dbg_misc(wil, "type: STATION\n");
bi = 0;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_AP:
wil_dbg_misc(wil, "type: AP\n");
bi = 100;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_P2P_CLIENT:
wil_dbg_misc(wil, "type: P2P_CLIENT\n");
bi = 0;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_P2P_GO:
wil_dbg_misc(wil, "type: P2P_GO\n");
bi = 100;
ndev->type = ARPHRD_ETHER;
break;
case NL80211_IFTYPE_MONITOR:
wil_dbg_misc(wil, "type: Monitor\n");
bi = 0;
ndev->type = ARPHRD_IEEE80211_RADIOTAP;
/* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
break;
@ -328,36 +323,9 @@ static int __wil_up(struct wil6210_priv *wil)
return -EOPNOTSUPP;
}
/* Apply profile in the following order: */
/* SSID and channel for the AP */
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (wdev->ssid_len == 0) {
wil_err(wil, "SSID not set\n");
return -EINVAL;
}
rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
if (rc)
return rc;
break;
default:
break;
}
/* MAC address - pre-requisite for other commands */
wmi_set_mac_address(wil, ndev->dev_addr);
/* Set up beaconing if required. */
if (bi > 0) {
rc = wmi_pcp_start(wil, bi, wmi_nettype,
(channel ? channel->hw_value : 0));
if (rc)
return rc;
}
/* Rx VRING. After MAC and beacon */
wil_rx_init(wil);
napi_enable(&wil->napi_rx);
napi_enable(&wil->napi_tx);

View File

@ -768,9 +768,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wil_err(wil, "Xmit in monitor mode not supported\n");
goto drop;
}
if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
rc = wmi_tx_eapol(wil, skb);
} else {
/* find vring */
vring = wil_find_tx_vring(wil, skb);
if (!vring) {
@ -779,7 +777,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/* set up vring entry */
rc = wil_tx_vring(wil, vring, skb);
}
switch (rc) {
case 0:
/* statistics will be updated on the tx_complete */

View File

@ -329,7 +329,6 @@ int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
int wmi_set_channel(struct wil6210_priv *wil, int channel);
int wmi_get_channel(struct wil6210_priv *wil, int *channel);
int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr);
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,

View File

@ -75,10 +75,11 @@ static const struct {
{0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
{0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
{0x880000, 0x88a000, 0x880000}, /* various RGF */
{0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
{0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */
/*
* 920000..930000 ucode code RAM
* 930000..932000 ucode data RAM
* 932000..949000 back-door debug data
*/
};
@ -314,8 +315,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
data->info.channel, data->info.mcs, data->info.snr);
wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
le16_to_cpu(data->info.stype));
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
@ -739,8 +740,12 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
if (!wil->secure_pcp)
cmd.disable_sec = 1;
/*
* Processing time may be long; in the case of a secure AP it takes
* about 3500 ms for the FW to start the AP.
*/
rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000);
if (rc)
return rc;
@ -834,40 +839,6 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
}
int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct wmi_eapol_tx_cmd *cmd;
struct ethhdr *eth;
u16 eapol_len = skb->len - ETH_HLEN;
void *eapol = skb->data + ETH_HLEN;
uint i;
int rc;
skb_set_mac_header(skb, 0);
eth = eth_hdr(skb);
wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
goto found_dest;
}
return -EINVAL;
found_dest:
/* find out eapol data & len */
cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
if (!cmd)
return -EINVAL;
memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
cmd->eapol_len = cpu_to_le16(eapol_len);
memcpy(cmd->eapol, eapol, eapol_len);
rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
kfree(cmd);
return rc;
}
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
const void *mac_addr)
{

View File

@ -28,7 +28,7 @@ config B43
config B43_BCMA
bool "Support for BCMA bus"
depends on B43 && BCMA
depends on B43 && (BCMA = y || BCMA = B43)
default y
config B43_BCMA_EXTRA
@ -39,7 +39,7 @@ config B43_BCMA_EXTRA
config B43_SSB
bool
depends on B43 && SSB
depends on B43 && (SSB = y || SSB = B43)
default y
# Auto-select SSB PCI-HOST support, if possible
@ -111,6 +111,7 @@ config B43_PIO
config B43_PHY_N
bool "Support for 802.11n (N-PHY) devices"
depends on B43
default y
---help---
Support for the N-PHY.
@ -132,6 +133,7 @@ config B43_PHY_LP
config B43_PHY_HT
bool "Support for HT-PHY (high throughput) devices"
depends on B43 && B43_BCMA
default y
---help---
Support for the HT-PHY.

View File

@ -22,9 +22,11 @@
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <defs.h>
@ -303,6 +305,153 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
*ret = retval;
}
/**
* brcmf_sdio_buffrw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
* @fn: SDIO function number
* @write: direction flag
* @addr: dongle memory address as source/destination
* @pktlist: queue of skbs to transfer
*
* This function serves as the interface to the MMC stack for block data
* access. It assumes that the skbs passed down by the caller have already
* been padded and aligned.
*/
static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
bool write, u32 addr, struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
unsigned int max_blks, max_req_sz;
unsigned short max_seg_sz, seg_sz;
unsigned char *pkt_data;
struct sk_buff *pkt_next = NULL;
struct mmc_request mmc_req;
struct mmc_command mmc_cmd;
struct mmc_data mmc_dat;
struct sg_table st;
struct scatterlist *sgl;
struct mmc_host *host;
int ret = 0;
if (!pktlist->qlen)
return -EINVAL;
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
/* For a single skb, use the standard mmc interface */
if (pktlist->qlen == 1) {
pkt_next = pktlist->next;
req_sz = pkt_next->len + 3;
req_sz &= (uint)~3;
if (write)
return sdio_memcpy_toio(sdiodev->func[fn], addr,
((u8 *)(pkt_next->data)),
req_sz);
else if (fn == 1)
return sdio_memcpy_fromio(sdiodev->func[fn],
((u8 *)(pkt_next->data)),
addr, req_sz);
else
/* function 2 read is FIFO operation */
return sdio_readsb(sdiodev->func[fn],
((u8 *)(pkt_next->data)), addr,
req_sz);
}
host = sdiodev->func[fn]->card->host;
func_blk_sz = sdiodev->func[fn]->cur_blksize;
/* Blocks per command are limited by the host's max block count, the
* host's max request size and the IO_RW_EXTENDED maximum of 511 blocks.
*/
max_blks = min_t(unsigned int, host->max_blk_count, 511u);
max_req_sz = min_t(unsigned int, host->max_req_size,
max_blks * func_blk_sz);
max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
max_seg_sz = min_t(unsigned short, max_seg_sz, pktlist->qlen);
seg_sz = pktlist->qlen;
pkt_offset = 0;
pkt_next = pktlist->next;
if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL))
return -ENOMEM;
while (seg_sz) {
req_sz = 0;
sg_cnt = 0;
memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
memset(&mmc_dat, 0, sizeof(struct mmc_data));
sgl = st.sgl;
/* prep sg table */
while (pkt_next != (struct sk_buff *)pktlist) {
pkt_data = pkt_next->data + pkt_offset;
sg_data_sz = pkt_next->len - pkt_offset;
if (sg_data_sz > host->max_seg_size)
sg_data_sz = host->max_seg_size;
if (sg_data_sz > max_req_sz - req_sz)
sg_data_sz = max_req_sz - req_sz;
sg_set_buf(sgl, pkt_data, sg_data_sz);
sg_cnt++;
sgl = sg_next(sgl);
req_sz += sg_data_sz;
pkt_offset += sg_data_sz;
if (pkt_offset == pkt_next->len) {
pkt_offset = 0;
pkt_next = pkt_next->next;
}
if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
break;
}
seg_sz -= sg_cnt;
if (req_sz % func_blk_sz != 0) {
brcmf_err("sg request length %u is not %u aligned\n",
req_sz, func_blk_sz);
sg_free_table(&st);
return -ENOTBLK;
}
mmc_dat.sg = st.sgl;
mmc_dat.sg_len = sg_cnt;
mmc_dat.blksz = func_blk_sz;
mmc_dat.blocks = req_sz / func_blk_sz;
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
mmc_cmd.opcode = SD_IO_RW_EXTENDED;
mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
mmc_cmd.arg |= 1<<27; /* block mode */
/* incrementing addr for function 1 */
mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
if (fn == 1)
addr += req_sz;
mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
mmc_wait_for_req(host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret != 0) {
brcmf_err("CMD53 sg block %s failed %d\n",
write ? "write" : "read", ret);
ret = -EIO;
break;
}
}
sg_free_table(&st);
return ret;
}
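The mmc_cmd.arg assembly above follows the CMD53 (IO_RW_EXTENDED) argument layout from the SDIO specification: R/W flag in bit 31, function number in bits 30:28, block mode in bit 27, OP code (address increment) in bit 26, a 17-bit register address in bits 25:9 and the byte/block count in bits 8:0. A worked example under those assumptions:

#include <linux/types.h>

static u32 example_cmd53_arg(void)
{
	u32 arg = 0;

	arg |= 1u << 31;			/* write */
	arg |= (2u & 0x7) << 28;		/* function 2 */
	arg |= 1u << 27;			/* block mode */
	/* bit 26 left clear: fixed (FIFO) address, as for F2 data */
	arg |= (0x8000u & 0x1FFFF) << 9;	/* register address 0x8000 */
	arg |= 4u & 0x1FF;			/* 4 blocks */

	return arg;				/* == 0xA9000004 */
}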
static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
uint flags, uint width, u32 *addr)
{
@ -355,9 +504,9 @@ int
brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt)
{
uint incr_fix;
uint width;
int err = 0;
struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
@ -367,9 +516,10 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (err)
goto done;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
fn, addr, pkt);
skb_queue_head_init(&pkt_list);
skb_queue_tail(&pkt_list, pkt);
err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
skb_dequeue_tail(&pkt_list);
done:
return err;
@ -391,8 +541,7 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
goto done;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
pktq);
err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
done:
return err;
@ -424,10 +573,10 @@ int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt)
{
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
@ -446,13 +595,14 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
addr &= SBSDIO_SB_OFT_ADDR_MASK;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
if (width == 4)
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
addr, pkt);
skb_queue_head_init(&pkt_list);
skb_queue_tail(&pkt_list, pkt);
err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
skb_dequeue_tail(&pkt_list);
done:
return err;
@ -466,6 +616,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
struct sk_buff *pkt;
u32 sdaddr;
uint dsize;
struct sk_buff_head pkt_list;
dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
pkt = dev_alloc_skb(dsize);
@ -474,6 +625,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
return -EIO;
}
pkt->priority = 0;
skb_queue_head_init(&pkt_list);
/* Determine initial transfer parameters */
sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@ -501,9 +653,10 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
skb_put(pkt, dsize);
if (write)
memcpy(pkt->data, data, dsize);
bcmerror = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
write, SDIO_FUNC_1,
sdaddr, pkt);
skb_queue_tail(&pkt_list, pkt);
bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
sdaddr, &pkt_list);
skb_dequeue_tail(&pkt_list);
if (bcmerror) {
brcmf_err("membytes transfer failed\n");
break;

View File

@ -66,7 +66,7 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
static bool
bool
brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
{
bool is_err = false;
@ -76,7 +76,7 @@ brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
return is_err;
}
static void
void
brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
{
#ifdef CONFIG_PM_SLEEP
@ -211,115 +211,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
return err_ret;
}
/* precondition: host controller is claimed */
static int
brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
uint func, uint addr, struct sk_buff *pkt, uint pktlen)
{
int err_ret = 0;
if ((write) && (!fifo)) {
err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
((u8 *) (pkt->data)), pktlen);
} else if (write) {
err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
((u8 *) (pkt->data)), pktlen);
} else if (fifo) {
err_ret = sdio_readsb(sdiodev->func[func],
((u8 *) (pkt->data)), addr, pktlen);
} else {
err_ret = sdio_memcpy_fromio(sdiodev->func[func],
((u8 *) (pkt->data)),
addr, pktlen);
}
return err_ret;
}
/*
* This function takes a queue of packets. The packets on the queue
* are assumed to be properly aligned by the caller.
*/
int
brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
uint write, uint func, uint addr,
struct sk_buff_head *pktq)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
u32 SGCount = 0;
int err_ret = 0;
struct sk_buff *pkt;
brcmf_dbg(SDIO, "Enter\n");
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
skb_queue_walk(pktq, pkt) {
uint pkt_len = pkt->len;
pkt_len += 3;
pkt_len &= 0xFFFFFFFC;
err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
addr, pkt, pkt_len);
if (err_ret) {
brcmf_err("%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len, err_ret);
} else {
brcmf_dbg(SDIO, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len);
}
if (!fifo)
addr += pkt_len;
SGCount++;
}
brcmf_dbg(SDIO, "Exit\n");
return err_ret;
}
/*
* This function takes a single DMA-able packet.
*/
int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
uint fix_inc, uint write, uint func, uint addr,
struct sk_buff *pkt)
{
int status;
uint pkt_len;
bool fifo = (fix_inc == SDIOH_DATA_FIX);
brcmf_dbg(SDIO, "Enter\n");
if (pkt == NULL)
return -EINVAL;
pkt_len = pkt->len;
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
pkt_len += 3;
pkt_len &= (uint)~3;
status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
addr, pkt, pkt_len);
if (status) {
brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
write ? "TX" : "RX", pkt, addr, pkt_len, status);
} else {
brcmf_dbg(SDIO, "%s xfr'd %p, addr=0x%05x, len=%d\n",
write ? "TX" : "RX", pkt, addr, pkt_len);
}
return status;
}
static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
{
/* read 24 bits and return valid 17 bit addr */
@ -468,7 +359,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_byte_wait);
init_waitqueue_head(&sdiodev->request_word_wait);
init_waitqueue_head(&sdiodev->request_chain_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");

View File

@ -583,6 +583,7 @@ enum brcmf_netif_stop_reason {
* @bssidx: index of bss associated with this interface.
* @mac_addr: assigned mac address.
* @netif_stop: bitmap indicates reason why netif queues are stopped.
* @netif_stop_lock: spinlock protecting netif_stop updates from multiple sources.
* @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
* @pend_8021x_wait: used for signalling change in count.
*/
@ -598,6 +599,7 @@ struct brcmf_if {
s32 bssidx;
u8 mac_addr[ETH_ALEN];
u8 netif_stop;
spinlock_t netif_stop_lock;
atomic_t pend_8021x_cnt;
wait_queue_head_t pend_8021x_wait;
};

View File

@ -30,6 +30,7 @@
#include "dhd_bus.h"
#include "fwsignal.h"
#include "dhd_dbg.h"
#include "tracepoint.h"
struct brcmf_proto_cdc_dcmd {
__le32 cmd; /* dongle command value */
@ -292,6 +293,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
h->flags2 = 0;
h->data_offset = offset;
BDC_SET_IF_IDX(h, ifidx);
trace_brcmf_bdchdr(pktbuf->data);
}
int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
@ -309,6 +311,7 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
return -EBADE;
}
trace_brcmf_bdchdr(pktbuf->data);
h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
*ifidx = BDC_GET_IF_IDX(h);
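trace_brcmf_bdchdr() refers to a tracepoint declared in the new tracepoint.h included above. For readers unfamiliar with the idiom, a generic sketch of such a declaration (hypothetical event and system names; the real event presumably also dumps the BDC header bytes):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_driver

#if !defined(MY_DRIVER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define MY_DRIVER_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(my_driver_hdr,
	TP_PROTO(void *data),
	TP_ARGS(data),
	TP_STRUCT__entry(
		__field(void *, data)
	),
	TP_fast_assign(
		__entry->data = data;
	),
	TP_printk("hdr=%p", __entry->data)
);

#endif /* MY_DRIVER_TRACE_H */

/* this must stay outside the guard */
#include <trace/define_trace.h>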

View File

@ -156,8 +156,11 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
"txs_suppr_core: %u\n"
"txs_suppr_ps: %u\n"
"txs_tossed: %u\n"
"txs_host_tossed: %u\n"
"bus_flow_block: %u\n"
"fws_flow_block: %u\n"
"send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n"
"fifo_credits_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
"requested_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
fwstats->header_pulls,
fwstats->header_only_pkt,
fwstats->tlv_parse_failed,
@ -176,14 +179,17 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
fwstats->txs_supp_core,
fwstats->txs_supp_ps,
fwstats->txs_tossed,
fwstats->txs_host_tossed,
fwstats->bus_flow_block,
fwstats->fws_flow_block,
fwstats->send_pkts[0], fwstats->send_pkts[1],
fwstats->send_pkts[2], fwstats->send_pkts[3],
fwstats->send_pkts[4],
fwstats->fifo_credits_sent[0],
fwstats->fifo_credits_sent[1],
fwstats->fifo_credits_sent[2],
fwstats->fifo_credits_sent[3],
fwstats->fifo_credits_sent[4]);
fwstats->requested_sent[0],
fwstats->requested_sent[1],
fwstats->requested_sent[2],
fwstats->requested_sent[3],
fwstats->requested_sent[4]);
return simple_read_from_buffer(data, count, ppos, buf, res);
}

View File

@ -141,8 +141,7 @@ struct brcmf_fws_stats {
u32 header_pulls;
u32 pkt2bus;
u32 send_pkts[5];
u32 fifo_credits_sent[5];
u32 fifo_credits_back[6];
u32 requested_sent[5];
u32 generic_error;
u32 mac_update_failed;
u32 mac_ps_update_failed;
@ -158,6 +157,9 @@ struct brcmf_fws_stats {
u32 txs_supp_core;
u32 txs_supp_ps;
u32 txs_tossed;
u32 txs_host_tossed;
u32 bus_flow_block;
u32 fws_flow_block;
};
struct brcmf_pub;

View File

@ -179,7 +179,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
/* Can the device send data? */
if (drvr->bus_if->state != BRCMF_BUS_DATA) {
@ -240,11 +240,15 @@ done:
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state)
{
unsigned long flags;
if (!ifp)
return;
brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
ifp->bssidx, ifp->netif_stop, reason, state);
spin_lock_irqsave(&ifp->netif_stop_lock, flags);
if (state) {
if (!ifp->netif_stop)
netif_stop_queue(ifp->ndev);
@ -254,6 +258,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
if (!ifp->netif_stop)
netif_wake_queue(ifp->ndev);
}
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
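The scheme above lets independent sources (firmware signalling, bus flow control) stop the same netif queue without racing: the queue is stopped when the first reason bit is set and woken only when the last one clears, all under the new spinlock. A condensed sketch with hypothetical names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_if {
	struct net_device *ndev;
	u8 netif_stop;			/* bitmap of stop reasons */
	spinlock_t netif_stop_lock;	/* guards netif_stop */
};

static void my_txflowblock_if(struct my_if *ifp, u8 reason, bool block)
{
	unsigned long flags;

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (block) {
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}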
void brcmf_txflowblock(struct device *dev, bool state)
@ -264,9 +269,14 @@ void brcmf_txflowblock(struct device *dev, bool state)
brcmf_dbg(TRACE, "Enter\n");
if (brcmf_fws_fc_active(drvr->fws)) {
brcmf_fws_bus_blocked(drvr, state);
} else {
for (i = 0; i < BRCMF_MAX_IFS; i++)
brcmf_txflowblock_if(drvr->iflist[i],
BRCMF_NETIF_STOP_REASON_BLOCK_BUS, state);
BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
state);
}
}
void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
@ -280,7 +290,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
u8 ifidx;
int ret;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(DATA, "Enter\n");
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
@ -630,7 +640,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
/* set appropriate operations */
ndev->netdev_ops = &brcmf_netdev_ops_pri;
ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
ndev->hard_header_len += drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
drvr->rxsz = ndev->mtu + ndev->hard_header_len +
@ -779,6 +789,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
ifp->bssidx = bssidx;
init_waitqueue_head(&ifp->pend_8021x_wait);
spin_lock_init(&ifp->netif_stop_lock);
if (mac_addr != NULL)
memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

View File

@ -448,8 +448,6 @@ struct brcmf_sdio {
uint rxblen; /* Allocated length of rxbuf */
u8 *rxctl; /* Aligned pointer into rxbuf */
u8 *rxctl_orig; /* pointer for freeing rxctl */
u8 *databuf; /* Buffer for receiving big glom packet */
u8 *dataptr; /* Aligned pointer into databuf */
uint rxlen; /* Length of valid data in buffer */
spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
@ -473,8 +471,6 @@ struct brcmf_sdio {
s32 idletime; /* Control for activity timeout */
s32 idlecount; /* Activity timeout counter */
s32 idleclock; /* How to set bus driver when idle */
s32 sd_rxchain;
bool use_rxchain; /* If brcmf should use PKT chains */
bool rxflow_mode; /* Rx flow control mode */
bool rxflow; /* Is rx flow control on */
bool alp_only; /* Don't use HT clock (ALP only) */
@ -495,8 +491,7 @@ struct brcmf_sdio {
struct workqueue_struct *brcmf_wq;
struct work_struct datawork;
struct list_head dpc_tsklst;
spinlock_t dpc_tl_lock;
atomic_t dpc_tskcnt;
const struct firmware *firmware;
u32 fw_ptr;
@ -1026,29 +1021,6 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
/* copy a buffer into a pkt buffer chain */
static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
{
uint n, ret = 0;
struct sk_buff *p;
u8 *buf;
buf = bus->dataptr;
/* copy the data */
skb_queue_walk(&bus->glom, p) {
n = min_t(uint, p->len, len);
memcpy(p->data, buf, n);
buf += n;
len -= n;
ret += n;
if (!len)
break;
}
return ret;
}
/* return total length of buffer chain */
static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
{
@ -1202,8 +1174,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
int errcode;
u8 doff, sfdoff;
bool usechain = bus->use_rxchain;
struct brcmf_sdio_read rd_new;
/* If packets, issue read(s) and send up packet chain */
@ -1238,7 +1208,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (sublen % BRCMF_SDALIGN) {
brcmf_err("sublen %d not multiple of %d\n",
sublen, BRCMF_SDALIGN);
usechain = false;
}
totlen += sublen;
@ -1305,27 +1274,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
* packet and copy it into the chain.
*/
sdio_claim_host(bus->sdiodev->func[1]);
if (usechain) {
errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, &bus->glom);
} else if (bus->dataptr) {
errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC,
bus->dataptr, dlen);
sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
if (sublen != dlen) {
brcmf_err("FAILED TO COPY, dlen %d sublen %d\n",
dlen, sublen);
errcode = -1;
}
pnext = NULL;
} else {
brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
dlen);
errcode = -1;
}
sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
@ -2061,23 +2012,6 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
}
}
static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
{
struct list_head *new_hd;
unsigned long flags;
if (in_interrupt())
new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
else
new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (new_hd == NULL)
return;
spin_lock_irqsave(&bus->dpc_tl_lock, flags);
list_add_tail(new_hd, &bus->dpc_tsklst);
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
u8 idx;
@ -2312,7 +2246,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
(!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
data_ok(bus)) || PKT_AVAILABLE()) {
brcmf_sdbrcm_adddpctsk(bus);
atomic_inc(&bus->dpc_tskcnt);
}
/* If we're done for now, turn off clock request. */
@ -2342,7 +2276,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
unsigned long flags;
brcmf_dbg(TRACE, "Enter\n");
@ -2369,26 +2302,21 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
} else {
ret = 0;
}
spin_unlock_bh(&bus->txqlock);
if (pktq_len(&bus->txq) >= TXHI) {
bus->txoff = true;
brcmf_txflowblock(bus->sdiodev->dev, true);
}
spin_unlock_bh(&bus->txqlock);
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
qcount[prec] = pktq_plen(&bus->txq, prec);
#endif
spin_lock_irqsave(&bus->dpc_tl_lock, flags);
if (list_empty(&bus->dpc_tsklst)) {
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
brcmf_sdbrcm_adddpctsk(bus);
if (atomic_read(&bus->dpc_tskcnt) == 0) {
atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
} else {
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
return ret;
@ -2525,7 +2453,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
unsigned long flags;
brcmf_dbg(TRACE, "Enter\n");
@ -2612,18 +2539,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
} while (ret < 0 && retries++ < TXRETRIES);
}
spin_lock_irqsave(&bus->dpc_tl_lock, flags);
if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
list_empty(&bus->dpc_tsklst)) {
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
atomic_read(&bus->dpc_tskcnt) == 0) {
bus->activity = false;
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_dbg(INFO, "idle\n");
brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
sdio_release_host(bus->sdiodev->func[1]);
} else {
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
if (ret)
@@ -3451,7 +3373,7 @@ void brcmf_sdbrcm_isr(void *arg)
if (!bus->intr)
brcmf_err("isr w/o interrupt configured!\n");
brcmf_sdbrcm_adddpctsk(bus);
atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
}
@@ -3460,7 +3382,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
#ifdef DEBUG
struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
#endif /* DEBUG */
unsigned long flags;
brcmf_dbg(TIMER, "Enter\n");
@@ -3476,11 +3397,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->intr ||
(bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
spin_lock_irqsave(&bus->dpc_tl_lock, flags);
if (list_empty(&bus->dpc_tsklst)) {
if (atomic_read(&bus->dpc_tskcnt) == 0) {
u8 devpend;
spin_unlock_irqrestore(&bus->dpc_tl_lock,
flags);
sdio_claim_host(bus->sdiodev->func[1]);
devpend = brcmf_sdio_regrb(bus->sdiodev,
SDIO_CCCR_INTx,
@@ -3489,9 +3408,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
intstatus =
devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
} else {
spin_unlock_irqrestore(&bus->dpc_tl_lock,
flags);
}
/* If there is something, make like the ISR and
@@ -3500,7 +3416,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->sdcnt.pollcnt++;
atomic_set(&bus->ipend, 1);
brcmf_sdbrcm_adddpctsk(bus);
atomic_inc(&bus->dpc_tskcnt);
queue_work(bus->brcmf_wq, &bus->datawork);
}
}
@@ -3545,41 +3461,15 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
return (atomic_read(&bus->ipend) > 0);
}
static bool brcmf_sdbrcm_chipmatch(u16 chipid)
{
if (chipid == BCM43143_CHIP_ID)
return true;
if (chipid == BCM43241_CHIP_ID)
return true;
if (chipid == BCM4329_CHIP_ID)
return true;
if (chipid == BCM4330_CHIP_ID)
return true;
if (chipid == BCM4334_CHIP_ID)
return true;
if (chipid == BCM4335_CHIP_ID)
return true;
return false;
}
static void brcmf_sdio_dataworker(struct work_struct *work)
{
struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
datawork);
struct list_head *cur_hd, *tmp_hd;
unsigned long flags;
spin_lock_irqsave(&bus->dpc_tl_lock, flags);
list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
while (atomic_read(&bus->dpc_tskcnt)) {
brcmf_sdbrcm_dpc(bus);
spin_lock_irqsave(&bus->dpc_tl_lock, flags);
list_del(cur_hd);
kfree(cur_hd);
atomic_dec(&bus->dpc_tskcnt);
}
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
@@ -3589,9 +3479,6 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
kfree(bus->rxbuf);
bus->rxctl = bus->rxbuf = NULL;
bus->rxlen = 0;
kfree(bus->databuf);
bus->databuf = NULL;
}
static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
@@ -3604,29 +3491,10 @@ static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
ALIGNMENT) + BRCMF_SDALIGN;
bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
if (!(bus->rxbuf))
goto fail;
return false;
}
/* Allocate buffer to receive glomed packet */
bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
if (!(bus->databuf)) {
/* release rxbuf which was already allocated above */
if (!bus->rxblen)
kfree(bus->rxbuf);
goto fail;
}
/* Align the buffer */
if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
((unsigned long)bus->databuf % BRCMF_SDALIGN));
else
bus->dataptr = bus->databuf;
return true;
fail:
return false;
}
static bool
@@ -3667,11 +3535,6 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
goto fail;
}
if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip);
goto fail;
}
if (brcmf_sdbrcm_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
goto fail;
@@ -3770,10 +3633,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
/* bus module does not support packet chaining */
bus->use_rxchain = false;
bus->sd_rxchain = false;
/* SR state */
bus->sleeping = false;
bus->sr_enabled = false;
@@ -3927,8 +3786,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->watchdog_tsk = NULL;
}
/* Initialize DPC thread */
INIT_LIST_HEAD(&bus->dpc_tsklst);
spin_lock_init(&bus->dpc_tl_lock);
atomic_set(&bus->dpc_tskcnt, 0);
/* Assign bus interface callback */
bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
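Taken together, the hunks above replace the dpc_tsklst list and its dpc_tl_lock spinlock with a single atomic counter, so the ISR and TX paths no longer kzalloc a list node (an allocation that could fail under GFP_ATOMIC) just to request a DPC run. A minimal sketch of the resulting pattern follows; the my_* names are illustrative stand-ins, not the driver's real symbols.

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_bus {
	atomic_t dpc_tskcnt;		/* pending DPC requests */
	struct workqueue_struct *wq;
	struct work_struct datawork;
};

static void my_dpc(struct my_bus *bus)
{
	/* service interrupts, RX and TX for one pass */
}

/* Producer side (ISR or TX path): no allocation, no spinlock. */
static void my_schedule_dpc(struct my_bus *bus)
{
	atomic_inc(&bus->dpc_tskcnt);
	queue_work(bus->wq, &bus->datawork);
}

/* Consumer side: drain every request that accumulated meanwhile. */
static void my_dataworker(struct work_struct *work)
{
	struct my_bus *bus = container_of(work, struct my_bus, datawork);

	while (atomic_read(&bus->dpc_tskcnt)) {
		my_dpc(bus);
		atomic_dec(&bus->dpc_tskcnt);
	}
}

Because queue_work() re-arms a work item even while its callback is running, a request that arrives after the worker's final atomic_read() still triggers a fresh worker invocation, so no wakeup is lost.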


@@ -101,7 +101,8 @@ struct brcmf_event;
BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75)
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),

File diff suppressed because it is too large


@@ -29,5 +29,6 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp);
void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
#endif /* FWSIGNAL_H_ */


@@ -170,7 +170,6 @@ struct brcmf_sdio_dev {
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
wait_queue_head_t request_chain_wait;
wait_queue_head_t request_buffer_wait;
struct device *dev;
struct brcmf_bus *bus_if;
@@ -272,16 +271,6 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
uint rw, uint fnc, uint addr,
u32 *word, uint nbyte);
/* read or write any buffer using cmd53 */
extern int
brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
uint fix_inc, uint rw, uint fnc_num, u32 addr,
struct sk_buff *pkt);
extern int
brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
uint write, uint func, uint addr,
struct sk_buff_head *pktq);
/* Watchdog timer interface for pm ops */
extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
bool enable);
@@ -291,4 +280,8 @@ extern void brcmf_sdbrcm_disconnect(void *ptr);
extern void brcmf_sdbrcm_isr(void *arg);
extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
wait_queue_head_t *wq);
extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
#endif /* _BRCM_SDH_H_ */


@@ -87,6 +87,27 @@ TRACE_EVENT(brcmf_hexdump,
TP_printk("hexdump [length=%lu]", __entry->len)
);
TRACE_EVENT(brcmf_bdchdr,
TP_PROTO(void *data),
TP_ARGS(data),
TP_STRUCT__entry(
__field(u8, flags)
__field(u8, prio)
__field(u8, flags2)
__field(u32, siglen)
__dynamic_array(u8, signal, *((u8 *)data + 3) * 4)
),
TP_fast_assign(
__entry->flags = *(u8 *)data;
__entry->prio = *((u8 *)data + 1);
__entry->flags2 = *((u8 *)data + 2);
__entry->siglen = *((u8 *)data + 3) * 4;
memcpy(__get_dynamic_array(signal),
(u8 *)data + 4, __entry->siglen);
),
TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
);
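The new trace event also documents the header layout it decodes: byte 0 flags, byte 1 priority, byte 2 extended flags, byte 3 the length of the trailing signalling data in 4-byte words. A hedged sketch of that decode in plain C; my_bdc_hdr and my_bdc_siglen are illustrative names, not driver symbols.

#include <linux/types.h>

/* Illustrative mirror of the four header bytes the event records. */
struct my_bdc_hdr {
	u8 flags;
	u8 prio;
	u8 flags2;
	u8 siglen_words;	/* signalling data length, in 4-byte words */
};

static u32 my_bdc_siglen(const void *data)
{
	const struct my_bdc_hdr *h = data;

	/* same computation as *((u8 *)data + 3) * 4 in the event above */
	return h->siglen_words * 4;
}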
#ifdef CONFIG_BRCM_TRACING
#undef TRACE_INCLUDE_PATH


@@ -82,6 +82,7 @@ struct brcmf_usbdev_info {
int tx_high_watermark;
int tx_freecount;
bool tx_flowblock;
spinlock_t tx_flowblock_lock;
struct brcmf_usbreq *tx_reqs;
struct brcmf_usbreq *rx_reqs;
@@ -411,6 +412,7 @@ static void brcmf_usb_tx_complete(struct urb *urb)
{
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
unsigned long flags;
brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
req->skb);
@@ -419,11 +421,13 @@ static void brcmf_usb_tx_complete(struct urb *urb)
brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
req->skb = NULL;
brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
devinfo->tx_flowblock) {
brcmf_txflowblock(devinfo->dev, false);
devinfo->tx_flowblock = false;
}
spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
}
static void brcmf_usb_rx_complete(struct urb *urb)
@@ -568,6 +572,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
struct brcmf_usbreq *req;
int ret;
unsigned long flags;
brcmf_dbg(USB, "Enter, skb=%p\n", skb);
if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
@@ -599,11 +604,13 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
goto fail;
}
spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
!devinfo->tx_flowblock) {
brcmf_txflowblock(dev, true);
devinfo->tx_flowblock = true;
}
spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
return 0;
fail:
@@ -1164,6 +1171,7 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
/* Initialize the spinlocks */
spin_lock_init(&devinfo->qlock);
spin_lock_init(&devinfo->tx_flowblock_lock);
INIT_LIST_HEAD(&devinfo->rx_freeq);
INIT_LIST_HEAD(&devinfo->rx_postq);
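These hunks close a race around the TX flow-control flag: the watermark test and the tx_flowblock flip previously ran unlocked in both brcmf_usb_tx() and brcmf_usb_tx_complete(), so the two paths could each observe a stale flag and signal flow control twice. A condensed sketch of the locked check-and-flip; my_dev_info and my_txflowblock are hypothetical stand-ins mirroring the fields involved.

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_dev_info {
	spinlock_t tx_flowblock_lock;
	int tx_freecount;
	int tx_low_watermark;
	int tx_high_watermark;
	bool tx_flowblock;
};

static void my_txflowblock(struct my_dev_info *di, bool block)
{
	/* tell the upper layer to pause or resume TX */
}

static void my_usb_flowctl(struct my_dev_info *di)
{
	unsigned long flags;

	spin_lock_irqsave(&di->tx_flowblock_lock, flags);
	if (di->tx_freecount < di->tx_low_watermark && !di->tx_flowblock) {
		my_txflowblock(di, true);	/* throttle */
		di->tx_flowblock = true;
	} else if (di->tx_freecount > di->tx_high_watermark &&
		   di->tx_flowblock) {
		my_txflowblock(di, false);	/* resume */
		di->tx_flowblock = false;
	}
	spin_unlock_irqrestore(&di->tx_flowblock_lock, flags);
}

The separate low and high watermarks give hysteresis, so a free count hovering around a single threshold does not toggle flow control on every completion.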


@@ -3982,6 +3982,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct brcmf_fil_af_params_le *af_params;
bool ack;
s32 chan_nr;
u32 freq;
brcmf_dbg(TRACE, "Enter\n");
@@ -3994,6 +3995,8 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
return -EPERM;
}
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
/* Right now the only reason to get a probe response */
/* is for p2p listen response or for p2p GO from */
@@ -4009,7 +4012,6 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
ie_offset = DOT11_MGMT_HDR_LEN +
DOT11_BCN_PRB_FIXED_LEN;
ie_len = len - ie_offset;
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
err = brcmf_vif_set_mgmt_ie(vif,
@@ -4033,16 +4035,22 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
/* Add the length, excluding the 802.11 header */
action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
/* Add the channel */
chan_nr = ieee80211_frequency_to_channel(chan->center_freq);
/* Add the channel. Use the one specified as a parameter, if any,
* or the current one (obtained from the firmware) otherwise.
*/
if (chan)
freq = chan->center_freq;
else
brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
&freq);
chan_nr = ieee80211_frequency_to_channel(freq);
af_params->channel = cpu_to_le32(chan_nr);
memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
le16_to_cpu(action_frame->len));
brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
*cookie, le16_to_cpu(action_frame->len),
chan->center_freq);
*cookie, le16_to_cpu(action_frame->len), freq);
ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
af_params);


@@ -245,6 +245,14 @@ module_param(cw1200_ba_tx_tids, int, 0644);
MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs");
MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs");
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support cw1200_wowlan_support = {
/* Support only for limited wowlan functionalities */
.flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
};
#endif
static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
const bool have_5ghz)
{
@@ -289,10 +297,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
BIT(NL80211_IFTYPE_P2P_GO);
#ifdef CONFIG_PM
/* Support only for limited wowlan functionalities */
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY |
WIPHY_WOWLAN_DISCONNECT;
hw->wiphy->wowlan.n_patterns = 0;
hw->wiphy->wowlan = &cw1200_wowlan_support;
#endif
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;


@@ -3727,7 +3727,8 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* 5. Setup HW Constants
* ********************/
/* Device-specific setup */
if (il3945_hw_set_hw_params(il)) {
err = il3945_hw_set_hw_params(il);
if (err) {
IL_ERR("failed to set hw settings\n");
goto out_eeprom_free;
}


@@ -331,6 +331,19 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
return;
}
/*
* The firmware will not transmit a frame on a passive channel if it has
* not yet received a valid frame on that channel. When this error happens
* we have to wait until the firmware unblocks itself, i.e. when we
* receive a beacon or some other frame. We unblock the queues in
* il3945_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
*/
if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
il->iw_mode == NL80211_IFTYPE_STATION) {
il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
D_INFO("Stopped queues - RX waiting on passive channel\n");
}
txq->time_stamp = jiffies;
info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
ieee80211_tx_info_clear_status(info);
@@ -488,6 +501,11 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
return;
}
if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
D_INFO("Woke queues - frame received on passive channel\n");
}
skb = dev_alloc_skb(128);
if (!skb) {
IL_ERR("dev_alloc_skb failed\n");


@@ -588,6 +588,11 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
return;
}
if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
D_INFO("Woke queues - frame received on passive channel\n");
}
/* In case of HW accelerated crypto and bad decryption, drop */
if (!il->cfg->mod_params->sw_crypto &&
il_set_decrypted_flag(il, hdr, ampdu_status, stats))
@@ -2806,6 +2811,19 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
return;
}
/*
* The firmware will not transmit a frame on a passive channel if it has
* not yet received a valid frame on that channel. When this error happens
* we have to wait until the firmware unblocks itself, i.e. when we
* receive a beacon or some other frame. We unblock the queues in
* il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
*/
if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
il->iw_mode == NL80211_IFTYPE_STATION) {
il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
D_INFO("Stopped queues - RX waiting on passive channel\n");
}
spin_lock_irqsave(&il->sta_lock, flags);
if (txq->sched_retry) {
const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
@@ -5741,7 +5759,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->flags =
IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
if (il->cfg->sku & IL_SKU_N)
hw->flags |=
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |


@@ -5306,6 +5306,17 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_BSSID) {
D_MAC80211("BSSID %pM\n", bss_conf->bssid);
/*
* On a passive channel we wait with blocked queues to see if
* there is traffic on that channel. If no frame is received
* (which is very unlikely, since the scan already detected an AP
* on that channel, but theoretically possible), the mac80211
* association procedure will time out and mac80211 will call us
* with a NULL bssid. We have to unblock the queues in that case.
*/
if (is_zero_ether_addr(bss_conf->bssid))
il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
/*
* If there is currently a HW scan going on in the background,
* then we need to cancel it, otherwise sometimes we are not


@@ -1299,6 +1299,8 @@ struct il_priv {
/* queue refcounts */
#define IL_MAX_HW_QUEUES 32
unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
#define IL_STOP_REASON_PASSIVE 0
unsigned long stop_reason;
/* for each AC */
atomic_t queue_stop_count[4];
@@ -2256,6 +2258,19 @@ il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
txq->swq_id = (hwq << 2) | ac;
}
static inline void
_il_wake_queue(struct il_priv *il, u8 ac)
{
if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
ieee80211_wake_queue(il->hw, ac);
}
static inline void
_il_stop_queue(struct il_priv *il, u8 ac)
{
if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
ieee80211_stop_queue(il->hw, ac);
}
static inline void
il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
{
@@ -2264,8 +2279,7 @@ il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
u8 hwq = (queue >> 2) & 0x1f;
if (test_and_clear_bit(hwq, il->queue_stopped))
if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
ieee80211_wake_queue(il->hw, ac);
_il_wake_queue(il, ac);
}
static inline void
@@ -2276,8 +2290,27 @@ il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
u8 hwq = (queue >> 2) & 0x1f;
if (!test_and_set_bit(hwq, il->queue_stopped))
if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
ieee80211_stop_queue(il->hw, ac);
_il_stop_queue(il, ac);
}
static inline void
il_wake_queues_by_reason(struct il_priv *il, int reason)
{
u8 ac;
if (test_and_clear_bit(reason, &il->stop_reason))
for (ac = 0; ac < 4; ac++)
_il_wake_queue(il, ac);
}
static inline void
il_stop_queues_by_reason(struct il_priv *il, int reason)
{
u8 ac;
if (!test_and_set_bit(reason, &il->stop_reason))
for (ac = 0; ac < 4; ac++)
_il_stop_queue(il, ac);
}
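The helpers above make queue stopping reference-counted per access category (AC) and tracked per reason: stop_reason is a bitmask, so a given reason (here only IL_STOP_REASON_PASSIVE) stops the four ACs at most once, while queue_stop_count keeps a queue down as long as any holder remains. An illustrative sequence under assumed state, not driver code:

/* txq maps to access category ac; count is il->queue_stop_count[ac]. */
il_stop_queue(il, txq);				/* count: 0 -> 1, stopped */
il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);	/* count: 1 -> 2 */
il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);	/* count: 2 -> 1, still stopped */
il_wake_queue(il, txq);				/* count: 1 -> 0, queue wakes */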
#ifdef ieee80211_stop_queue


@@ -7,14 +7,16 @@ iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
iwlwifi-objs += $(iwlwifi-m)
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/


@@ -915,6 +915,9 @@ struct iwl_priv {
__le64 replay_ctr;
__le16 last_seq_ctl;
bool have_rekey_data;
#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan_support;
#endif
/* device_pointers: pointers to ucode event tables */
struct {

Some files were not shown because too many files have changed in this diff