Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6

commit ea6a634ef7
Author: David S. Miller
Date:   2009-09-09 17:33:45 -07:00

52 changed files with 1677 additions and 543 deletions

Documentation/feature-removal-schedule.txt

@ -6,6 +6,35 @@ be removed from this file.
---------------------------
What: PRISM54
When: 2.6.34
Why: prism54 FullMAC PCI / Cardbus devices used to be supported only by the
prism54 wireless driver. After Intersil stopped selling these
devices in favor of the newer, more flexible SoftMAC devices, a
SoftMAC device driver was required, and prism54 did not support
them. The p54pci driver now exists and has been in the kernel for
a while. It supports both SoftMAC and FullMAC devices. The main
difference between these devices is the amount of memory that can
be used for the firmware: the SoftMAC devices support a smaller
amount, so the SoftMAC firmware also fits into FullMAC devices'
memory. The p54 driver family supports not only PCI / Cardbus but
also USB and SPI. Since p54pci supports every device prism54
supports, the two drivers conflict. I'm not quite sure how
distributions are handling this conflict right now. prism54 was
kept around due to claims that users may experience issues when
using the SoftMAC driver. Time has passed and users have not
reported such issues. If you use prism54 and for whatever reason
you cannot use p54pci, please let us know!
E-mail us at: linux-wireless@vger.kernel.org
For more information see the p54 wiki page:
http://wireless.kernel.org/en/users/Drivers/p54
Who: Luis R. Rodriguez <lrodriguez@atheros.com>
---------------------------
What: IRQF_SAMPLE_RANDOM
Check: IRQF_SAMPLE_RANDOM
When: July 2009

MAINTAINERS

@ -876,6 +876,7 @@ M: "Luis R. Rodriguez" <lrodriguez@atheros.com>
M: Bob Copeland <me@bobcopeland.com>
L: linux-wireless@vger.kernel.org
L: ath5k-devel@lists.ath5k.org
W: http://wireless.kernel.org/en/users/Drivers/ath5k
S: Maintained
F: drivers/net/wireless/ath/ath5k/
@ -887,6 +888,7 @@ M: Vasanthakumar Thiagarajan <vasanth@atheros.com>
M: Senthil Balasubramanian <senthilkumar@atheros.com>
L: linux-wireless@vger.kernel.org
L: ath9k-devel@lists.ath9k.org
W: http://wireless.kernel.org/en/users/Drivers/ath9k
S: Supported
F: drivers/net/wireless/ath/ath9k/

drivers/net/b44.c

@ -1303,10 +1303,13 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
& MDIO_CTRL_MAXF_MASK)));
break;
case SSB_BUSTYPE_PCI:
case SSB_BUSTYPE_PCMCIA:
bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
(0x0d & MDIO_CTRL_MAXF_MASK)));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
WARN_ON(1); /* A device with this bus does not exist. */
break;
}
br32(bp, B44_MDIO_CTRL);
@ -1764,10 +1767,13 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
case SSB_BUSTYPE_PCI:
strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SSB:
strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
WARN_ON(1); /* A device with this bus does not exist. */
break;
}
}

drivers/net/wireless/Kconfig

@ -275,51 +275,26 @@ config PCMCIA_WL3501
micro support for ethtool.
config PRISM54
tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus'
tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)'
depends on PCI && EXPERIMENTAL && WLAN_80211
select WIRELESS_EXT
select FW_LOADER
---help---
Enable PCI and Cardbus support for the following chipset based cards:
This enables support for FullMAC PCI/Cardbus prism54 devices. This
driver is now deprecated in favor of the SoftMAC driver, p54pci,
which supports FullMAC PCI/Cardbus devices as well. For details on
the scheduled removal of this driver from the kernel see the
feature removal schedule:
ISL3880 - Prism GT 802.11 b/g
ISL3877 - Prism Indigo 802.11 a
ISL3890 - Prism Duette 802.11 a/b/g
For a complete list of supported cards visit <http://prism54.org>.
Here is the latest confirmed list of supported cards:
Documentation/feature-removal-schedule.txt
3com OfficeConnect 11g Cardbus Card aka 3CRWE154G72 (version 1)
Allnet ALL0271 PCI Card
Compex WL54G Cardbus Card
Corega CG-WLCB54GT Cardbus Card
D-Link Air Plus Xtreme G A1 Cardbus Card aka DWL-g650
I-O Data WN-G54/CB Cardbus Card
Kobishi XG-300 aka Z-Com Cardbus Card
Netgear WG511 Cardbus Card
Ovislink WL-5400PCI PCI Card
Peabird WLG-PCI PCI Card
Sitecom WL-100i Cardbus Card
Sitecom WL-110i PCI Card
SMC2802W - EZ Connect g 2.4GHz 54 Mbps Wireless PCI Card
SMC2835W - EZ Connect g 2.4GHz 54 Mbps Wireless Cardbus Card
SMC2835W-V2 - EZ Connect g 2.4GHz 54 Mbps Wireless Cardbus Card
Z-Com XG-900 PCI Card
Zyxel G-100 Cardbus Card
For more information refer to the p54 wiki:
If you enable this you will need a firmware file as well.
You will need to copy this to /usr/lib/hotplug/firmware/isl3890.
You can get this non-GPL'd firmware file from the Prism54 project page:
<http://prism54.org>
You will also need the /etc/hotplug/firmware.agent script from
a current hotplug package.
http://wireless.kernel.org/en/users/Drivers/p54
Note: You need a motherboard with DMA support to use any of these cards
If you want to compile the driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>.
The module will be called prism54.
Note: You need a motherboard with DMA support to use any of these cards
When built as module you get the module prism54
config USB_ZD1201
tristate "USB ZD1201 based Wireless device support"

drivers/net/wireless/ath/ar9170/phy.c

@ -396,6 +396,136 @@ static struct ar9170_phy_init ar5416_phy_init[] = {
{ 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
};
/*
* look up a certain register in ar5416_phy_init[] and return the init. value
* for the band and bandwidth given. Return 0 if register address not found.
*/
static u32 ar9170_get_default_phy_reg_val(u32 reg, bool is_2ghz, bool is_40mhz)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
if (ar5416_phy_init[i].reg != reg)
continue;
if (is_2ghz) {
if (is_40mhz)
return ar5416_phy_init[i]._2ghz_40;
else
return ar5416_phy_init[i]._2ghz_20;
} else {
if (is_40mhz)
return ar5416_phy_init[i]._5ghz_40;
else
return ar5416_phy_init[i]._5ghz_20;
}
}
return 0;
}
/*
* initialize some phy regs from eeprom values in modal_header[]
* according to band and bandwidth
*/
static int ar9170_init_phy_from_eeprom(struct ar9170 *ar,
bool is_2ghz, bool is_40mhz)
{
static const u8 xpd2pd[16] = {
0x2, 0x2, 0x2, 0x1, 0x2, 0x2, 0x6, 0x2,
0x2, 0x3, 0x7, 0x2, 0xB, 0x2, 0x2, 0x2
};
u32 defval, newval;
/* pointer to the modal_header acc. to band */
struct ar9170_eeprom_modal *m = &ar->eeprom.modal_header[is_2ghz];
ar9170_regwrite_begin(ar);
/* ant common control (index 0) */
newval = le32_to_cpu(m->antCtrlCommon);
ar9170_regwrite(0x1c5964, newval);
/* ant control chain 0 (index 1) */
newval = le32_to_cpu(m->antCtrlChain[0]);
ar9170_regwrite(0x1c5960, newval);
/* ant control chain 2 (index 2) */
newval = le32_to_cpu(m->antCtrlChain[1]);
ar9170_regwrite(0x1c7960, newval);
/* SwSettle (index 3) */
if (!is_40mhz) {
defval = ar9170_get_default_phy_reg_val(0x1c5844,
is_2ghz, is_40mhz);
newval = (defval & ~0x3f80) |
((m->switchSettling & 0x7f) << 7);
ar9170_regwrite(0x1c5844, newval);
}
/* adcDesired, pdaDesired (index 4) */
defval = ar9170_get_default_phy_reg_val(0x1c5850, is_2ghz, is_40mhz);
newval = (defval & ~0xffff) | ((u8)m->pgaDesiredSize << 8) |
((u8)m->adcDesiredSize);
ar9170_regwrite(0x1c5850, newval);
/* TxEndToXpaOff, TxFrameToXpaOn (index 5) */
defval = ar9170_get_default_phy_reg_val(0x1c5834, is_2ghz, is_40mhz);
newval = (m->txEndToXpaOff << 24) | (m->txEndToXpaOff << 16) |
(m->txFrameToXpaOn << 8) | m->txFrameToXpaOn;
ar9170_regwrite(0x1c5834, newval);
/* TxEndToRxOn (index 6) */
defval = ar9170_get_default_phy_reg_val(0x1c5828, is_2ghz, is_40mhz);
newval = (defval & ~0xff0000) | (m->txEndToRxOn << 16);
ar9170_regwrite(0x1c5828, newval);
/* thresh62 (index 7) */
defval = ar9170_get_default_phy_reg_val(0x1c8864, is_2ghz, is_40mhz);
newval = (defval & ~0x7f000) | (m->thresh62 << 12);
ar9170_regwrite(0x1c8864, newval);
/* tx/rx attenuation chain 0 (index 8) */
defval = ar9170_get_default_phy_reg_val(0x1c5848, is_2ghz, is_40mhz);
newval = (defval & ~0x3f000) | ((m->txRxAttenCh[0] & 0x3f) << 12);
ar9170_regwrite(0x1c5848, newval);
/* tx/rx attenuation chain 2 (index 9) */
defval = ar9170_get_default_phy_reg_val(0x1c7848, is_2ghz, is_40mhz);
newval = (defval & ~0x3f000) | ((m->txRxAttenCh[1] & 0x3f) << 12);
ar9170_regwrite(0x1c7848, newval);
/* tx/rx margin chain 0 (index 10) */
defval = ar9170_get_default_phy_reg_val(0x1c620c, is_2ghz, is_40mhz);
newval = (defval & ~0xfc0000) | ((m->rxTxMarginCh[0] & 0x3f) << 18);
/* bsw margin chain 0 for 5GHz only */
if (!is_2ghz)
newval = (newval & ~0x3c00) | ((m->bswMargin[0] & 0xf) << 10);
ar9170_regwrite(0x1c620c, newval);
/* tx/rx margin chain 2 (index 11) */
defval = ar9170_get_default_phy_reg_val(0x1c820c, is_2ghz, is_40mhz);
newval = (defval & ~0xfc0000) | ((m->rxTxMarginCh[1] & 0x3f) << 18);
ar9170_regwrite(0x1c820c, newval);
/* iqCall, iqCallq chain 0 (index 12) */
defval = ar9170_get_default_phy_reg_val(0x1c5920, is_2ghz, is_40mhz);
newval = (defval & ~0x7ff) | (((u8)m->iqCalICh[0] & 0x3f) << 5) |
((u8)m->iqCalQCh[0] & 0x1f);
ar9170_regwrite(0x1c5920, newval);
/* iqCall, iqCallq chain 2 (index 13) */
defval = ar9170_get_default_phy_reg_val(0x1c7920, is_2ghz, is_40mhz);
newval = (defval & ~0x7ff) | (((u8)m->iqCalICh[1] & 0x3f) << 5) |
((u8)m->iqCalQCh[1] & 0x1f);
ar9170_regwrite(0x1c7920, newval);
/* xpd gain mask (index 14) */
defval = ar9170_get_default_phy_reg_val(0x1c6258, is_2ghz, is_40mhz);
newval = (defval & ~0xf0000) | (xpd2pd[m->xpdGain & 0xf] << 16);
ar9170_regwrite(0x1c6258, newval);
ar9170_regwrite_finish();
return ar9170_regwrite_result();
}
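Every register update in ar9170_init_phy_from_eeprom() above follows the same read-modify-write shape: fetch the per-band/bandwidth default for the register, clear one bit field, and merge in the EEPROM-derived value shifted into that field. As a minimal sketch, the pattern could be factored into a hypothetical helper (illustration only, not part of the driver; it reuses ar9170_get_default_phy_reg_val() defined above):

/* Illustration only: keep the per-band default of a PHY register and
 * replace a single bit field with an EEPROM-derived value. */
static u32 ar9170_merge_eeprom_field(u32 reg, bool is_2ghz, bool is_40mhz,
				     u32 mask, unsigned int shift, u32 val)
{
	u32 defval = ar9170_get_default_phy_reg_val(reg, is_2ghz, is_40mhz);

	return (defval & ~mask) | ((val << shift) & mask);
}

The SwSettle update above, for instance, corresponds to ar9170_merge_eeprom_field(0x1c5844, is_2ghz, is_40mhz, 0x3f80, 7, m->switchSettling).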
int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
{
int i, err;
@ -426,7 +556,9 @@ int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
if (err)
return err;
/* XXX: use EEPROM data here! */
err = ar9170_init_phy_from_eeprom(ar, is_2ghz, is_40mhz);
if (err)
return err;
err = ar9170_init_power_cal(ar);
if (err)
@ -987,6 +1119,282 @@ static u8 ar9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
#undef SHIFT
}
static u8 ar9170_interpolate_val(u8 x, u8 *x_array, u8 *y_array)
{
int i;
for (i = 0; i < 3; i++)
if (x <= x_array[i + 1])
break;
return ar9170_interpolate_u8(x,
x_array[i],
y_array[i],
x_array[i + 1],
y_array[i + 1]);
}
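ar9170_interpolate_u8() itself is only partially visible in this hunk (the trailing #undef SHIFT above suggests it works in fixed point for rounding). Ignoring that rounding, the plain linear interpolation it performs between the bracketing calibration points (x1, y1) and (x2, y2) looks roughly like this sketch:

/* Plain linear interpolation sketch; the in-tree helper additionally
 * works in fixed point (SHIFT) to round the result. */
static u8 interpolate_u8_sketch(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
{
	if (x2 == x1)	/* degenerate pier pair, nothing to interpolate */
		return y1;

	return y1 + (x - x1) * (y2 - y1) / (x2 - x1);
}

ar9170_interpolate_val() above merely selects which pair of the four calibration points brackets x before delegating to the real helper.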
static int ar9170_set_freq_cal_data(struct ar9170 *ar,
struct ieee80211_channel *channel)
{
u8 *cal_freq_pier;
u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
int chain, idx, i;
u32 phy_data = 0;
u8 f, tmp;
switch (channel->band) {
case IEEE80211_BAND_2GHZ:
f = channel->center_freq - 2300;
cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
i = AR5416_NUM_2G_CAL_PIERS - 1;
break;
case IEEE80211_BAND_5GHZ:
f = (channel->center_freq - 4800) / 5;
cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
i = AR5416_NUM_5G_CAL_PIERS - 1;
break;
default:
return -EINVAL;
break;
}
for (; i >= 0; i--) {
if (cal_freq_pier[i] != 0xff)
break;
}
if (i < 0)
return -EINVAL;
idx = ar9170_find_freq_idx(i, cal_freq_pier, f);
ar9170_regwrite_begin(ar);
for (chain = 0; chain < AR5416_MAX_CHAINS; chain++) {
for (i = 0; i < AR5416_PD_GAIN_ICEPTS; i++) {
struct ar9170_calibration_data_per_freq *cal_pier_data;
int j;
switch (channel->band) {
case IEEE80211_BAND_2GHZ:
cal_pier_data = &ar->eeprom.
cal_pier_data_2G[chain][idx];
break;
case IEEE80211_BAND_5GHZ:
cal_pier_data = &ar->eeprom.
cal_pier_data_5G[chain][idx];
break;
default:
return -EINVAL;
}
for (j = 0; j < 2; j++) {
vpds[j][i] = ar9170_interpolate_u8(f,
cal_freq_pier[idx],
cal_pier_data->vpd_pdg[j][i],
cal_freq_pier[idx + 1],
cal_pier_data[1].vpd_pdg[j][i]);
pwrs[j][i] = ar9170_interpolate_u8(f,
cal_freq_pier[idx],
cal_pier_data->pwr_pdg[j][i],
cal_freq_pier[idx + 1],
cal_pier_data[1].pwr_pdg[j][i]) / 2;
}
}
/* phy_data accumulates four 8-bit entries per register write below */
for (i = 0; i < 76; i++) {
if (i < 25) {
tmp = ar9170_interpolate_val(i, &pwrs[0][0],
&vpds[0][0]);
} else {
tmp = ar9170_interpolate_val(i - 12,
&pwrs[1][0],
&vpds[1][0]);
}
phy_data |= tmp << ((i & 3) << 3);
if ((i & 3) == 3) {
ar9170_regwrite(0x1c6280 + chain * 0x1000 +
(i & ~3), phy_data);
phy_data = 0;
}
}
for (i = 19; i < 32; i++)
ar9170_regwrite(0x1c6280 + chain * 0x1000 + (i << 2),
0x0);
}
ar9170_regwrite_finish();
return ar9170_regwrite_result();
}
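Both ar9170_set_freq_cal_data() above and ar9170_get_max_edge_power() below reduce a channel's center frequency to the single-byte encoding used by the EEPROM calibration piers and CTL band edges. A worked sketch of that mapping (the driver open-codes it; this helper is illustrative only):

/* Illustration only: single-byte channel encoding used by the
 * calibration piers and the CTL band edges in this file. */
static u8 ar9170_freq_to_eeprom_chan(u32 freq_mhz)
{
	if (freq_mhz < 3000)		/* 2.4 GHz band */
		return freq_mhz - 2300;	/* e.g. 2437 MHz (ch 6)  -> 137 */

	return (freq_mhz - 4800) / 5;	/* e.g. 5180 MHz (ch 36) -> 76 */
}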
static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
struct ar9170_calctl_edges edges[],
u32 freq)
{
/* TODO: move somewhere else */
#define AR5416_MAX_RATE_POWER 63
int i;
u8 rc = AR5416_MAX_RATE_POWER;
u8 f;
if (freq < 3000)
f = freq - 2300;
else
f = (freq - 4800) / 5;
for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
if (edges[i].channel == 0xff)
break;
if (f == edges[i].channel) {
/* exact freq match */
rc = edges[i].power_flags & ~AR9170_CALCTL_EDGE_FLAGS;
break;
}
if (i > 0 && f < edges[i].channel) {
if (f > edges[i-1].channel &&
edges[i-1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
/* lower channel has the inband flag set */
rc = edges[i-1].power_flags &
~AR9170_CALCTL_EDGE_FLAGS;
}
break;
}
}
if (i == AR5416_NUM_BAND_EDGES) {
if (f > edges[i-1].channel &&
edges[i-1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
/* lower channel has the inband flag set */
rc = edges[i-1].power_flags &
~AR9170_CALCTL_EDGE_FLAGS;
}
}
return rc;
}
/* calculate the conformance test limits and apply them to ar->power*
* (derived from otus hal/hpmain.c, line 3706 ff.)
*/
static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
{
u8 ctl_grp; /* CTL group */
u8 ctl_idx; /* CTL index */
int i, j;
struct ctl_modes {
u8 ctl_mode;
u8 max_power;
u8 *pwr_cal_data;
int pwr_cal_len;
} *modes;
/* order is relevant in the mode_list_*: we fall back to the
* lower indices if any mode is missed in the EEPROM.
*/
struct ctl_modes mode_list_2ghz[] = {
{ CTL_11B, 0, ar->power_2G_cck, 4 },
{ CTL_11G, 0, ar->power_2G_ofdm, 4 },
{ CTL_2GHT20, 0, ar->power_2G_ht20, 8 },
{ CTL_2GHT40, 0, ar->power_2G_ht40, 8 },
};
struct ctl_modes mode_list_5ghz[] = {
{ CTL_11A, 0, ar->power_5G_leg, 4 },
{ CTL_5GHT20, 0, ar->power_5G_ht20, 8 },
{ CTL_5GHT40, 0, ar->power_5G_ht40, 8 },
};
int nr_modes;
#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])
/* TODO: investigate the differences between OTUS'
* hpreg.c::zfHpGetRegulatoryDomain() and
* ath/regd.c::ath_regd_get_band_ctl() -
* e.g. for FCC3_WORLD the OTUS procedure
* always returns CTL_FCC, while the one in ath/ delivers
* CTL_ETSI for 2GHz and CTL_FCC for 5GHz.
*/
ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory,
ar->hw->conf.channel->band);
/* CTL group not found - either invalid band (NO_CTL) or world-wide roaming */
if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL)
ctl_grp = CTL_FCC;
if (ctl_grp != CTL_FCC)
/* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
return;
if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
modes = mode_list_2ghz;
nr_modes = ARRAY_SIZE(mode_list_2ghz);
} else {
modes = mode_list_5ghz;
nr_modes = ARRAY_SIZE(mode_list_5ghz);
}
for (i = 0; i < nr_modes; i++) {
u8 c = ctl_grp | modes[i].ctl_mode;
for (ctl_idx = 0; ctl_idx < AR5416_NUM_CTLS; ctl_idx++)
if (c == ar->eeprom.ctl_index[ctl_idx])
break;
if (ctl_idx < AR5416_NUM_CTLS) {
int f_off = 0;
/* adjust freq for 40MHz */
if (modes[i].ctl_mode == CTL_2GHT40 ||
modes[i].ctl_mode == CTL_5GHT40) {
if (bw == AR9170_BW_40_BELOW)
f_off = -10;
else
f_off = 10;
}
modes[i].max_power =
ar9170_get_max_edge_power(ar, EDGES(ctl_idx, 1),
freq+f_off);
/* TODO: check if the regulatory max. power is
* controlled by cfg80211 for DFS
* (hpmain applies it to max_power itself for DFS freq)
*/
} else {
/* Workaround in otus driver, hpmain.c, line 3906:
* if no data for 5GHT20 are found, take the
* legacy 5G value.
* We extend this here to fall back from any other *HT or
* 11G, too.
*/
int k = i;
modes[i].max_power = AR5416_MAX_RATE_POWER;
while (k-- > 0) {
if (modes[k].max_power !=
AR5416_MAX_RATE_POWER) {
modes[i].max_power = modes[k].max_power;
break;
}
}
}
/* apply max power to pwr_cal_data (ar->power_*) */
for (j = 0; j < modes[i].pwr_cal_len; j++) {
modes[i].pwr_cal_data[j] = min(modes[i].pwr_cal_data[j],
modes[i].max_power);
}
}
#undef EDGES
}
static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
{
struct ar9170_calibration_target_power_legacy *ctpl;
@ -1089,6 +1497,12 @@ static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
ctph[idx + 1].power[n]);
}
/* calc. conformance test limits and apply to ar->power*[] */
ar9170_calc_ctl(ar, freq, bw);
/* TODO: (heavy clip) regulatory domain power level fine-tuning. */
/* set ACK/CTS TX power */
ar9170_regwrite_begin(ar);
@ -1207,6 +1621,10 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
if (err)
return err;
err = ar9170_set_freq_cal_data(ar, channel);
if (err)
return err;
err = ar9170_set_power_cal(ar, channel->center_freq, bw);
if (err)
return err;

drivers/net/wireless/ath/ath9k/ahb.c

@ -119,17 +119,15 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->bus_ops = &ath_ahb_bus_ops;
sc->irq = irq;
ret = ath_init_device(AR5416_AR9100_DEVID, sc);
if (ret != 0) {
dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
ret = -ENODEV;
ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0);
if (ret) {
dev_err(&pdev->dev, "failed to initialize device\n");
goto err_free_hw;
}
ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed, err=%d\n", ret);
ret = -EIO;
dev_err(&pdev->dev, "request_irq failed\n");
goto err_detach;
}

drivers/net/wireless/ath/ath9k/ath9k.h

@ -658,7 +658,7 @@ extern struct ieee80211_ops ath9k_ops;
irqreturn_t ath_isr(int irq, void *dev);
void ath_cleanup(struct ath_softc *sc);
int ath_init_device(u16 devid, struct ath_softc *sc);
int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid);
void ath_detach(struct ath_softc *sc);
const char *ath_mac_bb_name(u32 mac_bb_version);
const char *ath_rf_name(u16 rf_version);

drivers/net/wireless/ath/ath9k/btcoex.c

@ -19,6 +19,29 @@
static const struct ath_btcoex_config ath_bt_config = { 0, true, true,
ATH_BT_COEX_MODE_SLOTTED, true, true, 2, 5, true };
static const u16 ath_subsysid_tbl[] = {
AR9280_COEX2WIRE_SUBSYSID,
AT9285_COEX3WIRE_SA_SUBSYSID,
AT9285_COEX3WIRE_DA_SUBSYSID
};
/*
* Checks the subsystem id of the device to see if it
* supports btcoex
*/
bool ath_btcoex_supported(u16 subsysid)
{
int i;
if (!subsysid)
return false;
for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++)
if (subsysid == ath_subsysid_tbl[i])
return true;
return false;
}
/*
* Detects if there is any priority bt traffic

drivers/net/wireless/ath/ath9k/btcoex.h

@ -19,6 +19,7 @@
#define ATH_WLANACTIVE_GPIO 5
#define ATH_BTACTIVE_GPIO 6
#define ATH_BTPRIORITY_GPIO 7
#define ATH_BTCOEX_DEF_BT_PERIOD 45
#define ATH_BTCOEX_DEF_DUTY_CYCLE 55
@ -79,6 +80,7 @@ struct ath_btcoex_info {
struct ath_gen_timer *no_stomp_timer; /*Timer for no BT stomping*/
};
bool ath_btcoex_supported(u16 subsysid);
int ath9k_hw_btcoex_init(struct ath_hw *ah);
void ath9k_hw_btcoex_enable(struct ath_hw *ah);
void ath9k_hw_btcoex_disable(struct ath_hw *ah);

drivers/net/wireless/ath/ath9k/hw.c

@ -16,14 +16,11 @@
#include <linux/io.h>
#include <asm/unaligned.h>
#include <linux/pci.h>
#include "ath9k.h"
#include "initvals.h"
static int btcoex_enable;
module_param(btcoex_enable, bool, 0);
MODULE_PARM_DESC(btcoex_enable, "Enable Bluetooth coexistence support");
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
@ -3689,14 +3686,17 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->num_antcfg_2ghz =
ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
if (AR_SREV_9280_10_OR_LATER(ah) && btcoex_enable) {
if (AR_SREV_9280_10_OR_LATER(ah) &&
ath_btcoex_supported(ah->hw_version.subsysid)) {
btcoex_info->btactive_gpio = ATH_BTACTIVE_GPIO;
btcoex_info->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
if (AR_SREV_9285(ah))
if (AR_SREV_9285(ah)) {
btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_3WIRE;
else
btcoex_info->btpriority_gpio = ATH_BTPRIORITY_GPIO;
} else {
btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_2WIRE;
}
} else {
btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_NONE;
}
@ -3967,7 +3967,8 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
{
u32 phybits;
REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR);
REG_WRITE(ah, AR_RX_FILTER, bits);
phybits = 0;
if (bits & ATH9K_RX_FILTER_PHYRADAR)
phybits |= AR_PHY_ERR_RADAR;
@ -4297,3 +4298,16 @@ void ath_gen_timer_isr(struct ath_hw *ah)
timer->trigger(timer->arg);
}
}
/*
* Primitive to disable ASPM
*/
void ath_pcie_aspm_disable(struct ath_softc *sc)
{
struct pci_dev *pdev = to_pci_dev(sc->dev);
u8 aspm;
pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
}
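ath_pcie_aspm_disable() clears the L0s and L1 enable bits by writing the PCIe Link Control register (ATH_PCIE_CAP_LINK_CTRL, 0x70, per the new defines in hw.h below) directly. For comparison only, the same request could be made through the kernel's generic ASPM helper; a sketch, assuming PCIe ASPM support is built in (the driver does not actually take this path):

#include <linux/pci.h>
#include <linux/pci-aspm.h>

/* Sketch: generic-helper equivalent of the direct config-space write above. */
static void ath_pcie_aspm_disable_sketch(struct pci_dev *pdev)
{
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}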

drivers/net/wireless/ath/ath9k/hw.h

@ -45,6 +45,10 @@
#define AR5416_DEVID_AR9287_PCI 0x002D
#define AR5416_DEVID_AR9287_PCIE 0x002E
#define AR9280_COEX2WIRE_SUBSYSID 0x309b
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
/* Register read/write primitives */
#define REG_WRITE(_ah, _reg, _val) ath9k_iowrite32((_ah), (_reg), (_val))
#define REG_READ(_ah, _reg) ath9k_ioread32((_ah), (_reg))
@ -390,6 +394,7 @@ struct ath9k_hw_version {
u16 phyRev;
u16 analog5GhzRev;
u16 analog2GhzRev;
u16 subsysid;
};
/* Generic TSF timer definitions */
@ -665,4 +670,9 @@ void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
void ath_gen_timer_isr(struct ath_hw *hw);
u32 ath9k_hw_gettsf32(struct ath_hw *ah);
#define ATH_PCIE_CAP_LINK_CTRL 0x70
#define ATH_PCIE_CAP_LINK_L0S 1
#define ATH_PCIE_CAP_LINK_L1 2
void ath_pcie_aspm_disable(struct ath_softc *sc);
#endif

drivers/net/wireless/ath/ath9k/mac.h

@ -568,6 +568,7 @@ enum ath9k_rx_filter {
ATH9K_RX_FILTER_PROBEREQ = 0x00000080,
ATH9K_RX_FILTER_PHYERR = 0x00000100,
ATH9K_RX_FILTER_MYBEACON = 0x00000200,
ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
ATH9K_RX_FILTER_PSPOLL = 0x00004000,
ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,

drivers/net/wireless/ath/ath9k/main.c

@ -1310,7 +1310,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
* to allow the separation between hardware specific
* variables (now in ath_hw) and driver specific variables.
*/
static int ath_init_softc(u16 devid, struct ath_softc *sc)
static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
{
struct ath_hw *ah = NULL;
int r = 0, i;
@ -1348,6 +1348,7 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc)
ah->ah_sc = sc;
ah->hw_version.devid = devid;
ah->hw_version.subsysid = subsysid;
sc->sc_ah = ah;
r = ath9k_hw_init(ah);
@ -1577,7 +1578,7 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
}
/* Device driver core initialization */
int ath_init_device(u16 devid, struct ath_softc *sc)
int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid)
{
struct ieee80211_hw *hw = sc->hw;
int error = 0, i;
@ -1585,7 +1586,7 @@ int ath_init_device(u16 devid, struct ath_softc *sc)
DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
error = ath_init_softc(devid, sc);
error = ath_init_softc(devid, sc, subsysid);
if (error != 0)
return error;
@ -1879,7 +1880,7 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (chan->band == IEEE80211_BAND_2GHZ) {
ichan->chanmode = CHANNEL_G;
ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
} else {
ichan->chanmode = CHANNEL_A;
ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
@ -2010,6 +2011,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(sc->sc_ah);
ath_pcie_aspm_disable(sc);
if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
ath_btcoex_timer_resume(sc, &sc->btcoex_info);
}
@ -2433,7 +2435,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
ath9k_ps_restore(sc);
DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", rfilt);
}
static void ath9k_sta_notify(struct ieee80211_hw *hw,

drivers/net/wireless/ath/ath9k/pci.c

@ -35,8 +35,7 @@ static void ath_pci_read_cachesize(struct ath_softc *sc, int *csz)
{
u8 u8tmp;
pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE,
(u8 *)&u8tmp);
pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp);
*csz = (int)u8tmp;
/*
@ -89,6 +88,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
u16 subsysid;
u32 val;
int ret = 0;
struct ath_hw *ah;
@ -160,8 +160,9 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) {
printk(KERN_ERR "ath_pci: no memory for ieee80211_hw\n");
if (!hw) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
ret = -ENOMEM;
goto bad2;
}
@ -178,17 +179,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->mem = mem;
sc->bus_ops = &ath_pci_bus_ops;
if (ath_init_device(id->device, sc) != 0) {
ret = -ENODEV;
pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
ret = ath_init_device(id->device, sc, subsysid);
if (ret) {
dev_err(&pdev->dev, "failed to initialize device\n");
goto bad3;
}
/* setup interrupt service routine */
if (request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc)) {
printk(KERN_ERR "%s: request_irq failed\n",
wiphy_name(hw->wiphy));
ret = -EIO;
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
goto bad4;
}

drivers/net/wireless/ath/ath9k/recv.c

@ -423,6 +423,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
if (sc->rx.rxfilter & FIF_PSPOLL)
rfilt |= ATH9K_RX_FILTER_PSPOLL;
if (conf_is_ht(&sc->hw->conf))
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
/* TODO: only needed if more than one BSSID is in use in
* station/adhoc mode */

drivers/net/wireless/ath/ath9k/reg.h

@ -1325,7 +1325,6 @@ enum {
#define AR_CFP_VAL 0x0000FFFF
#define AR_RX_FILTER 0x803C
#define AR_RX_COMPR_BAR 0x00000400
#define AR_MCAST_FIL0 0x8040
#define AR_MCAST_FIL1 0x8044

drivers/net/wireless/ath/regd.h

@ -22,6 +22,12 @@
#include "ath.h"
enum ctl_group {
CTL_FCC = 0x10,
CTL_MKK = 0x40,
CTL_ETSI = 0x30,
};
#define NO_CTL 0xff
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff

drivers/net/wireless/ath/regd_common.h

@ -154,12 +154,6 @@ enum EnumRd {
DEBUG_REG_DMN = 0x01ff,
};
enum ctl_group {
CTL_FCC = 0x10,
CTL_MKK = 0x40,
CTL_ETSI = 0x30,
};
/* Regpair to CTL band mapping */
static struct reg_dmn_pair_mapping regDomainPairs[] = {
/* regpair, 5 GHz CTL, 2 GHz CTL */

drivers/net/wireless/b43/Kconfig

@ -42,8 +42,8 @@ config B43_PCICORE_AUTOSELECT
default y
config B43_PCMCIA
bool "Broadcom 43xx PCMCIA device support (EXPERIMENTAL)"
depends on B43 && SSB_PCMCIAHOST_POSSIBLE && EXPERIMENTAL
bool "Broadcom 43xx PCMCIA device support"
depends on B43 && SSB_PCMCIAHOST_POSSIBLE
select SSB_PCMCIAHOST
---help---
Broadcom 43xx PCMCIA device support.

drivers/net/wireless/b43/b43.h

@ -616,6 +616,12 @@ struct b43_wl {
/* Pointer to the ieee80211 hardware data structure */
struct ieee80211_hw *hw;
/* Global driver mutex. Every operation must run with this mutex locked. */
struct mutex mutex;
/* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ
* handler, only. This basically is just the IRQ mask register. */
spinlock_t hardirq_lock;
/* The number of queues that were registered with the mac80211 subsystem
* initially. This is a backup copy of hw->queues in case hw->queues has
* to be dynamically lowered at runtime (Firmware does not support QoS).
@ -623,16 +629,12 @@ struct b43_wl {
* from the mac80211 subsystem. */
u16 mac80211_initially_registered_queues;
struct mutex mutex;
spinlock_t irq_lock;
/* R/W lock for data transmission.
* Transmissions on 2+ queues can run concurrently, but somebody else
* might sync with TX by write_lock_irqsave()'ing. */
rwlock_t tx_lock;
/* Lock for LEDs access. */
spinlock_t leds_lock;
/* Lock for SHM access. */
spinlock_t shm_lock;
/* We can only have one operating interface (802.11 core)
* at a time. General information about this interface follows.
@ -665,8 +667,7 @@ struct b43_wl {
bool radiotap_enabled;
bool radio_enabled;
/* The beacon we are currently using (AP or IBSS mode).
* This beacon stuff is protected by the irq_lock. */
/* The beacon we are currently using (AP or IBSS mode). */
struct sk_buff *current_beacon;
bool beacon0_uploaded;
bool beacon1_uploaded;
@ -680,6 +681,11 @@ struct b43_wl {
* This is scheduled when we determine that the actual TX output
* power doesn't match what we want. */
struct work_struct txpower_adjust_work;
/* Packet transmit work */
struct work_struct tx_work;
/* Queue of packets to be transmitted. */
struct sk_buff_head tx_queue;
};
/* The type of the firmware file. */
@ -754,14 +760,6 @@ enum {
smp_wmb(); \
} while (0)
/* XXX--- HOW LOCKING WORKS IN B43 ---XXX
*
* You should always acquire both, wl->mutex and wl->irq_lock unless:
* - You don't need to acquire wl->irq_lock, if the interface is stopped.
* - You don't need to acquire wl->mutex in the IRQ handler, IRQ tasklet
* and packet TX path (and _ONLY_ there.)
*/
/* Data structure for one wireless device (802.11 core) */
struct b43_wldev {
struct ssb_device *dev;
@ -807,14 +805,12 @@ struct b43_wldev {
u32 dma_reason[6];
/* The currently active generic-interrupt mask. */
u32 irq_mask;
/* Link Quality calculation context. */
struct b43_noise_calculation noisecalc;
/* if > 0 MAC is suspended. if == 0 MAC is enabled. */
int mac_suspended;
/* Interrupt Service Routine tasklet (bottom-half) */
struct tasklet_struct isr_tasklet;
/* Periodic tasks */
struct delayed_work periodic_work;
unsigned int periodic_state;

drivers/net/wireless/b43/debugfs.c

@ -46,8 +46,6 @@ struct b43_debugfs_fops {
struct file_operations fops;
/* Offset of struct b43_dfs_file in struct b43_dfsentry */
size_t file_struct_offset;
/* Take wl->irq_lock before calling read/write? */
bool take_irqlock;
};
static inline
@ -127,7 +125,6 @@ static int shm16write__write_file(struct b43_wldev *dev,
unsigned int routing, addr, mask, set;
u16 val;
int res;
unsigned long flags;
res = sscanf(buf, "0x%X 0x%X 0x%X 0x%X",
&routing, &addr, &mask, &set);
@ -144,15 +141,13 @@ static int shm16write__write_file(struct b43_wldev *dev,
if ((mask > 0xFFFF) || (set > 0xFFFF))
return -E2BIG;
spin_lock_irqsave(&dev->wl->shm_lock, flags);
if (mask == 0)
val = 0;
else
val = __b43_shm_read16(dev, routing, addr);
val = b43_shm_read16(dev, routing, addr);
val &= mask;
val |= set;
__b43_shm_write16(dev, routing, addr, val);
spin_unlock_irqrestore(&dev->wl->shm_lock, flags);
b43_shm_write16(dev, routing, addr, val);
return 0;
}
@ -206,7 +201,6 @@ static int shm32write__write_file(struct b43_wldev *dev,
unsigned int routing, addr, mask, set;
u32 val;
int res;
unsigned long flags;
res = sscanf(buf, "0x%X 0x%X 0x%X 0x%X",
&routing, &addr, &mask, &set);
@ -223,15 +217,13 @@ static int shm32write__write_file(struct b43_wldev *dev,
if ((mask > 0xFFFFFFFF) || (set > 0xFFFFFFFF))
return -E2BIG;
spin_lock_irqsave(&dev->wl->shm_lock, flags);
if (mask == 0)
val = 0;
else
val = __b43_shm_read32(dev, routing, addr);
val = b43_shm_read32(dev, routing, addr);
val &= mask;
val |= set;
__b43_shm_write32(dev, routing, addr, val);
spin_unlock_irqrestore(&dev->wl->shm_lock, flags);
b43_shm_write32(dev, routing, addr, val);
return 0;
}
@ -372,14 +364,12 @@ static ssize_t txstat_read_file(struct b43_wldev *dev,
{
struct b43_txstatus_log *log = &dev->dfsentry->txstatlog;
ssize_t count = 0;
unsigned long flags;
int i, idx;
struct b43_txstatus *stat;
spin_lock_irqsave(&log->lock, flags);
if (log->end < 0) {
fappend("Nothing transmitted, yet\n");
goto out_unlock;
goto out;
}
fappend("b43 TX status reports:\n\n"
"index | cookie | seq | phy_stat | frame_count | "
@ -409,13 +399,11 @@ static ssize_t txstat_read_file(struct b43_wldev *dev,
break;
i++;
}
out_unlock:
spin_unlock_irqrestore(&log->lock, flags);
out:
return count;
}
/* wl->irq_lock is locked */
static int restart_write_file(struct b43_wldev *dev,
const char *buf, size_t count)
{
@ -556,12 +544,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
goto out_unlock;
}
memset(buf, 0, bufsize);
if (dfops->take_irqlock) {
spin_lock_irq(&dev->wl->irq_lock);
ret = dfops->read(dev, buf, bufsize);
spin_unlock_irq(&dev->wl->irq_lock);
} else
ret = dfops->read(dev, buf, bufsize);
ret = dfops->read(dev, buf, bufsize);
if (ret <= 0) {
free_pages((unsigned long)buf, buforder);
err = ret;
@ -623,12 +606,7 @@ static ssize_t b43_debugfs_write(struct file *file,
err = -EFAULT;
goto out_freepage;
}
if (dfops->take_irqlock) {
spin_lock_irq(&dev->wl->irq_lock);
err = dfops->write(dev, buf, count);
spin_unlock_irq(&dev->wl->irq_lock);
} else
err = dfops->write(dev, buf, count);
err = dfops->write(dev, buf, count);
if (err)
goto out_freepage;
@ -641,7 +619,7 @@ out_unlock:
}
#define B43_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \
#define B43_DEBUGFS_FOPS(name, _read, _write) \
static struct b43_debugfs_fops fops_##name = { \
.read = _read, \
.write = _write, \
@ -652,20 +630,19 @@ out_unlock:
}, \
.file_struct_offset = offsetof(struct b43_dfsentry, \
file_##name), \
.take_irqlock = _take_irqlock, \
}
B43_DEBUGFS_FOPS(shm16read, shm16read__read_file, shm16read__write_file, 1);
B43_DEBUGFS_FOPS(shm16write, NULL, shm16write__write_file, 1);
B43_DEBUGFS_FOPS(shm32read, shm32read__read_file, shm32read__write_file, 1);
B43_DEBUGFS_FOPS(shm32write, NULL, shm32write__write_file, 1);
B43_DEBUGFS_FOPS(mmio16read, mmio16read__read_file, mmio16read__write_file, 1);
B43_DEBUGFS_FOPS(mmio16write, NULL, mmio16write__write_file, 1);
B43_DEBUGFS_FOPS(mmio32read, mmio32read__read_file, mmio32read__write_file, 1);
B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file, 1);
B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0);
B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1);
B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0);
B43_DEBUGFS_FOPS(shm16read, shm16read__read_file, shm16read__write_file);
B43_DEBUGFS_FOPS(shm16write, NULL, shm16write__write_file);
B43_DEBUGFS_FOPS(shm32read, shm32read__read_file, shm32read__write_file);
B43_DEBUGFS_FOPS(shm32write, NULL, shm32write__write_file);
B43_DEBUGFS_FOPS(mmio16read, mmio16read__read_file, mmio16read__write_file);
B43_DEBUGFS_FOPS(mmio16write, NULL, mmio16write__write_file);
B43_DEBUGFS_FOPS(mmio32read, mmio32read__read_file, mmio32read__write_file);
B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file);
B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL);
B43_DEBUGFS_FOPS(restart, NULL, restart_write_file);
B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL);
bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature)
@ -738,7 +715,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
return;
}
log->end = -1;
spin_lock_init(&log->lock);
dev->dfsentry = e;
@ -822,7 +798,6 @@ void b43_debugfs_remove_device(struct b43_wldev *dev)
kfree(e);
}
/* Called with IRQs disabled. */
void b43_debugfs_log_txstat(struct b43_wldev *dev,
const struct b43_txstatus *status)
{
@ -834,14 +809,12 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
if (!e)
return;
log = &e->txstatlog;
spin_lock(&log->lock); /* IRQs are already disabled. */
i = log->end + 1;
if (i == B43_NR_LOGGED_TXSTATUS)
i = 0;
log->end = i;
cur = &(log->log[i]);
memcpy(cur, status, sizeof(*cur));
spin_unlock(&log->lock);
}
void b43_debugfs_init(void)

drivers/net/wireless/b43/debugfs.h

@ -23,9 +23,10 @@ struct dentry;
#define B43_NR_LOGGED_TXSTATUS 100
struct b43_txstatus_log {
/* This structure is protected by wl->mutex */
struct b43_txstatus *log;
int end;
spinlock_t lock;
};
struct b43_dfs_file {

drivers/net/wireless/b43/dma.c

@ -856,7 +856,6 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
} else
B43_WARN_ON(1);
}
spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
ring->last_injected_overflow = jiffies;
#endif
@ -1315,7 +1314,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
struct b43_dmaring *ring;
struct ieee80211_hdr *hdr;
int err = 0;
unsigned long flags;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
hdr = (struct ieee80211_hdr *)skb->data;
@ -1331,8 +1329,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
dev, skb_get_queue_mapping(skb));
}
spin_lock_irqsave(&ring->lock, flags);
B43_WARN_ON(!ring->tx);
if (unlikely(ring->stopped)) {
@ -1343,7 +1339,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (b43_debug(dev, B43_DBG_DMAVERBOSE))
b43err(dev->wl, "Packet after queue stopped\n");
err = -ENOSPC;
goto out_unlock;
goto out;
}
if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
@ -1351,7 +1347,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
* full, but queues not stopped. */
b43err(dev->wl, "DMA queue overflow\n");
err = -ENOSPC;
goto out_unlock;
goto out;
}
/* Assign the queue number to the ring (if not already done before)
@ -1365,11 +1361,11 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
* anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb);
err = 0;
goto out_unlock;
goto out;
}
if (unlikely(err)) {
b43err(dev->wl, "DMA tx mapping failure\n");
goto out_unlock;
goto out;
}
ring->nr_tx_packets++;
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
@ -1381,13 +1377,11 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
}
}
out_unlock:
spin_unlock_irqrestore(&ring->lock, flags);
out:
return err;
}
/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status)
{
@ -1402,8 +1396,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
if (unlikely(!ring))
return;
spin_lock(&ring->lock); /* IRQs are already disabled. */
B43_WARN_ON(!ring->tx);
ops = ring->ops;
while (1) {
@ -1462,8 +1454,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
}
spin_unlock(&ring->lock);
}
void b43_dma_get_tx_stats(struct b43_wldev *dev,
@ -1471,17 +1461,14 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
{
const int nr_queues = dev->wl->hw->queues;
struct b43_dmaring *ring;
unsigned long flags;
int i;
for (i = 0; i < nr_queues; i++) {
ring = select_ring_by_priority(dev, i);
spin_lock_irqsave(&ring->lock, flags);
stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
stats[i].count = ring->nr_tx_packets;
spin_unlock_irqrestore(&ring->lock, flags);
}
}
@ -1592,22 +1579,14 @@ void b43_dma_rx(struct b43_dmaring *ring)
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
B43_WARN_ON(!ring->tx);
ring->ops->tx_suspend(ring);
spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
B43_WARN_ON(!ring->tx);
ring->ops->tx_resume(ring);
spin_unlock_irqrestore(&ring->lock, flags);
}
void b43_dma_tx_suspend(struct b43_wldev *dev)

drivers/net/wireless/b43/dma.h

@ -2,7 +2,6 @@
#define B43_DMA_H_
#include <linux/ieee80211.h>
#include <linux/spinlock.h>
#include "b43.h"
@ -244,8 +243,6 @@ struct b43_dmaring {
/* The QOS priority assigned to this ring. Only used for TX rings.
* This is the mac80211 "queue" value. */
u8 queue_prio;
/* Lock, only used for TX. */
spinlock_t lock;
struct b43_wldev *dev;
#ifdef CONFIG_B43_DEBUG
/* Maximum number of used slots. */

drivers/net/wireless/b43/main.c

@ -291,7 +291,7 @@ static struct ieee80211_supported_band b43_band_2GHz = {
static void b43_wireless_core_exit(struct b43_wldev *dev);
static int b43_wireless_core_init(struct b43_wldev *dev);
static void b43_wireless_core_stop(struct b43_wldev *dev);
static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev);
static int b43_wireless_core_start(struct b43_wldev *dev);
static int b43_ratelimit(struct b43_wl *wl)
@ -390,7 +390,7 @@ static inline void b43_shm_control_word(struct b43_wldev *dev,
b43_write32(dev, B43_MMIO_SHM_CONTROL, control);
}
u32 __b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset)
u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset)
{
u32 ret;
@ -413,20 +413,7 @@ out:
return ret;
}
u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset)
{
struct b43_wl *wl = dev->wl;
unsigned long flags;
u32 ret;
spin_lock_irqsave(&wl->shm_lock, flags);
ret = __b43_shm_read32(dev, routing, offset);
spin_unlock_irqrestore(&wl->shm_lock, flags);
return ret;
}
u16 __b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset)
u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset)
{
u16 ret;
@ -447,20 +434,7 @@ out:
return ret;
}
u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset)
{
struct b43_wl *wl = dev->wl;
unsigned long flags;
u16 ret;
spin_lock_irqsave(&wl->shm_lock, flags);
ret = __b43_shm_read16(dev, routing, offset);
spin_unlock_irqrestore(&wl->shm_lock, flags);
return ret;
}
void __b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value)
void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value)
{
if (routing == B43_SHM_SHARED) {
B43_WARN_ON(offset & 0x0001);
@ -480,17 +454,7 @@ void __b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value
b43_write32(dev, B43_MMIO_SHM_DATA, value);
}
void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value)
{
struct b43_wl *wl = dev->wl;
unsigned long flags;
spin_lock_irqsave(&wl->shm_lock, flags);
__b43_shm_write32(dev, routing, offset, value);
spin_unlock_irqrestore(&wl->shm_lock, flags);
}
void __b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value)
void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value)
{
if (routing == B43_SHM_SHARED) {
B43_WARN_ON(offset & 0x0001);
@ -506,16 +470,6 @@ void __b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value
b43_write16(dev, B43_MMIO_SHM_DATA, value);
}
void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value)
{
struct b43_wl *wl = dev->wl;
unsigned long flags;
spin_lock_irqsave(&wl->shm_lock, flags);
__b43_shm_write16(dev, routing, offset, value);
spin_unlock_irqrestore(&wl->shm_lock, flags);
}
/* Read HostFlags */
u64 b43_hf_read(struct b43_wldev *dev)
{
@ -685,22 +639,11 @@ static void b43_short_slot_timing_disable(struct b43_wldev *dev)
b43_set_slot_time(dev, 20);
}
/* Synchronize IRQ top- and bottom-half.
* IRQs must be masked before calling this.
* This must not be called with the irq_lock held.
*/
static void b43_synchronize_irq(struct b43_wldev *dev)
{
synchronize_irq(dev->dev->irq);
tasklet_kill(&dev->isr_tasklet);
}
/* DummyTransmission function, as documented on
* http://bcm-v4.sipsolutions.net/802.11/DummyTransmission
*/
void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
{
struct b43_wl *wl = dev->wl;
struct b43_phy *phy = &dev->phy;
unsigned int i, max_loop;
u16 value;
@ -720,9 +663,6 @@ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
buffer[0] = 0x000B846E;
}
spin_lock_irq(&wl->irq_lock);
write_lock(&wl->tx_lock);
for (i = 0; i < 5; i++)
b43_ram_write(dev, i * 4, buffer[i]);
@ -778,9 +718,6 @@ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
}
if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5)
b43_radio_write16(dev, 0x0051, 0x0037);
write_unlock(&wl->tx_lock);
spin_unlock_irq(&wl->irq_lock);
}
static void key_write(struct b43_wldev *dev,
@ -1620,6 +1557,27 @@ static void handle_irq_beacon(struct b43_wldev *dev)
}
}
static void b43_do_beacon_update_trigger_work(struct b43_wldev *dev)
{
u32 old_irq_mask = dev->irq_mask;
/* update beacon right away or defer to irq */
handle_irq_beacon(dev);
if (old_irq_mask != dev->irq_mask) {
/* The handler updated the IRQ mask. */
B43_WARN_ON(!dev->irq_mask);
if (b43_read32(dev, B43_MMIO_GEN_IRQ_MASK)) {
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
} else {
/* Device interrupts are currently disabled. That means
* we just ran the hardirq handler and scheduled the
* IRQ thread. The thread will write the IRQ mask when
* it finished, so there's nothing to do here. Writing
* the mask _here_ would incorrectly re-enable IRQs. */
}
}
}
static void b43_beacon_update_trigger_work(struct work_struct *work)
{
struct b43_wl *wl = container_of(work, struct b43_wl,
@ -1629,19 +1587,22 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
spin_lock_irq(&wl->irq_lock);
/* update beacon right away or defer to irq */
handle_irq_beacon(dev);
/* The handler might have updated the IRQ mask. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
mmiowb();
spin_unlock_irq(&wl->irq_lock);
if (0 /*FIXME dev->dev->bus->bustype == SSB_BUSTYPE_SDIO*/) {
/* wl->mutex is enough. */
b43_do_beacon_update_trigger_work(dev);
mmiowb();
} else {
spin_lock_irq(&wl->hardirq_lock);
b43_do_beacon_update_trigger_work(dev);
mmiowb();
spin_unlock_irq(&wl->hardirq_lock);
}
}
mutex_unlock(&wl->mutex);
}
/* Asynchronously update the packet templates in template RAM.
* Locking: Requires wl->irq_lock to be locked. */
* Locking: Requires wl->mutex to be locked. */
static void b43_update_templates(struct b43_wl *wl)
{
struct sk_buff *beacon;
@ -1778,18 +1739,15 @@ out:
B43_DEBUGIRQ_REASON_REG, B43_DEBUGIRQ_ACK);
}
/* Interrupt handler bottom-half */
static void b43_interrupt_tasklet(struct b43_wldev *dev)
static void b43_do_interrupt_thread(struct b43_wldev *dev)
{
u32 reason;
u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
u32 merged_dma_reason = 0;
int i;
unsigned long flags;
spin_lock_irqsave(&dev->wl->irq_lock, flags);
B43_WARN_ON(b43_status(dev) != B43_STAT_STARTED);
if (unlikely(b43_status(dev) != B43_STAT_STARTED))
return;
reason = dev->irq_reason;
for (i = 0; i < ARRAY_SIZE(dma_reason); i++) {
@ -1822,8 +1780,6 @@ static void b43_interrupt_tasklet(struct b43_wldev *dev)
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
b43_controller_restart(dev, "DMA error");
mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
return;
}
if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@ -1867,47 +1823,36 @@ static void b43_interrupt_tasklet(struct b43_wldev *dev)
if (reason & B43_IRQ_TX_OK)
handle_irq_transmit_status(dev);
/* Re-enable interrupts on the device by restoring the current interrupt mask. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}
static void b43_interrupt_ack(struct b43_wldev *dev, u32 reason)
/* Interrupt thread handler. Handles device interrupts in thread context. */
static irqreturn_t b43_interrupt_thread_handler(int irq, void *dev_id)
{
b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, reason);
b43_write32(dev, B43_MMIO_DMA0_REASON, dev->dma_reason[0]);
b43_write32(dev, B43_MMIO_DMA1_REASON, dev->dma_reason[1]);
b43_write32(dev, B43_MMIO_DMA2_REASON, dev->dma_reason[2]);
b43_write32(dev, B43_MMIO_DMA3_REASON, dev->dma_reason[3]);
b43_write32(dev, B43_MMIO_DMA4_REASON, dev->dma_reason[4]);
/* Unused ring
b43_write32(dev, B43_MMIO_DMA5_REASON, dev->dma_reason[5]);
*/
}
/* Interrupt handler top-half */
static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
{
irqreturn_t ret = IRQ_NONE;
struct b43_wldev *dev = dev_id;
mutex_lock(&dev->wl->mutex);
b43_do_interrupt_thread(dev);
mmiowb();
mutex_unlock(&dev->wl->mutex);
return IRQ_HANDLED;
}
static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
{
u32 reason;
B43_WARN_ON(!dev);
/* This code runs under wl->hardirq_lock, but _only_ on non-SDIO busses.
* On SDIO, this runs under wl->mutex. */
spin_lock(&dev->wl->irq_lock);
if (unlikely(b43_status(dev) < B43_STAT_STARTED)) {
/* This can only happen on shared IRQ lines. */
goto out;
}
reason = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
if (reason == 0xffffffff) /* shared IRQ */
goto out;
ret = IRQ_HANDLED;
return IRQ_NONE;
reason &= dev->irq_mask;
if (!reason)
goto out;
return IRQ_HANDLED;
dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
& 0x0001DC00;
@ -1924,15 +1869,38 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
& 0x0000DC00;
*/
b43_interrupt_ack(dev, reason);
/* disable all IRQs. They are enabled again in the bottom half. */
/* ACK the interrupt. */
b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, reason);
b43_write32(dev, B43_MMIO_DMA0_REASON, dev->dma_reason[0]);
b43_write32(dev, B43_MMIO_DMA1_REASON, dev->dma_reason[1]);
b43_write32(dev, B43_MMIO_DMA2_REASON, dev->dma_reason[2]);
b43_write32(dev, B43_MMIO_DMA3_REASON, dev->dma_reason[3]);
b43_write32(dev, B43_MMIO_DMA4_REASON, dev->dma_reason[4]);
/* Unused ring
b43_write32(dev, B43_MMIO_DMA5_REASON, dev->dma_reason[5]);
*/
/* Disable IRQs on the device. The IRQ thread handler will re-enable them. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
/* save the reason code and call our bottom half. */
/* Save the reason bitmasks for the IRQ thread handler. */
dev->irq_reason = reason;
tasklet_schedule(&dev->isr_tasklet);
out:
return IRQ_WAKE_THREAD;
}
/* Interrupt handler top-half. This runs with interrupts disabled. */
static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
{
struct b43_wldev *dev = dev_id;
irqreturn_t ret;
if (unlikely(b43_status(dev) < B43_STAT_STARTED))
return IRQ_NONE;
spin_lock(&dev->wl->hardirq_lock);
ret = b43_do_interrupt(dev);
mmiowb();
spin_unlock(&dev->wl->irq_lock);
spin_unlock(&dev->wl->hardirq_lock);
return ret;
}
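The registration that pairs this hard-IRQ top half with the threaded handler is not part of this hunk; with the kernel's threaded-IRQ API it presumably looks roughly like the following sketch (the real call site is in the wireless-core start path):

/* Sketch only: wiring the two handlers above together. The actual
 * request_threaded_irq() call is outside this hunk. */
static int b43_request_irq_sketch(struct b43_wldev *dev)
{
	return request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
				    b43_interrupt_thread_handler,
				    IRQF_SHARED, KBUILD_MODNAME, dev);
}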
@ -3038,15 +3006,12 @@ static void b43_security_init(struct b43_wldev *dev)
static int b43_rng_read(struct hwrng *rng, u32 *data)
{
struct b43_wl *wl = (struct b43_wl *)rng->priv;
unsigned long flags;
/* Don't take wl->mutex here, as it could deadlock with
* hwrng internal locking. It's not needed to take
* wl->mutex here, anyway. */
/* FIXME: We need to take wl->mutex here to make sure the device
* is not going away from under our ass. However it could deadlock
* with hwrng internal locking. */
spin_lock_irqsave(&wl->irq_lock, flags);
*data = b43_read16(wl->current_dev, B43_MMIO_RNG);
spin_unlock_irqrestore(&wl->irq_lock, flags);
return (sizeof(u16));
}
@ -3082,46 +3047,52 @@ static int b43_rng_init(struct b43_wl *wl)
return err;
}
static int b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
static void b43_tx_work(struct work_struct *work)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
unsigned long flags;
int err;
struct b43_wl *wl = container_of(work, struct b43_wl, tx_work);
struct b43_wldev *dev;
struct sk_buff *skb;
int err = 0;
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
goto drop_packet;
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED)) {
mutex_unlock(&wl->mutex);
return;
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
if (unlikely(!dev))
goto drop_packet;
/* Transmissions on separate queues can run concurrently. */
read_lock_irqsave(&wl->tx_lock, flags);
while (skb_queue_len(&wl->tx_queue)) {
skb = skb_dequeue(&wl->tx_queue);
err = -ENODEV;
if (likely(b43_status(dev) >= B43_STAT_STARTED)) {
if (b43_using_pio_transfers(dev))
err = b43_pio_tx(dev, skb);
else
err = b43_dma_tx(dev, skb);
if (unlikely(err))
dev_kfree_skb(skb); /* Drop it */
}
read_unlock_irqrestore(&wl->tx_lock, flags);
mutex_unlock(&wl->mutex);
}
if (unlikely(err))
goto drop_packet;
return NETDEV_TX_OK;
static int b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
skb_queue_tail(&wl->tx_queue, skb);
ieee80211_queue_work(wl->hw, &wl->tx_work);
drop_packet:
/* We can not transmit this packet. Drop it. */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
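b43_op_tx() now only queues the frame and kicks wl->tx_work; the queue and the work item therefore need one-time initialization when the wl structure is set up. That setup is part of this commit but outside this hunk; presumably it amounts to:

/* Sketch of the one-time setup for the deferred-TX path (the actual
 * initialization lives in the wl setup code, not shown here). */
skb_queue_head_init(&wl->tx_queue);
INIT_WORK(&wl->tx_work, b43_tx_work);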
/* Locking: wl->irq_lock */
static void b43_qos_params_upload(struct b43_wldev *dev,
const struct ieee80211_tx_queue_params *p,
u16 shm_offset)
@ -3130,6 +3101,9 @@ static void b43_qos_params_upload(struct b43_wldev *dev,
int bslots, tmp;
unsigned int i;
if (!dev->qos_enabled)
return;
bslots = b43_read16(dev, B43_MMIO_RNG) & p->cw_min;
memset(&params, 0, sizeof(params));
@ -3175,6 +3149,9 @@ static void b43_qos_upload_all(struct b43_wldev *dev)
struct b43_qos_params *params;
unsigned int i;
if (!dev->qos_enabled)
return;
BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
ARRAY_SIZE(wl->qos_params));
@ -3234,6 +3211,16 @@ static void b43_qos_clear(struct b43_wl *wl)
/* Initialize the core's QOS capabilities */
static void b43_qos_init(struct b43_wldev *dev)
{
if (!dev->qos_enabled) {
/* Disable QOS support. */
b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_EDCF);
b43_write16(dev, B43_MMIO_IFSCTL,
b43_read16(dev, B43_MMIO_IFSCTL)
& ~B43_MMIO_IFSCTL_USE_EDCF);
b43dbg(dev->wl, "QoS disabled\n");
return;
}
/* Upload the current QOS parameters. */
b43_qos_upload_all(dev);
@ -3242,6 +3229,7 @@ static void b43_qos_init(struct b43_wldev *dev)
b43_write16(dev, B43_MMIO_IFSCTL,
b43_read16(dev, B43_MMIO_IFSCTL)
| B43_MMIO_IFSCTL_USE_EDCF);
b43dbg(dev->wl, "QoS enabled\n");
}
static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
@ -3283,22 +3271,20 @@ static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
struct ieee80211_tx_queue_stats *stats)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
unsigned long flags;
struct b43_wldev *dev;
int err = -ENODEV;
if (!dev)
goto out;
spin_lock_irqsave(&wl->irq_lock, flags);
if (likely(b43_status(dev) >= B43_STAT_STARTED)) {
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (dev && b43_status(dev) >= B43_STAT_STARTED) {
if (b43_using_pio_transfers(dev))
b43_pio_get_tx_stats(dev, stats);
else
b43_dma_get_tx_stats(dev, stats);
err = 0;
}
spin_unlock_irqrestore(&wl->irq_lock, flags);
out:
mutex_unlock(&wl->mutex);
return err;
}
@ -3306,11 +3292,10 @@ static int b43_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
unsigned long flags;
spin_lock_irqsave(&wl->irq_lock, flags);
mutex_lock(&wl->mutex);
memcpy(stats, &wl->ieee_stats, sizeof(*stats));
spin_unlock_irqrestore(&wl->irq_lock, flags);
mutex_unlock(&wl->mutex);
return 0;
}
@ -3322,7 +3307,6 @@ static u64 b43_op_get_tsf(struct ieee80211_hw *hw)
u64 tsf;
mutex_lock(&wl->mutex);
spin_lock_irq(&wl->irq_lock);
dev = wl->current_dev;
if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED))
@ -3330,7 +3314,6 @@ static u64 b43_op_get_tsf(struct ieee80211_hw *hw)
else
tsf = 0;
spin_unlock_irq(&wl->irq_lock);
mutex_unlock(&wl->mutex);
return tsf;
@ -3342,13 +3325,11 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
struct b43_wldev *dev;
mutex_lock(&wl->mutex);
spin_lock_irq(&wl->irq_lock);
dev = wl->current_dev;
if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED))
b43_tsf_write(dev, tsf);
spin_unlock_irq(&wl->irq_lock);
mutex_unlock(&wl->mutex);
}
@ -3434,7 +3415,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
prev_status = b43_status(down_dev);
/* Shutdown the currently running core. */
if (prev_status >= B43_STAT_STARTED)
b43_wireless_core_stop(down_dev);
down_dev = b43_wireless_core_stop(down_dev);
if (prev_status >= B43_STAT_INITIALIZED)
b43_wireless_core_exit(down_dev);
@ -3498,7 +3479,6 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
struct b43_wldev *dev;
struct b43_phy *phy;
struct ieee80211_conf *conf = &hw->conf;
unsigned long flags;
int antenna;
int err = 0;
@ -3529,13 +3509,11 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
/* Adjust the desired TX power level. */
if (conf->power_level != 0) {
spin_lock_irqsave(&wl->irq_lock, flags);
if (conf->power_level != phy->desired_txpower) {
phy->desired_txpower = conf->power_level;
b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME |
B43_TXPWR_IGNORE_TSSI);
}
spin_unlock_irqrestore(&wl->irq_lock, flags);
}
/* Antennas for RX and management frame TX. */
@ -3620,7 +3598,6 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
unsigned long flags;
mutex_lock(&wl->mutex);
@ -3630,7 +3607,6 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
B43_WARN_ON(wl->vif != vif);
spin_lock_irqsave(&wl->irq_lock, flags);
if (changed & BSS_CHANGED_BSSID) {
if (conf->bssid)
memcpy(wl->bssid, conf->bssid, ETH_ALEN);
@ -3648,7 +3624,6 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID)
b43_write_mac_bssid_templates(dev);
}
spin_unlock_irqrestore(&wl->irq_lock, flags);
b43_mac_suspend(dev);
@ -3689,15 +3664,6 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -ENOSPC; /* User disabled HW-crypto */
mutex_lock(&wl->mutex);
spin_lock_irq(&wl->irq_lock);
write_lock(&wl->tx_lock);
/* Why do we need all this locking here?
* mutex -> Every config operation must take it.
* irq_lock -> We modify the dev->key array, which is accessed
* in the IRQ handlers.
* tx_lock -> We modify the dev->key array, which is accessed
* in the TX handler.
*/
dev = wl->current_dev;
err = -ENODEV;
@ -3789,8 +3755,6 @@ out_unlock:
sta ? sta->addr : bcast_addr);
b43_dump_keymemory(dev);
}
write_unlock(&wl->tx_lock);
spin_unlock_irq(&wl->irq_lock);
mutex_unlock(&wl->mutex);
return err;
@ -3801,15 +3765,15 @@ static void b43_op_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
unsigned long flags;
struct b43_wldev *dev;
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev) {
*fflags = 0;
return;
goto out_unlock;
}
spin_lock_irqsave(&wl->irq_lock, flags);
*fflags &= FIF_PROMISC_IN_BSS |
FIF_ALLMULTI |
FIF_FCSFAIL |
@ -3830,41 +3794,70 @@ static void b43_op_configure_filter(struct ieee80211_hw *hw,
if (changed && b43_status(dev) >= B43_STAT_INITIALIZED)
b43_adjust_opmode(dev);
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock:
mutex_unlock(&wl->mutex);
}
/* Locking: wl->mutex */
static void b43_wireless_core_stop(struct b43_wldev *dev)
/* Locking: wl->mutex
 * Returns the current dev. This might be different from the passed-in dev,
 * because the core might have gone away while the mutex was unlocked. */
static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
{
struct b43_wl *wl = dev->wl;
unsigned long flags;
struct b43_wldev *orig_dev;
if (b43_status(dev) < B43_STAT_STARTED)
return;
redo:
if (!dev || b43_status(dev) < B43_STAT_STARTED)
return dev;
/* Disable and sync interrupts. We must do this before
 * setting the status to INITIALIZED, as the interrupt handler
 * won't care about IRQs then. */
spin_lock_irqsave(&wl->irq_lock, flags);
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* flush */
spin_unlock_irqrestore(&wl->irq_lock, flags);
b43_synchronize_irq(dev);
write_lock_irqsave(&wl->tx_lock, flags);
b43_set_status(dev, B43_STAT_INITIALIZED);
write_unlock_irqrestore(&wl->tx_lock, flags);
b43_pio_stop(dev);
/* Cancel work. Unlock to avoid deadlocks. */
mutex_unlock(&wl->mutex);
/* Must unlock as it would otherwise deadlock. No races here.
* Cancel the possibly running self-rearming periodic work. */
cancel_delayed_work_sync(&dev->periodic_work);
cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev || b43_status(dev) < B43_STAT_STARTED) {
/* Whoops, aliens ate up the device while we were unlocked. */
return dev;
}
/* Disable interrupts on the device. */
b43_set_status(dev, B43_STAT_INITIALIZED);
if (0 /*FIXME dev->dev->bus->bustype == SSB_BUSTYPE_SDIO*/) {
/* wl->mutex is locked. That is enough. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
} else {
spin_lock_irq(&wl->hardirq_lock);
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
spin_unlock_irq(&wl->hardirq_lock);
}
/* Synchronize the interrupt handlers. Unlock to avoid deadlocks. */
orig_dev = dev;
mutex_unlock(&wl->mutex);
synchronize_irq(dev->dev->irq);
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev)
return dev;
if (dev != orig_dev) {
if (b43_status(dev) >= B43_STAT_STARTED)
goto redo;
return dev;
}
B43_WARN_ON(b43_read32(dev, B43_MMIO_GEN_IRQ_MASK));
/* Drain the TX queue */
while (skb_queue_len(&wl->tx_queue))
dev_kfree_skb(skb_dequeue(&wl->tx_queue));
b43_mac_suspend(dev);
free_irq(dev->dev->irq, dev);
b43dbg(wl, "Wireless interface stopped\n");
return dev;
}
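/*
 * Illustrative caller sketch (not part of this patch): after this change a
 * caller must keep using the device pointer returned by
 * b43_wireless_core_stop(), because wl->current_dev may have been replaced
 * or removed while the mutex was temporarily dropped. The function name
 * below is a hypothetical example; b43_op_stop() and b43_chip_reset() in
 * this patch follow the same pattern.
 */
static void example_bring_down(struct b43_wl *wl)
{
	struct b43_wldev *dev;

	mutex_lock(&wl->mutex);
	dev = wl->current_dev;
	if (dev && b43_status(dev) >= B43_STAT_STARTED) {
		dev = b43_wireless_core_stop(dev);
		if (!dev)
			goto out;	/* The core went away while unlocked. */
	}
	if (dev && b43_status(dev) >= B43_STAT_INITIALIZED)
		b43_wireless_core_exit(dev);
out:
	mutex_unlock(&wl->mutex);
}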
/* Locking: wl->mutex */
@ -3875,8 +3868,9 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED);
drain_txstatus_queue(dev);
err = request_irq(dev->dev->irq, b43_interrupt_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
b43_interrupt_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq);
goto out;
@ -4098,16 +4092,20 @@ static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
bus->pcicore.dev->id.revision <= 5) {
/* IMCFGLO timeouts workaround. */
tmp = ssb_read32(dev->dev, SSB_IMCFGLO);
tmp &= ~SSB_IMCFGLO_REQTO;
tmp &= ~SSB_IMCFGLO_SERTO;
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
case SSB_BUSTYPE_PCMCIA:
tmp &= ~SSB_IMCFGLO_REQTO;
tmp &= ~SSB_IMCFGLO_SERTO;
tmp |= 0x32;
break;
case SSB_BUSTYPE_SSB:
tmp &= ~SSB_IMCFGLO_REQTO;
tmp &= ~SSB_IMCFGLO_SERTO;
tmp |= 0x53;
break;
default:
break;
}
ssb_write32(dev->dev, SSB_IMCFGLO, tmp);
}
@ -4155,8 +4153,8 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
{
u32 macctl;
B43_WARN_ON(b43_status(dev) > B43_STAT_INITIALIZED);
if (b43_status(dev) != B43_STAT_INITIALIZED)
B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED);
if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
return;
b43_set_status(dev, B43_STAT_UNINIT);
@ -4309,7 +4307,6 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
unsigned long flags;
int err = -EOPNOTSUPP;
/* TODO: allow WDS/AP devices to coexist */
@ -4333,12 +4330,10 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
wl->if_type = conf->type;
memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
spin_lock_irqsave(&wl->irq_lock, flags);
b43_adjust_opmode(dev);
b43_set_pretbtt(dev);
b43_set_synth_pu_delay(dev, 0);
b43_upload_card_macaddress(dev);
spin_unlock_irqrestore(&wl->irq_lock, flags);
err = 0;
out_mutex_unlock:
@ -4352,7 +4347,6 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
unsigned long flags;
b43dbg(wl, "Removing Interface type %d\n", conf->type);
@ -4364,11 +4358,9 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
wl->operating = 0;
spin_lock_irqsave(&wl->irq_lock, flags);
b43_adjust_opmode(dev);
memset(wl->mac_addr, 0, ETH_ALEN);
b43_upload_card_macaddress(dev);
spin_unlock_irqrestore(&wl->irq_lock, flags);
mutex_unlock(&wl->mutex);
}
@ -4428,10 +4420,15 @@ static void b43_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&(wl->beacon_update_trigger));
mutex_lock(&wl->mutex);
if (b43_status(dev) >= B43_STAT_STARTED)
b43_wireless_core_stop(dev);
if (b43_status(dev) >= B43_STAT_STARTED) {
dev = b43_wireless_core_stop(dev);
if (!dev)
goto out_unlock;
}
b43_wireless_core_exit(dev);
wl->radio_enabled = 0;
out_unlock:
mutex_unlock(&wl->mutex);
cancel_work_sync(&(wl->txpower_adjust_work));
@ -4441,11 +4438,10 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, bool set)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
unsigned long flags;
spin_lock_irqsave(&wl->irq_lock, flags);
mutex_lock(&wl->mutex);
b43_update_templates(wl);
spin_unlock_irqrestore(&wl->irq_lock, flags);
mutex_unlock(&wl->mutex);
return 0;
}
@ -4526,8 +4522,13 @@ static void b43_chip_reset(struct work_struct *work)
prev_status = b43_status(dev);
/* Bring the device down... */
if (prev_status >= B43_STAT_STARTED)
b43_wireless_core_stop(dev);
if (prev_status >= B43_STAT_STARTED) {
dev = b43_wireless_core_stop(dev);
if (!dev) {
err = -ENODEV;
goto out;
}
}
if (prev_status >= B43_STAT_INITIALIZED)
b43_wireless_core_exit(dev);
@ -4742,9 +4743,6 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
wldev->wl = wl;
b43_set_status(wldev, B43_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
tasklet_init(&wldev->isr_tasklet,
(void (*)(unsigned long))b43_interrupt_tasklet,
(unsigned long)wldev);
INIT_LIST_HEAD(&wldev->list);
err = b43_wireless_core_attach(wldev);
@ -4841,14 +4839,14 @@ static int b43_wireless_init(struct ssb_device *dev)
/* Initialize struct b43_wl */
wl->hw = hw;
spin_lock_init(&wl->irq_lock);
rwlock_init(&wl->tx_lock);
spin_lock_init(&wl->leds_lock);
spin_lock_init(&wl->shm_lock);
mutex_init(&wl->mutex);
spin_lock_init(&wl->hardirq_lock);
INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
skb_queue_head_init(&wl->tx_queue);
ssb_set_devtypedata(dev, wl);
b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n",
@ -4946,8 +4944,8 @@ static int b43_suspend(struct ssb_device *dev, pm_message_t state)
wldev->suspend_in_progress = true;
wldev->suspend_init_status = b43_status(wldev);
if (wldev->suspend_init_status >= B43_STAT_STARTED)
b43_wireless_core_stop(wldev);
if (wldev->suspend_init_status >= B43_STAT_INITIALIZED)
wldev = b43_wireless_core_stop(wldev);
if (wldev && wldev->suspend_init_status >= B43_STAT_INITIALIZED)
b43_wireless_core_exit(wldev);
mutex_unlock(&wl->mutex);


@ -112,13 +112,9 @@ void b43_tsf_read(struct b43_wldev *dev, u64 * tsf);
void b43_tsf_write(struct b43_wldev *dev, u64 tsf);
u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset);
u32 __b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset);
u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset);
u16 __b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset);
void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value);
void __b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value);
void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value);
void __b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value);
u64 b43_hf_read(struct b43_wldev *dev);
void b43_hf_write(struct b43_wldev *dev, u64 value);


@ -347,7 +347,6 @@ void b43_phy_txpower_adjust_work(struct work_struct *work)
mutex_unlock(&wl->mutex);
}
/* Called with wl->irq_lock locked */
void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags)
{
struct b43_phy *phy = &dev->phy;


@ -131,7 +131,7 @@ enum b43_txpwr_result {
* If the parameter "ignore_tssi" is true, the TSSI values should
* be ignored and a recalculation of the power settings should be
* done even if the TSSI values did not change.
* This callback is called with wl->irq_lock held and must not sleep.
* This function is allowed to sleep, but should avoid doing so when possible.
* Must not be NULL.
* @adjust_txpower: Write the previously calculated TX power settings
* (from @recalc_txpower) to the hardware.
@ -379,7 +379,6 @@ void b43_software_rfkill(struct b43_wldev *dev, bool blocked);
*
* Compare the current TX power output to the desired power emission
* and schedule an adjustment in case it mismatches.
* Requires wl->irq_lock locked.
*
* @flags: OR'ed enum b43_phy_txpower_check_flags flags.
* See the docs below.


@ -2823,8 +2823,6 @@ static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
b43_mac_suspend(dev);
spin_lock_irq(&dev->wl->irq_lock);
/* Calculate the new attenuation values. */
bbatt = gphy->bbatt.att;
bbatt += gphy->bbatt_delta;
@ -2864,11 +2862,6 @@ static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
gphy->rfatt.att = rfatt;
gphy->bbatt.att = bbatt;
/* We drop the lock early, so we can sleep during hardware
* adjustment. Possible races with op_recalc_txpower are harmless,
* as we will be called once again in case we raced. */
spin_unlock_irq(&dev->wl->irq_lock);
if (b43_debug(dev, B43_DBG_XMITPOWER))
b43dbg(dev->wl, "Adjusting TX power\n");


@ -141,8 +141,7 @@ struct b43_phy_g {
int tgt_idle_tssi;
/* Current idle TSSI */
int cur_idle_tssi;
/* The current average TSSI.
* Needs irq_lock, as it's updated in the IRQ path. */
/* The current average TSSI. */
u8 average_tssi;
/* Current TX power level attenuation control values */
struct b43_bbatt bbatt;


@ -32,9 +32,6 @@
#include <linux/delay.h>
static void b43_pio_rx_work(struct work_struct *work);
static u16 generate_cookie(struct b43_pio_txqueue *q,
struct b43_pio_txpacket *pack)
{
@ -144,7 +141,6 @@ static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
q = kzalloc(sizeof(*q), GFP_KERNEL);
if (!q)
return NULL;
spin_lock_init(&q->lock);
q->dev = dev;
q->rev = dev->dev->id.revision;
q->mmio_base = index_to_pioqueue_base(dev, index) +
@ -179,12 +175,10 @@ static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
q = kzalloc(sizeof(*q), GFP_KERNEL);
if (!q)
return NULL;
spin_lock_init(&q->lock);
q->dev = dev;
q->rev = dev->dev->id.revision;
q->mmio_base = index_to_pioqueue_base(dev, index) +
pio_rxqueue_offset(dev);
INIT_WORK(&q->rx_work, b43_pio_rx_work);
/* Enable Direct FIFO RX (PIO) on the engine. */
b43_dma_direct_fifo_rx(dev, index, 1);
@ -249,13 +243,6 @@ void b43_pio_free(struct b43_wldev *dev)
destroy_queue_tx(pio, tx_queue_AC_BK);
}
void b43_pio_stop(struct b43_wldev *dev)
{
if (!b43_using_pio_transfers(dev))
return;
cancel_work_sync(&dev->pio.rx_queue->rx_work);
}
int b43_pio_init(struct b43_wldev *dev)
{
struct b43_pio *pio = &dev->pio;
@ -494,7 +481,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
struct b43_pio_txqueue *q;
struct ieee80211_hdr *hdr;
unsigned long flags;
unsigned int hdrlen, total_len;
int err = 0;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@ -512,20 +498,18 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
}
spin_lock_irqsave(&q->lock, flags);
hdrlen = b43_txhdr_size(dev);
total_len = roundup(skb->len + hdrlen, 4);
if (unlikely(total_len > q->buffer_size)) {
err = -ENOBUFS;
b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
goto out_unlock;
goto out;
}
if (unlikely(q->free_packet_slots == 0)) {
err = -ENOBUFS;
b43warn(dev->wl, "PIO: TX packet overflow.\n");
goto out_unlock;
goto out;
}
B43_WARN_ON(q->buffer_used > q->buffer_size);
@ -534,7 +518,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
err = -EBUSY;
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
q->stopped = 1;
goto out_unlock;
goto out;
}
/* Assign the queue number to the ring (if not already done before)
@ -548,11 +532,11 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
* anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb);
err = 0;
goto out_unlock;
goto out;
}
if (unlikely(err)) {
b43err(dev->wl, "PIO transmission failure\n");
goto out_unlock;
goto out;
}
q->nr_tx_packets++;
@ -564,13 +548,10 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
q->stopped = 1;
}
out_unlock:
spin_unlock_irqrestore(&q->lock, flags);
out:
return err;
}
/* Called with IRQs disabled. */
void b43_pio_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status)
{
@ -584,8 +565,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
return;
B43_WARN_ON(!pack);
spin_lock(&q->lock); /* IRQs are already disabled. */
info = IEEE80211_SKB_CB(pack->skb);
b43_fill_txstatus_report(dev, info, status);
@ -603,8 +582,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
q->stopped = 0;
}
spin_unlock(&q->lock);
}
void b43_pio_get_tx_stats(struct b43_wldev *dev,
@ -612,17 +589,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev,
{
const int nr_queues = dev->wl->hw->queues;
struct b43_pio_txqueue *q;
unsigned long flags;
int i;
for (i = 0; i < nr_queues; i++) {
q = select_queue_by_priority(dev, i);
spin_lock_irqsave(&q->lock, flags);
stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
stats[i].count = q->nr_tx_packets;
spin_unlock_irqrestore(&q->lock, flags);
}
}
@ -760,37 +734,23 @@ rx_error:
return 1;
}
/* RX workqueue. We can sleep, yay! */
static void b43_pio_rx_work(struct work_struct *work)
{
struct b43_pio_rxqueue *q = container_of(work, struct b43_pio_rxqueue,
rx_work);
unsigned int budget = 50;
bool stop;
do {
spin_lock_irq(&q->lock);
stop = (pio_rx_frame(q) == 0);
spin_unlock_irq(&q->lock);
cond_resched();
if (stop)
break;
} while (--budget);
}
/* Called with IRQs disabled. */
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
/* Due to latency issues we must run the RX path in
* a workqueue to be able to schedule between packets. */
ieee80211_queue_work(q->dev->wl->hw, &q->rx_work);
unsigned int count = 0;
bool stop;
while (1) {
stop = (pio_rx_frame(q) == 0);
if (stop)
break;
cond_resched();
if (WARN_ON_ONCE(++count > 10000))
break;
}
}
static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->rev >= 8) {
b43_piotx_write32(q, B43_PIO8_TXCTL,
b43_piotx_read32(q, B43_PIO8_TXCTL)
@ -800,14 +760,10 @@ static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
b43_piotx_read16(q, B43_PIO_TXCTL)
| B43_PIO_TXCTL_SUSPREQ);
}
spin_unlock_irqrestore(&q->lock, flags);
}
static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->rev >= 8) {
b43_piotx_write32(q, B43_PIO8_TXCTL,
b43_piotx_read32(q, B43_PIO8_TXCTL)
@ -817,7 +773,6 @@ static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
b43_piotx_read16(q, B43_PIO_TXCTL)
& ~B43_PIO_TXCTL_SUSPREQ);
}
spin_unlock_irqrestore(&q->lock, flags);
}
void b43_pio_tx_suspend(struct b43_wldev *dev)


@ -70,7 +70,6 @@ struct b43_pio_txpacket {
struct b43_pio_txqueue {
struct b43_wldev *dev;
spinlock_t lock;
u16 mmio_base;
/* The device queue buffer size in bytes. */
@ -103,12 +102,8 @@ struct b43_pio_txqueue {
struct b43_pio_rxqueue {
struct b43_wldev *dev;
spinlock_t lock;
u16 mmio_base;
/* Work to reduce latency issues on RX. */
struct work_struct rx_work;
/* Shortcut to the 802.11 core revision. This is to
* avoid horrible pointer dereferencing in the fastpaths. */
u8 rev;
@ -162,7 +157,6 @@ static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
int b43_pio_init(struct b43_wldev *dev);
void b43_pio_stop(struct b43_wldev *dev);
void b43_pio_free(struct b43_wldev *dev);
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);


@ -94,7 +94,6 @@ static ssize_t b43_attr_interfmode_store(struct device *dev,
const char *buf, size_t count)
{
struct b43_wldev *wldev = dev_to_b43_wldev(dev);
unsigned long flags;
int err;
int mode;
@ -120,7 +119,6 @@ static ssize_t b43_attr_interfmode_store(struct device *dev,
}
mutex_lock(&wldev->wl->mutex);
spin_lock_irqsave(&wldev->wl->irq_lock, flags);
if (wldev->phy.ops->interf_mitigation) {
err = wldev->phy.ops->interf_mitigation(wldev, mode);
@ -132,7 +130,6 @@ static ssize_t b43_attr_interfmode_store(struct device *dev,
err = -ENOSYS;
mmiowb();
spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
mutex_unlock(&wldev->wl->mutex);
return err ? err : count;


@ -267,11 +267,11 @@ int b43_generate_txhdr(struct b43_wldev *dev,
*/
ieee80211_get_tkip_key(info->control.hw_key, skb_frag,
IEEE80211_TKIP_P1_KEY, (u8*)phase1key);
/* phase1key is in host endian */
for (i = 0; i < 5; i++)
phase1key[i] = cpu_to_le16(phase1key[i]);
memcpy(txhdr->iv, phase1key, 10);
/* phase1key is in host endian. Copy to little-endian txhdr->iv. */
for (i = 0; i < 5; i++) {
txhdr->iv[i * 2 + 0] = phase1key[i];
txhdr->iv[i * 2 + 1] = phase1key[i] >> 8;
}
/* iv16 */
memcpy(txhdr->iv + 10, ((u8 *) wlhdr) + wlhdr_len, 3);
} else {


@ -3106,16 +3106,20 @@ static void b43legacy_imcfglo_timeouts_workaround(struct b43legacy_wldev *dev)
bus->pcicore.dev->id.revision <= 5) {
/* IMCFGLO timeouts workaround. */
tmp = ssb_read32(dev->dev, SSB_IMCFGLO);
tmp &= ~SSB_IMCFGLO_REQTO;
tmp &= ~SSB_IMCFGLO_SERTO;
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
case SSB_BUSTYPE_PCMCIA:
tmp &= ~SSB_IMCFGLO_REQTO;
tmp &= ~SSB_IMCFGLO_SERTO;
tmp |= 0x32;
break;
case SSB_BUSTYPE_SSB:
tmp &= ~SSB_IMCFGLO_REQTO;
tmp &= ~SSB_IMCFGLO_SERTO;
tmp |= 0x53;
break;
default:
break;
}
ssb_write32(dev->dev, SSB_IMCFGLO, tmp);
}


@ -331,9 +331,8 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
preamble_mask = erp->short_preamble << 3;
rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, erp->ack_timeout);
rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME,
erp->ack_consume_time);
rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff);
rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a);
rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);


@ -337,9 +337,8 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
preamble_mask = erp->short_preamble << 3;
rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, erp->ack_timeout);
rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME,
erp->ack_consume_time);
rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162);
rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2);
rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);


@ -488,10 +488,6 @@ static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev,
{
u16 reg;
rt2500usb_register_read(rt2x00dev, TXRX_CSR1, &reg);
rt2x00_set_field16(&reg, TXRX_CSR1_ACK_TIMEOUT, erp->ack_timeout);
rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg);
rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg);
rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE,
!!erp->short_preamble);


@ -580,8 +580,7 @@ static void rt2800usb_config_erp(struct rt2x00_dev *rt2x00dev,
u32 reg;
rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT,
DIV_ROUND_UP(erp->ack_timeout, erp->slot_time));
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);


@ -417,9 +417,6 @@ struct rt2x00lib_erp {
int short_preamble;
int cts_protection;
int ack_timeout;
int ack_consume_time;
u32 basic_rates;
int slot_time;


@ -94,17 +94,6 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
erp.difs = bss_conf->use_short_slot ? SHORT_DIFS : DIFS;
erp.eifs = bss_conf->use_short_slot ? SHORT_EIFS : EIFS;
erp.ack_timeout = PLCP + erp.difs + GET_DURATION(ACK_SIZE, 10);
erp.ack_consume_time = SIFS + PLCP + GET_DURATION(ACK_SIZE, 10);
if (bss_conf->use_short_preamble) {
erp.ack_timeout += SHORT_PREAMBLE;
erp.ack_consume_time += SHORT_PREAMBLE;
} else {
erp.ack_timeout += PREAMBLE;
erp.ack_consume_time += PREAMBLE;
}
erp.basic_rates = bss_conf->basic_rates;
erp.beacon_int = bss_conf->beacon_int;


@ -598,7 +598,7 @@ static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
u32 reg;
rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, erp->ack_timeout);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32);
rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);


@ -561,7 +561,7 @@ static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
u32 reg;
rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, erp->ack_timeout);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32);
rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);


@ -66,6 +66,20 @@ config SSB_PCMCIAHOST
If unsure, say N
config SSB_SDIOHOST_POSSIBLE
bool
depends on SSB && (MMC = y || MMC = SSB)
default y
config SSB_SDIOHOST
bool "Support for SSB on SDIO-bus host"
depends on SSB_SDIOHOST_POSSIBLE
help
Support for a Sonics Silicon Backplane on top
of an SDIO device.
If unsure, say N
config SSB_SILENT
bool "No SSB kernel messages"
depends on SSB && EMBEDDED


@ -6,6 +6,7 @@ ssb-$(CONFIG_SSB_SPROM) += sprom.o
# host support
ssb-$(CONFIG_SSB_PCIHOST) += pci.o pcihost_wrapper.o
ssb-$(CONFIG_SSB_PCMCIAHOST) += pcmcia.o
ssb-$(CONFIG_SSB_SDIOHOST) += sdio.o
# built-in drivers
ssb-y += driver_chipcommon.o


@ -17,6 +17,7 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/mmc/sdio_func.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
@ -88,6 +89,25 @@ found:
}
#endif /* CONFIG_SSB_PCMCIAHOST */
#ifdef CONFIG_SSB_SDIOHOST
struct ssb_bus *ssb_sdio_func_to_bus(struct sdio_func *func)
{
struct ssb_bus *bus;
ssb_buses_lock();
list_for_each_entry(bus, &buses, list) {
if (bus->bustype == SSB_BUSTYPE_SDIO &&
bus->host_sdio == func)
goto found;
}
bus = NULL;
found:
ssb_buses_unlock();
return bus;
}
#endif /* CONFIG_SSB_SDIOHOST */
int ssb_for_each_bus_call(unsigned long data,
int (*func)(struct ssb_bus *bus, unsigned long data))
{
@ -467,6 +487,12 @@ static int ssb_devices_register(struct ssb_bus *bus)
#ifdef CONFIG_SSB_PCMCIAHOST
sdev->irq = bus->host_pcmcia->irq.AssignedIRQ;
dev->parent = &bus->host_pcmcia->dev;
#endif
break;
case SSB_BUSTYPE_SDIO:
#ifdef CONFIG_SSB_SDIO
sdev->irq = bus->host_sdio->dev.irq;
dev->parent = &bus->host_sdio->dev;
#endif
break;
case SSB_BUSTYPE_SSB:
@ -724,12 +750,18 @@ static int ssb_bus_register(struct ssb_bus *bus,
err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1);
if (err)
goto out;
/* Init SDIO-host device (if any), before the scan */
err = ssb_sdio_init(bus);
if (err)
goto err_disable_xtal;
ssb_buses_lock();
bus->busnumber = next_busnumber;
/* Scan for devices (cores) */
err = ssb_bus_scan(bus, baseaddr);
if (err)
goto err_disable_xtal;
goto err_sdio_exit;
/* Init PCI-host device (if any) */
err = ssb_pci_init(bus);
@ -776,6 +808,8 @@ err_pci_exit:
ssb_pci_exit(bus);
err_unmap:
ssb_iounmap(bus);
err_sdio_exit:
ssb_sdio_exit(bus);
err_disable_xtal:
ssb_buses_unlock();
ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0);
@ -825,6 +859,28 @@ int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
EXPORT_SYMBOL(ssb_bus_pcmciabus_register);
#endif /* CONFIG_SSB_PCMCIAHOST */
#ifdef CONFIG_SSB_SDIOHOST
int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
unsigned int quirks)
{
int err;
bus->bustype = SSB_BUSTYPE_SDIO;
bus->host_sdio = func;
bus->ops = &ssb_sdio_ops;
bus->quirks = quirks;
err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0);
if (!err) {
ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
"SDIO device %s\n", sdio_func_id(func));
}
return err;
}
EXPORT_SYMBOL(ssb_bus_sdiobus_register);
#endif /* CONFIG_SSB_SDIOHOST */
int ssb_bus_ssbbus_register(struct ssb_bus *bus,
unsigned long baseaddr,
ssb_invariants_func_t get_invariants)
@ -1358,8 +1414,10 @@ static int __init ssb_modinit(void)
ssb_buses_lock();
err = ssb_attach_queued_buses();
ssb_buses_unlock();
if (err)
if (err) {
bus_unregister(&ssb_bustype);
goto out;
}
err = b43_pci_ssb_bridge_init();
if (err) {
@ -1375,7 +1433,7 @@ static int __init ssb_modinit(void)
/* don't fail SSB init because of this */
err = 0;
}
out:
return err;
}
/* ssb must be initialized after PCI but before the ssb drivers.


@ -175,6 +175,9 @@ static u32 scan_read32(struct ssb_bus *bus, u8 current_coreidx,
} else
ssb_pcmcia_switch_segment(bus, 0);
break;
case SSB_BUSTYPE_SDIO:
offset += current_coreidx * SSB_CORE_SIZE;
return ssb_sdio_scan_read32(bus, offset);
}
return readl(bus->mmio + offset);
}
@ -188,6 +191,8 @@ static int scan_switchcore(struct ssb_bus *bus, u8 coreidx)
return ssb_pci_switch_coreidx(bus, coreidx);
case SSB_BUSTYPE_PCMCIA:
return ssb_pcmcia_switch_coreidx(bus, coreidx);
case SSB_BUSTYPE_SDIO:
return ssb_sdio_scan_switch_coreidx(bus, coreidx);
}
return 0;
}
@ -206,6 +211,8 @@ void ssb_iounmap(struct ssb_bus *bus)
SSB_BUG_ON(1); /* Can't reach this code. */
#endif
break;
case SSB_BUSTYPE_SDIO:
break;
}
bus->mmio = NULL;
bus->mapped_device = NULL;
@ -230,6 +237,10 @@ static void __iomem *ssb_ioremap(struct ssb_bus *bus,
SSB_BUG_ON(1); /* Can't reach this code. */
#endif
break;
case SSB_BUSTYPE_SDIO:
/* Nothing to ioremap in the SDIO case, just fake it */
mmio = (void __iomem *)baseaddr;
break;
}
return mmio;

drivers/ssb/sdio.c (new file, 610 lines)

@ -0,0 +1,610 @@
/*
* Sonics Silicon Backplane
* SDIO-Hostbus related functions
*
* Copyright 2009 Albert Herranz <albert_herranz@yahoo.es>
*
* Based on drivers/ssb/pcmcia.c
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2007-2008 Michael Buesch <mb@bu3sch.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*
*/
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#include <linux/mmc/sdio_func.h>
#include "ssb_private.h"
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 1
/* Hardware invariants CIS tuples */
#define SSB_SDIO_CIS 0x80
#define SSB_SDIO_CIS_SROMREV 0x00
#define SSB_SDIO_CIS_ID 0x01
#define SSB_SDIO_CIS_BOARDREV 0x02
#define SSB_SDIO_CIS_PA 0x03
#define SSB_SDIO_CIS_PA_PA0B0_LO 0
#define SSB_SDIO_CIS_PA_PA0B0_HI 1
#define SSB_SDIO_CIS_PA_PA0B1_LO 2
#define SSB_SDIO_CIS_PA_PA0B1_HI 3
#define SSB_SDIO_CIS_PA_PA0B2_LO 4
#define SSB_SDIO_CIS_PA_PA0B2_HI 5
#define SSB_SDIO_CIS_PA_ITSSI 6
#define SSB_SDIO_CIS_PA_MAXPOW 7
#define SSB_SDIO_CIS_OEMNAME 0x04
#define SSB_SDIO_CIS_CCODE 0x05
#define SSB_SDIO_CIS_ANTENNA 0x06
#define SSB_SDIO_CIS_ANTGAIN 0x07
#define SSB_SDIO_CIS_BFLAGS 0x08
#define SSB_SDIO_CIS_LEDS 0x09
#define CISTPL_FUNCE_LAN_NODE_ID 0x04 /* same as in PCMCIA */
/*
* Function 1 miscellaneous registers.
*
* Definitions match src/include/sbsdio.h from the
* Android Open Source Project
* http://android.git.kernel.org/?p=platform/system/wlan/broadcom.git
*
*/
#define SBSDIO_FUNC1_SBADDRLOW 0x1000a /* SB Address window Low (b15) */
#define SBSDIO_FUNC1_SBADDRMID 0x1000b /* SB Address window Mid (b23-b16) */
#define SBSDIO_FUNC1_SBADDRHIGH 0x1000c /* SB Address window High (b24-b31) */
/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid address bits in SBADDRLOW */
#define SBSDIO_SBADDRMID_MASK 0xff /* Valid address bits in SBADDRMID */
#define SBSDIO_SBADDRHIGH_MASK 0xff /* Valid address bits in SBADDRHIGH */
#define SBSDIO_SB_OFT_ADDR_MASK 0x7FFF /* sb offset addr is <= 15 bits, 32k */
/* REVISIT: this flag doesn't seem to matter */
#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x8000 /* forces 32-bit SB access */
/*
* Address map within the SDIO function address space (128K).
*
* Start End Description
* ------- ------- ------------------------------------------
* 0x00000 0x0ffff selected backplane address window (64K)
* 0x10000 0x1ffff backplane control registers (max 64K)
*
* The current address window is configured by writing to registers
* SBADDRLOW, SBADDRMID and SBADDRHIGH.
*
* In order to access the contents of a 32-bit Silicon Backplane address
* the backplane address window must be first loaded with the highest
* 16 bits of the target address. Then, an access must be done to the
* SDIO function address space using the lower 15 bits of the address.
* Bit 15 of the address must be set when doing 32 bit accesses.
*
* 10987654321098765432109876543210
* WWWWWWWWWWWWWWWWW SB Address Window
* OOOOOOOOOOOOOOOO Offset within SB Address Window
* a 32-bit access flag
*/
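/*
 * Illustrative helper (not part of this file): the address split described
 * above can be restated in code as follows. The helper name is hypothetical;
 * ssb_sdio_set_sbaddr_window() and the read/write routines below do the
 * real work.
 */
static inline u32 ssb_sdio_example_offset(u32 sb_address, bool want_32bit)
{
	/* The lower 15 bits index into the currently selected window... */
	u32 offset = sb_address & SBSDIO_SB_OFT_ADDR_MASK;

	/* ...and bit 15 of the SDIO address flags a 32-bit access. */
	if (want_32bit)
		offset |= SBSDIO_SB_ACCESS_2_4B_FLAG;
	return offset;
}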
/*
* SSB I/O via SDIO.
*
* NOTE: SDIO address @addr is 17 bits long (SDIO address space is 128K).
*/
static inline struct device *ssb_sdio_dev(struct ssb_bus *bus)
{
return &bus->host_sdio->dev;
}
/* host claimed */
static int ssb_sdio_writeb(struct ssb_bus *bus, unsigned int addr, u8 val)
{
int error = 0;
sdio_writeb(bus->host_sdio, val, addr, &error);
if (unlikely(error)) {
dev_dbg(ssb_sdio_dev(bus), "%08X <- %02x, error %d\n",
addr, val, error);
}
return error;
}
#if 0
static u8 ssb_sdio_readb(struct ssb_bus *bus, unsigned int addr)
{
u8 val;
int error = 0;
val = sdio_readb(bus->host_sdio, addr, &error);
if (unlikely(error)) {
dev_dbg(ssb_sdio_dev(bus), "%08X -> %02x, error %d\n",
addr, val, error);
}
return val;
}
#endif
/* host claimed */
static int ssb_sdio_set_sbaddr_window(struct ssb_bus *bus, u32 address)
{
int error;
error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRLOW,
(address >> 8) & SBSDIO_SBADDRLOW_MASK);
if (error)
goto out;
error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRMID,
(address >> 16) & SBSDIO_SBADDRMID_MASK);
if (error)
goto out;
error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRHIGH,
(address >> 24) & SBSDIO_SBADDRHIGH_MASK);
if (error)
goto out;
bus->sdio_sbaddr = address;
out:
if (error) {
dev_dbg(ssb_sdio_dev(bus), "failed to set address window"
" to 0x%08x, error %d\n", address, error);
}
return error;
}
/* for enumeration use only */
u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset)
{
u32 val;
int error;
sdio_claim_host(bus->host_sdio);
val = sdio_readl(bus->host_sdio, offset, &error);
sdio_release_host(bus->host_sdio);
if (unlikely(error)) {
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n",
bus->sdio_sbaddr >> 16, offset, val, error);
}
return val;
}
/* for enumeration use only */
int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
{
u32 sbaddr;
int error;
sbaddr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE;
sdio_claim_host(bus->host_sdio);
error = ssb_sdio_set_sbaddr_window(bus, sbaddr);
sdio_release_host(bus->host_sdio);
if (error) {
dev_err(ssb_sdio_dev(bus), "failed to switch to core %u,"
" error %d\n", coreidx, error);
goto out;
}
out:
return error;
}
/* host must be already claimed */
int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev)
{
u8 coreidx = dev->core_index;
u32 sbaddr;
int error = 0;
sbaddr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE;
if (unlikely(bus->sdio_sbaddr != sbaddr)) {
#if SSB_VERBOSE_SDIOCORESWITCH_DEBUG
dev_info(ssb_sdio_dev(bus),
"switching to %s core, index %d\n",
ssb_core_name(dev->id.coreid), coreidx);
#endif
error = ssb_sdio_set_sbaddr_window(bus, sbaddr);
if (error) {
dev_dbg(ssb_sdio_dev(bus), "failed to switch to"
" core %u, error %d\n", coreidx, error);
goto out;
}
bus->mapped_device = dev;
}
out:
return error;
}
static u8 ssb_sdio_read8(struct ssb_device *dev, u16 offset)
{
struct ssb_bus *bus = dev->bus;
u8 val = 0xff;
int error = 0;
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev)))
goto out;
offset |= bus->sdio_sbaddr & 0xffff;
offset &= SBSDIO_SB_OFT_ADDR_MASK;
val = sdio_readb(bus->host_sdio, offset, &error);
if (error) {
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %02x, error %d\n",
bus->sdio_sbaddr >> 16, offset, val, error);
}
out:
sdio_release_host(bus->host_sdio);
return val;
}
static u16 ssb_sdio_read16(struct ssb_device *dev, u16 offset)
{
struct ssb_bus *bus = dev->bus;
u16 val = 0xffff;
int error = 0;
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev)))
goto out;
offset |= bus->sdio_sbaddr & 0xffff;
offset &= SBSDIO_SB_OFT_ADDR_MASK;
val = sdio_readw(bus->host_sdio, offset, &error);
if (error) {
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %04x, error %d\n",
bus->sdio_sbaddr >> 16, offset, val, error);
}
out:
sdio_release_host(bus->host_sdio);
return val;
}
static u32 ssb_sdio_read32(struct ssb_device *dev, u16 offset)
{
struct ssb_bus *bus = dev->bus;
u32 val = 0xffffffff;
int error = 0;
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev)))
goto out;
offset |= bus->sdio_sbaddr & 0xffff;
offset &= SBSDIO_SB_OFT_ADDR_MASK;
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */
val = sdio_readl(bus->host_sdio, offset, &error);
if (error) {
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n",
bus->sdio_sbaddr >> 16, offset, val, error);
}
out:
sdio_release_host(bus->host_sdio);
return val;
}
#ifdef CONFIG_SSB_BLOCKIO
static void ssb_sdio_block_read(struct ssb_device *dev, void *buffer,
size_t count, u16 offset, u8 reg_width)
{
size_t saved_count = count;
struct ssb_bus *bus = dev->bus;
int error = 0;
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev))) {
error = -EIO;
memset(buffer, 0xff, count);
goto err_out;
}
offset |= bus->sdio_sbaddr & 0xffff;
offset &= SBSDIO_SB_OFT_ADDR_MASK;
switch (reg_width) {
case sizeof(u8): {
error = sdio_readsb(bus->host_sdio, buffer, offset, count);
break;
}
case sizeof(u16): {
SSB_WARN_ON(count & 1);
error = sdio_readsb(bus->host_sdio, buffer, offset, count);
break;
}
case sizeof(u32): {
SSB_WARN_ON(count & 3);
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */
error = sdio_readsb(bus->host_sdio, buffer, offset, count);
break;
}
default:
SSB_WARN_ON(1);
}
if (!error)
goto out;
err_out:
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%u), error %d\n",
bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error);
out:
sdio_release_host(bus->host_sdio);
}
#endif /* CONFIG_SSB_BLOCKIO */
static void ssb_sdio_write8(struct ssb_device *dev, u16 offset, u8 val)
{
struct ssb_bus *bus = dev->bus;
int error = 0;
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev)))
goto out;
offset |= bus->sdio_sbaddr & 0xffff;
offset &= SBSDIO_SB_OFT_ADDR_MASK;
sdio_writeb(bus->host_sdio, val, offset, &error);
if (error) {
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %02x, error %d\n",
bus->sdio_sbaddr >> 16, offset, val, error);
}
out:
sdio_release_host(bus->host_sdio);
}
static void ssb_sdio_write16(struct ssb_device *dev, u16 offset, u16 val)
{
struct ssb_bus *bus = dev->bus;
int error = 0;
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev)))
goto out;
offset |= bus->sdio_sbaddr & 0xffff;
offset &= SBSDIO_SB_OFT_ADDR_MASK;
sdio_writew(bus->host_sdio, val, offset, &error);
if (error) {
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %04x, error %d\n",
bus->sdio_sbaddr >> 16, offset, val, error);
}
out:
sdio_release_host(bus->host_sdio);
}
static void ssb_sdio_write32(struct ssb_device *dev, u16 offset, u32 val)
{
struct ssb_bus *bus = dev->bus;
int error = 0;
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev)))
goto out;
offset |= bus->sdio_sbaddr & 0xffff;
offset &= SBSDIO_SB_OFT_ADDR_MASK;
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */
sdio_writel(bus->host_sdio, val, offset, &error);
if (error) {
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %08x, error %d\n",
bus->sdio_sbaddr >> 16, offset, val, error);
}
if (bus->quirks & SSB_QUIRK_SDIO_READ_AFTER_WRITE32)
sdio_readl(bus->host_sdio, 0, &error);
out:
sdio_release_host(bus->host_sdio);
}
#ifdef CONFIG_SSB_BLOCKIO
static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
size_t count, u16 offset, u8 reg_width)
{
size_t saved_count = count;
struct ssb_bus *bus = dev->bus;
int error = 0;
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev))) {
error = -EIO;
memset((void *)buffer, 0xff, count);
goto err_out;
}
offset |= bus->sdio_sbaddr & 0xffff;
offset &= SBSDIO_SB_OFT_ADDR_MASK;
switch (reg_width) {
case sizeof(u8):
error = sdio_writesb(bus->host_sdio, offset,
(void *)buffer, count);
break;
case sizeof(u16):
SSB_WARN_ON(count & 1);
error = sdio_writesb(bus->host_sdio, offset,
(void *)buffer, count);
break;
case sizeof(u32):
SSB_WARN_ON(count & 3);
offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */
error = sdio_writesb(bus->host_sdio, offset,
(void *)buffer, count);
break;
default:
SSB_WARN_ON(1);
}
if (!error)
goto out;
err_out:
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%u), error %d\n",
bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error);
out:
sdio_release_host(bus->host_sdio);
}
#endif /* CONFIG_SSB_BLOCKIO */
/* Not "static", as it's used in main.c */
const struct ssb_bus_ops ssb_sdio_ops = {
.read8 = ssb_sdio_read8,
.read16 = ssb_sdio_read16,
.read32 = ssb_sdio_read32,
.write8 = ssb_sdio_write8,
.write16 = ssb_sdio_write16,
.write32 = ssb_sdio_write32,
#ifdef CONFIG_SSB_BLOCKIO
.block_read = ssb_sdio_block_read,
.block_write = ssb_sdio_block_write,
#endif
};
#define GOTO_ERROR_ON(condition, description) do { \
if (unlikely(condition)) { \
error_description = description; \
goto error; \
} \
} while (0)
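/* Note: the do { ... } while (0) wrapper makes GOTO_ERROR_ON() expand to a
 * single statement, so it can sit safely in an unbraced if/else branch. It
 * relies on an "error_description" variable and an "error:" label existing
 * in the calling function, as in ssb_sdio_get_invariants() below. */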
int ssb_sdio_get_invariants(struct ssb_bus *bus,
struct ssb_init_invariants *iv)
{
struct ssb_sprom *sprom = &iv->sprom;
struct ssb_boardinfo *bi = &iv->boardinfo;
const char *error_description = "none";
struct sdio_func_tuple *tuple;
void *mac;
memset(sprom, 0xFF, sizeof(*sprom));
sprom->boardflags_lo = 0;
sprom->boardflags_hi = 0;
tuple = bus->host_sdio->tuples;
while (tuple) {
switch (tuple->code) {
case 0x22: /* extended function */
switch (tuple->data[0]) {
case CISTPL_FUNCE_LAN_NODE_ID:
GOTO_ERROR_ON((tuple->size != 7) &&
(tuple->data[1] != 6),
"mac tpl size");
/* fetch the MAC address. */
mac = tuple->data + 2;
memcpy(sprom->il0mac, mac, ETH_ALEN);
memcpy(sprom->et1mac, mac, ETH_ALEN);
break;
default:
break;
}
break;
case 0x80: /* vendor specific tuple */
switch (tuple->data[0]) {
case SSB_SDIO_CIS_SROMREV:
GOTO_ERROR_ON(tuple->size != 2,
"sromrev tpl size");
sprom->revision = tuple->data[1];
break;
case SSB_SDIO_CIS_ID:
GOTO_ERROR_ON((tuple->size != 5) &&
(tuple->size != 7),
"id tpl size");
bi->vendor = tuple->data[1] |
(tuple->data[2]<<8);
break;
case SSB_SDIO_CIS_BOARDREV:
GOTO_ERROR_ON(tuple->size != 2,
"boardrev tpl size");
sprom->board_rev = tuple->data[1];
break;
case SSB_SDIO_CIS_PA:
GOTO_ERROR_ON((tuple->size != 9) &&
(tuple->size != 10),
"pa tpl size");
sprom->pa0b0 = tuple->data[1] |
((u16)tuple->data[2] << 8);
sprom->pa0b1 = tuple->data[3] |
((u16)tuple->data[4] << 8);
sprom->pa0b2 = tuple->data[5] |
((u16)tuple->data[6] << 8);
sprom->itssi_a = tuple->data[7];
sprom->itssi_bg = tuple->data[7];
sprom->maxpwr_a = tuple->data[8];
sprom->maxpwr_bg = tuple->data[8];
break;
case SSB_SDIO_CIS_OEMNAME:
/* Not present */
break;
case SSB_SDIO_CIS_CCODE:
GOTO_ERROR_ON(tuple->size != 2,
"ccode tpl size");
sprom->country_code = tuple->data[1];
break;
case SSB_SDIO_CIS_ANTENNA:
GOTO_ERROR_ON(tuple->size != 2,
"ant tpl size");
sprom->ant_available_a = tuple->data[1];
sprom->ant_available_bg = tuple->data[1];
break;
case SSB_SDIO_CIS_ANTGAIN:
GOTO_ERROR_ON(tuple->size != 2,
"antg tpl size");
sprom->antenna_gain.ghz24.a0 = tuple->data[1];
sprom->antenna_gain.ghz24.a1 = tuple->data[1];
sprom->antenna_gain.ghz24.a2 = tuple->data[1];
sprom->antenna_gain.ghz24.a3 = tuple->data[1];
sprom->antenna_gain.ghz5.a0 = tuple->data[1];
sprom->antenna_gain.ghz5.a1 = tuple->data[1];
sprom->antenna_gain.ghz5.a2 = tuple->data[1];
sprom->antenna_gain.ghz5.a3 = tuple->data[1];
break;
case SSB_SDIO_CIS_BFLAGS:
GOTO_ERROR_ON((tuple->size != 3) &&
(tuple->size != 5),
"bfl tpl size");
sprom->boardflags_lo = tuple->data[1] |
((u16)tuple->data[2] << 8);
break;
case SSB_SDIO_CIS_LEDS:
GOTO_ERROR_ON(tuple->size != 5,
"leds tpl size");
sprom->gpio0 = tuple->data[1];
sprom->gpio1 = tuple->data[2];
sprom->gpio2 = tuple->data[3];
sprom->gpio3 = tuple->data[4];
break;
default:
break;
}
break;
default:
break;
}
tuple = tuple->next;
}
return 0;
error:
dev_err(ssb_sdio_dev(bus), "failed to fetch device invariants: %s\n",
error_description);
return -ENODEV;
}
void ssb_sdio_exit(struct ssb_bus *bus)
{
if (bus->bustype != SSB_BUSTYPE_SDIO)
return;
/* Nothing to do here. */
}
int ssb_sdio_init(struct ssb_bus *bus)
{
if (bus->bustype != SSB_BUSTYPE_SDIO)
return 0;
bus->sdio_sbaddr = ~0;
return 0;
}


@ -114,6 +114,46 @@ static inline int ssb_pcmcia_init(struct ssb_bus *bus)
}
#endif /* CONFIG_SSB_PCMCIAHOST */
/* sdio.c */
#ifdef CONFIG_SSB_SDIOHOST
extern int ssb_sdio_get_invariants(struct ssb_bus *bus,
struct ssb_init_invariants *iv);
extern u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset);
extern int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev);
extern int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx);
extern int ssb_sdio_hardware_setup(struct ssb_bus *bus);
extern void ssb_sdio_exit(struct ssb_bus *bus);
extern int ssb_sdio_init(struct ssb_bus *bus);
extern const struct ssb_bus_ops ssb_sdio_ops;
#else /* CONFIG_SSB_SDIOHOST */
static inline u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset)
{
return 0;
}
static inline int ssb_sdio_switch_core(struct ssb_bus *bus,
struct ssb_device *dev)
{
return 0;
}
static inline int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
{
return 0;
}
static inline int ssb_sdio_hardware_setup(struct ssb_bus *bus)
{
return 0;
}
static inline void ssb_sdio_exit(struct ssb_bus *bus)
{
}
static inline int ssb_sdio_init(struct ssb_bus *bus)
{
return 0;
}
#endif /* CONFIG_SSB_SDIOHOST */
/* scan.c */
extern const char *ssb_core_name(u16 coreid);


@ -238,6 +238,7 @@ enum ssb_bustype {
SSB_BUSTYPE_SSB, /* This SSB bus is the system bus */
SSB_BUSTYPE_PCI, /* SSB is connected to PCI bus */
SSB_BUSTYPE_PCMCIA, /* SSB is connected to PCMCIA bus */
SSB_BUSTYPE_SDIO, /* SSB is connected to SDIO bus */
};
/* board_vendor */
@ -270,8 +271,12 @@ struct ssb_bus {
/* The core in the basic address register window. (PCI bus only) */
struct ssb_device *mapped_device;
/* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */
u8 mapped_pcmcia_seg;
union {
/* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */
u8 mapped_pcmcia_seg;
/* Current SSB base address window for SDIO. */
u32 sdio_sbaddr;
};
/* Lock for core and segment switching.
* On PCMCIA-host busses this is used to protect the whole MMIO access. */
spinlock_t bar_lock;
@ -282,6 +287,11 @@ struct ssb_bus {
struct pci_dev *host_pci;
/* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */
struct pcmcia_device *host_pcmcia;
/* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */
struct sdio_func *host_sdio;
/* See enum ssb_quirks */
unsigned int quirks;
#ifdef CONFIG_SSB_SPROM
/* Mutex to protect the SPROM writing. */
@ -336,6 +346,11 @@ struct ssb_bus {
#endif /* DEBUG */
};
enum ssb_quirks {
/* The SDIO-connected card requires a read back after writing a 32-bit value */
SSB_QUIRK_SDIO_READ_AFTER_WRITE32 = (1 << 0),
};
/* The initialization-invariants. */
struct ssb_init_invariants {
/* Versioning information about the PCB. */
@ -366,6 +381,12 @@ extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
struct pcmcia_device *pcmcia_dev,
unsigned long baseaddr);
#endif /* CONFIG_SSB_PCMCIAHOST */
#ifdef CONFIG_SSB_SDIOHOST
extern int ssb_bus_sdiobus_register(struct ssb_bus *bus,
struct sdio_func *sdio_func,
unsigned int quirks);
#endif /* CONFIG_SSB_SDIOHOST */
extern void ssb_bus_unregister(struct ssb_bus *bus);
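/*
 * Hypothetical usage sketch (not part of this patch): a low-level SDIO
 * driver for such a card could register the backplane roughly like this,
 * passing SSB_QUIRK_SDIO_READ_AFTER_WRITE32 when the hardware needs the
 * extra read. The probe routine and its error handling are illustrative
 * assumptions only.
 */
static int example_sdio_probe(struct sdio_func *func,
			      const struct sdio_device_id *id)
{
	struct ssb_bus *bus;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	err = ssb_bus_sdiobus_register(bus, func,
				       SSB_QUIRK_SDIO_READ_AFTER_WRITE32);
	if (err) {
		kfree(bus);
		return err;
	}
	sdio_set_drvdata(func, bus);
	return 0;
}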


@ -1,6 +1,15 @@
config CFG80211
tristate "Improved wireless configuration API"
tristate "cfg80211 - wireless configuration API"
depends on RFKILL || !RFKILL
---help---
cfg80211 is the Linux wireless LAN (802.11) configuration API.
Enable this if you have a wireless device.
For more information refer to documentation on the wireless wiki:
http://wireless.kernel.org/en/developers/Documentation/cfg80211
When built as a module it will be called cfg80211.
config NL80211_TESTMODE
bool "nl80211 testmode command"


@ -607,6 +607,9 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (!netif_running(dev))
return -ENETDOWN;
if (wrqu->data.length == sizeof(struct iw_scan_req))
wreq = (struct iw_scan_req *)extra;
rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex);
if (IS_ERR(rdev))
@ -619,9 +622,14 @@ int cfg80211_wext_siwscan(struct net_device *dev,
wiphy = &rdev->wiphy;
for (band = 0; band < IEEE80211_NUM_BANDS; band++)
if (wiphy->bands[band])
n_channels += wiphy->bands[band]->n_channels;
/* Determine number of channels, needed to allocate creq */
if (wreq && wreq->num_channels)
n_channels = wreq->num_channels;
else {
for (band = 0; band < IEEE80211_NUM_BANDS; band++)
if (wiphy->bands[band])
n_channels += wiphy->bands[band]->n_channels;
}
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
@ -638,22 +646,41 @@ int cfg80211_wext_siwscan(struct net_device *dev,
creq->n_channels = n_channels;
creq->n_ssids = 1;
/* all channels */
/* translate "Scan on frequencies" request */
i = 0;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
int j;
if (!wiphy->bands[band])
continue;
for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
/* If we have a wireless request structure and the
* wireless request specifies frequencies, then search
* for the matching hardware channel.
*/
if (wreq && wreq->num_channels) {
int k;
int wiphy_freq = wiphy->bands[band]->channels[j].center_freq;
for (k = 0; k < wreq->num_channels; k++) {
int wext_freq = wreq->channel_list[k].m / 100000;
if (wext_freq == wiphy_freq)
goto wext_freq_found;
}
goto wext_freq_not_found;
}
wext_freq_found:
creq->channels[i] = &wiphy->bands[band]->channels[j];
i++;
wext_freq_not_found: ;
}
}
/* translate scan request */
if (wrqu->data.length == sizeof(struct iw_scan_req)) {
wreq = (struct iw_scan_req *)extra;
/* Set real number of channels specified in creq->channels[] */
creq->n_channels = i;
/* translate "Scan for SSID" request */
if (wreq) {
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
return -EINVAL;