Mirror of https://github.com/torvalds/linux.git (synced 2024-11-16 00:52:01 +00:00)

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

commit d1138cf035
@@ -1696,11 +1696,13 @@ M: mtk-manpages@gmx.net
W: ftp://ftp.kernel.org/pub/linux/docs/manpages
S: Maintained

MARVELL MV64340 ETHERNET DRIVER
MARVELL MV643XX ETHERNET DRIVER
P: Dale Farnsworth
M: dale@farnsworth.org
P: Manish Lachwani
L: linux-mips@linux-mips.org
M: mlachwani@mvista.com
L: netdev@vger.kernel.org
S: Supported
S: Odd Fixes for 2.4; Maintained for 2.6.

MATROX FRAMEBUFFER DRIVER
P: Petr Vandrovec
@@ -2136,7 +2136,7 @@ static int __init b44_init(void)

/* Setup paramaters for syncing RX/TX DMA descriptors */
dma_desc_align_mask = ~(dma_desc_align_size - 1);
dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

return pci_module_init(&b44_driver);
}
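For reference, a minimal sketch (not part of this commit) of why the b44 hunk above switches from max() to max_t(): the kernel's max() complains when its two arguments have different types, here an unsigned int and the size_t result of sizeof(), while max_t() casts both operands to one explicit type before comparing. A userspace analogue:

/* illustration only; mirrors the idea behind the kernel's max_t() */
#include <stdio.h>

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int align = 16;  /* stand-in for dma_desc_align_size */
	size_t desc = 24;         /* stand-in for sizeof(struct dma_desc) */

	printf("%u\n", max_t(unsigned int, align, desc));  /* prints 24 */
	return 0;
}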
@@ -1,25 +1,25 @@
/*******************************************************************************

Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.

The full GNU General Public License is included in this distribution in the
file called LICENSE.

Contact Information:
Linux NICS <linux.nics@intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -160,7 +160,7 @@

#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.4.14-k4"DRV_EXT
#define DRV_VERSION "3.5.10-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
#define PFX DRV_NAME ": "
@@ -320,7 +320,7 @@ enum cuc_dump {
cuc_dump_complete = 0x0000A005,
cuc_dump_reset_complete = 0x0000A007,
};

enum port {
software_reset = 0x0000,
selftest = 0x0001,
@@ -715,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
e100_write_flush(nic); udelay(4);

writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
e100_write_flush(nic); udelay(4);

/* Eeprom drives a dummy zero to EEDO after receiving
* complete address. Use this to adjust addr_len. */
ctrl = readb(&nic->csr->eeprom_ctrl_lo);
@@ -726,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
*addr_len -= (i - 16);
i = 17;
}

data = (data << 1) | (ctrl & eedo ? 1 : 0);
}
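The e100_eeprom_read() hunks above are part of a bit-banged serial EEPROM read: each loop iteration drives the select/data/clock bits, flushes the write, waits, and samples the data-out bit into the result word. A generic, self-contained sketch of that read loop follows; the pin helpers are placeholders, not e100 names:

/* illustration only; the real driver writes nic->csr->eeprom_ctrl_lo */
static unsigned short bitbang_read16(void (*set_pins)(int cs, int di, int sk),
				     int (*get_do)(void))
{
	unsigned short data = 0;
	int i;

	for (i = 0; i < 16; i++) {
		set_pins(1, 0, 1);                      /* clock high: device shifts out a bit */
		data = (data << 1) | (get_do() ? 1 : 0); /* sample data-out, MSB first */
		set_pins(1, 0, 0);                      /* clock low: ready for next bit */
	}
	return data;
}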
@@ -1170,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}

static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
static struct {
@@ -1213,13 +1213,13 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
* driver can change the algorithm.
*
* INTDELAY - This loads the dead-man timer with its inital value.
* When this timer expires the interrupt is asserted, and the
* When this timer expires the interrupt is asserted, and the
* timer is reset each time a new packet is received. (see
* BUNDLEMAX below to set the limit on number of chained packets)
* The current default is 0x600 or 1536. Experiments show that
* the value should probably stay within the 0x200 - 0x1000.
*
* BUNDLEMAX -
* BUNDLEMAX -
* This sets the maximum number of frames that will be bundled. In
* some situations, such as the TCP windowing algorithm, it may be
* better to limit the growth of the bundle size than let it go as
@@ -1229,7 +1229,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
* an interrupt for every frame received. If you do not want to put
* a limit on the bundle size, set this value to xFFFF.
*
* BUNDLESMALL -
* BUNDLESMALL -
* This contains a bit-mask describing the minimum size frame that
* will be bundled. The default masks the lower 7 bits, which means
* that any frame less than 128 bytes in length will not be bundled,
@@ -1244,7 +1244,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
*
* The current default is 0xFF80, which masks out the lower 7 bits.
* This means that any frame which is x7F (127) bytes or smaller
* will cause an immediate interrupt. Because this value must be a
* will cause an immediate interrupt. Because this value must be a
* bit mask, there are only a few valid values that can be used. To
* turn this feature off, the driver can write the value xFFFF to the
* lower word of this instruction (in the same way that the other
@@ -1253,7 +1253,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
* standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
/* if you wish to disable the ucode functionality, while maintaining the
* workarounds it provides, set the following defines to:
* BUNDLESMALL 0
* BUNDLEMAX 1
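The parameter description above boils down to three conditions under which the CPUSaver microcode raises an interrupt. A toy model of that decision is sketched below; it is an illustration only, and the BUNDLEMAX value of 6 is an assumed example rather than a figure taken from this file:

/* illustration only; the real policy lives in the controller microcode */
#define INTDELAY	0x600	/* dead-man timer reload value (1536, per the comment) */
#define BUNDLEMAX	6	/* assumed example limit on chained frames */
#define BUNDLESMALL	0xFF80	/* frames of 127 bytes or less skip bundling */

static int cpusaver_should_interrupt(unsigned int frame_len,
				     unsigned int bundled_frames,
				     unsigned int timer_remaining)
{
	if ((frame_len & BUNDLESMALL) == 0)	/* small frame: interrupt immediately */
		return 1;
	if (bundled_frames >= BUNDLEMAX)	/* bundle limit reached */
		return 1;
	return timer_remaining == 0;		/* dead-man timer expired */
}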
@@ -1284,12 +1284,46 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)

for (i = 0; i < UCODE_SIZE; i++)
cb->u.ucode[i] = cpu_to_le32(ucode[i]);
cb->command = cpu_to_le16(cb_ucode);
cb->command = cpu_to_le16(cb_ucode | cb_el);
return;
}

noloaducode:
cb->command = cpu_to_le16(cb_nop);
cb->command = cpu_to_le16(cb_nop | cb_el);
}

static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
int err = 0, counter = 50;
struct cb *cb = nic->cb_to_clean;

if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

/* must restart cuc */
nic->cuc_cmd = cuc_start;

/* wait for completion */
e100_write_flush(nic);
udelay(10);

/* wait for possibly (ouch) 500ms */
while (!(cb->status & cpu_to_le16(cb_complete))) {
msleep(10);
if (!--counter) break;
}

/* ack any interupts, something could have been set */
writeb(~0, &nic->csr->scb.stat_ack);

/* if the command failed, or is not OK, notify and return */
if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
DPRINTK(PROBE,ERR, "ucode load failed\n");
err = -EPERM;
}

return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
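The new e100_exec_cb_wait() above follows a common pattern: issue the command, then poll a completion flag in short sleeps until it is set or a retry budget runs out (50 tries of 10 ms gives the quoted 500 ms bound). A stripped-down sketch of just that waiting loop, using a hypothetical helper rather than kernel API:

/* illustration only; e100_exec_cb_wait() also restarts the CUC and acks IRQs */
static int poll_until_done(volatile const unsigned short *status,
			   unsigned short done_bit,
			   int retries, void (*sleep_10ms)(void))
{
	while (!(*status & done_bit)) {
		if (!--retries)
			return -1;	/* timed out, roughly the -EPERM path above */
		sleep_10ms();		/* 50 retries x 10 ms is the 500 ms bound */
	}
	return 0;
}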
@@ -1357,13 +1391,13 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
}

if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
/* enable/disable MDI/MDI-X auto-switching.
MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
(nic->mac == mac_82551_10) || (nic->mii.force_media) ||
!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
(nic->mac == mac_82551_10) || (nic->mii.force_media) ||
!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
else
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
@@ -1388,7 +1422,7 @@ static int e100_hw_init(struct nic *nic)
return err;
if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
return err;
if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
return err;
if((err = e100_exec_cb(nic, NULL, e100_configure)))
return err;
@@ -1493,7 +1527,7 @@ static void e100_update_stats(struct nic *nic)
}
}

if(e100_exec_cmd(nic, cuc_dump_reset, 0))
DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
@@ -1542,10 +1576,10 @@ static void e100_watchdog(unsigned long data)
mii_check_link(&nic->mii);

/* Software generated interrupt to recover from (rare) Rx
* allocation failure.
* Unfortunately have to use a spinlock to not re-enable interrupts
* accidentally, due to hardware that shares a register between the
* interrupt mask bit and the SW Interrupt generation bit */
* allocation failure.
* Unfortunately have to use a spinlock to not re-enable interrupts
* accidentally, due to hardware that shares a register between the
* interrupt mask bit and the SW Interrupt generation bit */
spin_lock_irq(&nic->cmd_lock);
writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
spin_unlock_irq(&nic->cmd_lock);
@@ -1830,7 +1864,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
struct rx *rx_to_start = NULL;

/* are we already rnr? then pay attention!!! this ensures that
* the state machine progression never allows a start with a
* the state machine progression never allows a start with a
* partially cleaned list, avoiding a race between hardware
* and rx_to_clean when in NAPI mode */
if(RU_SUSPENDED == nic->ru_running)
@@ -2066,7 +2100,7 @@ static void e100_tx_timeout(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);

/* Reset outside of interrupt context, to avoid request_irq
/* Reset outside of interrupt context, to avoid request_irq
* in interrupt context */
schedule_work(&nic->tx_timeout_task);
}
@@ -2313,7 +2347,7 @@ static int e100_set_ringparam(struct net_device *netdev,
struct param_range *rfds = &nic->params.rfds;
struct param_range *cbs = &nic->params.cbs;

if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;

if(netif_running(netdev))
@@ -2631,7 +2665,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->flags |= wol_magic;

/* ack any pending wake events, disable PME */
pci_enable_wake(pdev, 0, 0);
err = pci_enable_wake(pdev, 0, 0);
if (err)
DPRINTK(PROBE, ERR, "Error clearing wake event\n");

strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev))) {
@@ -2682,6 +2718,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
int retval;

if(netif_running(netdev))
e100_down(nic);
@@ -2689,9 +2726,14 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);

pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic)));
retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
nic->flags & (wol_magic | e100_asf(nic)));
if (retval)
DPRINTK(PROBE,ERR, "Error enabling wake\n");
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (retval)
DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval);

return 0;
}
@@ -2700,11 +2742,16 @@ static int e100_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
int retval;

pci_set_power_state(pdev, PCI_D0);
retval = pci_set_power_state(pdev, PCI_D0);
if (retval)
DPRINTK(PROBE,ERR, "Error waking adapter\n");
pci_restore_state(pdev);
/* ack any pending wake events, disable PME */
pci_enable_wake(pdev, 0, 0);
retval = pci_enable_wake(pdev, 0, 0);
if (retval)
DPRINTK(PROBE,ERR, "Error clearing wake events\n");
if(e100_hw_init(nic))
DPRINTK(HW, ERR, "e100_hw_init failed\n");

@@ -2721,12 +2768,15 @@ static void e100_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
int retval;

#ifdef CONFIG_PM
pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
#else
pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
#endif
if (retval)
DPRINTK(PROBE,ERR, "Error enabling wake\n");
}
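Taken together, the suspend and shutdown hunks above keep the usual PCI power-down ordering (save config space, arm wake-on-LAN, disable the device, drop to the target power state) and now check the return values of the wake and power-state calls. A condensed sketch of that ordering, assuming <linux/pci.h>; it is not a drop-in replacement for the driver code:

/* illustration only; uses the 2.6-era PCI PM calls that appear in the diff */
static int example_suspend(struct pci_dev *pdev, pm_message_t state, int wol)
{
	int err;

	pci_save_state(pdev);			/* keep config space for resume */
	err = pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
	if (err)
		printk(KERN_ERR "example: error %d enabling wake\n", err);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}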
@@ -2739,7 +2789,7 @@ static struct pci_driver e100_driver = {
.suspend = e100_suspend,
.resume = e100_resume,
#endif
.shutdown = e100_shutdown,
.shutdown = e100_shutdown,
};

static int __init e100_init_module(void)
@ -72,10 +72,6 @@
|
||||
#include <linux/mii.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/smp.h>
|
||||
#endif
|
||||
|
||||
#define BAR_0 0
|
||||
#define BAR_1 1
|
||||
@ -87,6 +83,10 @@
|
||||
struct e1000_adapter;
|
||||
|
||||
#include "e1000_hw.h"
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/smp.h>
|
||||
#endif
|
||||
|
||||
#ifdef DBG
|
||||
#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
|
||||
@ -169,6 +169,13 @@ struct e1000_buffer {
|
||||
uint16_t next_to_watch;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
struct e1000_queue_stats {
|
||||
uint64_t packets;
|
||||
uint64_t bytes;
|
||||
};
|
||||
#endif
|
||||
|
||||
struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
|
||||
struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
|
||||
|
||||
@ -191,10 +198,12 @@ struct e1000_tx_ring {
|
||||
spinlock_t tx_lock;
|
||||
uint16_t tdh;
|
||||
uint16_t tdt;
|
||||
uint64_t pkt;
|
||||
|
||||
boolean_t last_tx_tso;
|
||||
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
struct e1000_queue_stats tx_stats;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct e1000_rx_ring {
|
||||
@ -216,9 +225,17 @@ struct e1000_rx_ring {
|
||||
struct e1000_ps_page *ps_page;
|
||||
struct e1000_ps_page_dma *ps_page_dma;
|
||||
|
||||
struct sk_buff *rx_skb_top;
|
||||
struct sk_buff *rx_skb_prev;
|
||||
|
||||
/* cpu for rx queue */
|
||||
int cpu;
|
||||
|
||||
uint16_t rdh;
|
||||
uint16_t rdt;
|
||||
uint64_t pkt;
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
struct e1000_queue_stats rx_stats;
|
||||
#endif
|
||||
};
|
||||
|
||||
#define E1000_DESC_UNUSED(R) \
|
||||
@ -251,6 +268,9 @@ struct e1000_adapter {
|
||||
uint16_t link_speed;
|
||||
uint16_t link_duplex;
|
||||
spinlock_t stats_lock;
|
||||
#ifdef CONFIG_E1000_NAPI
|
||||
spinlock_t tx_queue_lock;
|
||||
#endif
|
||||
atomic_t irq_sem;
|
||||
struct work_struct tx_timeout_task;
|
||||
struct work_struct watchdog_task;
|
||||
@ -264,6 +284,7 @@ struct e1000_adapter {
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
|
||||
#endif
|
||||
unsigned long tx_queue_len;
|
||||
uint32_t txd_cmd;
|
||||
uint32_t tx_int_delay;
|
||||
uint32_t tx_abs_int_delay;
|
||||
@ -271,9 +292,11 @@ struct e1000_adapter {
|
||||
uint64_t gotcl_old;
|
||||
uint64_t tpt_old;
|
||||
uint64_t colc_old;
|
||||
uint32_t tx_timeout_count;
|
||||
uint32_t tx_fifo_head;
|
||||
uint32_t tx_head_addr;
|
||||
uint32_t tx_fifo_size;
|
||||
uint8_t tx_timeout_factor;
|
||||
atomic_t tx_fifo_stall;
|
||||
boolean_t pcix_82544;
|
||||
boolean_t detect_tx_hung;
|
||||
@ -281,14 +304,15 @@ struct e1000_adapter {
|
||||
/* RX */
|
||||
#ifdef CONFIG_E1000_NAPI
|
||||
boolean_t (*clean_rx) (struct e1000_adapter *adapter,
|
||||
struct e1000_rx_ring *rx_ring,
|
||||
int *work_done, int work_to_do);
|
||||
struct e1000_rx_ring *rx_ring,
|
||||
int *work_done, int work_to_do);
|
||||
#else
|
||||
boolean_t (*clean_rx) (struct e1000_adapter *adapter,
|
||||
struct e1000_rx_ring *rx_ring);
|
||||
struct e1000_rx_ring *rx_ring);
|
||||
#endif
|
||||
void (*alloc_rx_buf) (struct e1000_adapter *adapter,
|
||||
struct e1000_rx_ring *rx_ring);
|
||||
struct e1000_rx_ring *rx_ring,
|
||||
int cleaned_count);
|
||||
struct e1000_rx_ring *rx_ring; /* One per active queue */
|
||||
#ifdef CONFIG_E1000_NAPI
|
||||
struct net_device *polling_netdev; /* One per active queue */
|
||||
@ -296,13 +320,15 @@ struct e1000_adapter {
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
struct net_device **cpu_netdev; /* per-cpu */
|
||||
struct call_async_data_struct rx_sched_call_data;
|
||||
int cpu_for_queue[4];
|
||||
cpumask_t cpumask;
|
||||
#endif
|
||||
int num_queues;
|
||||
int num_tx_queues;
|
||||
int num_rx_queues;
|
||||
|
||||
uint64_t hw_csum_err;
|
||||
uint64_t hw_csum_good;
|
||||
uint64_t rx_hdr_split;
|
||||
uint32_t alloc_rx_buff_failed;
|
||||
uint32_t rx_int_delay;
|
||||
uint32_t rx_abs_int_delay;
|
||||
boolean_t rx_csum;
|
||||
@ -330,6 +356,7 @@ struct e1000_adapter {
|
||||
struct e1000_rx_ring test_rx_ring;
|
||||
|
||||
|
||||
u32 *config_space;
|
||||
int msg_enable;
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
boolean_t have_msi;
|
||||
|
@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
|
||||
{ "tx_deferred_ok", E1000_STAT(stats.dc) },
|
||||
{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
|
||||
{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
|
||||
{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
|
||||
{ "rx_long_length_errors", E1000_STAT(stats.roc) },
|
||||
{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
|
||||
{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
|
||||
@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
|
||||
{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
|
||||
{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
|
||||
{ "rx_header_split", E1000_STAT(rx_hdr_split) },
|
||||
{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
|
||||
};
|
||||
#define E1000_STATS_LEN \
|
||||
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
#define E1000_QUEUE_STATS_LEN \
|
||||
(((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
|
||||
((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
|
||||
* (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
|
||||
#else
|
||||
#define E1000_QUEUE_STATS_LEN 0
|
||||
#endif
|
||||
#define E1000_GLOBAL_STATS_LEN \
|
||||
sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
|
||||
#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
|
||||
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
|
||||
"Register test (offline)", "Eeprom test (offline)",
|
||||
"Interrupt test (offline)", "Loopback test (offline)",
|
||||
@ -183,7 +195,15 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
||||
if(ecmd->autoneg == AUTONEG_ENABLE) {
|
||||
/* When SoL/IDER sessions are active, autoneg/speed/duplex
|
||||
* cannot be changed */
|
||||
if (e1000_check_phy_reset_block(hw)) {
|
||||
DPRINTK(DRV, ERR, "Cannot change link characteristics "
|
||||
"when SoL/IDER is active.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ecmd->autoneg == AUTONEG_ENABLE) {
|
||||
hw->autoneg = 1;
|
||||
if(hw->media_type == e1000_media_type_fiber)
|
||||
hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
|
||||
@ -567,21 +587,21 @@ e1000_get_drvinfo(struct net_device *netdev,
|
||||
|
||||
strncpy(drvinfo->driver, e1000_driver_name, 32);
|
||||
strncpy(drvinfo->version, e1000_driver_version, 32);
|
||||
|
||||
/* EEPROM image version # is reported as firware version # for
|
||||
|
||||
/* EEPROM image version # is reported as firmware version # for
|
||||
* 8257{1|2|3} controllers */
|
||||
e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
|
||||
switch (adapter->hw.mac_type) {
|
||||
case e1000_82571:
|
||||
case e1000_82572:
|
||||
case e1000_82573:
|
||||
sprintf(firmware_version, "%d.%d-%d",
|
||||
sprintf(firmware_version, "%d.%d-%d",
|
||||
(eeprom_data & 0xF000) >> 12,
|
||||
(eeprom_data & 0x0FF0) >> 4,
|
||||
eeprom_data & 0x000F);
|
||||
break;
|
||||
default:
|
||||
sprintf(firmware_version, "n/a");
|
||||
sprintf(firmware_version, "N/A");
|
||||
}
|
||||
|
||||
strncpy(drvinfo->fw_version, firmware_version, 32);
|
||||
@ -623,8 +643,8 @@ e1000_set_ringparam(struct net_device *netdev,
|
||||
struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
|
||||
int i, err, tx_ring_size, rx_ring_size;
|
||||
|
||||
tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
|
||||
rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
|
||||
tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
|
||||
rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
|
||||
|
||||
if (netif_running(adapter->netdev))
|
||||
e1000_down(adapter);
|
||||
@ -663,10 +683,10 @@ e1000_set_ringparam(struct net_device *netdev,
|
||||
E1000_MAX_TXD : E1000_MAX_82544_TXD));
|
||||
E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
|
||||
|
||||
for (i = 0; i < adapter->num_queues; i++) {
|
||||
for (i = 0; i < adapter->num_tx_queues; i++)
|
||||
txdr[i].count = txdr->count;
|
||||
for (i = 0; i < adapter->num_rx_queues; i++)
|
||||
rxdr[i].count = rxdr->count;
|
||||
}
|
||||
|
||||
if(netif_running(adapter->netdev)) {
|
||||
/* Try to get new resources before deleting old */
|
||||
@ -979,18 +999,17 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
|
||||
}
|
||||
}
|
||||
|
||||
if(txdr->desc) {
|
||||
if (txdr->desc) {
|
||||
pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
|
||||
txdr->desc = NULL;
|
||||
}
|
||||
if(rxdr->desc) {
|
||||
if (rxdr->desc) {
|
||||
pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
|
||||
rxdr->desc = NULL;
|
||||
}
|
||||
|
||||
kfree(txdr->buffer_info);
|
||||
txdr->buffer_info = NULL;
|
||||
|
||||
kfree(rxdr->buffer_info);
|
||||
rxdr->buffer_info = NULL;
|
||||
|
||||
@ -1327,11 +1346,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
|
||||
static int
|
||||
e1000_setup_loopback_test(struct e1000_adapter *adapter)
|
||||
{
|
||||
uint32_t rctl;
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
uint32_t rctl;
|
||||
|
||||
if (hw->media_type == e1000_media_type_fiber ||
|
||||
hw->media_type == e1000_media_type_internal_serdes) {
|
||||
hw->media_type == e1000_media_type_internal_serdes) {
|
||||
switch (hw->mac_type) {
|
||||
case e1000_82545:
|
||||
case e1000_82546:
|
||||
@ -1362,25 +1381,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
|
||||
static void
|
||||
e1000_loopback_cleanup(struct e1000_adapter *adapter)
|
||||
{
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
uint32_t rctl;
|
||||
uint16_t phy_reg;
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
|
||||
rctl = E1000_READ_REG(&adapter->hw, RCTL);
|
||||
rctl = E1000_READ_REG(hw, RCTL);
|
||||
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
|
||||
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
|
||||
E1000_WRITE_REG(hw, RCTL, rctl);
|
||||
|
||||
switch (hw->mac_type) {
|
||||
case e1000_82571:
|
||||
case e1000_82572:
|
||||
if (hw->media_type == e1000_media_type_fiber ||
|
||||
hw->media_type == e1000_media_type_internal_serdes){
|
||||
hw->media_type == e1000_media_type_internal_serdes) {
|
||||
#define E1000_SERDES_LB_OFF 0x400
|
||||
E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
|
||||
msec_delay(10);
|
||||
break;
|
||||
}
|
||||
/* fall thru for Cu adapters */
|
||||
/* Fall Through */
|
||||
case e1000_82545:
|
||||
case e1000_82546:
|
||||
case e1000_82545_rev_3:
|
||||
@ -1401,7 +1420,7 @@ static void
|
||||
e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
|
||||
{
|
||||
memset(skb->data, 0xFF, frame_size);
|
||||
frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
|
||||
frame_size &= ~1;
|
||||
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
|
||||
memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
|
||||
memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
|
||||
@ -1410,7 +1429,7 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
|
||||
static int
|
||||
e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
|
||||
{
|
||||
frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
|
||||
frame_size &= ~1;
|
||||
if(*(skb->data + 3) == 0xFF) {
|
||||
if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
|
||||
(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
|
||||
@ -1488,14 +1507,25 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
|
||||
static int
|
||||
e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
|
||||
{
|
||||
if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
|
||||
if((*data = e1000_setup_loopback_test(adapter)))
|
||||
goto err_loopback_setup;
|
||||
/* PHY loopback cannot be performed if SoL/IDER
|
||||
* sessions are active */
|
||||
if (e1000_check_phy_reset_block(&adapter->hw)) {
|
||||
DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
|
||||
"when SoL/IDER is active.\n");
|
||||
*data = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if ((*data = e1000_setup_desc_rings(adapter)))
|
||||
goto out;
|
||||
if ((*data = e1000_setup_loopback_test(adapter)))
|
||||
goto err_loopback;
|
||||
*data = e1000_run_loopback_test(adapter);
|
||||
e1000_loopback_cleanup(adapter);
|
||||
err_loopback_setup:
|
||||
e1000_free_desc_rings(adapter);
|
||||
|
||||
err_loopback:
|
||||
e1000_free_desc_rings(adapter);
|
||||
out:
|
||||
return *data;
|
||||
}
|
||||
|
||||
@ -1617,6 +1647,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
||||
|
||||
case E1000_DEV_ID_82546EB_FIBER:
|
||||
case E1000_DEV_ID_82546GB_FIBER:
|
||||
case E1000_DEV_ID_82571EB_FIBER:
|
||||
/* Wake events only supported on port A for dual fiber */
|
||||
if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
|
||||
wol->supported = 0;
|
||||
@ -1660,6 +1691,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
|
||||
|
||||
case E1000_DEV_ID_82546EB_FIBER:
|
||||
case E1000_DEV_ID_82546GB_FIBER:
|
||||
case E1000_DEV_ID_82571EB_FIBER:
|
||||
/* Wake events only supported on port A for dual fiber */
|
||||
if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
|
||||
return wol->wolopts ? -EOPNOTSUPP : 0;
|
||||
@ -1721,21 +1753,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
|
||||
mod_timer(&adapter->blink_timer, jiffies);
|
||||
msleep_interruptible(data * 1000);
|
||||
del_timer_sync(&adapter->blink_timer);
|
||||
}
|
||||
else if(adapter->hw.mac_type < e1000_82573) {
|
||||
E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
|
||||
E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
|
||||
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
|
||||
(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
|
||||
(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
|
||||
} else if (adapter->hw.mac_type < e1000_82573) {
|
||||
E1000_WRITE_REG(&adapter->hw, LEDCTL,
|
||||
(E1000_LEDCTL_LED2_BLINK_RATE |
|
||||
E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
|
||||
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
|
||||
(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
|
||||
(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
|
||||
msleep_interruptible(data * 1000);
|
||||
}
|
||||
else {
|
||||
E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
|
||||
E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
|
||||
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
|
||||
(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
|
||||
(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
|
||||
} else {
|
||||
E1000_WRITE_REG(&adapter->hw, LEDCTL,
|
||||
(E1000_LEDCTL_LED2_BLINK_RATE |
|
||||
E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
|
||||
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
|
||||
(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
|
||||
(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
|
||||
msleep_interruptible(data * 1000);
|
||||
}
|
||||
|
||||
@ -1768,19 +1800,43 @@ e1000_get_ethtool_stats(struct net_device *netdev,
|
||||
struct ethtool_stats *stats, uint64_t *data)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
uint64_t *queue_stat;
|
||||
int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
|
||||
int j, k;
|
||||
#endif
|
||||
int i;
|
||||
|
||||
e1000_update_stats(adapter);
|
||||
for(i = 0; i < E1000_STATS_LEN; i++) {
|
||||
char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
|
||||
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
|
||||
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
|
||||
char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
|
||||
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
|
||||
sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
|
||||
}
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
for (j = 0; j < adapter->num_tx_queues; j++) {
|
||||
queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
|
||||
for (k = 0; k < stat_count; k++)
|
||||
data[i + k] = queue_stat[k];
|
||||
i += k;
|
||||
}
|
||||
for (j = 0; j < adapter->num_rx_queues; j++) {
|
||||
queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
|
||||
for (k = 0; k < stat_count; k++)
|
||||
data[i + k] = queue_stat[k];
|
||||
i += k;
|
||||
}
|
||||
#endif
|
||||
/* BUG_ON(i != E1000_STATS_LEN); */
|
||||
}
|
||||
|
||||
static void
|
||||
e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
|
||||
{
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
#endif
|
||||
uint8_t *p = data;
|
||||
int i;
|
||||
|
||||
switch(stringset) {
|
||||
@ -1789,11 +1845,26 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
|
||||
E1000_TEST_LEN*ETH_GSTRING_LEN);
|
||||
break;
|
||||
case ETH_SS_STATS:
|
||||
for (i=0; i < E1000_STATS_LEN; i++) {
|
||||
memcpy(data + i * ETH_GSTRING_LEN,
|
||||
e1000_gstrings_stats[i].stat_string,
|
||||
ETH_GSTRING_LEN);
|
||||
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
|
||||
memcpy(p, e1000_gstrings_stats[i].stat_string,
|
||||
ETH_GSTRING_LEN);
|
||||
p += ETH_GSTRING_LEN;
|
||||
}
|
||||
#ifdef CONFIG_E1000_MQ
|
||||
for (i = 0; i < adapter->num_tx_queues; i++) {
|
||||
sprintf(p, "tx_queue_%u_packets", i);
|
||||
p += ETH_GSTRING_LEN;
|
||||
sprintf(p, "tx_queue_%u_bytes", i);
|
||||
p += ETH_GSTRING_LEN;
|
||||
}
|
||||
for (i = 0; i < adapter->num_rx_queues; i++) {
|
||||
sprintf(p, "rx_queue_%u_packets", i);
|
||||
p += ETH_GSTRING_LEN;
|
||||
sprintf(p, "rx_queue_%u_bytes", i);
|
||||
p += ETH_GSTRING_LEN;
|
||||
}
|
||||
#endif
|
||||
/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
|
||||
case E1000_DEV_ID_82546GB_FIBER:
|
||||
case E1000_DEV_ID_82546GB_SERDES:
|
||||
case E1000_DEV_ID_82546GB_PCIE:
|
||||
case E1000_DEV_ID_82546GB_QUAD_COPPER:
|
||||
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
|
||||
hw->mac_type = e1000_82546_rev_3;
|
||||
break;
|
||||
case E1000_DEV_ID_82541EI:
|
||||
@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
|
||||
uint16_t cmd_mmrbc;
|
||||
uint16_t stat_mmrbc;
|
||||
uint32_t mta_size;
|
||||
uint32_t ctrl_ext;
|
||||
|
||||
DEBUGFUNC("e1000_init_hw");
|
||||
|
||||
@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
|
||||
break;
|
||||
case e1000_82571:
|
||||
case e1000_82572:
|
||||
ctrl |= (1 << 22);
|
||||
case e1000_82573:
|
||||
ctrl |= E1000_TXDCTL_COUNT_DESC;
|
||||
break;
|
||||
@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
|
||||
*/
|
||||
e1000_clear_hw_cntrs(hw);
|
||||
|
||||
if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
|
||||
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
|
||||
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
|
||||
/* Relaxed ordering must be disabled to avoid a parity
|
||||
* error crash in a PCI slot. */
|
||||
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
|
||||
E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
|
||||
|
||||
DEBUGFUNC("e1000_setup_link");
|
||||
|
||||
/* In the case of the phy reset being blocked, we already have a link.
|
||||
* We do not have to set it up again. */
|
||||
if (e1000_check_phy_reset_block(hw))
|
||||
return E1000_SUCCESS;
|
||||
|
||||
/* Read and store word 0x0F of the EEPROM. This word contains bits
|
||||
* that determine the hardware's default PAUSE (flow control) mode,
|
||||
* a bit that determines whether the HW defaults to enabling or
|
||||
@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
|
||||
void
|
||||
e1000_config_collision_dist(struct e1000_hw *hw)
|
||||
{
|
||||
uint32_t tctl;
|
||||
uint32_t tctl, coll_dist;
|
||||
|
||||
DEBUGFUNC("e1000_config_collision_dist");
|
||||
|
||||
if (hw->mac_type < e1000_82543)
|
||||
coll_dist = E1000_COLLISION_DISTANCE_82542;
|
||||
else
|
||||
coll_dist = E1000_COLLISION_DISTANCE;
|
||||
|
||||
tctl = E1000_READ_REG(hw, TCTL);
|
||||
|
||||
tctl &= ~E1000_TCTL_COLD;
|
||||
tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
|
||||
tctl |= coll_dist << E1000_COLD_SHIFT;
|
||||
|
||||
E1000_WRITE_REG(hw, TCTL, tctl);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
|
||||
|
||||
if (hw->mac_type < e1000_82571)
|
||||
msec_delay(10);
|
||||
else
|
||||
udelay(100);
|
||||
|
||||
E1000_WRITE_REG(hw, CTRL, ctrl);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
@ -3881,14 +3904,16 @@ e1000_read_eeprom(struct e1000_hw *hw,
|
||||
return -E1000_ERR_EEPROM;
|
||||
}
|
||||
|
||||
/* FLASH reads without acquiring the semaphore are safe in 82573-based
|
||||
* controllers.
|
||||
*/
|
||||
if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
|
||||
(hw->mac_type != e1000_82573)) {
|
||||
/* Prepare the EEPROM for reading */
|
||||
if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
|
||||
return -E1000_ERR_EEPROM;
|
||||
/* FLASH reads without acquiring the semaphore are safe */
|
||||
if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
|
||||
hw->eeprom.use_eerd == FALSE) {
|
||||
switch (hw->mac_type) {
|
||||
default:
|
||||
/* Prepare the EEPROM for reading */
|
||||
if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
|
||||
return -E1000_ERR_EEPROM;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(eeprom->use_eerd == TRUE) {
|
||||
@ -6720,6 +6745,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
|
||||
break;
|
||||
}
|
||||
|
||||
/* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
|
||||
* Need to wait for PHY configuration completion before accessing NVM
|
||||
* and PHY. */
|
||||
if (hw->mac_type == e1000_82573)
|
||||
msec_delay(25);
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -439,6 +439,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
|
||||
#define E1000_DEV_ID_82546GB_FIBER 0x107A
|
||||
#define E1000_DEV_ID_82546GB_SERDES 0x107B
|
||||
#define E1000_DEV_ID_82546GB_PCIE 0x108A
|
||||
#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
|
||||
#define E1000_DEV_ID_82547EI 0x1019
|
||||
#define E1000_DEV_ID_82571EB_COPPER 0x105E
|
||||
#define E1000_DEV_ID_82571EB_FIBER 0x105F
|
||||
@ -449,6 +450,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
|
||||
#define E1000_DEV_ID_82573E 0x108B
|
||||
#define E1000_DEV_ID_82573E_IAMT 0x108C
|
||||
#define E1000_DEV_ID_82573L 0x109A
|
||||
#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
|
||||
|
||||
|
||||
#define NODE_ADDRESS_SIZE 6
|
||||
@ -1497,6 +1499,7 @@ struct e1000_hw {
|
||||
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
|
||||
#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
|
||||
#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
|
||||
#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
|
||||
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
|
||||
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
|
||||
#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
|
||||
@ -1954,6 +1957,23 @@ struct e1000_host_command_info {
|
||||
|
||||
#define E1000_MDALIGN 4096
|
||||
|
||||
/* PCI-Ex registers */
|
||||
|
||||
/* PCI-Ex Control Register */
|
||||
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
|
||||
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
|
||||
#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
|
||||
#define E1000_GCR_TXD_NO_SNOOP 0x00000008
|
||||
#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
|
||||
#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
|
||||
|
||||
#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
|
||||
E1000_GCR_RXDSCW_NO_SNOOP | \
|
||||
E1000_GCR_RXDSCR_NO_SNOOP | \
|
||||
E1000_GCR_TXD_NO_SNOOP | \
|
||||
E1000_GCR_TXDSCW_NO_SNOOP | \
|
||||
E1000_GCR_TXDSCR_NO_SNOOP)
|
||||
|
||||
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
|
||||
/* Function Active and Power State to MNG */
|
||||
#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
|
||||
@ -2077,7 +2097,10 @@ struct e1000_host_command_info {
|
||||
/* Collision related configuration parameters */
|
||||
#define E1000_COLLISION_THRESHOLD 15
|
||||
#define E1000_CT_SHIFT 4
|
||||
#define E1000_COLLISION_DISTANCE 64
|
||||
/* Collision distance is a 0-based value that applies to
|
||||
* half-duplex-capable hardware only. */
|
||||
#define E1000_COLLISION_DISTANCE 63
|
||||
#define E1000_COLLISION_DISTANCE_82542 64
|
||||
#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
|
||||
#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
|
||||
#define E1000_COLD_SHIFT 12
|
||||
|
File diff suppressed because it is too large
@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
|
||||
*
|
||||
* Valid Range: 100-100000 (0=off, 1=dynamic)
|
||||
*
|
||||
* Default Value: 1
|
||||
* Default Value: 8000
|
||||
*/
|
||||
|
||||
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
|
||||
@ -320,7 +320,7 @@ e1000_check_options(struct e1000_adapter *adapter)
|
||||
} else {
|
||||
tx_ring->count = opt.def;
|
||||
}
|
||||
for (i = 0; i < adapter->num_queues; i++)
|
||||
for (i = 0; i < adapter->num_tx_queues; i++)
|
||||
tx_ring[i].count = tx_ring->count;
|
||||
}
|
||||
{ /* Receive Descriptor Count */
|
||||
@ -346,7 +346,7 @@ e1000_check_options(struct e1000_adapter *adapter)
|
||||
} else {
|
||||
rx_ring->count = opt.def;
|
||||
}
|
||||
for (i = 0; i < adapter->num_queues; i++)
|
||||
for (i = 0; i < adapter->num_rx_queues; i++)
|
||||
rx_ring[i].count = rx_ring->count;
|
||||
}
|
||||
{ /* Checksum Offload Enable/Disable */
|
||||
@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
|
||||
e1000_validate_option(&fc, &opt, adapter);
|
||||
adapter->hw.fc = adapter->hw.original_fc = fc;
|
||||
} else {
|
||||
adapter->hw.fc = opt.def;
|
||||
adapter->hw.fc = adapter->hw.original_fc = opt.def;
|
||||
}
|
||||
}
|
||||
{ /* Transmit Interrupt Delay */
|
||||
@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
|
||||
.p = dplx_list }}
|
||||
};
|
||||
|
||||
if (e1000_check_phy_reset_block(&adapter->hw)) {
|
||||
DPRINTK(PROBE, INFO,
|
||||
"Link active due to SoL/IDER Session. "
|
||||
"Speed/Duplex/AutoNeg parameter ignored.\n");
|
||||
return;
|
||||
}
|
||||
if (num_Duplex > bd) {
|
||||
dplx = Duplex[bd];
|
||||
e1000_validate_option(&dplx, &opt, adapter);
|
||||
|
File diff suppressed because it is too large
@@ -3243,12 +3243,22 @@ static int __devinit skge_probe(struct pci_dev *pdev,

pci_set_master(pdev);

if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)))
if (sizeof(dma_addr_t) > sizeof(u32) &&
!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
using_dac = 1;
else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
printk(KERN_ERR PFX "%s no usable DMA configuration\n",
pci_name(pdev));
goto err_out_free_regions;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (err < 0) {
printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
"for consistent allocations\n", pci_name(pdev));
goto err_out_free_regions;
}
} else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
printk(KERN_ERR PFX "%s no usable DMA configuration\n",
pci_name(pdev));
goto err_out_free_regions;
}
}
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
|
@ -57,7 +57,7 @@
|
||||
#include "sky2.h"
|
||||
|
||||
#define DRV_NAME "sky2"
|
||||
#define DRV_VERSION "0.11"
|
||||
#define DRV_VERSION "0.13"
|
||||
#define PFX DRV_NAME " "
|
||||
|
||||
/*
|
||||
@ -75,6 +75,7 @@
|
||||
#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
|
||||
#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
|
||||
#define RX_DEF_PENDING RX_MAX_PENDING
|
||||
#define RX_SKB_ALIGN 8
|
||||
|
||||
#define TX_RING_SIZE 512
|
||||
#define TX_DEF_PENDING (TX_RING_SIZE - 1)
|
||||
@ -91,7 +92,7 @@
|
||||
static const u32 default_msg =
|
||||
NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
|
||||
| NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
|
||||
| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR;
|
||||
| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
|
||||
|
||||
static int debug = -1; /* defaults above */
|
||||
module_param(debug, int, 0);
|
||||
@ -624,13 +625,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
|
||||
|
||||
}
|
||||
|
||||
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
|
||||
/* Assign Ram Buffer allocation.
|
||||
* start and end are in units of 4k bytes
|
||||
* ram registers are in units of 64bit words
|
||||
*/
|
||||
static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
|
||||
{
|
||||
u32 end;
|
||||
u32 start, end;
|
||||
|
||||
start /= 8;
|
||||
len /= 8;
|
||||
end = start + len - 1;
|
||||
start = startk * 4096/8;
|
||||
end = (endk * 4096/8) - 1;
|
||||
|
||||
sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
|
||||
sky2_write32(hw, RB_ADDR(q, RB_START), start);
|
||||
@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
|
||||
sky2_write32(hw, RB_ADDR(q, RB_RP), start);
|
||||
|
||||
if (q == Q_R1 || q == Q_R2) {
|
||||
u32 rxup, rxlo;
|
||||
u32 space = (endk - startk) * 4096/8;
|
||||
u32 tp = space - space/4;
|
||||
|
||||
rxlo = len/2;
|
||||
rxup = rxlo + len/4;
|
||||
/* On receive queue's set the thresholds
|
||||
* give receiver priority when > 3/4 full
|
||||
* send pause when down to 2K
|
||||
*/
|
||||
sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
|
||||
sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
|
||||
|
||||
/* Set thresholds on receive queue's */
|
||||
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup);
|
||||
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo);
|
||||
tp = space - 2048/8;
|
||||
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
|
||||
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
|
||||
} else {
|
||||
/* Enable store & forward on Tx queue's because
|
||||
* Tx FIFO is only 1K on Yukon
|
||||
@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
|
||||
* This is a workaround code taken from SysKonnect sk98lin driver
|
||||
* to deal with chip bug on Yukon EC rev 0 in the wraparound case.
|
||||
*/
|
||||
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
|
||||
static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
|
||||
u16 idx, u16 *last, u16 size)
|
||||
{
|
||||
wmb();
|
||||
if (is_ec_a1(hw) && idx < *last) {
|
||||
u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
|
||||
|
||||
@ -721,6 +731,7 @@ setnew:
|
||||
sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
|
||||
}
|
||||
*last = idx;
|
||||
mmiowb();
|
||||
}
|
||||
|
||||
|
||||
@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
/* Return high part of DMA address (could be 32 or 64 bit) */
static inline u32 high32(dma_addr_t a)
{
return (a >> 16) >> 16;
return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
}

/* Build description to hardware about buffer */
static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
{
struct sky2_rx_le *le;
u32 hi = high32(map);
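A note on the high32() change above: the double 16-bit shift is kept rather than a single shift by 32 because, when dma_addr_t is a 32-bit type, shifting it by 32 is undefined in C and draws a compiler warning, while two 16-bit shifts are always valid and simply yield 0; the added sizeof() test now states that zero result explicitly. Standalone sketch of the idiom:

/* illustration only */
#include <stdint.h>

static inline uint32_t upper_32_bits_of(uint64_t a)
{
	return (uint32_t)((a >> 16) >> 16);	/* two-step shift; safe even if the source type is only 32 bits */
}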
@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
|
||||
struct sky2_hw *hw = sky2->hw;
|
||||
u16 port = sky2->port;
|
||||
|
||||
spin_lock(&sky2->tx_lock);
|
||||
spin_lock_bh(&sky2->tx_lock);
|
||||
|
||||
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
|
||||
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
|
||||
sky2->vlgrp = grp;
|
||||
|
||||
spin_unlock(&sky2->tx_lock);
|
||||
spin_unlock_bh(&sky2->tx_lock);
|
||||
}
|
||||
|
||||
static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
struct sky2_hw *hw = sky2->hw;
|
||||
u16 port = sky2->port;
|
||||
|
||||
spin_lock(&sky2->tx_lock);
|
||||
spin_lock_bh(&sky2->tx_lock);
|
||||
|
||||
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
|
||||
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
|
||||
if (sky2->vlgrp)
|
||||
sky2->vlgrp->vlan_devices[vid] = NULL;
|
||||
|
||||
spin_unlock(&sky2->tx_lock);
|
||||
spin_unlock_bh(&sky2->tx_lock);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* It appears the hardware has a bug in the FIFO logic that
|
||||
* cause it to hang if the FIFO gets overrun and the receive buffer
|
||||
* is not aligned. ALso alloc_skb() won't align properly if slab
|
||||
* debugging is enabled.
|
||||
*/
|
||||
static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
|
||||
if (likely(skb)) {
|
||||
unsigned long p = (unsigned long) skb->data;
|
||||
skb_reserve(skb,
|
||||
((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
|
||||
}
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate and setup receiver buffer pool.
|
||||
* In case of 64 bit dma, there are 2X as many list elements
|
||||
* available as ring entries
|
||||
* and need to reserve one list element so we don't wrap around.
|
||||
*
|
||||
* It appears the hardware has a bug in the FIFO logic that
|
||||
* cause it to hang if the FIFO gets overrun and the receive buffer
|
||||
* is not aligned. This means we can't use skb_reserve to align
|
||||
* the IP header.
|
||||
*/
|
||||
static int sky2_rx_start(struct sky2_port *sky2)
|
||||
{
|
||||
@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
|
||||
for (i = 0; i < sky2->rx_pending; i++) {
|
||||
struct ring_info *re = sky2->rx_ring + i;
|
||||
|
||||
re->skb = dev_alloc_skb(sky2->rx_bufsize);
|
||||
re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
|
||||
if (!re->skb)
|
||||
goto nomem;
|
||||
|
||||
@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev)
|
||||
|
||||
sky2_mac_init(hw, port);
|
||||
|
||||
/* Configure RAM buffers */
|
||||
if (hw->chip_id == CHIP_ID_YUKON_FE ||
|
||||
(hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
|
||||
ramsize = 4096;
|
||||
else {
|
||||
u8 e0 = sky2_read8(hw, B2_E_0);
|
||||
ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096);
|
||||
}
|
||||
/* Determine available ram buffer space (in 4K blocks).
|
||||
* Note: not sure about the FE setting below yet
|
||||
*/
|
||||
if (hw->chip_id == CHIP_ID_YUKON_FE)
|
||||
ramsize = 4;
|
||||
else
|
||||
ramsize = sky2_read8(hw, B2_E_0);
|
||||
|
||||
/* Give transmitter one third (rounded up) */
|
||||
rxspace = ramsize - (ramsize + 2) / 3;
|
||||
|
||||
/* 2/3 for Rx */
|
||||
rxspace = (2 * ramsize) / 3;
|
||||
sky2_ramset(hw, rxqaddr[port], 0, rxspace);
|
||||
sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
|
||||
sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
|
||||
|
||||
/* Make sure SyncQ is disabled */
|
||||
sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
|
||||
@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2)
|
||||
}
|
||||
|
||||
/* Estimate of number of transmit list elements required */
|
||||
static inline unsigned tx_le_req(const struct sk_buff *skb)
|
||||
static unsigned tx_le_req(const struct sk_buff *skb)
|
||||
{
|
||||
unsigned count;
|
||||
|
||||
@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
u16 mss;
|
||||
u8 ctrl;
|
||||
|
||||
/* No BH disabling for tx_lock here. We are running in BH disabled
|
||||
* context and TX reclaim runs via poll inside of a software
|
||||
* interrupt, and no related locks in IRQ processing.
|
||||
*/
|
||||
if (!spin_trylock(&sky2->tx_lock))
|
||||
return NETDEV_TX_LOCKED;
|
||||
|
||||
@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
*/
|
||||
if (!netif_queue_stopped(dev)) {
|
||||
netif_stop_queue(dev);
|
||||
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
|
||||
dev->name);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
|
||||
dev->name);
|
||||
}
|
||||
spin_unlock(&sky2->tx_lock);
|
||||
|
||||
@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
|
||||
mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
|
||||
frag->size, PCI_DMA_TODEVICE);
|
||||
addr64 = (mapping >> 16) >> 16;
|
||||
addr64 = high32(mapping);
|
||||
if (addr64 != sky2->tx_addr64) {
|
||||
le = get_tx_le(sky2);
|
||||
le->tx.addr = cpu_to_le32(addr64);
|
||||
@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
netif_stop_queue(dev);
|
||||
|
||||
out_unlock:
|
||||
mmiowb();
|
||||
spin_unlock(&sky2->tx_lock);
|
||||
|
||||
dev->trans_start = jiffies;
|
||||
@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
|
||||
spin_lock(&sky2->tx_lock);
|
||||
sky2->tx_cons = put;
|
||||
if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
|
||||
netif_wake_queue(dev);
|
||||
spin_unlock(&sky2->tx_lock);
|
||||
}
|
||||
|
||||
/* Cleanup all untransmitted buffers, assume transmitter not running */
|
||||
static void sky2_tx_clean(struct sky2_port *sky2)
|
||||
{
|
||||
spin_lock_bh(&sky2->tx_lock);
|
||||
sky2_tx_complete(sky2, sky2->tx_prod);
|
||||
spin_unlock_bh(&sky2->tx_lock);
|
||||
}
|
||||
|
||||
/* Network shutdown */
|
||||
@ -1582,28 +1612,40 @@ out:
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
|
||||
/* Transmit timeout is only called if we are running, carries is up
|
||||
* and tx queue is full (stopped).
|
||||
*/
|
||||
static void sky2_tx_timeout(struct net_device *dev)
|
||||
{
|
||||
struct sky2_port *sky2 = netdev_priv(dev);
|
||||
struct sky2_hw *hw = sky2->hw;
|
||||
unsigned txq = txqaddr[sky2->port];
|
||||
u16 ridx;
|
||||
|
||||
/* Maybe we just missed an status interrupt */
|
||||
spin_lock(&sky2->tx_lock);
|
||||
ridx = sky2_read16(hw,
|
||||
sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
|
||||
sky2_tx_complete(sky2, ridx);
|
||||
spin_unlock(&sky2->tx_lock);
|
||||
|
||||
if (!netif_queue_stopped(dev)) {
|
||||
if (net_ratelimit())
|
||||
pr_info(PFX "transmit interrupt missed? recovered\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (netif_msg_timer(sky2))
|
||||
printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
|
||||
|
||||
netif_stop_queue(dev);
|
||||
|
||||
sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
|
||||
sky2_read32(hw, Q_ADDR(txq, Q_CSR));
|
||||
|
||||
sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
|
||||
|
||||
sky2_tx_clean(sky2);
|
||||
|
||||
sky2_qset(hw, txq);
|
||||
sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
|
||||
|
||||
netif_wake_queue(dev);
|
||||
}
|
||||
|
||||
|
||||
@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
|
||||
} else {
|
||||
struct sk_buff *nskb;
|
||||
|
||||
nskb = dev_alloc_skb(sky2->rx_bufsize);
|
||||
nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
|
||||
if (!nskb)
|
||||
goto resubmit;
|
||||
|
||||
@ -1745,7 +1787,7 @@ oversize:
|
||||
error:
|
||||
++sky2->net_stats.rx_errors;
|
||||
|
||||
if (netif_msg_rx_err(sky2))
|
||||
if (netif_msg_rx_err(sky2) && net_ratelimit())
|
||||
printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
|
||||
sky2->netdev->name, status, length);
|
||||
|
||||
@ -1766,13 +1808,16 @@ error:
|
||||
*/
|
||||
#define TX_NO_STATUS 0xffff
|
||||
|
||||
static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
|
||||
static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
|
||||
{
|
||||
if (last != TX_NO_STATUS) {
|
||||
struct net_device *dev = hw->dev[port];
|
||||
if (dev && netif_running(dev)) {
|
||||
struct sky2_port *sky2 = netdev_priv(dev);
|
||||
|
||||
spin_lock(&sky2->tx_lock);
|
||||
sky2_tx_complete(sky2, last);
|
||||
spin_unlock(&sky2->tx_lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
|
||||
struct sk_buff *skb;
|
||||
u32 status;
|
||||
u16 length;
|
||||
u8 op;
|
||||
|
||||
le = hw->st_le + hw->st_idx;
|
||||
hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
|
||||
@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget)
|
||||
sky2 = netdev_priv(dev);
|
||||
status = le32_to_cpu(le->status);
|
||||
length = le16_to_cpu(le->length);
|
||||
op = le->opcode & ~HW_OWNER;
|
||||
le->opcode = 0;
|
||||
|
||||
switch (op) {
|
||||
switch (le->opcode & ~HW_OWNER) {
|
||||
case OP_RXSTAT:
|
||||
skb = sky2_receive(sky2, length, status);
|
||||
if (!skb)
|
||||
@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)
|
||||
default:
|
||||
if (net_ratelimit())
|
||||
printk(KERN_WARNING PFX
|
||||
"unknown status opcode 0x%x\n", op);
|
||||
"unknown status opcode 0x%x\n", le->opcode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
exit_loop:
|
||||
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
|
||||
mmiowb();
|
||||
|
||||
sky2_tx_check(hw, 0, tx_done[0]);
|
||||
sky2_tx_check(hw, 1, tx_done[1]);
|
||||
@ -1887,7 +1928,6 @@ exit_loop:
|
||||
netif_rx_complete(dev0);
|
||||
hw->intr_mask |= Y2_IS_STAT_BMU;
|
||||
sky2_write32(hw, B0_IMSK, hw->intr_mask);
|
||||
mmiowb();
|
||||
return 0;
|
||||
} else {
|
||||
*budget -= work_done;
|
||||
@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
|
||||
{
|
||||
struct net_device *dev = hw->dev[port];
|
||||
|
||||
printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
|
||||
dev->name, status);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
|
||||
dev->name, status);
|
||||
|
||||
if (status & Y2_IS_PAR_RD1) {
|
||||
printk(KERN_ERR PFX "%s: ram data read parity error\n",
|
||||
dev->name);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: ram data read parity error\n",
|
||||
dev->name);
|
||||
/* Clear IRQ */
|
||||
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
|
||||
}
|
||||
|
||||
if (status & Y2_IS_PAR_WR1) {
|
||||
printk(KERN_ERR PFX "%s: ram data write parity error\n",
|
||||
dev->name);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: ram data write parity error\n",
|
||||
dev->name);
|
||||
|
||||
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
|
||||
}
|
||||
|
||||
if (status & Y2_IS_PAR_MAC1) {
|
||||
printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
|
||||
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
|
||||
}
|
||||
|
||||
if (status & Y2_IS_PAR_RX1) {
|
||||
printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
|
||||
sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
|
||||
}
|
||||
|
||||
if (status & Y2_IS_TCP_TXA1) {
|
||||
printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: TCP segmentation error\n",
|
||||
dev->name);
|
||||
sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
|
||||
}
|
||||
}
|
||||
@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
|
||||
u16 pci_err;
|
||||
|
||||
pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
|
||||
printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
|
||||
pci_name(hw->pdev), pci_err);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
|
||||
pci_name(hw->pdev), pci_err);
|
||||
|
||||
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
|
||||
pci_write_config_word(hw->pdev, PCI_STATUS,
|
||||
@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
|
||||
|
||||
pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
|
||||
|
||||
printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
|
||||
pci_name(hw->pdev), pex_err);
|
||||
if (net_ratelimit())
|
||||
printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
|
||||
pci_name(hw->pdev), pex_err);
|
||||
|
||||
/* clear the interrupt */
|
||||
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
|
||||
@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
|
||||
static u32 sky2_supported_modes(const struct sky2_hw *hw)
|
||||
{
|
||||
u32 modes;
|
||||
if (hw->copper) {
|
||||
@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
|
||||
return dev;
|
||||
}
|
||||
|
||||
static inline void sky2_show_addr(struct net_device *dev)
|
||||
static void __devinit sky2_show_addr(struct net_device *dev)
|
||||
{
|
||||
const struct sky2_port *sky2 = netdev_priv(dev);
|
||||
|
||||
@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
|
||||
goto err_out_free_regions;
|
||||
}
|
||||
|
||||
if (sizeof(dma_addr_t) > sizeof(u32)) {
|
||||
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
|
||||
if (!err)
|
||||
using_dac = 1;
|
||||
}
|
||||
if (sizeof(dma_addr_t) > sizeof(u32) &&
|
||||
!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
|
||||
using_dac = 1;
|
||||
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
|
||||
if (err < 0) {
|
||||
printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
|
||||
"for consistent allocations\n", pci_name(pdev));
|
||||
goto err_out_free_regions;
|
||||
}
|
||||
|
||||
if (!using_dac) {
|
||||
} else {
|
||||
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
|
||||
if (err) {
|
||||
printk(KERN_ERR PFX "%s no usable DMA configuration\n",
|
||||
@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
|
||||
goto err_out_free_regions;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
/* byte swap descriptors in hardware */
|
||||
{
|
||||
@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
|
||||
#endif
|
||||
|
||||
err = -ENOMEM;
|
||||
hw = kmalloc(sizeof(*hw), GFP_KERNEL);
|
||||
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
|
||||
if (!hw) {
|
||||
printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
|
||||
pci_name(pdev));
|
||||
goto err_out_free_regions;
|
||||
}
|
||||
|
||||
memset(hw, 0, sizeof(*hw));
|
||||
hw->pdev = pdev;
|
||||
|
||||
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
|
||||
|
@ -22,7 +22,6 @@
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/delay.h>
|
||||
@ -30,6 +29,7 @@
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/ip.h>
|
||||
@ -43,6 +43,7 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <asm/bitops.h>
|
||||
@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
|
||||
writel(value, card->regs + reg);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_write_reg_sync - writes to an SMMIO register of a card
|
||||
* @card: device structure
|
||||
* @reg: register to write to
|
||||
* @value: value to write into the specified SMMIO register
|
||||
*
|
||||
* Unlike spider_net_write_reg, this will also make sure the
|
||||
* data arrives on the card by reading the reg again.
|
||||
*/
|
||||
static void
|
||||
spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
|
||||
{
|
||||
value = cpu_to_le32(value);
|
||||
writel(value, card->regs + reg);
|
||||
(void)readl(card->regs + reg);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_rx_irq_off - switch off rx irq on this spider card
|
||||
* @card: device structure
|
||||
*
|
||||
* switches off rx irq by masking them out in the GHIINTnMSK register
|
||||
*/
|
||||
static void
|
||||
spider_net_rx_irq_off(struct spider_net_card *card)
|
||||
{
|
||||
u32 regvalue;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&card->intmask_lock, flags);
|
||||
regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
|
||||
regvalue &= ~SPIDER_NET_RXINT;
|
||||
spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
|
||||
spin_unlock_irqrestore(&card->intmask_lock, flags);
|
||||
}
|
||||
|
||||
/** spider_net_write_phy - write to phy register
|
||||
* @netdev: adapter to be written to
|
||||
* @mii_id: id of MII
|
||||
@ -198,6 +163,21 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
|
||||
return readvalue;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_rx_irq_off - switch off rx irq on this spider card
|
||||
* @card: device structure
|
||||
*
|
||||
* switches off rx irq by masking them out in the GHIINTnMSK register
|
||||
*/
|
||||
static void
|
||||
spider_net_rx_irq_off(struct spider_net_card *card)
|
||||
{
|
||||
u32 regvalue;
|
||||
|
||||
regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
|
||||
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_rx_irq_on - switch on rx irq on this spider card
|
||||
* @card: device structure
|
||||
@ -208,51 +188,9 @@ static void
|
||||
spider_net_rx_irq_on(struct spider_net_card *card)
|
||||
{
|
||||
u32 regvalue;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&card->intmask_lock, flags);
|
||||
regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
|
||||
regvalue |= SPIDER_NET_RXINT;
|
||||
spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
|
||||
spin_unlock_irqrestore(&card->intmask_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_tx_irq_off - switch off tx irq on this spider card
|
||||
* @card: device structure
|
||||
*
|
||||
* switches off tx irq by masking them out in the GHIINTnMSK register
|
||||
*/
|
||||
static void
|
||||
spider_net_tx_irq_off(struct spider_net_card *card)
|
||||
{
|
||||
u32 regvalue;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&card->intmask_lock, flags);
|
||||
regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
|
||||
regvalue &= ~SPIDER_NET_TXINT;
|
||||
spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
|
||||
spin_unlock_irqrestore(&card->intmask_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_tx_irq_on - switch on tx irq on this spider card
|
||||
* @card: device structure
|
||||
*
|
||||
* switches on tx irq by enabling them in the GHIINTnMSK register
|
||||
*/
|
||||
static void
|
||||
spider_net_tx_irq_on(struct spider_net_card *card)
|
||||
{
|
||||
u32 regvalue;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&card->intmask_lock, flags);
|
||||
regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
|
||||
regvalue |= SPIDER_NET_TXINT;
|
||||
spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
|
||||
spin_unlock_irqrestore(&card->intmask_lock, flags);
|
||||
regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
|
||||
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -326,9 +264,8 @@ static enum spider_net_descr_status
|
||||
spider_net_get_descr_status(struct spider_net_descr *descr)
|
||||
{
|
||||
u32 cmd_status;
|
||||
rmb();
|
||||
|
||||
cmd_status = descr->dmac_cmd_status;
|
||||
rmb();
|
||||
cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
|
||||
/* no need to mask out any bits, as cmd_status is 32 bits wide only
|
||||
* (and unsigned) */
|
||||
@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
|
||||
{
|
||||
u32 cmd_status;
|
||||
/* read the status */
|
||||
mb();
|
||||
cmd_status = descr->dmac_cmd_status;
|
||||
/* clean the upper 4 bits */
|
||||
cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
|
||||
@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
|
||||
cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
|
||||
/* and write it back */
|
||||
descr->dmac_cmd_status = cmd_status;
|
||||
wmb();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
|
||||
{
|
||||
int i;
|
||||
struct spider_net_descr *descr;
|
||||
dma_addr_t buf;
|
||||
|
||||
spin_lock_init(&card->chain_lock);
|
||||
atomic_set(&card->rx_chain_refill,0);
|
||||
|
||||
descr = start_descr;
|
||||
memset(descr, 0, sizeof(*descr) * no);
|
||||
@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
|
||||
for (i=0; i<no; i++, descr++) {
|
||||
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
|
||||
|
||||
descr->bus_addr =
|
||||
pci_map_single(card->pdev, descr,
|
||||
SPIDER_NET_DESCR_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
buf = pci_map_single(card->pdev, descr,
|
||||
SPIDER_NET_DESCR_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
if (descr->bus_addr == DMA_ERROR_CODE)
|
||||
if (buf == DMA_ERROR_CODE)
|
||||
goto iommu_error;
|
||||
|
||||
descr->bus_addr = buf;
|
||||
descr->next = descr + 1;
|
||||
descr->prev = descr - 1;
|
||||
|
||||
@ -439,7 +375,8 @@ iommu_error:
|
||||
for (i=0; i < no; i++, descr++)
|
||||
if (descr->bus_addr)
|
||||
pci_unmap_single(card->pdev, descr->bus_addr,
|
||||
SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
SPIDER_NET_DESCR_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
|
||||
if (descr->skb) {
|
||||
dev_kfree_skb(descr->skb);
|
||||
pci_unmap_single(card->pdev, descr->buf_addr,
|
||||
SPIDER_NET_MAX_MTU,
|
||||
SPIDER_NET_MAX_FRAME,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
}
|
||||
descr = descr->next;
|
||||
@ -480,12 +417,13 @@ static int
|
||||
spider_net_prepare_rx_descr(struct spider_net_card *card,
|
||||
struct spider_net_descr *descr)
|
||||
{
|
||||
dma_addr_t buf;
|
||||
int error = 0;
|
||||
int offset;
|
||||
int bufsize;
|
||||
|
||||
/* we need to round up the buffer size to a multiple of 128 */
|
||||
bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) &
|
||||
bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
|
||||
(~(SPIDER_NET_RXBUF_ALIGN - 1));
|
||||
|
||||
/* and we need to have it 128 byte aligned, therefore we allocate a
|
||||
@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
|
||||
/* allocate an skb */
|
||||
descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
|
||||
if (!descr->skb) {
|
||||
if (net_ratelimit())
|
||||
if (netif_msg_rx_err(card))
|
||||
pr_err("Not enough memory to allocate "
|
||||
"rx buffer\n");
|
||||
if (netif_msg_rx_err(card) && net_ratelimit())
|
||||
pr_err("Not enough memory to allocate rx buffer\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
descr->buf_size = bufsize;
|
||||
@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
|
||||
if (offset)
|
||||
skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
|
||||
/* io-mmu-map the skb */
|
||||
descr->buf_addr = pci_map_single(card->pdev, descr->skb->data,
|
||||
SPIDER_NET_MAX_MTU,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
if (descr->buf_addr == DMA_ERROR_CODE) {
|
||||
buf = pci_map_single(card->pdev, descr->skb->data,
|
||||
SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
|
||||
descr->buf_addr = buf;
|
||||
if (buf == DMA_ERROR_CODE) {
|
||||
dev_kfree_skb_any(descr->skb);
|
||||
if (netif_msg_rx_err(card))
|
||||
if (netif_msg_rx_err(card) && net_ratelimit())
|
||||
pr_err("Could not iommu-map rx buffer\n");
|
||||
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
|
||||
} else {
|
||||
@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_enable_rxctails - sets RX dmac chain tail addresses
|
||||
* spider_net_enable_rxchtails - sets RX dmac chain tail addresses
|
||||
* @card: card structure
|
||||
*
|
||||
* spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the
|
||||
* spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the
|
||||
* chip by writing to the appropriate register. DMA is enabled in
|
||||
* spider_net_enable_rxdmac.
|
||||
*/
|
||||
@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
|
||||
static void
|
||||
spider_net_enable_rxdmac(struct spider_net_card *card)
|
||||
{
|
||||
wmb();
|
||||
spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
|
||||
SPIDER_NET_DMA_RX_VALUE);
|
||||
}
|
||||
@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
|
||||
* spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
|
||||
* @card: card structure
|
||||
*
|
||||
* refills descriptors in all chains (last used chain first): allocates skbs
|
||||
* and iommu-maps them.
|
||||
* refills descriptors in the rx chain: allocates skbs and iommu-maps them.
|
||||
*/
|
||||
static void
|
||||
spider_net_refill_rx_chain(struct spider_net_card *card)
|
||||
{
|
||||
struct spider_net_descr_chain *chain;
|
||||
int count = 0;
|
||||
unsigned long flags;
|
||||
|
||||
chain = &card->rx_chain;
|
||||
|
||||
spin_lock_irqsave(&card->chain_lock, flags);
|
||||
while (spider_net_get_descr_status(chain->head) ==
|
||||
SPIDER_NET_DESCR_NOT_IN_USE) {
|
||||
if (spider_net_prepare_rx_descr(card, chain->head))
|
||||
break;
|
||||
count++;
|
||||
chain->head = chain->head->next;
|
||||
}
|
||||
spin_unlock_irqrestore(&card->chain_lock, flags);
|
||||
/* one context doing the refill (and a second context seeing that
|
||||
* and omitting it) is ok. If called by NAPI, we'll be called again
|
||||
* as spider_net_decode_one_descr is called several times. If some
|
||||
* interrupt calls us, the NAPI is about to clean up anyway. */
|
||||
if (atomic_inc_return(&card->rx_chain_refill) == 1)
|
||||
while (spider_net_get_descr_status(chain->head) ==
|
||||
SPIDER_NET_DESCR_NOT_IN_USE) {
|
||||
if (spider_net_prepare_rx_descr(card, chain->head))
|
||||
break;
|
||||
chain->head = chain->head->next;
|
||||
}
|
||||
|
||||
/* could be optimized, only do that, if we know the DMA processing
|
||||
* has terminated */
|
||||
if (count)
|
||||
spider_net_enable_rxdmac(card);
|
||||
atomic_dec(&card->rx_chain_refill);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
|
||||
/* this will allocate the rest of the rx buffers; if not, it's
|
||||
* business as usual later on */
|
||||
spider_net_refill_rx_chain(card);
|
||||
spider_net_enable_rxdmac(card);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
|
||||
* @card: adapter structure
|
||||
* @brutal: if set, don't care about whether descriptor seems to be in use
|
||||
*
|
||||
* releases the tx descriptors that spider has finished with (if non-brutal)
|
||||
* or simply release tx descriptors (if brutal)
|
||||
* returns 0 if the tx ring is empty, otherwise 1.
|
||||
*
|
||||
* spider_net_release_tx_chain releases the tx descriptors that spider has
|
||||
* finished with (if non-brutal) or simply release tx descriptors (if brutal).
|
||||
* If some other context is calling this function, we return 1 so that we're
|
||||
* scheduled again (if we were scheduled) and will not loose initiative.
|
||||
*/
|
||||
static void
|
||||
static int
|
||||
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
|
||||
{
|
||||
struct spider_net_descr_chain *tx_chain = &card->tx_chain;
|
||||
enum spider_net_descr_status status;
|
||||
|
||||
spider_net_tx_irq_off(card);
|
||||
if (atomic_inc_return(&card->tx_chain_release) != 1) {
|
||||
atomic_dec(&card->tx_chain_release);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* no lock for chain needed, if this is only executed once at a time */
|
||||
again:
|
||||
for (;;) {
|
||||
status = spider_net_get_descr_status(tx_chain->tail);
|
||||
switch (status) {
|
||||
case SPIDER_NET_DESCR_CARDOWNED:
|
||||
if (!brutal) goto out;
|
||||
if (!brutal)
|
||||
goto out;
|
||||
/* fallthrough, if we release the descriptors
|
||||
* brutally (then we don't care about
|
||||
* SPIDER_NET_DESCR_CARDOWNED) */
|
||||
@ -693,25 +633,30 @@ again:
|
||||
tx_chain->tail = tx_chain->tail->next;
|
||||
}
|
||||
out:
|
||||
atomic_dec(&card->tx_chain_release);
|
||||
|
||||
netif_wake_queue(card->netdev);
|
||||
|
||||
if (!brutal) {
|
||||
/* switch on tx irqs (while we are still in the interrupt
|
||||
* handler, so we don't get an interrupt), check again
|
||||
* for done descriptors. This results in fewer interrupts */
|
||||
spider_net_tx_irq_on(card);
|
||||
status = spider_net_get_descr_status(tx_chain->tail);
|
||||
switch (status) {
|
||||
case SPIDER_NET_DESCR_RESPONSE_ERROR:
|
||||
case SPIDER_NET_DESCR_PROTECTION_ERROR:
|
||||
case SPIDER_NET_DESCR_FORCE_END:
|
||||
case SPIDER_NET_DESCR_COMPLETE:
|
||||
goto again;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (status == SPIDER_NET_DESCR_CARDOWNED)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_cleanup_tx_ring - cleans up the TX ring
|
||||
* @card: card structure
|
||||
*
|
||||
* spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
|
||||
* interrupts to cleanup our TX ring) and returns sent packets to the stack
|
||||
* by freeing them
|
||||
*/
|
||||
static void
|
||||
spider_net_cleanup_tx_ring(struct spider_net_card *card)
|
||||
{
|
||||
if ( (spider_net_release_tx_chain(card, 0)) &&
|
||||
(card->netdev->flags & IFF_UP) ) {
|
||||
mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -726,16 +671,22 @@ out:
|
||||
static u8
|
||||
spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
|
||||
{
|
||||
/* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
|
||||
* ff:ff:ff:ff:ff:ff must result in 0xfd */
|
||||
u32 crc;
|
||||
u8 hash;
|
||||
char addr_for_crc[ETH_ALEN] = { 0, };
|
||||
int i, bit;
|
||||
|
||||
crc = crc32_be(~0, addr, netdev->addr_len);
|
||||
for (i = 0; i < ETH_ALEN * 8; i++) {
|
||||
bit = (addr[i / 8] >> (i % 8)) & 1;
|
||||
addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
|
||||
}
|
||||
|
||||
crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
|
||||
|
||||
hash = (crc >> 27);
|
||||
hash <<= 3;
|
||||
hash |= crc & 7;
|
||||
hash &= 0xff;
|
||||
|
||||
return hash;
|
||||
}
|
||||
@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
|
||||
{
|
||||
struct spider_net_card *card = netdev_priv(netdev);
|
||||
|
||||
tasklet_kill(&card->rxram_full_tl);
|
||||
netif_poll_disable(netdev);
|
||||
netif_carrier_off(netdev);
|
||||
netif_stop_queue(netdev);
|
||||
del_timer_sync(&card->tx_timer);
|
||||
|
||||
/* disable/mask all interrupts */
|
||||
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
|
||||
@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
|
||||
* @skb: packet to consider
|
||||
*
|
||||
* fills out the command and status field of the descriptor structure,
|
||||
* depending on hardware checksum settings. This function assumes a wmb()
|
||||
* has executed before.
|
||||
* depending on hardware checksum settings.
|
||||
*/
|
||||
static void
|
||||
spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
/* make sure the other fields in the descriptor are written */
|
||||
wmb();
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_HW) {
|
||||
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
|
||||
return;
|
||||
@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
|
||||
/* is packet ip?
|
||||
* if yes: tcp? udp? */
|
||||
if (skb->protocol == htons(ETH_P_IP)) {
|
||||
if (skb->nh.iph->protocol == IPPROTO_TCP) {
|
||||
if (skb->nh.iph->protocol == IPPROTO_TCP)
|
||||
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
|
||||
} else if (skb->nh.iph->protocol == IPPROTO_UDP) {
|
||||
else if (skb->nh.iph->protocol == IPPROTO_UDP)
|
||||
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
|
||||
} else { /* the stack should checksum non-tcp and non-udp
|
||||
packets on his own: NETIF_F_IP_CSUM */
|
||||
else /* the stack should checksum non-tcp and non-udp
|
||||
packets on his own: NETIF_F_IP_CSUM */
|
||||
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
|
||||
struct spider_net_descr *descr,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
descr->buf_addr = pci_map_single(card->pdev, skb->data,
|
||||
skb->len, PCI_DMA_BIDIRECTIONAL);
|
||||
if (descr->buf_addr == DMA_ERROR_CODE) {
|
||||
if (netif_msg_tx_err(card))
|
||||
dma_addr_t buf;
|
||||
|
||||
buf = pci_map_single(card->pdev, skb->data,
|
||||
skb->len, PCI_DMA_BIDIRECTIONAL);
|
||||
if (buf == DMA_ERROR_CODE) {
|
||||
if (netif_msg_tx_err(card) && net_ratelimit())
|
||||
pr_err("could not iommu-map packet (%p, %i). "
|
||||
"Dropping packet\n", skb->data, skb->len);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
descr->buf_addr = buf;
|
||||
descr->buf_size = skb->len;
|
||||
descr->skb = skb;
|
||||
descr->data_status = 0;
|
||||
|
||||
/* make sure the above values are in memory before we change the
|
||||
* status */
|
||||
wmb();
|
||||
|
||||
spider_net_set_txdescr_cmdstat(descr,skb);
|
||||
|
||||
return 0;
|
||||
@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
struct spider_net_descr *descr;
|
||||
int result;
|
||||
|
||||
spider_net_release_tx_chain(card, 0);
|
||||
|
||||
descr = spider_net_get_next_tx_descr(card);
|
||||
|
||||
if (!descr) {
|
||||
netif_stop_queue(netdev);
|
||||
|
||||
descr = spider_net_get_next_tx_descr(card);
|
||||
if (!descr)
|
||||
goto error;
|
||||
else
|
||||
netif_start_queue(netdev);
|
||||
}
|
||||
if (!descr)
|
||||
goto error;
|
||||
|
||||
result = spider_net_prepare_tx_descr(card, descr, skb);
|
||||
if (result)
|
||||
@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
|
||||
card->tx_chain.head = card->tx_chain.head->next;
|
||||
|
||||
/* make sure the status from spider_net_prepare_tx_descr is in
|
||||
* memory before we check out the previous descriptor */
|
||||
wmb();
|
||||
|
||||
if (spider_net_get_descr_status(descr->prev) !=
|
||||
SPIDER_NET_DESCR_CARDOWNED)
|
||||
spider_net_kick_tx_dma(card, descr);
|
||||
SPIDER_NET_DESCR_CARDOWNED) {
|
||||
/* make sure the current descriptor is in memory. Then
|
||||
* kicking it on again makes sense, if the previous is not
|
||||
* card-owned anymore. Check the previous descriptor twice
|
||||
* to omit an mb() in heavy traffic cases */
|
||||
mb();
|
||||
if (spider_net_get_descr_status(descr->prev) !=
|
||||
SPIDER_NET_DESCR_CARDOWNED)
|
||||
spider_net_kick_tx_dma(card, descr);
|
||||
}
|
||||
|
||||
mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
error:
|
||||
card->netdev_stats.tx_dropped++;
|
||||
return NETDEV_TX_LOCKED;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
|
||||
* spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
|
||||
* @descr: descriptor to process
|
||||
* @card: card structure
|
||||
* @napi: whether caller is in NAPI context
|
||||
*
|
||||
* returns 1 on success, 0 if no packet was passed to the stack
|
||||
*
|
||||
@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
|
||||
*/
|
||||
static int
|
||||
spider_net_pass_skb_up(struct spider_net_descr *descr,
|
||||
struct spider_net_card *card)
|
||||
struct spider_net_card *card, int napi)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct net_device *netdev;
|
||||
@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
|
||||
|
||||
netdev = card->netdev;
|
||||
|
||||
/* check for errors in the data_error flag */
|
||||
if ((data_error & SPIDER_NET_DATA_ERROR_MASK) &&
|
||||
netif_msg_rx_err(card))
|
||||
pr_err("error in received descriptor found, "
|
||||
"data_status=x%08x, data_error=x%08x\n",
|
||||
data_status, data_error);
|
||||
|
||||
/* prepare skb, unmap descriptor */
|
||||
skb = descr->skb;
|
||||
pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
|
||||
/* unmap descriptor */
|
||||
pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
/* the cases we'll throw away the packet immediately */
|
||||
if (data_error & SPIDER_NET_DESTROY_RX_FLAGS)
|
||||
if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
|
||||
if (netif_msg_rx_err(card))
|
||||
pr_err("error in received descriptor found, "
|
||||
"data_status=x%08x, data_error=x%08x\n",
|
||||
data_status, data_error);
|
||||
return 0;
|
||||
}
|
||||
|
||||
skb = descr->skb;
|
||||
skb->dev = netdev;
|
||||
skb_put(skb, descr->valid_size);
|
||||
|
||||
@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
|
||||
|
||||
/* checksum offload */
|
||||
if (card->options.rx_csum) {
|
||||
if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
|
||||
(!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) )
|
||||
if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
|
||||
SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
|
||||
!(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
else
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
} else {
|
||||
} else
|
||||
skb->ip_summed = CHECKSUM_NONE;
|
||||
}
|
||||
|
||||
if (data_status & SPIDER_NET_VLAN_PACKET) {
|
||||
/* further enhancements: HW-accel VLAN
|
||||
@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
|
||||
}
|
||||
|
||||
/* pass skb up to stack */
|
||||
netif_receive_skb(skb);
|
||||
if (napi)
|
||||
netif_receive_skb(skb);
|
||||
else
|
||||
netif_rx_ni(skb);
|
||||
|
||||
/* update netdevice statistics */
|
||||
card->netdev_stats.rx_packets++;
|
||||
@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_decode_descr - processes an rx descriptor
|
||||
* spider_net_decode_one_descr - processes an rx descriptor
|
||||
* @card: card structure
|
||||
* @napi: whether caller is in NAPI context
|
||||
*
|
||||
* returns 1 if a packet has been sent to the stack, otherwise 0
|
||||
*
|
||||
* processes an rx descriptor by iommu-unmapping the data buffer and passing
|
||||
* the packet up to the stack
|
||||
* the packet up to the stack. This function is called in softirq
|
||||
* context, e.g. either bottom half from interrupt or NAPI polling context
|
||||
*/
|
||||
static int
|
||||
spider_net_decode_one_descr(struct spider_net_card *card)
|
||||
spider_net_decode_one_descr(struct spider_net_card *card, int napi)
|
||||
{
|
||||
enum spider_net_descr_status status;
|
||||
struct spider_net_descr *descr;
|
||||
@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
|
||||
|
||||
if (status == SPIDER_NET_DESCR_CARDOWNED) {
|
||||
/* nothing in the descriptor yet */
|
||||
return 0;
|
||||
result=0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
|
||||
/* not initialized yet, I bet chain->tail == chain->head
|
||||
* and the ring is empty */
|
||||
/* not initialized yet, the ring must be empty */
|
||||
spider_net_refill_rx_chain(card);
|
||||
return 0;
|
||||
spider_net_enable_rxdmac(card);
|
||||
result=0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* descriptor definitively used -- move on head */
|
||||
/* descriptor definitively used -- move on tail */
|
||||
chain->tail = descr->next;
|
||||
|
||||
result = 0;
|
||||
@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
|
||||
pr_err("%s: dropping RX descriptor with state %d\n",
|
||||
card->netdev->name, status);
|
||||
card->netdev_stats.rx_dropped++;
|
||||
pci_unmap_single(card->pdev, descr->buf_addr,
|
||||
SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
|
||||
dev_kfree_skb_irq(descr->skb);
|
||||
goto refill;
|
||||
}
|
||||
|
||||
@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
|
||||
}
|
||||
|
||||
/* ok, we've got a packet in descr */
|
||||
result = spider_net_pass_skb_up(descr, card);
|
||||
result = spider_net_pass_skb_up(descr, card, napi);
|
||||
refill:
|
||||
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
|
||||
/* change the descriptor state: */
|
||||
spider_net_refill_rx_chain(card);
|
||||
|
||||
if (!napi)
|
||||
spider_net_refill_rx_chain(card);
|
||||
out:
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
|
||||
packets_to_do = min(*budget, netdev->quota);
|
||||
|
||||
while (packets_to_do) {
|
||||
if (spider_net_decode_one_descr(card)) {
|
||||
if (spider_net_decode_one_descr(card, 1)) {
|
||||
packets_done++;
|
||||
packets_to_do--;
|
||||
} else {
|
||||
@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
|
||||
|
||||
netdev->quota -= packets_done;
|
||||
*budget -= packets_done;
|
||||
spider_net_refill_rx_chain(card);
|
||||
|
||||
/* if all packets are in the stack, enable interrupts and return 0 */
|
||||
/* if not, return 1 */
|
||||
@ -1341,6 +1306,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
|
||||
card->tx_chain.tail->bus_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
|
||||
* @card: card structure
|
||||
*
|
||||
* spider_net_handle_rxram_full empties the RX ring so that spider can put
|
||||
* more packets in it and empty its RX RAM. This is called in bottom half
|
||||
* context
|
||||
*/
|
||||
static void
|
||||
spider_net_handle_rxram_full(struct spider_net_card *card)
|
||||
{
|
||||
while (spider_net_decode_one_descr(card, 0))
|
||||
;
|
||||
spider_net_enable_rxchtails(card);
|
||||
spider_net_enable_rxdmac(card);
|
||||
netif_rx_schedule(card->netdev);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_handle_error_irq - handles errors raised by an interrupt
|
||||
* @card: card structure
|
||||
@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
|
||||
switch (i)
|
||||
{
|
||||
case SPIDER_NET_GTMFLLINT:
|
||||
if (netif_msg_intr(card))
|
||||
if (netif_msg_intr(card) && net_ratelimit())
|
||||
pr_err("Spider TX RAM full\n");
|
||||
show_error = 0;
|
||||
break;
|
||||
case SPIDER_NET_GRFDFLLINT: /* fallthrough */
|
||||
case SPIDER_NET_GRFCFLLINT: /* fallthrough */
|
||||
case SPIDER_NET_GRFBFLLINT: /* fallthrough */
|
||||
case SPIDER_NET_GRFAFLLINT: /* fallthrough */
|
||||
case SPIDER_NET_GRMFLLINT:
|
||||
if (netif_msg_intr(card))
|
||||
if (netif_msg_intr(card) && net_ratelimit())
|
||||
pr_err("Spider RX RAM full, incoming packets "
|
||||
"might be discarded !\n");
|
||||
netif_rx_schedule(card->netdev);
|
||||
spider_net_enable_rxchtails(card);
|
||||
spider_net_enable_rxdmac(card);
|
||||
"might be discarded!\n");
|
||||
spider_net_rx_irq_off(card);
|
||||
tasklet_schedule(&card->rxram_full_tl);
|
||||
show_error = 0;
|
||||
break;
|
||||
|
||||
/* case SPIDER_NET_GTMSHTINT: problem, print a message */
|
||||
@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
|
||||
/* allrighty. tx from previous descr ok */
|
||||
show_error = 0;
|
||||
break;
|
||||
/* case SPIDER_NET_GRFDFLLINT: print a message down there */
|
||||
/* case SPIDER_NET_GRFCFLLINT: print a message down there */
|
||||
/* case SPIDER_NET_GRFBFLLINT: print a message down there */
|
||||
/* case SPIDER_NET_GRFAFLLINT: print a message down there */
|
||||
|
||||
/* chain end */
|
||||
case SPIDER_NET_GDDDCEINT: /* fallthrough */
|
||||
@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
|
||||
"restarting DMAC %c.\n",
|
||||
'D'+i-SPIDER_NET_GDDDCEINT);
|
||||
spider_net_refill_rx_chain(card);
|
||||
spider_net_enable_rxdmac(card);
|
||||
show_error = 0;
|
||||
break;
|
||||
|
||||
@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
|
||||
case SPIDER_NET_GDAINVDINT:
|
||||
/* could happen when rx chain is full */
|
||||
spider_net_refill_rx_chain(card);
|
||||
spider_net_enable_rxdmac(card);
|
||||
show_error = 0;
|
||||
break;
|
||||
|
||||
@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
|
||||
if (!status_reg)
|
||||
return IRQ_NONE;
|
||||
|
||||
if (status_reg & SPIDER_NET_TXINT)
|
||||
spider_net_release_tx_chain(card, 0);
|
||||
|
||||
if (status_reg & SPIDER_NET_RXINT ) {
|
||||
spider_net_rx_irq_off(card);
|
||||
netif_rx_schedule(netdev);
|
||||
}
|
||||
|
||||
/* we do this after rx and tx processing, as we want the tx chain
|
||||
* processed to see, whether we should restart tx dma processing */
|
||||
spider_net_handle_error_irq(card, status_reg);
|
||||
if (status_reg & SPIDER_NET_ERRINT )
|
||||
spider_net_handle_error_irq(card, status_reg);
|
||||
|
||||
/* clear interrupt sources */
|
||||
spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
|
||||
@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
|
||||
/**
|
||||
* spider_net_download_firmware - loads firmware into the adapter
|
||||
* @card: card structure
|
||||
* @firmware: firmware pointer
|
||||
* @firmware_ptr: pointer to firmware data
|
||||
*
|
||||
* spider_net_download_firmware loads the firmware opened by
|
||||
* spider_net_init_firmware into the adapter.
|
||||
* spider_net_download_firmware loads the firmware data into the
|
||||
* adapter. It assumes the length etc. to be allright.
|
||||
*/
|
||||
static void
|
||||
static int
|
||||
spider_net_download_firmware(struct spider_net_card *card,
|
||||
const struct firmware *firmware)
|
||||
u8 *firmware_ptr)
|
||||
{
|
||||
int sequencer, i;
|
||||
u32 *fw_ptr = (u32 *)firmware->data;
|
||||
u32 *fw_ptr = (u32 *)firmware_ptr;
|
||||
|
||||
/* stop sequencers */
|
||||
spider_net_write_reg(card, SPIDER_NET_GSINIT,
|
||||
SPIDER_NET_STOP_SEQ_VALUE);
|
||||
|
||||
for (sequencer = 0; sequencer < 6; sequencer++) {
|
||||
for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
|
||||
sequencer++) {
|
||||
spider_net_write_reg(card,
|
||||
SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
|
||||
for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
|
||||
for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
|
||||
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
|
||||
sequencer * 8, *fw_ptr);
|
||||
fw_ptr++;
|
||||
}
|
||||
}
|
||||
|
||||
if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
|
||||
return -EIO;
|
||||
|
||||
spider_net_write_reg(card, SPIDER_NET_GSINIT,
|
||||
SPIDER_NET_RUN_SEQ_VALUE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
|
||||
static int
|
||||
spider_net_init_firmware(struct spider_net_card *card)
|
||||
{
|
||||
const struct firmware *firmware;
|
||||
int err = -EIO;
|
||||
struct firmware *firmware = NULL;
|
||||
struct device_node *dn;
|
||||
u8 *fw_prop = NULL;
|
||||
int err = -ENOENT;
|
||||
int fw_size;
|
||||
|
||||
if (request_firmware(&firmware,
|
||||
SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) {
|
||||
if (netif_msg_probe(card))
|
||||
pr_err("Couldn't read in sequencer data file %s.\n",
|
||||
SPIDER_NET_FIRMWARE_NAME);
|
||||
firmware = NULL;
|
||||
goto out;
|
||||
if (request_firmware((const struct firmware **)&firmware,
|
||||
SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
|
||||
if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
|
||||
netif_msg_probe(card) ) {
|
||||
pr_err("Incorrect size of spidernet firmware in " \
|
||||
"filesystem. Looking in host firmware...\n");
|
||||
goto try_host_fw;
|
||||
}
|
||||
err = spider_net_download_firmware(card, firmware->data);
|
||||
|
||||
release_firmware(firmware);
|
||||
if (err)
|
||||
goto try_host_fw;
|
||||
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) {
|
||||
if (netif_msg_probe(card))
|
||||
pr_err("Invalid size of sequencer data file %s.\n",
|
||||
SPIDER_NET_FIRMWARE_NAME);
|
||||
goto out;
|
||||
try_host_fw:
|
||||
dn = pci_device_to_OF_node(card->pdev);
|
||||
if (!dn)
|
||||
goto out_err;
|
||||
|
||||
fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
|
||||
if (!fw_prop)
|
||||
goto out_err;
|
||||
|
||||
if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
|
||||
netif_msg_probe(card) ) {
|
||||
pr_err("Incorrect size of spidernet firmware in " \
|
||||
"host firmware\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
spider_net_download_firmware(card, firmware);
|
||||
|
||||
err = 0;
|
||||
out:
|
||||
release_firmware(firmware);
|
||||
err = spider_net_download_firmware(card, fw_prop);
|
||||
|
||||
done:
|
||||
return err;
|
||||
out_err:
|
||||
if (netif_msg_probe(card))
|
||||
pr_err("Couldn't find spidernet firmware in filesystem " \
|
||||
"or host firmware\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
|
||||
SPIDER_NET_CKRCTRL_RUN_VALUE);
|
||||
|
||||
/* empty sequencer data */
|
||||
for (sequencer = 0; sequencer < 6; sequencer++) {
|
||||
for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
|
||||
sequencer++) {
|
||||
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
|
||||
sequencer * 8, 0x0);
|
||||
for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
|
||||
for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
|
||||
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
|
||||
sequencer * 8, 0x0);
|
||||
}
|
||||
@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
|
||||
SET_NETDEV_DEV(netdev, &card->pdev->dev);
|
||||
|
||||
pci_set_drvdata(card->pdev, netdev);
|
||||
spin_lock_init(&card->intmask_lock);
|
||||
|
||||
atomic_set(&card->tx_chain_release,0);
|
||||
card->rxram_full_tl.data = (unsigned long) card;
|
||||
card->rxram_full_tl.func =
|
||||
(void (*)(unsigned long)) spider_net_handle_rxram_full;
|
||||
init_timer(&card->tx_timer);
|
||||
card->tx_timer.function =
|
||||
(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
|
||||
card->tx_timer.data = (unsigned long) card;
|
||||
netdev->irq = card->pdev->irq;
|
||||
|
||||
card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
|
||||
|
@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops;
|
||||
|
||||
extern char spider_net_driver_name[];
|
||||
|
||||
#define SPIDER_NET_MAX_MTU 2308
|
||||
#define SPIDER_NET_MAX_FRAME 2312
|
||||
#define SPIDER_NET_MAX_MTU 2294
|
||||
#define SPIDER_NET_MIN_MTU 64
|
||||
|
||||
#define SPIDER_NET_RXBUF_ALIGN 128
|
||||
|
||||
#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64
|
||||
#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
|
||||
#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
|
||||
#define SPIDER_NET_RX_DESCRIPTORS_MAX 256
|
||||
#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
|
||||
|
||||
#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64
|
||||
#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
|
||||
#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
|
||||
#define SPIDER_NET_TX_DESCRIPTORS_MAX 256
|
||||
#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
|
||||
|
||||
#define SPIDER_NET_TX_TIMER 20
|
||||
|
||||
#define SPIDER_NET_RX_CSUM_DEFAULT 1
|
||||
|
||||
#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ
|
||||
#define SPIDER_NET_NAPI_WEIGHT 64
|
||||
#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
|
||||
#define SPIDER_NET_NAPI_WEIGHT 64
|
||||
|
||||
#define SPIDER_NET_FIRMWARE_LEN 1024
|
||||
#define SPIDER_NET_FIRMWARE_SEQS 6
|
||||
#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
|
||||
#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
|
||||
SPIDER_NET_FIRMWARE_SEQWORDS * \
|
||||
sizeof(u32))
|
||||
#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
|
||||
|
||||
/** spider_net SMMIO registers */
|
||||
@ -142,14 +149,12 @@ extern char spider_net_driver_name[];
|
||||
/** SCONFIG registers */
|
||||
#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
|
||||
|
||||
/** hardcoded register values */
|
||||
#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff
|
||||
#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff
|
||||
/** interrupt mask registers */
|
||||
#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
|
||||
#define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7
|
||||
/* no MAC aborts -> auto retransmission */
|
||||
#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1
|
||||
#define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1
|
||||
|
||||
/* clear counter when interrupt sources are cleared
|
||||
#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */
|
||||
/* we rely on flagged descriptor interrupts */
|
||||
#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
|
||||
/* set this first, then the FRAMENUM_VALUE */
|
||||
@ -168,7 +173,7 @@ extern char spider_net_driver_name[];
|
||||
#if 0
|
||||
#define SPIDER_NET_WOL_VALUE 0x00000000
|
||||
#endif
|
||||
#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8
|
||||
#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
|
||||
|
||||
/* pause frames: automatic, no upper retransmission count */
|
||||
/* outside loopback mode: ETOMOD signal dont matter, not connected */
|
||||
@ -318,6 +323,10 @@ enum spider_net_int2_status {
|
||||
#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
|
||||
(1 << SPIDER_NET_GRMFLLINT) )
|
||||
|
||||
#define SPIDER_NET_ERRINT ( 0xffffffff & \
|
||||
(~SPIDER_NET_TXINT) & \
|
||||
(~SPIDER_NET_RXINT) )
|
||||
|
||||
#define SPIDER_NET_GPREXEC 0x80000000
|
||||
#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
|
||||
|
||||
@ -358,9 +367,6 @@ enum spider_net_int2_status {
|
||||
/* descr ready, descr is in middle of chain, get interrupt on completion */
|
||||
#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
|
||||
|
||||
/* multicast is no problem */
|
||||
#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff
|
||||
|
||||
enum spider_net_descr_status {
|
||||
SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
|
||||
SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
|
||||
@ -373,9 +379,9 @@ enum spider_net_descr_status {
|
||||
|
||||
struct spider_net_descr {
|
||||
/* as defined by the hardware */
|
||||
dma_addr_t buf_addr;
|
||||
u32 buf_addr;
|
||||
u32 buf_size;
|
||||
dma_addr_t next_descr_addr;
|
||||
u32 next_descr_addr;
|
||||
u32 dmac_cmd_status;
|
||||
u32 result_size;
|
||||
u32 valid_size; /* all zeroes for tx */
|
||||
@ -384,7 +390,7 @@ struct spider_net_descr {
|
||||
|
||||
/* used in the driver */
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t bus_addr;
|
||||
u32 bus_addr;
|
||||
struct spider_net_descr *next;
|
||||
struct spider_net_descr *prev;
|
||||
} __attribute__((aligned(32)));
|
||||
@ -396,21 +402,21 @@ struct spider_net_descr_chain {
|
||||
};
|
||||
|
||||
/* descriptor data_status bits */
|
||||
#define SPIDER_NET_RXIPCHK 29
|
||||
#define SPIDER_NET_TCPUDPIPCHK 28
|
||||
#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \
|
||||
1 << SPIDER_NET_TCPUDPIPCHK)
|
||||
|
||||
#define SPIDER_NET_RX_IPCHK 29
|
||||
#define SPIDER_NET_RX_TCPCHK 28
|
||||
#define SPIDER_NET_VLAN_PACKET 21
|
||||
#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
|
||||
(1 << SPIDER_NET_RX_TCPCHK) )
|
||||
|
||||
/* descriptor data_error bits */
|
||||
#define SPIDER_NET_RXIPCHKERR 27
|
||||
#define SPIDER_NET_RXTCPCHKERR 26
|
||||
#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \
|
||||
1 << SPIDER_NET_RXTCPCHKERR)
|
||||
#define SPIDER_NET_RX_IPCHKERR 27
|
||||
#define SPIDER_NET_RX_RXTCPCHKERR 28
|
||||
|
||||
/* the cases we don't pass the packet to the stack */
|
||||
#define SPIDER_NET_DESTROY_RX_FLAGS 0x70138000
|
||||
#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
|
||||
|
||||
/* the cases we don't pass the packet to the stack.
|
||||
* 701b8000 would be correct, but every packets gets that flag */
|
||||
#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
|
||||
|
||||
#define SPIDER_NET_DESCR_SIZE 32
|
||||
|
||||
@ -445,13 +451,16 @@ struct spider_net_card {
|
||||
|
||||
struct spider_net_descr_chain tx_chain;
|
||||
struct spider_net_descr_chain rx_chain;
|
||||
spinlock_t chain_lock;
|
||||
atomic_t rx_chain_refill;
|
||||
atomic_t tx_chain_release;
|
||||
|
||||
struct net_device_stats netdev_stats;
|
||||
|
||||
struct spider_net_options options;
|
||||
|
||||
spinlock_t intmask_lock;
|
||||
struct tasklet_struct rxram_full_tl;
|
||||
struct timer_list tx_timer;
|
||||
|
||||
struct work_struct tx_timeout_task;
|
||||
atomic_t tx_timeout_task_counter;
|
||||
|
@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
spider_net_ethtool_get_tx_csum(struct net_device *netdev)
|
||||
{
|
||||
return (netdev->features & NETIF_F_HW_CSUM) != 0;
|
||||
}
|
||||
|
||||
static int
|
||||
spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
|
||||
{
|
||||
if (data)
|
||||
netdev->features |= NETIF_F_HW_CSUM;
|
||||
else
|
||||
netdev->features &= ~NETIF_F_HW_CSUM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct ethtool_ops spider_net_ethtool_ops = {
|
||||
.get_settings = spider_net_ethtool_get_settings,
|
||||
.get_drvinfo = spider_net_ethtool_get_drvinfo,
|
||||
@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = {
|
||||
.nway_reset = spider_net_ethtool_nway_reset,
|
||||
.get_rx_csum = spider_net_ethtool_get_rx_csum,
|
||||
.set_rx_csum = spider_net_ethtool_set_rx_csum,
|
||||
.get_tx_csum = spider_net_ethtool_get_tx_csum,
|
||||
.set_tx_csum = spider_net_ethtool_set_tx_csum,
|
||||
};
|
||||
|
||||
|
@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev,
|
||||
int channel = fwrq->m;
|
||||
/* We should do a better check than that,
|
||||
* based on the card capability !!! */
|
||||
if((channel < 1) || (channel > 16)) {
|
||||
if((channel < 1) || (channel > 14)) {
|
||||
printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
|
||||
rc = -EINVAL;
|
||||
} else {
|
||||
readConfigRid(local, 1);
|
||||
/* Yes ! We can set it !!! */
|
||||
local->config.channelSet = (u16)(channel - 1);
|
||||
local->config.channelSet = (u16) channel;
|
||||
set_bit (FLAG_COMMIT, &local->flags);
|
||||
}
|
||||
}
|
||||
@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev,
|
||||
{
|
||||
struct airo_info *local = dev->priv;
|
||||
StatusRid status_rid; /* Card status info */
|
||||
int ch;
|
||||
|
||||
readConfigRid(local, 1);
|
||||
if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
|
||||
@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev,
|
||||
else
|
||||
readStatusRid(local, &status_rid, 1);
|
||||
|
||||
#ifdef WEXT_USECHANNELS
|
||||
fwrq->m = ((int)status_rid.channel) + 1;
|
||||
fwrq->e = 0;
|
||||
#else
|
||||
{
|
||||
int f = (int)status_rid.channel;
|
||||
fwrq->m = frequency_list[f] * 100000;
|
||||
ch = (int)status_rid.channel;
|
||||
if((ch > 0) && (ch < 15)) {
|
||||
fwrq->m = frequency_list[ch - 1] * 100000;
|
||||
fwrq->e = 1;
|
||||
} else {
|
||||
fwrq->m = ch;
|
||||
fwrq->e = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev,
|
||||
/* If none, we may want to get the one that was set */
|
||||
|
||||
/* Push it out ! */
|
||||
dwrq->length = status_rid.SSIDlen + 1;
|
||||
dwrq->length = status_rid.SSIDlen;
|
||||
dwrq->flags = 1; /* active */
|
||||
|
||||
return 0;
|
||||
|
@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev,
|
||||
if (priv->new_SSID_size != 0) {
|
||||
memcpy(extra, priv->new_SSID, priv->new_SSID_size);
|
||||
extra[priv->new_SSID_size] = '\0';
|
||||
dwrq->length = priv->new_SSID_size + 1;
|
||||
dwrq->length = priv->new_SSID_size;
|
||||
} else {
|
||||
memcpy(extra, priv->SSID, priv->SSID_size);
|
||||
extra[priv->SSID_size] = '\0';
|
||||
dwrq->length = priv->SSID_size + 1;
|
||||
dwrq->length = priv->SSID_size;
|
||||
}
|
||||
|
||||
dwrq->flags = !priv->connect_to_any_BSS; /* active */
|
||||
|
@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE
|
||||
depends on HOSTAP
|
||||
---help---
|
||||
Configure Host AP driver to include support for firmware image
|
||||
download. Current version supports only downloading to volatile, i.e.,
|
||||
RAM memory. Flash upgrade is not yet supported.
|
||||
download. This option by itself only enables downloading to the
|
||||
volatile memory, i.e. the card RAM. This option is required to
|
||||
support cards that don't have firmware in flash, such as D-Link
|
||||
DWL-520 rev E and D-Link DWL-650 rev P.
|
||||
|
||||
Firmware image downloading needs user space tool, prism2_srec. It is
|
||||
available from http://hostap.epitest.fi/.
|
||||
Firmware image downloading needs a user space tool, prism2_srec.
|
||||
It is available from http://hostap.epitest.fi/.
|
||||
|
||||
config HOSTAP_FIRMWARE_NVRAM
|
||||
bool "Support for non-volatile firmware download"
|
||||
depends on HOSTAP_FIRMWARE
|
||||
---help---
|
||||
Allow Host AP driver to write firmware images to the non-volatile
|
||||
card memory, i.e. flash memory that survives power cycling.
|
||||
Enable this option if you want to be able to change card firmware
|
||||
permanently.
|
||||
|
||||
Firmware image downloading needs a user space tool, prism2_srec.
|
||||
It is available from http://hostap.epitest.fi/.
|
||||
|
||||
config HOSTAP_PLX
|
||||
tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
|
||||
|
@ -1,4 +1,5 @@
|
||||
hostap-y := hostap_main.o
|
||||
hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
|
||||
hostap_ioctl.o hostap_main.o hostap_proc.o
|
||||
obj-$(CONFIG_HOSTAP) += hostap.o
|
||||
|
||||
obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
|
||||
|
@ -1,6 +1,15 @@
|
||||
#ifndef HOSTAP_H
|
||||
#define HOSTAP_H
|
||||
|
||||
#include <linux/ethtool.h>
|
||||
|
||||
#include "hostap_wlan.h"
|
||||
#include "hostap_ap.h"
|
||||
|
||||
static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
|
||||
2447, 2452, 2457, 2462, 2467, 2472, 2484 };
|
||||
#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
|
||||
|
||||
/* hostap.c */
|
||||
|
||||
extern struct proc_dir_entry *hostap_proc;
|
||||
@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev);
|
||||
int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
|
||||
u8 *body, size_t bodylen);
|
||||
int prism2_sta_deauth(local_info_t *local, u16 reason);
|
||||
int prism2_wds_add(local_info_t *local, u8 *remote_addr,
|
||||
int rtnl_locked);
|
||||
int prism2_wds_del(local_info_t *local, u8 *remote_addr,
|
||||
int rtnl_locked, int do_not_remove);
|
||||
|
||||
|
||||
/* hostap_ap.c */
|
||||
|
||||
int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
|
||||
int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
|
||||
void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
|
||||
int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
|
||||
void ap_control_kickall(struct ap_data *ap);
|
||||
void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
|
||||
struct ieee80211_crypt_data ***crypt);
|
||||
int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
|
||||
struct iw_quality qual[], int buf_size,
|
||||
int aplist);
|
||||
int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
|
||||
int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
|
||||
|
||||
|
||||
/* hostap_proc.c */
|
||||
@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local);
|
||||
void hostap_info_process(local_info_t *local, struct sk_buff *skb);
|
||||
|
||||
|
||||
/* hostap_ioctl.c */
|
||||
|
||||
extern const struct iw_handler_def hostap_iw_handler_def;
|
||||
extern struct ethtool_ops prism2_ethtool_ops;
|
||||
|
||||
int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
|
||||
|
||||
|
||||
#endif /* HOSTAP_H */
|
||||
|
@ -1,6 +1,9 @@
|
||||
#ifndef HOSTAP_80211_H
|
||||
#define HOSTAP_80211_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <net/ieee80211_crypt.h>
|
||||
|
||||
struct hostap_ieee80211_mgmt {
|
||||
u16 frame_control;
|
||||
u16 duration;
|
||||
|
@ -1,7 +1,18 @@
#include <linux/etherdevice.h>
#include <net/ieee80211_crypt.h>

#include "hostap_80211.h"
#include "hostap.h"
#include "hostap_ap.h"

/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */

void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)

@ -1,3 +1,18 @@
#include "hostap_80211.h"
#include "hostap_common.h"
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"

/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */

void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
{
struct ieee80211_hdr_4addr *hdr;

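Aside, not part of the diff above: the two SNAP headers moved into these files implement the IEEE 802.1H / RFC 1042 encapsulation rules named in the comments. A minimal sketch of how a TX path typically chooses between them, given an already-parsed EtherType; the helper name is hypothetical and the constants come from linux/if_ether.h:

/* Illustrative sketch only, not code from this patch. */
static const unsigned char *snap_header_for(unsigned short ethertype)
{
	if (ethertype < 0x600)			/* 802.3 length field: no SNAP */
		return NULL;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX)
		return bridge_tunnel_header;	/* Bridge-Tunnel encapsulation */
	return rfc1042_header;			/* RFC 1042 for everything else */
}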
@ -16,6 +16,14 @@
* (8802.11: 5.5)
*/

#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/random.h>

#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"

static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
DEF_INTS };
module_param_array(other_ap_policy, int, NULL, 0444);
@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
}


static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
u8 *mac)
int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
{
struct mac_entry *entry;

@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
}


static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
u8 *mac)
int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
{
struct list_head *ptr;
struct mac_entry *entry;
@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
}


static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
{
struct list_head *ptr, *n;
struct mac_entry *entry;
@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
}


static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
u8 *mac)
int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
{
struct sta_info *sta;
u16 resp;
@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */


static void ap_control_kickall(struct ap_data *ap)
void ap_control_kickall(struct ap_data *ap)
{
struct list_head *ptr, *n;
struct sta_info *sta;
@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
}


static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
struct iw_quality qual[], int buf_size,
int aplist)
int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
struct iw_quality qual[], int buf_size,
int aplist)
{
struct ap_data *ap = local->ap;
struct list_head *ptr;
@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],

/* Translate our list of Access Points & Stations to a card independant
* format that the Wireless Tools will understand - Jean II */
static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
{
struct hostap_interface *iface;
local_info_t *local;
@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
}


static int prism2_hostapd(struct ap_data *ap,
struct prism2_hostapd_param *param)
int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
{
switch (param->cmd) {
case PRISM2_HOSTAPD_FLUSH:
@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local)
}


static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
struct ieee80211_crypt_data ***crypt)
void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
struct ieee80211_crypt_data ***crypt)
{
struct sta_info *sta;

@ -1,6 +1,8 @@
#ifndef HOSTAP_AP_H
#define HOSTAP_AP_H

#include "hostap_80211.h"

/* AP data structures for STAs */

/* maximum number of frames to buffer per STA */

@ -1,6 +1,9 @@
#ifndef HOSTAP_COMMON_H
#define HOSTAP_COMMON_H

#include <linux/types.h>
#include <linux/if_ether.h>

#define BIT(x) (1 << (x))

#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]

@ -21,15 +21,10 @@
#define PRISM2_DOWNLOAD_SUPPORT
#endif

#ifdef PRISM2_DOWNLOAD_SUPPORT
/* Allow writing firmware images into flash, i.e., to non-volatile storage.
* Before you enable this option, you should make absolutely sure that you are
* using prism2_srec utility that comes with THIS version of the driver!
* In addition, please note that it is possible to kill your card with
* non-volatile download if you are using incorrect image. This feature has not
* been fully tested, so please be careful with it. */
/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
#endif /* PRISM2_DOWNLOAD_SUPPORT */
/* Allow kernel configuration to enable non-volatile download support. */
#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
#define PRISM2_NON_VOLATILE_DOWNLOAD
#endif

/* Save low-level I/O for debugging. This should not be enabled in normal use.
*/

@ -1,5 +1,8 @@
/* Host AP driver Info Frame processing (part of hostap.o module) */

#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"

/* Called only as a tasklet (software IRQ) */
static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,

@ -1,11 +1,13 @@
/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */

#ifdef in_atomic
/* Get kernel_locked() for in_atomic() */
#include <linux/types.h>
#include <linux/smp_lock.h>
#endif
#include <linux/ethtool.h>
#include <net/ieee80211_crypt.h>

#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"

static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
{
@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
local->sta_fw_ver & 0xff);
}

static struct ethtool_ops prism2_ethtool_ops = {
struct ethtool_ops prism2_ethtool_ops = {
.get_drvinfo = prism2_get_drvinfo
};

@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] =
(iw_handler) prism2_ioctl_priv_readmif, /* 3 */
};

static const struct iw_handler_def hostap_iw_handler_def =
const struct iw_handler_def hostap_iw_handler_def =
{
.num_standard = sizeof(prism2_handler) / sizeof(iw_handler),
.num_private = sizeof(prism2_private_handler) / sizeof(iw_handler),

@ -24,6 +24,7 @@
#include <linux/kmod.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <net/iw_handler.h>
#include <net/ieee80211.h>
#include <net/ieee80211_crypt.h>
@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION);
#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))


/* hostap.c */
static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
int rtnl_locked);
static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
int rtnl_locked, int do_not_remove);

/* hostap_ap.c */
static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
struct iw_quality qual[], int buf_size,
int aplist);
static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
static int prism2_hostapd(struct ap_data *ap,
struct prism2_hostapd_param *param);
static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
struct ieee80211_crypt_data ***crypt);
static void ap_control_kickall(struct ap_data *ap);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
u8 *mac);
static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
u8 *mac);
static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
u8 *mac);
#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */


static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484 };
#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))


/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */


/* FIX: these could be compiled separately and linked together to hostap.o */
#include "hostap_ap.c"
#include "hostap_info.c"
#include "hostap_ioctl.c"
#include "hostap_proc.c"
#include "hostap_80211_rx.c"
#include "hostap_80211_tx.c"


struct net_device * hostap_add_interface(struct local_info *local,
int type, int rtnl_locked,
const char *prefix,
@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr)
}


static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
int rtnl_locked)
int prism2_wds_add(local_info_t *local, u8 *remote_addr,
int rtnl_locked)
{
struct net_device *dev;
struct list_head *ptr;
@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
}


static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
int rtnl_locked, int do_not_remove)
int prism2_wds_del(local_info_t *local, u8 *remote_addr,
int rtnl_locked, int do_not_remove)
{
unsigned long flags;
struct list_head *ptr;

@ -1,5 +1,12 @@
/* /proc routines for Host AP driver */

#include <linux/types.h>
#include <linux/proc_fs.h>
#include <net/ieee80211_crypt.h>

#include "hostap_wlan.h"
#include "hostap.h"

#define PROC_LIMIT (PAGE_SIZE - 80)

@ -1,6 +1,10 @@
#ifndef HOSTAP_WLAN_H
#define HOSTAP_WLAN_H

#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <net/iw_handler.h>

#include "hostap_config.h"
#include "hostap_common.h"

@ -5735,70 +5735,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev)
return &priv->ieee->stats;
}

#if WIRELESS_EXT < 18
/* Support for wpa_supplicant before WE-18, deprecated. */

/* following definitions must match definitions in driver_ipw.c */

#define IPW2100_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30

#define IPW2100_CMD_SET_WPA_PARAM 1
#define IPW2100_CMD_SET_WPA_IE 2
#define IPW2100_CMD_SET_ENCRYPTION 3
#define IPW2100_CMD_MLME 4

#define IPW2100_PARAM_WPA_ENABLED 1
#define IPW2100_PARAM_TKIP_COUNTERMEASURES 2
#define IPW2100_PARAM_DROP_UNENCRYPTED 3
#define IPW2100_PARAM_PRIVACY_INVOKED 4
#define IPW2100_PARAM_AUTH_ALGS 5
#define IPW2100_PARAM_IEEE_802_1X 6

#define IPW2100_MLME_STA_DEAUTH 1
#define IPW2100_MLME_STA_DISASSOC 2

#define IPW2100_CRYPT_ERR_UNKNOWN_ALG 2
#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR 3
#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED 4
#define IPW2100_CRYPT_ERR_KEY_SET_FAILED 5
#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED 6
#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED 7

#define IPW2100_CRYPT_ALG_NAME_LEN 16

struct ipw2100_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
union {
struct {
u8 name;
u32 value;
} wpa_param;
struct {
u32 len;
u8 reserved[32];
u8 data[0];
} wpa_ie;
struct {
u32 command;
u32 reason_code;
} mlme;
struct {
u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
u8 set_tx;
u32 err;
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
u8 key[0];
} crypt;

} u;
};

/* end of driver_ipw.c code */
#endif /* WIRELESS_EXT < 18 */

static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
{
/* This is called when wpa_supplicant loads and closes the driver
@ -5807,11 +5743,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
return 0;
}

#if WIRELESS_EXT < 18
#define IW_AUTH_ALG_OPEN_SYSTEM 0x1
#define IW_AUTH_ALG_SHARED_KEY 0x2
#endif

static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
{

@ -5855,360 +5786,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
ipw2100_set_wpa_ie(priv, &frame, 0);
}

#if WIRELESS_EXT < 18
static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value)
{
struct ipw2100_priv *priv = ieee80211_priv(dev);
struct ieee80211_crypt_data *crypt;
unsigned long flags;
int ret = 0;

switch (name) {
case IPW2100_PARAM_WPA_ENABLED:
ret = ipw2100_wpa_enable(priv, value);
break;

case IPW2100_PARAM_TKIP_COUNTERMEASURES:
crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
break;

flags = crypt->ops->get_flags(crypt->priv);

if (value)
flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
else
flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;

crypt->ops->set_flags(flags, crypt->priv);

break;

case IPW2100_PARAM_DROP_UNENCRYPTED:{
/* See IW_AUTH_DROP_UNENCRYPTED handling for details */
struct ieee80211_security sec = {
.flags = SEC_ENABLED,
.enabled = value,
};
priv->ieee->drop_unencrypted = value;
/* We only change SEC_LEVEL for open mode. Others
* are set by ipw_wpa_set_encryption.
*/
if (!value) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_0;
} else {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
}
if (priv->ieee->set_security)
priv->ieee->set_security(priv->ieee->dev, &sec);
break;
}

case IPW2100_PARAM_PRIVACY_INVOKED:
priv->ieee->privacy_invoked = value;
break;

case IPW2100_PARAM_AUTH_ALGS:
ret = ipw2100_wpa_set_auth_algs(priv, value);
break;

case IPW2100_PARAM_IEEE_802_1X:
priv->ieee->ieee802_1x = value;
break;

default:
printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
dev->name, name);
ret = -EOPNOTSUPP;
}

return ret;
}

static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason)
{

struct ipw2100_priv *priv = ieee80211_priv(dev);
int ret = 0;

switch (command) {
case IPW2100_MLME_STA_DEAUTH:
// silently ignore
break;

case IPW2100_MLME_STA_DISASSOC:
ipw2100_disassociate_bssid(priv);
break;

default:
printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
dev->name, command);
ret = -EOPNOTSUPP;
}

return ret;
}

static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
struct ipw2100_param *param, int plen)
{

struct ipw2100_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee;
u8 *buf;

if (!ieee->wpa_enabled)
return -EOPNOTSUPP;

if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
(param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
return -EINVAL;

if (param->u.wpa_ie.len) {
buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;

memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);

kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;

} else {
kfree(ieee->wpa_ie);
ieee->wpa_ie = NULL;
ieee->wpa_ie_len = 0;
}

ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);

return 0;
}

/* implementation borrowed from hostap driver */

static int ipw2100_wpa_set_encryption(struct net_device *dev,
struct ipw2100_param *param,
int param_len)
{
int ret = 0;
struct ipw2100_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee;
struct ieee80211_crypto_ops *ops;
struct ieee80211_crypt_data **crypt;

struct ieee80211_security sec = {
.flags = 0,
};

param->u.crypt.err = 0;
param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';

if (param_len !=
(int)((char *)param->u.crypt.key - (char *)param) +
param->u.crypt.key_len) {
IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
param->u.crypt.key_len);
return -EINVAL;
}
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
if (param->u.crypt.idx >= WEP_KEYS)
return -EINVAL;
crypt = &ieee->crypt[param->u.crypt.idx];
} else {
return -EINVAL;
}

sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
if (strcmp(param->u.crypt.alg, "none") == 0) {
if (crypt) {
sec.enabled = 0;
sec.encrypt = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
ieee80211_crypt_delayed_deinit(ieee, crypt);
}
goto done;
}
sec.enabled = 1;
sec.encrypt = 1;

ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
request_module("ieee80211_crypt_wep");
ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
} else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
request_module("ieee80211_crypt_tkip");
ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
} else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
request_module("ieee80211_crypt_ccmp");
ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
}
if (ops == NULL) {
IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
dev->name, param->u.crypt.alg);
param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
ret = -EINVAL;
goto done;
}

if (*crypt == NULL || (*crypt)->ops != ops) {
struct ieee80211_crypt_data *new_crypt;

ieee80211_crypt_delayed_deinit(ieee, crypt);

new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
new_crypt->ops = ops;
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv =
new_crypt->ops->init(param->u.crypt.idx);

if (new_crypt->priv == NULL) {
kfree(new_crypt);
param->u.crypt.err =
IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
ret = -EINVAL;
goto done;
}

*crypt = new_crypt;
}

if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
(*crypt)->ops->set_key(param->u.crypt.key,
param->u.crypt.key_len, param->u.crypt.seq,
(*crypt)->priv) < 0) {
IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
ret = -EINVAL;
goto done;
}

if (param->u.crypt.set_tx) {
ieee->tx_keyidx = param->u.crypt.idx;
sec.active_key = param->u.crypt.idx;
sec.flags |= SEC_ACTIVE_KEY;
}

if (ops->name != NULL) {

if (strcmp(ops->name, "WEP") == 0) {
memcpy(sec.keys[param->u.crypt.idx],
param->u.crypt.key, param->u.crypt.key_len);
sec.key_sizes[param->u.crypt.idx] =
param->u.crypt.key_len;
sec.flags |= (1 << param->u.crypt.idx);
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
} else if (strcmp(ops->name, "TKIP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
} else if (strcmp(ops->name, "CCMP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
}
done:
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);

/* Do not reset port if card is in Managed mode since resetting will
* generate new IEEE 802.11 authentication which may end up in looping
* with IEEE 802.1X. If your hardware requires a reset after WEP
* configuration (for example... Prism2), implement the reset_port in
* the callbacks structures used to initialize the 802.11 stack. */
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
return -EINVAL;
}

return ret;
}

static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p)
{

struct ipw2100_param *param;
int ret = 0;

IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);

if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
return -EINVAL;

param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;

if (copy_from_user(param, p->pointer, p->length)) {
kfree(param);
return -EFAULT;
}

switch (param->cmd) {

case IPW2100_CMD_SET_WPA_PARAM:
ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
param->u.wpa_param.value);
break;

case IPW2100_CMD_SET_WPA_IE:
ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
break;

case IPW2100_CMD_SET_ENCRYPTION:
ret = ipw2100_wpa_set_encryption(dev, param, p->length);
break;

case IPW2100_CMD_MLME:
ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
param->u.mlme.reason_code);
break;

default:
printk(KERN_ERR DRV_NAME
": %s: Unknown WPA supplicant request: %d\n", dev->name,
param->cmd);
ret = -EOPNOTSUPP;

}

if (ret == 0 && copy_to_user(p->pointer, param, p->length))
ret = -EFAULT;

kfree(param);
return ret;
}

static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct iwreq *wrq = (struct iwreq *)rq;
int ret = -1;
switch (cmd) {
case IPW2100_IOCTL_WPA_SUPPLICANT:
ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
return ret;

default:
return -EOPNOTSUPP;
}

return -EOPNOTSUPP;
}
#endif /* WIRELESS_EXT < 18 */

static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@ -6337,9 +5914,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
dev->open = ipw2100_open;
dev->stop = ipw2100_close;
dev->init = ipw2100_net_init;
#if WIRELESS_EXT < 18
dev->do_ioctl = ipw2100_ioctl;
#endif
dev->get_stats = ipw2100_stats;
dev->ethtool_ops = &ipw2100_ethtool_ops;
dev->tx_timeout = ipw2100_tx_timeout;
@ -7855,7 +7429,6 @@ static int ipw2100_wx_get_power(struct net_device *dev,
return 0;
}

#if WIRELESS_EXT > 17
/*
* WE-18 WPA support
*/
@ -8117,7 +7690,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
}
return 0;
}
#endif /* WIRELESS_EXT > 17 */

/*
*
@ -8350,11 +7922,7 @@ static iw_handler ipw2100_wx_handlers[] = {
NULL, /* SIOCWIWTHRSPY */
ipw2100_wx_set_wap, /* SIOCSIWAP */
ipw2100_wx_get_wap, /* SIOCGIWAP */
#if WIRELESS_EXT > 17
ipw2100_wx_set_mlme, /* SIOCSIWMLME */
#else
NULL, /* -- hole -- */
#endif
NULL, /* SIOCGIWAPLIST -- deprecated */
ipw2100_wx_set_scan, /* SIOCSIWSCAN */
ipw2100_wx_get_scan, /* SIOCGIWSCAN */
@ -8378,7 +7946,6 @@ static iw_handler ipw2100_wx_handlers[] = {
ipw2100_wx_get_encode, /* SIOCGIWENCODE */
ipw2100_wx_set_power, /* SIOCSIWPOWER */
ipw2100_wx_get_power, /* SIOCGIWPOWER */
#if WIRELESS_EXT > 17
NULL, /* -- hole -- */
NULL, /* -- hole -- */
ipw2100_wx_set_genie, /* SIOCSIWGENIE */
@ -8388,7 +7955,6 @@ static iw_handler ipw2100_wx_handlers[] = {
ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */
ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */
NULL, /* SIOCSIWPMKSA */
#endif
};

#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV

@ -8936,14 +8936,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
IPW_DEBUG_HC("starting request direct scan!\n");

if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
err = wait_event_interruptible(priv->wait_state,
!(priv->
status & (STATUS_SCANNING |
STATUS_SCAN_ABORTING)));
if (err) {
IPW_DEBUG_HC("aborting direct scan");
goto done;
}
/* We should not sleep here; otherwise we will block most
* of the system (for instance, we hold rtnl_lock when we
* get here).
*/
err = -EAGAIN;
goto done;
}
memset(&scan, 0, sizeof(scan));

@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
if (essid->length) {
dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */
/* if it is to big, trunk it */
dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1);
dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length);
} else {
dwrq->flags = 0;
dwrq->length = 0;

@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
#endif

newskb->dev = skb->dev;
dev_kfree_skb(skb);
dev_kfree_skb_irq(skb);
skb = newskb;
}
}

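Aside, not part of the diff above: the islpci_eth hunk swaps dev_kfree_skb() for dev_kfree_skb_irq() because this TX path can run with interrupts disabled, where plain dev_kfree_skb() must not be used. A minimal hedged sketch of the general rule; the wrapper free_tx_skb() is hypothetical and is essentially what dev_kfree_skb_any() already provides:

/* Illustrative sketch only. In hard-IRQ or IRQs-disabled context the skb
 * must be queued for deferred freeing instead of being freed directly. */
static void free_tx_skb(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);	/* defer the free to softirq time */
	else
		dev_kfree_skb(skb);	/* safe in normal process/softirq context */
}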
@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev,
extra[IW_ESSID_MAX_SIZE] = '\0';

/* Push it out ! */
dwrq->length = strlen(extra) + 1;
dwrq->length = strlen(extra);
dwrq->flags = 1; /* active */

return 0;

@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev,
extra[IW_ESSID_MAX_SIZE] = '\0';

/* Set the length */
wrqu->data.length = strlen(extra) + 1;
wrqu->data.length = strlen(extra);

return 0;
}

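Aside, not part of the diff above: the prism54, ray_cs and wavelan hunks all drop the "+ 1" so that the ESSID length reported through Wireless Extensions no longer counts a trailing NUL; the ESSID is treated as raw bytes rather than a C string. A minimal hedged sketch of that convention in a GIWESSID-style handler; report_essid() is a hypothetical helper and the essid buffer is assumed to be at most IW_ESSID_MAX_SIZE bytes:

/* Illustrative sketch only: report the ESSID length without the trailing '\0'. */
static void report_essid(char *extra, struct iw_point *dwrq, const char *essid)
{
	size_t len = strlen(essid);	/* bytes of the ESSID itself */

	memcpy(extra, essid, len);	/* the NUL is neither copied nor counted */
	dwrq->length = len;
	dwrq->flags = 1;		/* ESSID is active */
}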
@ -25,6 +25,7 @@

#include <linux/types.h>
#include <linux/list.h>
#include <net/ieee80211.h>
#include <asm/atomic.h>

enum {

@ -327,7 +327,7 @@ struct iw_handler_def
__u16 num_private_args;

/* Array of handlers for standard ioctls
* We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME]
* We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT]
*/
const iw_handler * standard;
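Aside, not part of the diff above: the iw_handler.h comment fix matches how Wireless Extensions actually dispatch standard ioctls, indexing the standard[] table with the ioctl number minus the first standard ioctl (SIOCSIWCOMMIT), not SIOCSIWNAME. A minimal hedged sketch of that lookup; the wrapper get_std_handler() is hypothetical:

/* Illustrative sketch only: look up the handler for a standard WE ioctl. */
static iw_handler get_std_handler(struct net_device *dev, unsigned int cmd)
{
	const struct iw_handler_def *def = dev->wireless_handlers;
	unsigned int idx = cmd - SIOCSIWCOMMIT;	/* table starts at SIOCSIWCOMMIT */

	if (!def || idx >= def->num_standard)
		return NULL;			/* not a supported standard ioctl */
	return def->standard[idx];
}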