diff --git a/.gitignore b/.gitignore index 27fd37621255..b1f5b9df2ae1 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,11 @@ include/config include/linux/autoconf.h include/linux/compile.h include/linux/version.h +include/linux/utsrelease.h # stgit generated dirs patches-* + +# quilt's files +patches +series diff --git a/CREDITS b/CREDITS index 29be6d1fdf49..0fe904ebb7c7 100644 --- a/CREDITS +++ b/CREDITS @@ -2209,7 +2209,7 @@ S: (address available on request) S: USA N: Ian McDonald -E: iam4@cs.waikato.ac.nz +E: ian.mcdonald@jandi.co.nz E: imcdnzl@gmail.com W: http://wand.net.nz/~iam4 W: http://imcdnzl.blogspot.com diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl index 1ae4dc0fd856..f8fe882e33dc 100644 --- a/Documentation/DocBook/kernel-api.tmpl +++ b/Documentation/DocBook/kernel-api.tmpl @@ -58,6 +58,9 @@ !Iinclude/linux/ktime.h !Iinclude/linux/hrtimer.h !Ekernel/hrtimer.c + + Workqueues and Kevents +!Ekernel/workqueue.c Internal Functions !Ikernel/exit.c @@ -300,7 +303,7 @@ X!Ekernel/module.c Resources Management -!Ekernel/resource.c +!Ikernel/resource.c MTRR Handling @@ -312,9 +315,7 @@ X!Ekernel/module.c !Edrivers/pci/pci-driver.c !Edrivers/pci/remove.c !Edrivers/pci/pci-acpi.c - +!Edrivers/pci/search.c !Edrivers/pci/msi.c !Edrivers/pci/bus.c size-4096 slab */ @@ -3708,7 +3708,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, #define E1000_CB_LENGTH 256 if (length < E1000_CB_LENGTH) { struct sk_buff *new_skb = - dev_alloc_skb(length + NET_IP_ALIGN); + netdev_alloc_skb(netdev, length + NET_IP_ALIGN); if (new_skb) { skb_reserve(new_skb, NET_IP_ALIGN); new_skb->dev = netdev; @@ -3979,7 +3979,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, while (cleaned_count--) { if (!(skb = buffer_info->skb)) - skb = dev_alloc_skb(bufsz); + skb = netdev_alloc_skb(netdev, bufsz); else { skb_trim(skb, 0); goto map_skb; @@ -3997,7 +3997,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " "at %p\n", bufsz, skb->data); /* Try again, without freeing the previous */ - skb = dev_alloc_skb(bufsz); + skb = netdev_alloc_skb(netdev, bufsz); /* Failed allocation, critical failure */ if (!skb) { dev_kfree_skb(oldskb); @@ -4121,7 +4121,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, rx_desc->read.buffer_addr[j+1] = ~0; } - skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); + skb = netdev_alloc_skb(netdev, + adapter->rx_ps_bsize0 + NET_IP_ALIGN); if (unlikely(!skb)) { adapter->alloc_rx_buff_failed++; @@ -4385,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) pci_write_config_word(adapter->pdev, reg, *value); } +#if 0 uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port) { return inl(port); } +#endif /* 0 */ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c index e5c5cd2a2712..e4e733a380e3 100644 --- a/drivers/net/e2100.c +++ b/drivers/net/e2100.c @@ -425,8 +425,8 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. 
*/ -int -init_module(void) + +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 20d31430c74f..8dc61d65dd23 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1807,8 +1807,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); -int -init_module(void) +int __init init_module(void) { struct net_device *dev; int i; diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 33291bcf6d4c..0701c1d810ca 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL"); * are specified, we verify and then use them. If no parameters are given, we * autoprobe for one card only. */ -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c index 6b0ab1eac3fb..fd7b32a24ea4 100644 --- a/drivers/net/es3210.c +++ b/drivers/net/es3210.c @@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)"); MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); MODULE_LICENSE("GPL"); -int -init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 4bf76f86d8e9..ca42efa9143c 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto, module_param(debug, int, 0); MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); -int init_module(void) +int __init init_module(void) { int this_dev, found = 0; struct net_device *dev; diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 97d34fee8c1f..567e27413cfd 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; #include /* These identify the driver base version and may not be removed. */ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile index d6dd3f2fb43e..02d4dc18ba69 100644 --- a/drivers/net/fs_enet/Makefile +++ b/drivers/net/fs_enet/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_FS_ENET) += fs_enet.o -obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o -obj-$(CONFIG_8260) += mac-fcc.o +obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o +obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o -fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o +fs_enet-objs := fs_enet-main.o diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h new file mode 100644 index 000000000000..e980527e2b99 --- /dev/null +++ b/drivers/net/fs_enet/fec.h @@ -0,0 +1,42 @@ +#ifndef FS_ENET_FEC_H +#define FS_ENET_FEC_H + +/* CRC polynomium used by the FEC for the multicast group filtering */ +#define FEC_CRC_POLY 0x04C11DB7 + +#define FEC_MAX_MULTICAST_ADDRS 64 + +/* Interrupt events/masks. 
+*/ +#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ +#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ +#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ +#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ +#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ +#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ +#define FEC_ENET_RXF 0x02000000U /* Full frame received */ +#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ +#define FEC_ENET_MII 0x00800000U /* MII interrupt */ +#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ + +#define FEC_ECNTRL_PINMUX 0x00000004 +#define FEC_ECNTRL_ETHER_EN 0x00000002 +#define FEC_ECNTRL_RESET 0x00000001 + +#define FEC_RCNTRL_BC_REJ 0x00000010 +#define FEC_RCNTRL_PROM 0x00000008 +#define FEC_RCNTRL_MII_MODE 0x00000004 +#define FEC_RCNTRL_DRT 0x00000002 +#define FEC_RCNTRL_LOOP 0x00000001 + +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_GTS 0x00000001 + + + +/* + * Delay to wait for FEC reset command to complete (in us) + */ +#define FEC_RESET_DELAY 50 +#endif diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index f6abff5846b3..df62506a1787 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq) (*fep->ops->post_free_irq)(dev, irq); } -/**********************************************************************************/ - -/* This interrupt occurs when the PHY detects a link change. */ -static irqreturn_t -fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs) -{ - struct net_device *dev = dev_id; - struct fs_enet_private *fep; - const struct fs_platform_info *fpi; - - fep = netdev_priv(dev); - fpi = fep->fpi; - - /* - * Acknowledge the interrupt if possible. If we have not - * found the PHY yet we can't process or acknowledge the - * interrupt now. Instead we ignore this interrupt for now, - * which we can do since it is edge triggered. It will be - * acknowledged later by fs_enet_open(). 
- */ - if (!fep->phy) - return IRQ_NONE; - - fs_mii_ack_int(dev); - fs_mii_link_status_change_check(dev, 0); - - return IRQ_HANDLED; -} - static void fs_timeout(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); @@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev) spin_lock_irqsave(&fep->lock, flags); if (dev->flags & IFF_UP) { + phy_stop(fep->phydev); (*fep->ops->stop)(dev); (*fep->ops->restart)(dev); + phy_start(fep->phydev); } + phy_start(fep->phydev); wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); spin_unlock_irqrestore(&fep->lock, flags); @@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev) netif_wake_queue(dev); } +/*----------------------------------------------------------------------------- + * generic link-change handler - should be sufficient for most cases + *-----------------------------------------------------------------------------*/ +static void generic_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; + int new_state = 0; + + if (phydev->link) { + + /* adjust to duplex mode */ + if (phydev->duplex != fep->oldduplex){ + new_state = 1; + fep->oldduplex = phydev->duplex; + } + + if (phydev->speed != fep->oldspeed) { + new_state = 1; + fep->oldspeed = phydev->speed; + } + + if (!fep->oldlink) { + new_state = 1; + fep->oldlink = 1; + netif_schedule(dev); + netif_carrier_on(dev); + netif_start_queue(dev); + } + + if (new_state) + fep->ops->restart(dev); + + } else if (fep->oldlink) { + new_state = 1; + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + netif_carrier_off(dev); + netif_stop_queue(dev); + } + + if (new_state && netif_msg_link(fep)) + phy_print_status(phydev); +} + + +static void fs_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&fep->lock, flags); + + if(fep->ops->adjust_link) + fep->ops->adjust_link(dev); + else + generic_adjust_link(dev); + + spin_unlock_irqrestore(&fep->lock, flags); +} + +static int fs_init_phy(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev; + + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + if(fep->fpi->bus_id) + phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0); + else { + printk("No phy bus ID specified in BSP code\n"); + return -EINVAL; + } + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + fep->phydev = phydev; + + return 0; +} + + static int fs_enet_open(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; int r; + int err; /* Install our interrupt handler. 
*/ r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); if (r != 0) { printk(KERN_ERR DRV_MODULE_NAME - ": %s Could not allocate FEC IRQ!", dev->name); + ": %s Could not allocate FS_ENET IRQ!", dev->name); return -EINVAL; } - /* Install our phy interrupt handler */ - if (fpi->phy_irq != -1) { + err = fs_init_phy(dev); + if(err) + return err; - r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt); - if (r != 0) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s Could not allocate PHY IRQ!", dev->name); - fs_free_irq(dev, fep->interrupt); - return -EINVAL; - } - } - - fs_mii_startup(dev); - netif_carrier_off(dev); - fs_mii_link_status_change_check(dev, 1); + phy_start(fep->phydev); return 0; } @@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev) static int fs_enet_close(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; unsigned long flags; netif_stop_queue(dev); netif_carrier_off(dev); - fs_mii_shutdown(dev); + phy_stop(fep->phydev); spin_lock_irqsave(&fep->lock, flags); (*fep->ops->stop)(dev); spin_unlock_irqrestore(&fep->lock, flags); /* release any irqs */ - if (fpi->phy_irq != -1) - fs_free_irq(dev, fpi->phy_irq); + phy_disconnect(fep->phydev); + fep->phydev = NULL; fs_free_irq(dev, fep->interrupt); return 0; @@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct fs_enet_private *fep = netdev_priv(dev); - unsigned long flags; - int rc; - - spin_lock_irqsave(&fep->lock, flags); - rc = mii_ethtool_gset(&fep->mii_if, cmd); - spin_unlock_irqrestore(&fep->lock, flags); - - return rc; + return phy_ethtool_gset(fep->phydev, cmd); } static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct fs_enet_private *fep = netdev_priv(dev); - unsigned long flags; - int rc; - - spin_lock_irqsave(&fep->lock, flags); - rc = mii_ethtool_sset(&fep->mii_if, cmd); - spin_unlock_irqrestore(&fep->lock, flags); - - return rc; + phy_ethtool_sset(fep->phydev, cmd); + return 0; } static int fs_nway_reset(struct net_device *dev) { - struct fs_enet_private *fep = netdev_priv(dev); - return mii_nway_restart(&fep->mii_if); + return 0; } static u32 fs_get_msglevel(struct net_device *dev) @@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EINVAL; spin_lock_irqsave(&fep->lock, flags); - rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); + rc = phy_mii_ioctl(fep->phydev, mii, cmd); spin_unlock_irqrestore(&fep->lock, flags); return rc; } @@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev, } registered = 1; - err = fs_mii_connect(ndev); - if (err != 0) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s fs_mii_connect failed.\n", ndev->name); - goto err; - } return ndev; @@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev) fpi = fep->fpi; - fs_mii_disconnect(ndev); - unregister_netdev(ndev); dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), @@ -1196,17 +1225,39 @@ static int __init fs_init(void) r = setup_immap(); if (r != 0) return r; - r = driver_register(&fs_enet_fec_driver); - if (r != 0) - goto err; +#ifdef CONFIG_FS_ENET_HAS_FCC + /* let's insert mii stuff */ + r = fs_enet_mdio_bb_init(); + + if (r != 0) { + printk(KERN_ERR DRV_MODULE_NAME + "BB PHY init failed.\n"); + return r; + } r = 
driver_register(&fs_enet_fcc_driver); if (r != 0) goto err; +#endif +#ifdef CONFIG_FS_ENET_HAS_FEC + r = fs_enet_mdio_fec_init(); + if (r != 0) { + printk(KERN_ERR DRV_MODULE_NAME + "FEC PHY init failed.\n"); + return r; + } + + r = driver_register(&fs_enet_fec_driver); + if (r != 0) + goto err; +#endif + +#ifdef CONFIG_FS_ENET_HAS_SCC r = driver_register(&fs_enet_scc_driver); if (r != 0) goto err; +#endif return 0; err: diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c deleted file mode 100644 index b7e6e21725cb..000000000000 --- a/drivers/net/fs_enet/fs_enet-mii.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. - * - * Copyright (c) 2003 Intracom S.A. - * by Pantelis Antoniou - * - * 2005 (c) MontaVista Software, Inc. - * Vitaly Bordug - * - * Heavily based on original FEC driver by Dan Malek - * and modifications by Joakim Tjernlund - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "fs_enet.h" - -/*************************************************/ - -/* - * Generic PHY support. - * Should work for all PHYs, but link change is detected by polling - */ - -static void generic_timer_callback(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct fs_enet_private *fep = netdev_priv(dev); - - fep->phy_timer_list.expires = jiffies + HZ / 2; - - add_timer(&fep->phy_timer_list); - - fs_mii_link_status_change_check(dev, 0); -} - -static void generic_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */ - fep->phy_timer_list.data = (unsigned long)dev; - fep->phy_timer_list.function = generic_timer_callback; - add_timer(&fep->phy_timer_list); -} - -static void generic_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - del_timer_sync(&fep->phy_timer_list); -} - -/* ------------------------------------------------------------------------- */ -/* The Davicom DM9161 is used on the NETTA board */ - -/* register definitions */ - -#define MII_DM9161_ANAR 4 /* Aux. Config Register */ -#define MII_DM9161_ACR 16 /* Aux. Config Register */ -#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */ -#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */ -#define MII_DM9161_INTR 21 /* Interrupt Register */ -#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. 
*/ -#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */ - -static void dm9161_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000); - /* Start autonegotiation */ - fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200); - - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ*8); -} - -static void dm9161_ack_int(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR); -} - -static void dm9161_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00); -} - -/**********************************************************************************/ - -static const struct phy_info phy_info[] = { - { - .id = 0x00181b88, - .name = "DM9161", - .startup = dm9161_startup, - .ack_int = dm9161_ack_int, - .shutdown = dm9161_shutdown, - }, { - .id = 0, - .name = "GENERIC", - .startup = generic_startup, - .shutdown = generic_shutdown, - }, -}; - -/**********************************************************************************/ - -static int phy_id_detect(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; - struct fs_enet_mii_bus *bus = fep->mii_bus; - int i, r, start, end, phytype, physubtype; - const struct phy_info *phy; - int phy_hwid, phy_id; - - phy_hwid = -1; - fep->phy = NULL; - - /* auto-detect? */ - if (fpi->phy_addr == -1) { - start = 1; - end = 32; - } else { /* direct */ - start = fpi->phy_addr; - end = start + 1; - } - - for (phy_id = start; phy_id < end; phy_id++) { - /* skip already used phy addresses on this bus */ - if (bus->usage_map & (1 << phy_id)) - continue; - r = fs_mii_read(dev, phy_id, MII_PHYSID1); - if (r == -1 || (phytype = (r & 0xffff)) == 0xffff) - continue; - r = fs_mii_read(dev, phy_id, MII_PHYSID2); - if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff) - continue; - phy_hwid = (phytype << 16) | physubtype; - if (phy_hwid != -1) - break; - } - - if (phy_hwid == -1) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s No PHY detected! range=0x%02x-0x%02x\n", - dev->name, start, end); - return -1; - } - - for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++) - if (phy->id == (phy_hwid >> 4) || phy->id == 0) - break; - - if (i >= ARRAY_SIZE(phy_info)) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s PHY id 0x%08x is not supported!\n", - dev->name, phy_hwid); - return -1; - } - - fep->phy = phy; - - /* mark this address as used */ - bus->usage_map |= (1 << phy_id); - - printk(KERN_INFO DRV_MODULE_NAME - ": %s Phy @ 0x%x, type %s (0x%08x)%s\n", - dev->name, phy_id, fep->phy->name, phy_hwid, - fpi->phy_addr == -1 ? 
" (auto-detected)" : ""); - - return phy_id; -} - -void fs_mii_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->startup) - (*fep->phy->startup) (dev); -} - -void fs_mii_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->shutdown) - (*fep->phy->shutdown) (dev); -} - -void fs_mii_ack_int(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->ack_int) - (*fep->phy->ack_int) (dev); -} - -#define MII_LINK 0x0001 -#define MII_HALF 0x0002 -#define MII_FULL 0x0004 -#define MII_BASE4 0x0008 -#define MII_10M 0x0010 -#define MII_100M 0x0020 -#define MII_1G 0x0040 -#define MII_10G 0x0080 - -/* return full mii info at one gulp, with a usable form */ -static unsigned int mii_full_status(struct mii_if_info *mii) -{ - unsigned int status; - int bmsr, adv, lpa, neg; - struct fs_enet_private* fep = netdev_priv(mii->dev); - - /* first, a dummy read, needed to latch some MII phys */ - (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); - bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); - - /* no link */ - if ((bmsr & BMSR_LSTATUS) == 0) - return 0; - - status = MII_LINK; - - /* Lets look what ANEG says if it's supported - otherwize we shall - take the right values from the platform info*/ - if(!mii->force_media) { - /* autoneg not completed; don't bother */ - if ((bmsr & BMSR_ANEGCOMPLETE) == 0) - return 0; - - adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE); - lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA); - - neg = lpa & adv; - } else { - neg = fep->fpi->bus_info->lpa; - } - - if (neg & LPA_100FULL) - status |= MII_FULL | MII_100M; - else if (neg & LPA_100BASE4) - status |= MII_FULL | MII_BASE4 | MII_100M; - else if (neg & LPA_100HALF) - status |= MII_HALF | MII_100M; - else if (neg & LPA_10FULL) - status |= MII_FULL | MII_10M; - else - status |= MII_HALF | MII_10M; - - return status; -} - -void fs_mii_link_status_change_check(struct net_device *dev, int init_media) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct mii_if_info *mii = &fep->mii_if; - unsigned int mii_status; - int ok_to_print, link, duplex, speed; - unsigned long flags; - - ok_to_print = netif_msg_link(fep); - - mii_status = mii_full_status(mii); - - if (!init_media && mii_status == fep->last_mii_status) - return; - - fep->last_mii_status = mii_status; - - link = !!(mii_status & MII_LINK); - duplex = !!(mii_status & MII_FULL); - speed = (mii_status & MII_100M) ? 100 : 10; - - if (link == 0) { - netif_carrier_off(mii->dev); - netif_stop_queue(dev); - if (!init_media) { - spin_lock_irqsave(&fep->lock, flags); - (*fep->ops->stop)(dev); - spin_unlock_irqrestore(&fep->lock, flags); - } - - if (ok_to_print) - printk(KERN_INFO "%s: link down\n", mii->dev->name); - - } else { - - mii->full_duplex = duplex; - - netif_carrier_on(mii->dev); - - spin_lock_irqsave(&fep->lock, flags); - fep->duplex = duplex; - fep->speed = speed; - (*fep->ops->restart)(dev); - spin_unlock_irqrestore(&fep->lock, flags); - - netif_start_queue(dev); - - if (ok_to_print) - printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n", - dev->name, speed, duplex ? 
"full" : "half"); - } -} - -/**********************************************************************************/ - -int fs_mii_read(struct net_device *dev, int phy_id, int location) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = fep->mii_bus; - - unsigned long flags; - int ret; - - spin_lock_irqsave(&bus->mii_lock, flags); - ret = (*bus->mii_read)(bus, phy_id, location); - spin_unlock_irqrestore(&bus->mii_lock, flags); - - return ret; -} - -void fs_mii_write(struct net_device *dev, int phy_id, int location, int value) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = fep->mii_bus; - unsigned long flags; - - spin_lock_irqsave(&bus->mii_lock, flags); - (*bus->mii_write)(bus, phy_id, location, value); - spin_unlock_irqrestore(&bus->mii_lock, flags); -} - -/*****************************************************************************/ - -/* list of all registered mii buses */ -static LIST_HEAD(fs_mii_bus_list); - -static struct fs_enet_mii_bus *lookup_bus(int method, int id) -{ - struct list_head *ptr; - struct fs_enet_mii_bus *bus; - - list_for_each(ptr, &fs_mii_bus_list) { - bus = list_entry(ptr, struct fs_enet_mii_bus, list); - if (bus->bus_info->method == method && - bus->bus_info->id == id) - return bus; - } - return NULL; -} - -static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi) -{ - struct fs_enet_mii_bus *bus; - int ret = 0; - - bus = kmalloc(sizeof(*bus), GFP_KERNEL); - if (bus == NULL) { - ret = -ENOMEM; - goto err; - } - memset(bus, 0, sizeof(*bus)); - spin_lock_init(&bus->mii_lock); - bus->bus_info = bi; - bus->refs = 0; - bus->usage_map = 0; - - /* perform initialization */ - switch (bi->method) { - - case fsmii_fixed: - ret = fs_mii_fixed_init(bus); - if (ret != 0) - goto err; - break; - - case fsmii_bitbang: - ret = fs_mii_bitbang_init(bus); - if (ret != 0) - goto err; - break; -#ifdef CONFIG_FS_ENET_HAS_FEC - case fsmii_fec: - ret = fs_mii_fec_init(bus); - if (ret != 0) - goto err; - break; -#endif - default: - ret = -EINVAL; - goto err; - } - - list_add(&bus->list, &fs_mii_bus_list); - - return bus; - -err: - kfree(bus); - return ERR_PTR(ret); -} - -static void destroy_bus(struct fs_enet_mii_bus *bus) -{ - /* remove from bus list */ - list_del(&bus->list); - - /* nothing more needed */ - kfree(bus); -} - -int fs_mii_connect(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; - struct fs_enet_mii_bus *bus = NULL; - - /* check method validity */ - switch (fpi->bus_info->method) { - case fsmii_fixed: - case fsmii_bitbang: - break; -#ifdef CONFIG_FS_ENET_HAS_FEC - case fsmii_fec: - break; -#endif - default: - printk(KERN_ERR DRV_MODULE_NAME - ": %s Unknown MII bus method (%d)!\n", - dev->name, fpi->bus_info->method); - return -EINVAL; - } - - bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id); - - /* if not found create new bus */ - if (bus == NULL) { - bus = create_bus(fpi->bus_info); - if (IS_ERR(bus)) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s MII bus creation failure!\n", dev->name); - return PTR_ERR(bus); - } - } - - bus->refs++; - - fep->mii_bus = bus; - - fep->mii_if.dev = dev; - fep->mii_if.phy_id_mask = 0x1f; - fep->mii_if.reg_num_mask = 0x1f; - fep->mii_if.mdio_read = fs_mii_read; - fep->mii_if.mdio_write = fs_mii_write; - fep->mii_if.force_media = fpi->bus_info->disable_aneg; - fep->mii_if.phy_id = phy_id_detect(dev); - - return 0; -} - -void fs_mii_disconnect(struct net_device *dev) -{ - 
struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = NULL; - - bus = fep->mii_bus; - fep->mii_bus = NULL; - - if (--bus->refs <= 0) - destroy_bus(bus); -} diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h index e7ec96c964a9..95022c005f75 100644 --- a/drivers/net/fs_enet/fs_enet.h +++ b/drivers/net/fs_enet/fs_enet.h @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -12,12 +13,30 @@ #ifdef CONFIG_CPM1 #include + +struct fec_info { + fec_t* fecp; + u32 mii_speed; +}; #endif #ifdef CONFIG_CPM2 #include #endif +/* This is used to operate with pins. + Note that the actual port size may + be different; cpm(s) handle it OK */ +struct bb_info { + u8 mdio_dat_msk; + u8 mdio_dir_msk; + u8 *mdio_dir; + u8 *mdio_dat; + u8 mdc_msk; + u8 *mdc_dat; + int delay; +}; + /* hw driver ops */ struct fs_ops { int (*setup_data)(struct net_device *dev); @@ -25,6 +44,7 @@ struct fs_ops { void (*free_bd)(struct net_device *dev); void (*cleanup_data)(struct net_device *dev); void (*set_multicast_list)(struct net_device *dev); + void (*adjust_link)(struct net_device *dev); void (*restart)(struct net_device *dev); void (*stop)(struct net_device *dev); void (*pre_request_irq)(struct net_device *dev, int irq); @@ -100,10 +120,6 @@ struct fs_enet_mii_bus { }; }; -int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus); -int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); -int fs_mii_fec_init(struct fs_enet_mii_bus *bus); - struct fs_enet_private { struct device *dev; /* pointer back to the device (must be initialized first) */ spinlock_t lock; /* during all ops except TX pckt processing */ @@ -130,7 +146,8 @@ struct fs_enet_private { struct fs_enet_mii_bus *mii_bus; int interrupt; - int duplex, speed; /* current settings */ + struct phy_device *phydev; + int oldduplex, oldspeed, oldlink; /* current settings */ /* event masks */ u32 ev_napi_rx; /* mask of NAPI rx events */ @@ -168,15 +185,9 @@ struct fs_enet_private { }; /***************************************************************************/ - -int fs_mii_read(struct net_device *dev, int phy_id, int location); -void fs_mii_write(struct net_device *dev, int phy_id, int location, int value); - -void fs_mii_startup(struct net_device *dev); -void fs_mii_shutdown(struct net_device *dev); -void fs_mii_ack_int(struct net_device *dev); - -void fs_mii_link_status_change_check(struct net_device *dev, int init_media); +int fs_enet_mdio_bb_init(void); +int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); +int fs_enet_mdio_fec_init(void); void fs_init_bds(struct net_device *dev); void fs_cleanup_bds(struct net_device *dev); @@ -194,7 +205,6 @@ int fs_enet_platform_init(void); void fs_enet_platform_cleanup(void); /***************************************************************************/ - /* buffer descriptor access macros */ /* access macros */ diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 64e20982c1fe..1ff2597b8495 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep) /* Attach the memory for the FCC Parameter RAM */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); - fep->fcc.ep = (void *)r->start; - + fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1); if (fep->fcc.ep == NULL) return -EINVAL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); - fep->fcc.fccp = 
(void *)r->start; - + fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1); if (fep->fcc.fccp == NULL) return -EINVAL; - fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; + if (fep->fpi->fcc_regs_c) { + + fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; + } else { + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "fcc_regs_c"); + fep->fcc.fcccp = (void *)ioremap(r->start, + r->end - r->start + 1); + } if (fep->fcc.fcccp == NULL) return -EINVAL; + fep->fcc.mem = (void *)fep->fpi->mem_offset; + if (fep->fcc.mem == NULL) + return -EINVAL; + return 0; } @@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev) if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ return -EINVAL; - fep->fcc.mem = (void *)fpi->mem_offset; - if (do_pd_setup(fep) != 0) return -EINVAL; @@ -394,7 +403,7 @@ static void restart(struct net_device *dev) /* adjust to speed (for RMII mode) */ if (fpi->use_rmii) { - if (fep->speed == 100) + if (fep->phydev->speed == 100) C8(fcccp, fcc_gfemr, 0x20); else S8(fcccp, fcc_gfemr, 0x20); @@ -420,7 +429,7 @@ static void restart(struct net_device *dev) S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); /* adjust to duplex mode */ - if (fep->duplex) + if (fep->phydev->duplex) S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); else C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); @@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev) static void tx_kickstart(struct net_device *dev) { - /* nothing */ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t *fccp = fep->fcc.fccp; + + S32(fccp, fcc_ftodr, 0x80); } static u32 get_int_events(struct net_device *dev) diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index e09547077529..c2c5fd419bd0 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -46,6 +46,7 @@ #endif #include "fs_enet.h" +#include "fec.h" /*************************************************/ @@ -75,50 +76,8 @@ /* clear bits */ #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) - -/* CRC polynomium used by the FEC for the multicast group filtering */ -#define FEC_CRC_POLY 0x04C11DB7 - -#define FEC_MAX_MULTICAST_ADDRS 64 - -/* Interrupt events/masks. -*/ -#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ -#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ -#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ -#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ -#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ -#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ -#define FEC_ENET_RXF 0x02000000U /* Full frame received */ -#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ -#define FEC_ENET_MII 0x00800000U /* MII interrupt */ -#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ - -#define FEC_ECNTRL_PINMUX 0x00000004 -#define FEC_ECNTRL_ETHER_EN 0x00000002 -#define FEC_ECNTRL_RESET 0x00000001 - -#define FEC_RCNTRL_BC_REJ 0x00000010 -#define FEC_RCNTRL_PROM 0x00000008 -#define FEC_RCNTRL_MII_MODE 0x00000004 -#define FEC_RCNTRL_DRT 0x00000002 -#define FEC_RCNTRL_LOOP 0x00000001 - -#define FEC_TCNTRL_FDEN 0x00000004 -#define FEC_TCNTRL_HBC 0x00000002 -#define FEC_TCNTRL_GTS 0x00000001 - - -/* Make MII read/write commands for the FEC. 
-*/ -#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) -#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) -#define mk_mii_end 0 - -#define FEC_MII_LOOPS 10000 - /* - * Delay to wait for FEC reset command to complete (in us) + * Delay to wait for FEC reset command to complete (in us) */ #define FEC_RESET_DELAY 50 @@ -303,13 +262,15 @@ static void restart(struct net_device *dev) int r; u32 addrhi, addrlo; + struct mii_bus* mii = fep->phydev->bus; + struct fec_info* fec_inf = mii->priv; + r = whack_reset(fep->fec.fecp); if (r != 0) printk(KERN_ERR DRV_MODULE_NAME ": %s FEC Reset FAILED!\n", dev->name); - /* - * Set station address. + * Set station address. */ addrhi = ((u32) dev->dev_addr[0] << 24) | ((u32) dev->dev_addr[1] << 16) | @@ -350,12 +311,12 @@ static void restart(struct net_device *dev) FW(fecp, fun_code, 0x78000000); /* - * Set MII speed. + * Set MII speed. */ - FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed); + FW(fecp, mii_speed, fec_inf->mii_speed); /* - * Clear any outstanding interrupt. + * Clear any outstanding interrupt. */ FW(fecp, ievent, 0xffc0); FW(fecp, ivec, (fep->interrupt / 2) << 29); @@ -390,11 +351,12 @@ static void restart(struct net_device *dev) } #endif + FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ /* - * adjust to duplex mode + * adjust to duplex mode */ - if (fep->duplex) { + if (fep->phydev->duplex) { FC(fecp, r_cntrl, FEC_RCNTRL_DRT); FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ } else { @@ -418,9 +380,11 @@ static void restart(struct net_device *dev) static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; fec_t *fecp = fep->fec.fecp; - struct fs_enet_mii_bus *bus = fep->mii_bus; - const struct fs_mii_bus_info *bi = bus->bus_info; + + struct fec_info* feci= fep->phydev->bus->priv; + int i; if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) @@ -444,11 +408,11 @@ static void stop(struct net_device *dev) fs_cleanup_bds(dev); /* shut down FEC1? that's where the mii bus is */ - if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) { + if (fpi->has_phy) { FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); FW(fecp, ievent, FEC_ENET_MII); - FW(fecp, mii_speed, bus->fec.mii_speed); + FW(fecp, mii_speed, feci->mii_speed); } } @@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = { .free_bd = free_bd, }; -/***********************************************************************/ - -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) -{ - fec_t *fecp = bus->fec.fecp; - int i, ret = -1; - - if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) - BUG(); - - /* Add PHY address to register command. */ - FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location)); - - for (i = 0; i < FEC_MII_LOOPS; i++) - if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) - break; - - if (i < FEC_MII_LOOPS) { - FW(fecp, ievent, FEC_ENET_MII); - ret = FR(fecp, mii_data) & 0xffff; - } - - return ret; -} - -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value) -{ - fec_t *fecp = bus->fec.fecp; - int i; - - /* this must never happen */ - if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) - BUG(); - - /* Add PHY address to register command. 
*/ - FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value)); - - for (i = 0; i < FEC_MII_LOOPS; i++) - if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) - break; - - if (i < FEC_MII_LOOPS) - FW(fecp, ievent, FEC_ENET_MII); -} - -int fs_mii_fec_init(struct fs_enet_mii_bus *bus) -{ - bd_t *bd = (bd_t *)__res; - const struct fs_mii_bus_info *bi = bus->bus_info; - fec_t *fecp; - - if (bi->id != 0) - return -1; - - bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec; - bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) - & 0x3F) << 1; - - fecp = bus->fec.fecp; - - FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ - FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); - FW(fecp, ievent, FEC_ENET_MII); - FW(fecp, mii_speed, bus->fec.mii_speed); - - bus->mii_read = mii_read; - bus->mii_write = mii_write; - - return 0; -} diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index eaa24fab645f..95ec5872c507 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -369,7 +369,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); /* Set full duplex mode if needed */ - if (fep->duplex) + if (fep->phydev->duplex) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); @@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev) scc_cr_cmd(fep, CPM_CR_RESTART_TX); } + + /*************************************************************************/ const struct fs_ops fs_scc_ops = { diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index 48f9cf83ab6f..0b9b8b5c847c 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -40,129 +41,25 @@ #include "fs_enet.h" -#ifdef CONFIG_8xx -static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) +static int bitbang_prep_bit(u8 **datp, u8 *mskp, + struct fs_mii_bit *mii_bit) { - immap_t *im = (immap_t *)fs_enet_immap; - void *dir, *dat, *ppar; + void *dat; int adv; u8 msk; - switch (port) { - case fsiop_porta: - dir = &im->im_ioport.iop_padir; - dat = &im->im_ioport.iop_padat; - ppar = &im->im_ioport.iop_papar; - break; + dat = (void*) mii_bit->offset; - case fsiop_portb: - dir = &im->im_cpm.cp_pbdir; - dat = &im->im_cpm.cp_pbdat; - ppar = &im->im_cpm.cp_pbpar; - break; - - case fsiop_portc: - dir = &im->im_ioport.iop_pcdir; - dat = &im->im_ioport.iop_pcdat; - ppar = &im->im_ioport.iop_pcpar; - break; - - case fsiop_portd: - dir = &im->im_ioport.iop_pddir; - dat = &im->im_ioport.iop_pddat; - ppar = &im->im_ioport.iop_pdpar; - break; - - case fsiop_porte: - dir = &im->im_cpm.cp_pedir; - dat = &im->im_cpm.cp_pedat; - ppar = &im->im_cpm.cp_pepar; - break; - - default: - printk(KERN_ERR DRV_MODULE_NAME - "Illegal port value %d!\n", port); - return -EINVAL; - } - - adv = bit >> 3; - dir = (char *)dir + adv; + adv = mii_bit->bit >> 3; dat = (char *)dat + adv; - ppar = (char *)ppar + adv; - msk = 1 << (7 - (bit & 7)); - if ((in_8(ppar) & msk) != 0) { - printk(KERN_ERR DRV_MODULE_NAME - "pin %d on port %d is not general purpose!\n", bit, port); - return -EINVAL; - } + msk = 1 << (7 - (mii_bit->bit & 7)); - *dirp = dir; *datp = dat; *mskp = msk; return 0; } -#endif - -#ifdef CONFIG_8260 -static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) -{ - iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport; - 
void *dir, *dat, *ppar; - int adv; - u8 msk; - - switch (port) { - case fsiop_porta: - dir = &io->iop_pdira; - dat = &io->iop_pdata; - ppar = &io->iop_ppara; - break; - - case fsiop_portb: - dir = &io->iop_pdirb; - dat = &io->iop_pdatb; - ppar = &io->iop_pparb; - break; - - case fsiop_portc: - dir = &io->iop_pdirc; - dat = &io->iop_pdatc; - ppar = &io->iop_pparc; - break; - - case fsiop_portd: - dir = &io->iop_pdird; - dat = &io->iop_pdatd; - ppar = &io->iop_ppard; - break; - - default: - printk(KERN_ERR DRV_MODULE_NAME - "Illegal port value %d!\n", port); - return -EINVAL; - } - - adv = bit >> 3; - dir = (char *)dir + adv; - dat = (char *)dat + adv; - ppar = (char *)ppar + adv; - - msk = 1 << (7 - (bit & 7)); - if ((in_8(ppar) & msk) != 0) { - printk(KERN_ERR DRV_MODULE_NAME - "pin %d on port %d is not general purpose!\n", bit, port); - return -EINVAL; - } - - *dirp = dir; - *datp = dat; - *mskp = msk; - - return 0; -} -#endif static inline void bb_set(u8 *p, u8 m) { @@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m) return (in_8(p) & m) != 0; } -static inline void mdio_active(struct fs_enet_mii_bus *bus) +static inline void mdio_active(struct bb_info *bitbang) { - bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); + bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk); } -static inline void mdio_tristate(struct fs_enet_mii_bus *bus) +static inline void mdio_tristate(struct bb_info *bitbang ) { - bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); + bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk); } -static inline int mdio_read(struct fs_enet_mii_bus *bus) +static inline int mdio_read(struct bb_info *bitbang ) { - return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk); } -static inline void mdio(struct fs_enet_mii_bus *bus, int what) +static inline void mdio(struct bb_info *bitbang , int what) { if (what) - bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk); else - bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk); } -static inline void mdc(struct fs_enet_mii_bus *bus, int what) +static inline void mdc(struct bb_info *bitbang , int what) { if (what) - bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); + bb_set(bitbang->mdc_dat, bitbang->mdc_msk); else - bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); + bb_clr(bitbang->mdc_dat, bitbang->mdc_msk); } -static inline void mii_delay(struct fs_enet_mii_bus *bus) +static inline void mii_delay(struct bb_info *bitbang ) { - udelay(bus->bus_info->i.bitbang.delay); + udelay(bitbang->delay); } /* Utility to send the preamble, address, and register (common to read and write). */ -static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) +static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg) { int j; @@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) * but it is safer and will be much more robust. 
*/ - mdio_active(bus); - mdio(bus, 1); + mdio_active(bitbang); + mdio(bitbang, 1); for (j = 0; j < 32; j++) { - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); } /* send the start bit (01) and the read opcode (10) or write (10) */ - mdc(bus, 0); - mdio(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, 1); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, read); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, !read); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, read); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, !read); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* send the PHY address */ for (j = 0; j < 5; j++) { - mdc(bus, 0); - mdio(bus, (addr & 0x10) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (addr & 0x10) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); addr <<= 1; } /* send the register address */ for (j = 0; j < 5; j++) { - mdc(bus, 0); - mdio(bus, (reg & 0x10) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (reg & 0x10) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); reg <<= 1; } } -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) +static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location) { u16 rdreg; int ret, j; u8 addr = phy_id & 0xff; u8 reg = location & 0xff; + struct bb_info* bitbang = bus->priv; - bitbang_pre(bus, 1, addr, reg); + bitbang_pre(bitbang, 1, addr, reg); /* tri-state our MDIO I/O pin so we can read */ - mdc(bus, 0); - mdio_tristate(bus); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio_tristate(bitbang); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* check the turnaround bit: the PHY should be driving it to zero */ - if (mdio_read(bus) != 0) { + if (mdio_read(bitbang) != 0) { /* PHY didn't drive TA low */ for (j = 0; j < 32; j++) { - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); } ret = -1; goto out; } - mdc(bus, 0); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); /* read 16 bits of register data, MSB first */ rdreg = 0; for (j = 0; j < 16; j++) { - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 1); + mii_delay(bitbang); rdreg <<= 1; - rdreg |= mdio_read(bus); - mdc(bus, 0); - mii_delay(bus); + rdreg |= mdio_read(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); } - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); ret = rdreg; out: return ret; } -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) +static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val) { int j; + struct bb_info* bitbang = bus->priv; + u8 addr = phy_id & 0xff; u8 reg = location & 0xff; u16 value = val & 0xffff; - 
bitbang_pre(bus, 0, addr, reg); + bitbang_pre(bitbang, 0, addr, reg); /* send the turnaround (10) */ - mdc(bus, 0); - mdio(bus, 1); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* write 16 bits of register data, MSB first */ for (j = 0; j < 16; j++) { - mdc(bus, 0); - mdio(bus, (value & 0x8000) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (value & 0x8000) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); value <<= 1; } /* * Tri-state the MDIO line. */ - mdio_tristate(bus); - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdio_tristate(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + return 0; } -int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus) +static int fs_enet_mii_bb_reset(struct mii_bus *bus) +{ + /*nothing here - dunno how to reset it*/ + return 0; +} + +static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi) { - const struct fs_mii_bus_info *bi = bus->bus_info; int r; - r = bitbang_prep_bit(&bus->bitbang.mdio_dir, - &bus->bitbang.mdio_dat, - &bus->bitbang.mdio_msk, - bi->i.bitbang.mdio_port, - bi->i.bitbang.mdio_bit); + bitbang->delay = fmpi->delay; + + r = bitbang_prep_bit(&bitbang->mdio_dir, + &bitbang->mdio_dir_msk, + &fmpi->mdio_dir); if (r != 0) return r; - r = bitbang_prep_bit(&bus->bitbang.mdc_dir, - &bus->bitbang.mdc_dat, - &bus->bitbang.mdc_msk, - bi->i.bitbang.mdc_port, - bi->i.bitbang.mdc_bit); + r = bitbang_prep_bit(&bitbang->mdio_dat, + &bitbang->mdio_dat_msk, + &fmpi->mdio_dat); if (r != 0) return r; - bus->mii_read = mii_read; - bus->mii_write = mii_write; + r = bitbang_prep_bit(&bitbang->mdc_dat, + &bitbang->mdc_msk, + &fmpi->mdc_dat); + if (r != 0) + return r; return 0; } + + +static int __devinit fs_enet_mdio_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fs_mii_bb_platform_info *pdata; + struct mii_bus *new_bus; + struct bb_info *bitbang; + int err = 0; + + if (NULL == dev) + return -EINVAL; + + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) + return -ENOMEM; + + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + + if (NULL == bitbang) + return -ENOMEM; + + new_bus->name = "BB MII Bus", + new_bus->read = &fs_enet_mii_bb_read, + new_bus->write = &fs_enet_mii_bb_write, + new_bus->reset = &fs_enet_mii_bb_reset, + new_bus->id = pdev->id; + + new_bus->phy_mask = ~0x9; + pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; + + if (NULL == pdata) { + printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id); + return -ENODEV; + } + + /*set up workspace*/ + fs_mii_bitbang_init(bitbang, pdata); + + new_bus->priv = bitbang; + + new_bus->irq = pdata->irq; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + err = mdiobus_register(new_bus); + + if (0 != err) { + printk (KERN_ERR "%s: Cannot register as MDIO bus\n", + new_bus->name); + goto bus_register_fail; + } + + return 0; + +bus_register_fail: + kfree(bitbang); + kfree(new_bus); + + return err; +} + + +static int fs_enet_mdio_remove(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + + 
dev_set_drvdata(dev, NULL); + + iounmap((void *) (&bus->priv)); + bus->priv = NULL; + kfree(bus); + + return 0; +} + +static struct device_driver fs_enet_bb_mdio_driver = { + .name = "fsl-bb-mdio", + .bus = &platform_bus_type, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +int fs_enet_mdio_bb_init(void) +{ + return driver_register(&fs_enet_bb_mdio_driver); +} + +void fs_enet_mdio_bb_exit(void) +{ + driver_unregister(&fs_enet_bb_mdio_driver); +} + diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c new file mode 100644 index 000000000000..1328e10caa35 --- /dev/null +++ b/drivers/net/fs_enet/mii-fec.c @@ -0,0 +1,243 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "fs_enet.h" +#include "fec.h" + +/* Make MII read/write commands for the FEC. +*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) +#define mk_mii_end 0 + +#define FEC_MII_LOOPS 10000 + +static int match_has_phy (struct device *dev, void* data) +{ + struct platform_device* pdev = container_of(dev, struct platform_device, dev); + struct fs_platform_info* fpi; + if(strcmp(pdev->name, (char*)data)) + { + return 0; + } + + fpi = pdev->dev.platform_data; + if((fpi)&&(fpi->has_phy)) + return 1; + return 0; +} + +static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi) +{ + struct resource *r; + fec_t *fecp; + char* name = "fsl-cpm-fec"; + + /* we need fec in order to be useful */ + struct platform_device *fec_pdev = + container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy), + struct platform_device, dev); + + if(fec_pdev == NULL) { + printk(KERN_ERR"Unable to find PHY for %s", name); + return -ENODEV; + } + + r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs"); + + fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t)); + fec->mii_speed = fmpi->mii_speed; + + setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + out_be32(&fecp->fec_mii_speed, fec->mii_speed); + + return 0; +} + +static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) +{ + struct fec_info* fec = bus->priv; + fec_t *fecp = fec->fecp; + int i, ret = -1; + + if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) + BUG(); + + /* Add PHY address to register command. 
*/ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) { + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + ret = in_be32(&fecp->fec_mii_data) & 0xffff; + } + + return ret; + +} + +static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +{ + struct fec_info* fec = bus->priv; + fec_t *fecp = fec->fecp; + int i; + + /* this must never happen */ + if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) + BUG(); + + /* Add PHY address to register command. */ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + + return 0; + +} + +static int fs_enet_fec_mii_reset(struct mii_bus *bus) +{ + /* nothing here - for now */ + return 0; +} + +static int __devinit fs_enet_fec_mdio_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fs_mii_fec_platform_info *pdata; + struct mii_bus *new_bus; + struct fec_info *fec; + int err = 0; + if (NULL == dev) + return -EINVAL; + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) + return -ENOMEM; + + fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); + + if (NULL == fec) + return -ENOMEM; + + new_bus->name = "FEC MII Bus", + new_bus->read = &fs_enet_fec_mii_read, + new_bus->write = &fs_enet_fec_mii_write, + new_bus->reset = &fs_enet_fec_mii_reset, + new_bus->id = pdev->id; + + pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; + + if (NULL == pdata) { + printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id); + return -ENODEV; + } + + /*set up workspace*/ + + fs_mii_fec_init(fec, pdata); + new_bus->priv = fec; + + new_bus->irq = pdata->irq; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + err = mdiobus_register(new_bus); + + if (0 != err) { + printk (KERN_ERR "%s: Cannot register as MDIO bus\n", + new_bus->name); + goto bus_register_fail; + } + + return 0; + +bus_register_fail: + kfree(new_bus); + + return err; +} + + +static int fs_enet_fec_mdio_remove(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + + dev_set_drvdata(dev, NULL); + kfree(bus->priv); + + bus->priv = NULL; + kfree(bus); + + return 0; +} + +static struct device_driver fs_enet_fec_mdio_driver = { + .name = "fsl-cpm-fec-mdio", + .bus = &platform_bus_type, + .probe = fs_enet_fec_mdio_probe, + .remove = fs_enet_fec_mdio_remove, +}; + +int fs_enet_mdio_fec_init(void) +{ + return driver_register(&fs_enet_fec_mdio_driver); +} + +void fs_enet_mdio_fec_exit(void) +{ + driver_unregister(&fs_enet_fec_mdio_driver); +} + diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c deleted file mode 100644 index ae4a9c3bb393..000000000000 --- a/drivers/net/fs_enet/mii-fixed.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. - * - * Copyright (c) 2003 Intracom S.A. - * by Pantelis Antoniou - * - * 2005 (c) MontaVista Software, Inc. - * Vitaly Bordug - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
- */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "fs_enet.h" - -static const u16 mii_regs[7] = { - 0x3100, - 0x786d, - 0x0fff, - 0x0fff, - 0x01e1, - 0x45e1, - 0x0003, -}; - -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) -{ - int ret = 0; - - if ((unsigned int)location >= ARRAY_SIZE(mii_regs)) - return -1; - - if (location != 5) - ret = mii_regs[location]; - else - ret = bus->fixed.lpa; - - return ret; -} - -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) -{ - /* do nothing */ -} - -int fs_mii_fixed_init(struct fs_enet_mii_bus *bus) -{ - const struct fs_mii_bus_info *bi = bus->bus_info; - - bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */ - - /* if speed is fixed at 10Mb, remove 100Mb modes */ - if (bi->i.fixed.speed == 10) - bus->fixed.lpa &= ~LPA_100; - - /* if duplex is half, remove full duplex modes */ - if (bi->i.fixed.duplex == 0) - bus->fixed.lpa &= ~LPA_DUPLEX; - - bus->mii_read = mii_read; - bus->mii_write = mii_write; - - return 0; -} diff --git a/drivers/net/lance.c b/drivers/net/lance.c index c1c3452c90ca..5b4dbfe5fb77 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c index 646e89fc3562..c0ec7f6abcb2 100644 --- a/drivers/net/lne390.c +++ b/drivers/net/lne390.c @@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)"); MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); MODULE_LICENSE("GPL"); -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 07ca9480a6fe..9bdd43ab3573 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -177,6 +177,7 @@ struct myri10ge_priv { struct work_struct watchdog_work; struct timer_list watchdog_timer; int watchdog_tx_done; + int watchdog_tx_req; int watchdog_resets; int tx_linearized; int pause; @@ -448,6 +449,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) struct mcp_gen_header *hdr; size_t hdr_offset; int status; + unsigned i; if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { dev_err(dev, "Unable to load %s firmware image via hotplug\n", @@ -479,18 +481,12 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) goto abort_with_fw; crc = crc32(~0, fw->data, fw->size); - if (mgp->tx.boundary == 2048) { - /* Avoid PCI burst on chipset with unaligned completions. 
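Many of the legacy ISA probes touched by this series (e2100, eepro, eexpress, es3210, eth16i, lance, lne390, ni52, ni65, seeq8005) receive the same one-line change: the old-style module entry point is annotated __init so its code is placed in the init section and discarded once loading has finished. A minimal sketch of the resulting pattern, with a hypothetical driver standing in for the real probe routine:

	static struct net_device *my_dev;

	int __init init_module(void)
	{
		my_dev = my_isa_probe(-1);	/* runs exactly once, at load time */
		return IS_ERR(my_dev) ? PTR_ERR(my_dev) : 0;
	}

	void cleanup_module(void)
	{
		unregister_netdev(my_dev);
		free_netdev(my_dev);
	}

The annotation is purely a size optimisation; the probe logic itself is unchanged.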
*/ - int i; - __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + - MYRI10GE_FW_OFFSET); - for (i = 0; i < fw->size / 4; i++) { - __raw_writel(((u32 *) fw->data)[i], ptr + i); - wmb(); - } - } else { - myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data, - fw->size); + for (i = 0; i < fw->size; i += 256) { + myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i, + fw->data + i, + min(256U, (unsigned)(fw->size - i))); + mb(); + readb(mgp->sram); } /* corruption checking is good for parity recovery and buggy chipset */ memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); @@ -620,7 +616,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) return -ENXIO; } dev_info(&mgp->pdev->dev, "handoff confirmed\n"); - myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); + myri10ge_dummy_rdma(mgp, 1); return 0; } @@ -2429,7 +2425,7 @@ static int myri10ge_resume(struct pci_dev *pdev) } myri10ge_reset(mgp); - myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); + myri10ge_dummy_rdma(mgp, 1); /* Save configuration space to be restored if the * nic resets due to a parity error */ @@ -2547,7 +2543,8 @@ static void myri10ge_watchdog_timer(unsigned long arg) mgp = (struct myri10ge_priv *)arg; if (mgp->tx.req != mgp->tx.done && - mgp->tx.done == mgp->watchdog_tx_done) + mgp->tx.done == mgp->watchdog_tx_done && + mgp->watchdog_tx_req != mgp->watchdog_tx_done) /* nic seems like it might be stuck.. */ schedule_work(&mgp->watchdog_work); else @@ -2556,6 +2553,7 @@ static void myri10ge_watchdog_timer(unsigned long arg) jiffies + myri10ge_watchdog_timeout * HZ); mgp->watchdog_tx_done = mgp->tx.done; + mgp->watchdog_tx_req = mgp->tx.req; } static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index fa854c8fde75..4d52ecf8af56 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required"); MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); -int init_module(void) +int __init init_module(void) { if(io <= 0x0 || !memend || !memstart || irq < 2) { printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index bb42ff218484..810cc572f5f7 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); MODULE_PARM_DESC(io, "ni6510 I/O base address"); MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); -int init_module(void) +int __init init_module(void) { dev_ni65 = ni65_probe(-1); return IS_ERR(dev_ni65) ? 
PTR_ERR(dev_ni65) : 0; diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 9bae77ce1314..4122bb46f5ff 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -345,6 +345,7 @@ typedef struct local_info_t { void __iomem *dingo_ccr; /* only used for CEM56 cards */ unsigned last_ptr_value; /* last packets transmitted value */ const char *manf_str; + struct work_struct tx_timeout_task; } local_info_t; /**************** @@ -352,6 +353,7 @@ typedef struct local_info_t { */ static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); static void do_tx_timeout(struct net_device *dev); +static void xirc2ps_tx_timeout_task(void *data); static struct net_device_stats *do_get_stats(struct net_device *dev); static void set_addresses(struct net_device *dev); static void set_multicast_list(struct net_device *dev); @@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link) #ifdef HAVE_TX_TIMEOUT dev->tx_timeout = do_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; + INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); #endif return xirc2ps_config(link); @@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs) /*====================================================================*/ static void -do_tx_timeout(struct net_device *dev) +xirc2ps_tx_timeout_task(void *data) { - local_info_t *lp = netdev_priv(dev); - printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); - lp->stats.tx_errors++; + struct net_device *dev = data; /* reset the card */ do_reset(dev,1); dev->trans_start = jiffies; netif_wake_queue(dev); } +static void +do_tx_timeout(struct net_device *dev) +{ + local_info_t *lp = netdev_priv(dev); + lp->stats.tx_errors++; + printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); + schedule_work(&lp->tx_timeout_task); +} + static int do_start_xmit(struct sk_buff *skb, struct net_device *dev) { diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 4daafe303358..d50bcb89dd28 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -202,6 +202,8 @@ static int homepna[MAX_UNITS]; #define CSR15 15 #define PCNET32_MC_FILTER 8 +#define PCNET32_79C970A 0x2621 + /* The PCNET32 Rx and Tx ring descriptors. */ struct pcnet32_rx_head { u32 base; @@ -289,6 +291,7 @@ struct pcnet32_private { /* each bit indicates an available PHY */ u32 phymask; + unsigned short chip_version; /* which variant this is */ }; static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); @@ -724,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { r = mii_link_ok(&lp->mii_if); - } else { + } else if (lp->chip_version >= PCNET32_79C970A) { ulong ioaddr = dev->base_addr; /* card base I/O address */ r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } else { /* can not detect link on really old chips */ + r = 1; } spin_unlock_irqrestore(&lp->lock, flags); @@ -1091,6 +1096,10 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, ulong ioaddr = dev->base_addr; int ticks; + /* really old chips have to be stopped. 
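The xirc2ps_cs change above illustrates a common fix of this period: ->tx_timeout() is called in atomic (softirq) context, where a lengthy card reset is unsafe, so the handler now only bumps the error counter and defers the actual reset to keventd with schedule_work(). A stripped-down sketch of the pattern, using the same three-argument INIT_WORK() this kernel generation provides (driver names below are illustrative):

	struct my_priv {
		struct net_device_stats	stats;
		struct work_struct	tx_timeout_task;
	};

	static void my_tx_timeout_task(void *data)	/* process context */
	{
		struct net_device *dev = data;

		my_reset_hardware(dev);			/* may sleep here */
		dev->trans_start = jiffies;
		netif_wake_queue(dev);
	}

	static void my_tx_timeout(struct net_device *dev)	/* softirq context */
	{
		struct my_priv *lp = netdev_priv(dev);

		lp->stats.tx_errors++;
		schedule_work(&lp->tx_timeout_task);
	}

	/* at probe time */
	INIT_WORK(&lp->tx_timeout_task, my_tx_timeout_task, dev);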
*/ + if (lp->chip_version < PCNET32_79C970A) + return 0; + /* set SUSPEND (SPND) - CSR5 bit 0 */ csr5 = a->read_csr(ioaddr, CSR5); a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); @@ -1529,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->mii_if.reg_num_mask = 0x1f; lp->dxsuflo = dxsuflo; lp->mii = mii; + lp->chip_version = chip_version; lp->msg_enable = pcnet32_debug; if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) @@ -1839,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev) val |= 2; } else if (lp->options & PCNET32_PORT_ASEL) { /* workaround of xSeries250, turn on for 79C975 only */ - i = ((lp->a.read_csr(ioaddr, 88) | - (lp->a. - read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff; - if (i == 0x2627) + if (lp->chip_version == 0x2627) val |= 3; } lp->a.write_bcr(ioaddr, 9, val); @@ -1986,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev) netif_start_queue(dev); - /* Print the link status and start the watchdog */ - pcnet32_check_media(dev, 1); - mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + if (lp->chip_version >= PCNET32_79C970A) { + /* Print the link status and start the watchdog */ + pcnet32_check_media(dev, 1); + mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + } i = 0; while (i++ < 100) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 2ba6d3a40e2e..b79ec0d7480f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -56,5 +56,22 @@ config SMSC_PHY ---help--- Currently supports the LAN83C185 PHY +config FIXED_PHY + tristate "Drivers for PHY emulation on fixed speed/link" + depends on PHYLIB + ---help--- + Adds the driver to PHY layer to cover the boards that do not have any PHY bound, + but with the ability to manipulate with speed/link in software. The relavant MII + speed/duplex parameters could be effectively handled in user-specified fuction. + Currently tested with mpc866ads. + +config FIXED_MII_10_FDX + bool "Emulation for 10M Fdx fixed PHY behavior" + depends on FIXED_PHY + +config FIXED_MII_100_FDX + bool "Emulation for 100M Fdx fixed PHY behavior" + depends on FIXED_PHY + endmenu diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index a00e61942525..320f8323123f 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_SMSC_PHY) += smsc.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o +obj-$(CONFIG_FIXED_PHY) += fixed.o diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c new file mode 100644 index 000000000000..341036df4710 --- /dev/null +++ b/drivers/net/phy/fixed.c @@ -0,0 +1,358 @@ +/* + * drivers/net/phy/fixed.c + * + * Driver for fixed PHYs, when transceiver is able to operate in one fixed mode. + * + * Author: Vitaly Bordug + * + * Copyright (c) 2006 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define MII_REGS_NUM 7 + +/* + The idea is to emulate normal phy behavior by responding with + pre-defined values to mii BMCR read, so that read_status hook could + take all the needed info. +*/ + +struct fixed_phy_status { + u8 link; + u16 speed; + u8 duplex; +}; + +/*----------------------------------------------------------------------------- + * Private information hoder for mii_bus + *-----------------------------------------------------------------------------*/ +struct fixed_info { + u16 *regs; + u8 regs_num; + struct fixed_phy_status phy_status; + struct phy_device *phydev; /* pointer to the container */ + /* link & speed cb */ + int(*link_update)(struct net_device*, struct fixed_phy_status*); + +}; + +/*----------------------------------------------------------------------------- + * If something weird is required to be done with link/speed, + * network driver is able to assign a function to implement this. + * May be useful for PHY's that need to be software-driven. + *-----------------------------------------------------------------------------*/ +int fixed_mdio_set_link_update(struct phy_device* phydev, + int(*link_update)(struct net_device*, struct fixed_phy_status*)) +{ + struct fixed_info *fixed; + + if(link_update == NULL) + return -EINVAL; + + if(phydev) { + if(phydev->bus) { + fixed = phydev->bus->priv; + fixed->link_update = link_update; + return 0; + } + } + return -EINVAL; +} +EXPORT_SYMBOL(fixed_mdio_set_link_update); + +/*----------------------------------------------------------------------------- + * This is used for updating internal mii regs from the status + *-----------------------------------------------------------------------------*/ +static int fixed_mdio_update_regs(struct fixed_info *fixed) +{ + u16 *regs = fixed->regs; + u16 bmsr = 0; + u16 bmcr = 0; + + if(!regs) { + printk(KERN_ERR "%s: regs not set up", __FUNCTION__); + return -EINVAL; + } + + if(fixed->phy_status.link) + bmsr |= BMSR_LSTATUS; + + if(fixed->phy_status.duplex) { + bmcr |= BMCR_FULLDPLX; + + switch ( fixed->phy_status.speed ) { + case 100: + bmsr |= BMSR_100FULL; + bmcr |= BMCR_SPEED100; + break; + + case 10: + bmsr |= BMSR_10FULL; + break; + } + } else { + switch ( fixed->phy_status.speed ) { + case 100: + bmsr |= BMSR_100HALF; + bmcr |= BMCR_SPEED100; + break; + + case 10: + bmsr |= BMSR_100HALF; + break; + } + } + + regs[MII_BMCR] = bmcr; + regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/ + + return 0; +} + +static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location) +{ + struct fixed_info *fixed = bus->priv; + + /* if user has registered link update callback, use it */ + if(fixed->phydev) + if(fixed->phydev->attached_dev) { + if(fixed->link_update) { + fixed->link_update(fixed->phydev->attached_dev, + &fixed->phy_status); + fixed_mdio_update_regs(fixed); + } + } + + if ((unsigned int)location >= fixed->regs_num) + return -1; + return fixed->regs[location]; +} + +static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +{ + /* do nothing for now*/ + return 0; +} + +static int fixed_mii_reset(struct mii_bus *bus) +{ + /*nothing here - no way/need to reset it*/ + return 0; +} + +static int fixed_config_aneg(struct phy_device *phydev) +{ + /* :TODO:03/13/2006 09:45:37 PM:: + The full autoneg 
funcionality can be emulated, + but no need to have anything here for now + */ + return 0; +} + +/*----------------------------------------------------------------------------- + * the manual bind will do the magic - with phy_id_mask == 0 + * match will never return true... + *-----------------------------------------------------------------------------*/ +static struct phy_driver fixed_mdio_driver = { + .name = "Fixed PHY", + .features = PHY_BASIC_FEATURES, + .config_aneg = fixed_config_aneg, + .read_status = genphy_read_status, + .driver = { .owner = THIS_MODULE,}, +}; + +/*----------------------------------------------------------------------------- + * This func is used to create all the necessary stuff, bind + * the fixed phy driver and register all it on the mdio_bus_type. + * speed is either 10 or 100, duplex is boolean. + * number is used to create multiple fixed PHYs, so that several devices can + * utilize them simultaneously. + *-----------------------------------------------------------------------------*/ +static int fixed_mdio_register_device(int number, int speed, int duplex) +{ + struct mii_bus *new_bus; + struct fixed_info *fixed; + struct phy_device *phydev; + int err = 0; + + struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL); + + if (NULL == dev) + return -ENOMEM; + + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) { + kfree(dev); + return -ENOMEM; + } + fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL); + + if (NULL == fixed) { + kfree(dev); + kfree(new_bus); + return -ENOMEM; + } + + fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL); + fixed->regs_num = MII_REGS_NUM; + fixed->phy_status.speed = speed; + fixed->phy_status.duplex = duplex; + fixed->phy_status.link = 1; + + new_bus->name = "Fixed MII Bus", + new_bus->read = &fixed_mii_read, + new_bus->write = &fixed_mii_write, + new_bus->reset = &fixed_mii_reset, + + /*set up workspace*/ + fixed_mdio_update_regs(fixed); + new_bus->priv = fixed; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + /* create phy_device and register it on the mdio bus */ + phydev = phy_device_create(new_bus, 0, 0); + + /* + Put the phydev pointer into the fixed pack so that bus read/write code could + be able to access for instance attached netdev. Well it doesn't have to do + so, only in case of utilizing user-specified link-update... + */ + fixed->phydev = phydev; + + if(NULL == phydev) { + err = -ENOMEM; + goto device_create_fail; + } + + phydev->irq = -1; + phydev->dev.bus = &mdio_bus_type; + + if(number) + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, + "fixed_%d@%d:%d", number, speed, duplex); + else + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, + "fixed@%d:%d", speed, duplex); + phydev->bus = new_bus; + + err = device_register(&phydev->dev); + if(err) { + printk(KERN_ERR "Phy %s failed to register\n", + phydev->dev.bus_id); + goto bus_register_fail; + } + + /* + the mdio bus has phy_id match... In order not to do it + artificially, we are binding the driver here by hand; + it will be the same for all the fixed phys anyway. 
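The bus_id strings generated above are how the rest of the kernel, and MAC drivers in particular, locate the emulated PHY on the mdio bus: a single instance of a given speed/duplex registers as fixed@<speed>:<duplex>, and additional instances as fixed_<n>@<speed>:<duplex>. For illustration only (these exact calls are not in the patch, they just reproduce the snprintf() formats used above):

	char id[BUS_ID_SIZE];

	snprintf(id, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);	/* "fixed@100:1"  */
	snprintf(id, BUS_ID_SIZE, "fixed_%d@%d:%d", 1, 10, 0);	/* "fixed_1@10:0" */

A MAC driver that wants the 100 Mbit full-duplex emulation therefore passes "fixed@100:1" as the PHY id when attaching through the PHY library.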
+ */ + down_write(&phydev->dev.bus->subsys.rwsem); + + phydev->dev.driver = &fixed_mdio_driver.driver; + + err = phydev->dev.driver->probe(&phydev->dev); + if(err < 0) { + printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id); + up_write(&phydev->dev.bus->subsys.rwsem); + goto probe_fail; + } + + device_bind_driver(&phydev->dev); + up_write(&phydev->dev.bus->subsys.rwsem); + + return 0; + +probe_fail: + device_unregister(&phydev->dev); +bus_register_fail: + kfree(phydev); +device_create_fail: + kfree(dev); + kfree(new_bus); + kfree(fixed); + + return err; +} + + +MODULE_DESCRIPTION("Fixed PHY device & driver for PAL"); +MODULE_AUTHOR("Vitaly Bordug"); +MODULE_LICENSE("GPL"); + +static int __init fixed_init(void) +{ + int ret; + int duplex = 0; + + /* register on the bus... Not expected to be matched with anything there... */ + phy_driver_register(&fixed_mdio_driver); + + /* So let the fun begin... + We will create several mdio devices here, and will bound the upper + driver to them. + + Then the external software can lookup the phy bus by searching + fixed@speed:duplex, e.g. fixed@100:1, to be connected to the + virtual 100M Fdx phy. + + In case several virtual PHYs required, the bus_id will be in form + fixed_@:, which make it able even to define + driver-specific link control callback, if for instance PHY is completely + SW-driven. + + */ + +#ifdef CONFIG_FIXED_MII_DUPLEX + duplex = 1; +#endif + +#ifdef CONFIG_FIXED_MII_100_FDX + fixed_mdio_register_device(0, 100, 1); +#endif + +#ifdef CONFIX_FIXED_MII_10_FDX + fixed_mdio_register_device(0, 10, 1); +#endif + return 0; +} + +static void __exit fixed_exit(void) +{ + phy_driver_unregister(&fixed_mdio_driver); + /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */ +} + +module_init(fixed_init); +module_exit(fixed_exit); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 1dde390c164d..cf6660c93ffa 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = { .suspend = mdio_bus_suspend, .resume = mdio_bus_resume, }; +EXPORT_SYMBOL(mdio_bus_type); int __init mdio_bus_init(void) { diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7d5c2233c252..f5aad77288f9 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -419,9 +419,8 @@ void phy_start_machine(struct phy_device *phydev, /* phy_stop_machine * - * description: Stops the state machine timer, sets the state to - * UP (unless it wasn't up yet), and then frees the interrupt, - * if it is in use. This function must be called BEFORE + * description: Stops the state machine timer, sets the state to UP + * (unless it wasn't up yet). This function must be called BEFORE * phy_detach. 
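For boards where the "fixed" link is actually managed by firmware or a GPIO, the driver can install a callback through the fixed_mdio_set_link_update() hook exported earlier in this file; fixed_mii_read() calls it before regenerating the emulated BMCR/BMSR values, so real link state can be fed into the otherwise static registers. A hedged sketch of such a callback (the board helper is a placeholder, not part of this patch):

	static int my_fixed_link_update(struct net_device *dev,
					struct fixed_phy_status *status)
	{
		/* report whatever the board really knows about the link */
		status->link   = my_board_link_is_up(dev);	/* placeholder */
		status->speed  = 100;
		status->duplex = 1;
		return 0;
	}

	/* once the fixed PHY has been attached to the net_device: */
	fixed_mdio_set_link_update(phydev, my_fixed_link_update);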
*/ void phy_stop_machine(struct phy_device *phydev) @@ -433,9 +432,6 @@ void phy_stop_machine(struct phy_device *phydev) phydev->state = PHY_UP; spin_unlock(&phydev->lock); - if (phydev->irq != PHY_POLL) - phy_stop_interrupts(phydev); - phydev->adjust_state = NULL; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1bc1e032c5d6..2d1ecfdc80db 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -45,6 +45,35 @@ static struct phy_driver genphy_driver; extern int mdio_bus_init(void); extern void mdio_bus_exit(void); +struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) +{ + struct phy_device *dev; + /* We allocate the device, and initialize the + * default values */ + dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); + + if (NULL == dev) + return (struct phy_device*) PTR_ERR((void*)-ENOMEM); + + dev->speed = 0; + dev->duplex = -1; + dev->pause = dev->asym_pause = 0; + dev->link = 1; + + dev->autoneg = AUTONEG_ENABLE; + + dev->addr = addr; + dev->phy_id = phy_id; + dev->bus = bus; + + dev->state = PHY_DOWN; + + spin_lock_init(&dev->lock); + + return dev; +} +EXPORT_SYMBOL(phy_device_create); + /* get_phy_device * * description: Reads the ID registers of the PHY at addr on the @@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr) if (0xffffffff == phy_id) return NULL; - /* Otherwise, we allocate the device, and initialize the - * default values */ - dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); - - if (NULL == dev) - return ERR_PTR(-ENOMEM); - - dev->speed = 0; - dev->duplex = -1; - dev->pause = dev->asym_pause = 0; - dev->link = 1; - - dev->autoneg = AUTONEG_ENABLE; - - dev->addr = addr; - dev->phy_id = phy_id; - dev->bus = bus; - - dev->state = PHY_DOWN; - - spin_lock_init(&dev->lock); + dev = phy_device_create(bus, addr, phy_id); return dev; } diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 0ec6e9d57b94..c872f7c6cce3 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -192,7 +192,7 @@ struct cardmap { void *ptr[CARDMAP_WIDTH]; }; static void *cardmap_get(struct cardmap *map, unsigned int nr); -static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); +static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); static unsigned int cardmap_find_first_free(struct cardmap *map); static void cardmap_destroy(struct cardmap **map); @@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan) { struct channel *pch; - pch = kmalloc(sizeof(struct channel), GFP_KERNEL); + pch = kzalloc(sizeof(struct channel), GFP_KERNEL); if (pch == 0) return -ENOMEM; - memset(pch, 0, sizeof(struct channel)); pch->ppp = NULL; pch->chan = chan; chan->ppp = pch; @@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp) int ret = -ENOMEM; int i; - ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL); + ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL); if (!ppp) goto out; dev = alloc_netdev(0, "", ppp_setup); if (!dev) goto out1; - memset(ppp, 0, sizeof(struct ppp)); ppp->mru = PPP_MRU; init_ppp_file(&ppp->file, INTERFACE); @@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp) } atomic_inc(&ppp_unit_count); - cardmap_set(&all_ppp_units, unit, ppp); + ret = cardmap_set(&all_ppp_units, unit, ppp); + if (ret != 0) + goto out3; + mutex_unlock(&all_ppp_mutex); *retp = 0; return ppp; +out3: + atomic_dec(&ppp_unit_count); out2: mutex_unlock(&all_ppp_mutex); free_netdev(dev); @@ -2695,7 +2698,7 @@ static void *cardmap_get(struct 
cardmap *map, unsigned int nr) return NULL; } -static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) +static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) { struct cardmap *p; int i; @@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { do { /* need a new top level */ - struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); - memset(np, 0, sizeof(*np)); + struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) + goto enomem; np->ptr[0] = p; if (p != NULL) { np->shift = p->shift + CARDMAP_ORDER; @@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) while (p->shift > 0) { i = (nr >> p->shift) & CARDMAP_MASK; if (p->ptr[i] == NULL) { - struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); - memset(np, 0, sizeof(*np)); + struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) + goto enomem; np->shift = p->shift - CARDMAP_ORDER; np->parent = p; p->ptr[i] = np; @@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) set_bit(i, &p->inuse); else clear_bit(i, &p->inuse); + return 0; + enomem: + return -ENOMEM; } static unsigned int cardmap_find_first_free(struct cardmap *map) diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index e1fe3a0a7b0b..e72e0e099060 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -71,12 +71,13 @@ #include #include #include +#include /* local include */ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.14.2" +#define DRV_VERSION "2.0.15.2" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; @@ -370,38 +371,50 @@ static const u64 fix_mac[] = { END_SIGN }; +MODULE_AUTHOR("Raghavendra Koushik "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + + /* Module Loadable parameters. */ -static unsigned int tx_fifo_num = 1; +S2IO_PARM_INT(tx_fifo_num, 1); +S2IO_PARM_INT(rx_ring_num, 1); + + +S2IO_PARM_INT(rx_ring_mode, 1); +S2IO_PARM_INT(use_continuous_tx_intrs, 1); +S2IO_PARM_INT(rmac_pause_time, 0x100); +S2IO_PARM_INT(mc_pause_threshold_q0q3, 187); +S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); +S2IO_PARM_INT(shared_splits, 0); +S2IO_PARM_INT(tmac_util_period, 5); +S2IO_PARM_INT(rmac_util_period, 5); +S2IO_PARM_INT(bimodal, 0); +S2IO_PARM_INT(l3l4hdr_size, 128); +/* Frequency of Rx desc syncs expressed as power of 2 */ +S2IO_PARM_INT(rxsync_frequency, 3); +/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ +S2IO_PARM_INT(intr_type, 0); +/* Large receive offload feature */ +S2IO_PARM_INT(lro, 0); +/* Max pkts to be aggregated by LRO at one time. 
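S2IO_PARM_INT() is a small helper macro added to s2io.h later in this patch; each use above collapses what used to be a "static unsigned int" definition near the top of the file plus a matching module_param() line near the bottom into a single declaration. For example, S2IO_PARM_INT(lro, 0) expands to roughly:

	static unsigned int lro = 0;
	module_param(lro, uint, 0);	/* permissions 0: not visible in sysfs */

The three array parameters (tx_fifo_len, rx_ring_sz, rts_frm_len) cannot use the macro and keep their explicit module_param_array() declarations.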
If not specified, + * aggregation happens until we hit max IP pkt size(64K) + */ +S2IO_PARM_INT(lro_max_pkts, 0xFFFF); +#ifndef CONFIG_S2IO_NAPI +S2IO_PARM_INT(indicate_max_pkts, 0); +#endif + static unsigned int tx_fifo_len[MAX_TX_FIFOS] = {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; -static unsigned int rx_ring_num = 1; static unsigned int rx_ring_sz[MAX_RX_RINGS] = {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; static unsigned int rts_frm_len[MAX_RX_RINGS] = {[0 ...(MAX_RX_RINGS - 1)] = 0 }; -static unsigned int rx_ring_mode = 1; -static unsigned int use_continuous_tx_intrs = 1; -static unsigned int rmac_pause_time = 0x100; -static unsigned int mc_pause_threshold_q0q3 = 187; -static unsigned int mc_pause_threshold_q4q7 = 187; -static unsigned int shared_splits; -static unsigned int tmac_util_period = 5; -static unsigned int rmac_util_period = 5; -static unsigned int bimodal = 0; -static unsigned int l3l4hdr_size = 128; -#ifndef CONFIG_S2IO_NAPI -static unsigned int indicate_max_pkts; -#endif -/* Frequency of Rx desc syncs expressed as power of 2 */ -static unsigned int rxsync_frequency = 3; -/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ -static unsigned int intr_type = 0; -/* Large receive offload feature */ -static unsigned int lro = 0; -/* Max pkts to be aggregated by LRO at one time. If not specified, - * aggregation happens until we hit max IP pkt size(64K) - */ -static unsigned int lro_max_pkts = 0xFFFF; + +module_param_array(tx_fifo_len, uint, NULL, 0); +module_param_array(rx_ring_sz, uint, NULL, 0); +module_param_array(rts_frm_len, uint, NULL, 0); /* * S2IO device table. @@ -464,10 +477,9 @@ static int init_shared_mem(struct s2io_nic *nic) size += config->tx_cfg[i].fifo_len; } if (size > MAX_AVAILABLE_TXDS) { - DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", - __FUNCTION__); + DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, "); DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); - return FAILURE; + return -EINVAL; } lst_size = (sizeof(TxD_t) * config->max_txds); @@ -547,6 +559,7 @@ static int init_shared_mem(struct s2io_nic *nic) nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); if (!nic->ufo_in_band_v) return -ENOMEM; + memset(nic->ufo_in_band_v, 0, size); /* Allocation and initialization of RXDs in Rings */ size = 0; @@ -1213,7 +1226,7 @@ static int init_nic(struct s2io_nic *nic) break; } - /* Enable Tx FIFO partition 0. */ + /* Enable all configured Tx FIFO partitions */ val64 = readq(&bar0->tx_fifo_partition_0); val64 |= (TX_FIFO_PARTITION_EN); writeq(val64, &bar0->tx_fifo_partition_0); @@ -1650,7 +1663,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) writeq(temp64, &bar0->general_int_mask); /* * If Hercules adapter enable GPIO otherwise - * disabled all PCIX, Flash, MDIO, IIC and GPIO + * disable all PCIX, Flash, MDIO, IIC and GPIO * interrupts for now. 
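As a usage note for the array parameters above: module_param_array() accepts comma-separated lists at load time, so (to pick illustrative values) something like modprobe s2io tx_fifo_num=2 tx_fifo_len=4096,2048 rx_ring_num=2 rx_ring_sz=128,128 configures the first two FIFOs and rings; entries that are not supplied keep their built-in defaults.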
* TODO */ @@ -2119,7 +2132,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in frag->size, PCI_DMA_TODEVICE); } } - txdlp->Host_Control = 0; + memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); return(skb); } @@ -2371,9 +2384,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) skb->data = (void *) (unsigned long)tmp; skb->tail = (void *) (unsigned long)tmp; - ((RxD3_t*)rxdp)->Buffer0_ptr = - pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, + if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) + ((RxD3_t*)rxdp)->Buffer0_ptr = + pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); + else + pci_dma_sync_single_for_device(nic->pdev, + (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); if (nic->rxd_mode == RXD_MODE_3B) { /* Two buffer mode */ @@ -2386,10 +2404,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) (nic->pdev, skb->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - /* Buffer-1 will be dummy buffer not used */ - ((RxD3_t*)rxdp)->Buffer1_ptr = - pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, - PCI_DMA_FROMDEVICE); + /* Buffer-1 will be dummy buffer. Not used */ + if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { + ((RxD3_t*)rxdp)->Buffer1_ptr = + pci_map_single(nic->pdev, + ba->ba_1, BUF1_LEN, + PCI_DMA_FROMDEVICE); + } rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); rxdp->Control_2 |= SET_BUFFER2_SIZE_3 (dev->mtu + 4); @@ -2614,23 +2635,23 @@ no_rx: } #endif +#ifdef CONFIG_NET_POLL_CONTROLLER /** - * s2io_netpoll - Rx interrupt service handler for netpoll support + * s2io_netpoll - netpoll event handler entry point * @dev : pointer to the device structure. * Description: - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. + * This function will be called by upper layer to check for events on the + * interface in situations where interrupts are disabled. It is used for + * specific in-kernel networking tasks, such as remote consoles and kernel + * debugging over the network (example netdump in RedHat). */ - -#ifdef CONFIG_NET_POLL_CONTROLLER static void s2io_netpoll(struct net_device *dev) { nic_t *nic = dev->priv; mac_info_t *mac_control; struct config_param *config; XENA_dev_config_t __iomem *bar0 = nic->bar0; - u64 val64; + u64 val64 = 0xFFFFFFFFFFFFFFFFULL; int i; disable_irq(dev->irq); @@ -2639,9 +2660,17 @@ static void s2io_netpoll(struct net_device *dev) mac_control = &nic->mac_control; config = &nic->config; - val64 = readq(&bar0->rx_traffic_int); writeq(val64, &bar0->rx_traffic_int); + writeq(val64, &bar0->tx_traffic_int); + /* we need to free up the transmitted skbufs or else netpoll will + * run out of skbs and will fail and eventually netpoll application such + * as netdump will fail. 
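The rewritten s2io_netpoll() follows the standard poll_controller contract: it must make progress with the device's interrupt masked, so it acknowledges both RX and TX events, reaps completed TX descriptors (otherwise a long netconsole/netdump session exhausts the skb pool) and then polls every RX ring by hand. For reference, the canonical minimal shape of such a hook in this kernel generation just masks the IRQ and calls the normal interrupt handler directly; names below are illustrative:

	#ifdef CONFIG_NET_POLL_CONTROLLER
	/* called by netpoll when it needs RX/TX progress with IRQs off */
	static void my_netpoll(struct net_device *dev)
	{
		disable_irq(dev->irq);
		my_interrupt(dev->irq, dev, NULL);	/* same work the hard IRQ does */
		enable_irq(dev->irq);
	}
	#endif

	/* wired up at probe time */
	dev->poll_controller = my_netpoll;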
+ */ + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&mac_control->fifos[i]); + + /* check for received packet and indicate up to network */ for (i = 0; i < config->rx_ring_num; i++) rx_intr_handler(&mac_control->rings[i]); @@ -2708,7 +2737,7 @@ static void rx_intr_handler(ring_info_t *ring_data) /* If your are next to put index then it's FIFO full condition */ if ((get_block == put_block) && (get_info.offset + 1) == put_info.offset) { - DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); + DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); break; } skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); @@ -2728,18 +2757,15 @@ static void rx_intr_handler(ring_info_t *ring_data) HEADER_SNAP_SIZE, PCI_DMA_FROMDEVICE); } else if (nic->rxd_mode == RXD_MODE_3B) { - pci_unmap_single(nic->pdev, (dma_addr_t) + pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); - pci_unmap_single(nic->pdev, (dma_addr_t) - ((RxD3_t*)rxdp)->Buffer1_ptr, - BUF1_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(nic->pdev, (dma_addr_t) ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu + 4, PCI_DMA_FROMDEVICE); } else { - pci_unmap_single(nic->pdev, (dma_addr_t) + pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(nic->pdev, (dma_addr_t) @@ -3327,7 +3353,7 @@ static void s2io_reset(nic_t * sp) /* Clear certain PCI/PCI-X fields after reset */ if (sp->device_type == XFRAME_II_DEVICE) { - /* Clear parity err detect bit */ + /* Clear "detected parity error" bit */ pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); /* Clearing PCIX Ecc status register */ @@ -3528,7 +3554,7 @@ static void restore_xmsi_data(nic_t *nic) u64 val64; int i; - for (i=0; i< nic->avail_msix_vectors; i++) { + for (i=0; i < MAX_REQUESTED_MSI_X; i++) { writeq(nic->msix_info[i].addr, &bar0->xmsi_address); writeq(nic->msix_info[i].data, &bar0->xmsi_data); val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); @@ -3547,7 +3573,7 @@ static void store_xmsi_data(nic_t *nic) int i; /* Store and display */ - for (i=0; i< nic->avail_msix_vectors; i++) { + for (i=0; i < MAX_REQUESTED_MSI_X; i++) { val64 = (BIT(15) | vBIT(i, 26, 6)); writeq(val64, &bar0->xmsi_access); if (wait_for_msix_trans(nic, i)) { @@ -3808,13 +3834,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) TxD_t *txdp; TxFIFO_element_t __iomem *tx_fifo; unsigned long flags; -#ifdef NETIF_F_TSO - int mss; -#endif u16 vlan_tag = 0; int vlan_priority = 0; mac_info_t *mac_control; struct config_param *config; + int offload_type; mac_control = &sp->mac_control; config = &sp->config; @@ -3862,13 +3886,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } - txdp->Control_1 = 0; - txdp->Control_2 = 0; + offload_type = s2io_offload_type(skb); #ifdef NETIF_F_TSO - mss = skb_shinfo(skb)->gso_size; - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { txdp->Control_1 |= TXD_TCP_LSO_EN; - txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); + txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); } #endif if (skb->ip_summed == CHECKSUM_HW) { @@ -3886,10 +3908,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) } frg_len = skb->len - skb->data_len; - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { + if (offload_type == SKB_GSO_UDP) { int ufo_size; - ufo_size = skb_shinfo(skb)->gso_size; + ufo_size = s2io_udp_mss(skb); ufo_size &= ~7; txdp->Control_1 |= 
TXD_UFO_EN; txdp->Control_1 |= TXD_UFO_MSS(ufo_size); @@ -3906,16 +3928,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) sp->ufo_in_band_v, sizeof(u64), PCI_DMA_TODEVICE); txdp++; - txdp->Control_1 = 0; - txdp->Control_2 = 0; } txdp->Buffer_Pointer = pci_map_single (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); txdp->Host_Control = (unsigned long) skb; txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); - - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) + if (offload_type == SKB_GSO_UDP) txdp->Control_1 |= TXD_UFO_EN; frg_cnt = skb_shinfo(skb)->nr_frags; @@ -3930,12 +3949,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) (sp->pdev, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) + if (offload_type == SKB_GSO_UDP) txdp->Control_1 |= TXD_UFO_EN; } txdp->Control_1 |= TXD_GATHER_CODE_LAST; - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) + if (offload_type == SKB_GSO_UDP) frg_cnt++; /* as Txd0 was used for inband header */ tx_fifo = mac_control->tx_FIFO_start[queue]; @@ -3944,13 +3963,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | TX_FIFO_LAST_LIST); + if (offload_type) + val64 |= TX_FIFO_SPECIAL_FUNC; -#ifdef NETIF_F_TSO - if (mss) - val64 |= TX_FIFO_SPECIAL_FUNC; -#endif - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) - val64 |= TX_FIFO_SPECIAL_FUNC; writeq(val64, &tx_fifo->List_Control); mmiowb(); @@ -3984,13 +3999,41 @@ s2io_alarm_handle(unsigned long data) mod_timer(&sp->alarm_timer, jiffies + HZ / 2); } +static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) +{ + int rxb_size, level; + + if (!sp->lro) { + rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); + level = rx_buffer_level(sp, rxb_size, rng_n); + + if ((level == PANIC) && (!TASKLET_IN_USE)) { + int ret; + DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); + DBG_PRINT(INTR_DBG, "PANIC levels\n"); + if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { + DBG_PRINT(ERR_DBG, "Out of memory in %s", + __FUNCTION__); + clear_bit(0, (&sp->tasklet_status)); + return -1; + } + clear_bit(0, (&sp->tasklet_status)); + } else if (level == LOW) + tasklet_schedule(&sp->task); + + } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { + DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name); + DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); + } + return 0; +} + static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) { struct net_device *dev = (struct net_device *) dev_id; nic_t *sp = dev->priv; int i; - int ret; mac_info_t *mac_control; struct config_param *config; @@ -4012,35 +4055,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) * reallocate the buffers from the interrupt handler itself, * else schedule a tasklet to reallocate the buffers. 
*/ - for (i = 0; i < config->rx_ring_num; i++) { - if (!sp->lro) { - int rxb_size = atomic_read(&sp->rx_bufs_left[i]); - int level = rx_buffer_level(sp, rxb_size, i); - - if ((level == PANIC) && (!TASKLET_IN_USE)) { - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", - dev->name); - DBG_PRINT(INTR_DBG, "PANIC levels\n"); - if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", - dev->name); - DBG_PRINT(ERR_DBG, " in ISR!!\n"); - clear_bit(0, (&sp->tasklet_status)); - atomic_dec(&sp->isr_cnt); - return IRQ_HANDLED; - } - clear_bit(0, (&sp->tasklet_status)); - } else if (level == LOW) { - tasklet_schedule(&sp->task); - } - } - else if (fill_rx_buffers(sp, i) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", - dev->name); - DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); - break; - } - } + for (i = 0; i < config->rx_ring_num; i++) + s2io_chk_rx_buffers(sp, i); atomic_dec(&sp->isr_cnt); return IRQ_HANDLED; @@ -4051,39 +4067,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs) { ring_info_t *ring = (ring_info_t *)dev_id; nic_t *sp = ring->nic; - struct net_device *dev = (struct net_device *) dev_id; - int rxb_size, level, rng_n; atomic_inc(&sp->isr_cnt); + rx_intr_handler(ring); - - rng_n = ring->ring_no; - if (!sp->lro) { - rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); - level = rx_buffer_level(sp, rxb_size, rng_n); - - if ((level == PANIC) && (!TASKLET_IN_USE)) { - int ret; - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); - DBG_PRINT(INTR_DBG, "PANIC levels\n"); - if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "Out of memory in %s", - __FUNCTION__); - clear_bit(0, (&sp->tasklet_status)); - return IRQ_HANDLED; - } - clear_bit(0, (&sp->tasklet_status)); - } else if (level == LOW) { - tasklet_schedule(&sp->task); - } - } - else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name); - DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); - } + s2io_chk_rx_buffers(sp, ring->ring_no); atomic_dec(&sp->isr_cnt); - return IRQ_HANDLED; } @@ -4248,37 +4238,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) * else schedule a tasklet to reallocate the buffers. 
*/ #ifndef CONFIG_S2IO_NAPI - for (i = 0; i < config->rx_ring_num; i++) { - if (!sp->lro) { - int ret; - int rxb_size = atomic_read(&sp->rx_bufs_left[i]); - int level = rx_buffer_level(sp, rxb_size, i); - - if ((level == PANIC) && (!TASKLET_IN_USE)) { - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", - dev->name); - DBG_PRINT(INTR_DBG, "PANIC levels\n"); - if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", - dev->name); - DBG_PRINT(ERR_DBG, " in ISR!!\n"); - clear_bit(0, (&sp->tasklet_status)); - atomic_dec(&sp->isr_cnt); - writeq(org_mask, &bar0->general_int_mask); - return IRQ_HANDLED; - } - clear_bit(0, (&sp->tasklet_status)); - } else if (level == LOW) { - tasklet_schedule(&sp->task); - } - } - else if (fill_rx_buffers(sp, i) == -ENOMEM) { - DBG_PRINT(ERR_DBG, "%s:Out of memory", - dev->name); - DBG_PRINT(ERR_DBG, " in Rx intr!!\n"); - break; - } - } + for (i = 0; i < config->rx_ring_num; i++) + s2io_chk_rx_buffers(sp, i); #endif writeq(org_mask, &bar0->general_int_mask); atomic_dec(&sp->isr_cnt); @@ -4308,6 +4269,8 @@ static void s2io_updt_stats(nic_t *sp) if (cnt == 5) break; /* Updt failed */ } while(1); + } else { + memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); } } @@ -4942,7 +4905,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) } static void s2io_vpd_read(nic_t *nic) { - u8 vpd_data[256],data; + u8 *vpd_data; + u8 data; int i=0, cnt, fail = 0; int vpd_addr = 0x80; @@ -4955,6 +4919,10 @@ static void s2io_vpd_read(nic_t *nic) vpd_addr = 0x50; } + vpd_data = kmalloc(256, GFP_KERNEL); + if (!vpd_data) + return; + for (i = 0; i < 256; i +=4 ) { pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); @@ -4977,6 +4945,7 @@ static void s2io_vpd_read(nic_t *nic) memset(nic->product_name, 0, vpd_data[1]); memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); } + kfree(vpd_data); } /** @@ -5295,7 +5264,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data) else *data = 0; - return 0; + return *data; } /** @@ -5753,6 +5722,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) return 0; } +static u32 s2io_ethtool_op_get_tso(struct net_device *dev) +{ + return (dev->features & NETIF_F_TSO) != 0; +} +static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); + else + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return 0; +} static struct ethtool_ops netdev_ethtool_ops = { .get_settings = s2io_ethtool_gset, @@ -5773,8 +5755,8 @@ static struct ethtool_ops netdev_ethtool_ops = { .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, #ifdef NETIF_F_TSO - .get_tso = ethtool_op_get_tso, - .set_tso = ethtool_op_set_tso, + .get_tso = s2io_ethtool_op_get_tso, + .set_tso = s2io_ethtool_op_set_tso, #endif .get_ufo = ethtool_op_get_ufo, .set_ufo = ethtool_op_set_ufo, @@ -6337,7 +6319,7 @@ static int s2io_card_up(nic_t * sp) s2io_set_multicast(dev); if (sp->lro) { - /* Initialize max aggregatable pkts based on MTU */ + /* Initialize max aggregatable pkts per session based on MTU */ sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; /* Check if we can use(if specified) user provided value */ if (lro_max_pkts < sp->lro_max_aggr_per_sess) @@ -6438,7 +6420,7 @@ static void s2io_tx_watchdog(struct net_device *dev) * @cksum : FCS checksum of the frame. * @ring_no : the ring from which this RxD was extracted. 
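To make the LRO aggregation limit set in s2io_card_up() concrete: lro_max_aggr_per_sess is the largest IP datagram (64 KB - 1 byte) divided by the MTU, so a standard 1500-byte MTU allows at most 65535 / 1500 = 43 segments to be merged into one session, while a 9000-byte jumbo MTU allows only 7; a smaller user-supplied lro_max_pkts lowers the limit further.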
* Description: - * This function is called by the Tx interrupt serivce routine to perform + * This function is called by the Rx interrupt serivce routine to perform * some OS related operations on the SKB before passing it to the upper * layers. It mainly checks if the checksum is OK, if so adds it to the * SKBs cksum variable, increments the Rx packet count and passes the SKB @@ -6698,33 +6680,6 @@ static void s2io_init_pci(nic_t * sp) pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); } -MODULE_AUTHOR("Raghavendra Koushik "); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -module_param(tx_fifo_num, int, 0); -module_param(rx_ring_num, int, 0); -module_param(rx_ring_mode, int, 0); -module_param_array(tx_fifo_len, uint, NULL, 0); -module_param_array(rx_ring_sz, uint, NULL, 0); -module_param_array(rts_frm_len, uint, NULL, 0); -module_param(use_continuous_tx_intrs, int, 1); -module_param(rmac_pause_time, int, 0); -module_param(mc_pause_threshold_q0q3, int, 0); -module_param(mc_pause_threshold_q4q7, int, 0); -module_param(shared_splits, int, 0); -module_param(tmac_util_period, int, 0); -module_param(rmac_util_period, int, 0); -module_param(bimodal, bool, 0); -module_param(l3l4hdr_size, int , 0); -#ifndef CONFIG_S2IO_NAPI -module_param(indicate_max_pkts, int, 0); -#endif -module_param(rxsync_frequency, int, 0); -module_param(intr_type, int, 0); -module_param(lro, int, 0); -module_param(lro_max_pkts, int, 0); - static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) { if ( tx_fifo_num > 8) { @@ -6832,8 +6787,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) } if (dev_intr_type != MSI_X) { if (pci_request_regions(pdev, s2io_driver_name)) { - DBG_PRINT(ERR_DBG, "Request Regions failed\n"), - pci_disable_device(pdev); + DBG_PRINT(ERR_DBG, "Request Regions failed\n"); + pci_disable_device(pdev); return -ENODEV; } } @@ -6957,7 +6912,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* initialize the shared memory used by the NIC and the host */ if (init_shared_mem(sp)) { DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", - __FUNCTION__); + dev->name); ret = -ENOMEM; goto mem_alloc_failed; } @@ -7094,6 +7049,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) dev->addr_len = ETH_ALEN; memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); + /* reset Nic and bring it to known state */ + s2io_reset(sp); + /* * Initialize the tasklet status and link state flags * and the card state parameter @@ -7131,11 +7089,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) goto register_failed; } s2io_vpd_read(sp); - DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name); - DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n", - get_xena_rev_id(sp->pdev), - s2io_driver_version); DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); + DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, + sp->product_name, get_xena_rev_id(sp->pdev)); + DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, + s2io_driver_version); DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, sp->def_mac_addr[0].mac_addr[0], @@ -7436,8 +7394,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, if (ip->ihl != 5) /* IP has options */ return -1; + /* If we see CE codepoint in IP header, packet is not mergeable */ + if (INET_ECN_is_ce(ipv4_get_dsfield(ip))) + return -1; + + /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ if (tcp->urg || 
tcp->psh || tcp->rst || tcp->syn || tcp->fin || - !tcp->ack) { + tcp->ece || tcp->cwr || !tcp->ack) { /* * Currently recognize only the ack control word and * any other control field being set would result in @@ -7591,18 +7554,16 @@ static void queue_rx_frame(struct sk_buff *skb) static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len) { - struct sk_buff *tmp, *first = lro->parent; + struct sk_buff *first = lro->parent; first->len += tcp_len; first->data_len = lro->frags_len; skb_pull(skb, (skb->len - tcp_len)); - if ((tmp = skb_shinfo(first)->frag_list)) { - while (tmp->next) - tmp = tmp->next; - tmp->next = skb; - } + if (skb_shinfo(first)->frag_list) + lro->last_frag->next = skb; else skb_shinfo(first)->frag_list = skb; + lro->last_frag = skb; sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; return; } diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 217097bc22f1..5ed49c3be1e9 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -719,6 +719,7 @@ struct msix_info_st { /* Data structure to represent a LRO session */ typedef struct lro { struct sk_buff *parent; + struct sk_buff *last_frag; u8 *l2h; struct iphdr *iph; struct tcphdr *tcph; @@ -1011,4 +1012,13 @@ static void clear_lro_session(lro_t *lro); static void queue_rx_frame(struct sk_buff *skb); static void update_L3L4_header(nic_t *sp, lro_t *lro); static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); + +#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size +#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size +#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type + +#define S2IO_PARM_INT(X, def_val) \ + static unsigned int X = def_val;\ + module_param(X , uint, 0); + #endif /* _S2IO_H */ diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index efd0f235020f..01392bca0223 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c @@ -742,7 +742,7 @@ module_param(irq, int, 0); MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address"); MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); -int init_module(void) +int __init init_module(void) { dev_seeq = seeq8005_probe(-1); if (IS_ERR(dev_seeq)) diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 82200bfaa8ed..ad878dfddef4 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev, /* Chip internal frequency for clock calculations */ static inline u32 hwkhz(const struct skge_hw *hw) { - if (hw->chip_id == CHIP_ID_GENESIS) - return 53215; /* or: 53.125 MHz */ - else - return 78215; /* or: 78.125 MHz */ + return (hw->chip_id == CHIP_ID_GENESIS) ? 
53125 : 78125; } /* Chip HZ to microseconds */ @@ -2214,6 +2211,7 @@ static int skge_up(struct net_device *dev) skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); skge_led(skge, LED_MODE_ON); + netif_poll_enable(dev); return 0; free_rx_ring: @@ -2282,6 +2280,7 @@ static int skge_down(struct net_device *dev) skge_led(skge, LED_MODE_OFF); + netif_poll_disable(dev); skge_tx_clean(skge); skge_rx_clean(skge); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index de91609ca112..933e87f1cc68 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -233,6 +233,8 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) if (hw->ports > 1) reg1 |= PCI_Y2_PHY2_COMA; } + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + udelay(100); if (hw->chip_id == CHIP_ID_YUKON_EC_U) { sky2_pci_write32(hw, PCI_DEV_REG3, 0); @@ -242,9 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) sky2_pci_write32(hw, PCI_DEV_REG5, 0); } - sky2_pci_write32(hw, PCI_DEV_REG1, reg1); - udelay(100); - break; case PCI_D3hot: diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index d37bd860b336..0b15290df278 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -1092,6 +1092,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs /* Spurious interrupt check */ if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { + spin_unlock_irqrestore(&lp->lock, flags); return IRQ_NONE; } diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 3d8dcb6c8758..cf62373b808b 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev) DBG(2, "%s: %s\n", dev->name, __FUNCTION__); /* Disable all interrupts, block TX tasklet */ - spin_lock(&lp->lock); + spin_lock_irq(&lp->lock); SMC_SELECT_BANK(2); SMC_SET_INT_MASK(0); pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; - spin_unlock(&lp->lock); + spin_unlock_irq(&lp->lock); /* free any pending tx skb */ if (pending_skb) { @@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev) DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); /* no more interrupts for me */ - spin_lock(&lp->lock); + spin_lock_irq(&lp->lock); SMC_SELECT_BANK(2); SMC_SET_INT_MASK(0); pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; - spin_unlock(&lp->lock); + spin_unlock_irq(&lp->lock); if (pending_skb) dev_kfree_skb(pending_skb); diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 4ec4b4d23ae5..7aa7fbac8224 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -136,14 +136,9 @@ #define SMC_CAN_USE_32BIT 0 #define SMC_IO_SHIFT 0 #define SMC_NOWAIT 1 -#define SMC_USE_PXA_DMA 1 -#define SMC_inb(a, r) readb((a) + (r)) #define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) #define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) @@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_IO_SHIFT 0 #define SMC_NOWAIT 1 -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) #define SMC_inw(a, r) readw((a) + (r)) #define SMC_outw(v, a, r) writew(v, (a) + (r)) #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define 
SMC_inl(a, r) readl((a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) #include #include @@ -372,6 +361,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, #define SMC_IRQ_FLAGS (0) +#elif defined(CONFIG_ARCH_VERSATILE) + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) + +#define SMC_IRQ_FLAGS (0) + #else #define SMC_CAN_USE_8BIT 1 diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 647f62e9707d..88907218457a 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -1611,13 +1611,12 @@ spider_net_open(struct net_device *netdev) int result; result = -ENOMEM; - if (spider_net_init_chain(card, &card->tx_chain, - card->descr, - PCI_DMA_TODEVICE, tx_descriptors)) + if (spider_net_init_chain(card, &card->tx_chain, card->descr, + PCI_DMA_TODEVICE, card->tx_desc)) goto alloc_tx_failed; if (spider_net_init_chain(card, &card->rx_chain, - card->descr + tx_descriptors, - PCI_DMA_FROMDEVICE, rx_descriptors)) + card->descr + card->rx_desc, + PCI_DMA_FROMDEVICE, card->rx_desc)) goto alloc_rx_failed; /* allocate rx skbs */ @@ -2005,6 +2004,9 @@ spider_net_setup_netdev(struct spider_net_card *card) card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; + card->tx_desc = tx_descriptors; + card->rx_desc = rx_descriptors; + spider_net_setup_netdev_ops(netdev); netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index f6dcf180ae3d..30407cdf0892 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -440,6 +440,9 @@ struct spider_net_card { /* for ethtool */ int msg_enable; + int rx_desc; + int tx_desc; + struct spider_net_descr descr[0]; }; diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c index a5bb0b7633af..02209222b8c9 100644 --- a/drivers/net/spider_net_ethtool.c +++ b/drivers/net/spider_net_ethtool.c @@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data) return 0; } +static void +spider_net_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + struct spider_net_card *card = netdev->priv; + + ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; + ering->tx_pending = card->tx_desc; + ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; + ering->rx_pending = card->rx_desc; +} + struct ethtool_ops spider_net_ethtool_ops = { .get_settings = spider_net_ethtool_get_settings, .get_drvinfo = spider_net_ethtool_get_drvinfo, @@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = { .set_rx_csum = spider_net_ethtool_set_rx_csum, .get_tx_csum = spider_net_ethtool_get_tx_csum, .set_tx_csum = spider_net_ethtool_set_tx_csum, + .get_ringparam = spider_net_ethtool_get_ringparam, }; diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index ac17377b3e9f..698568e751da 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -107,7 +107,7 @@ static char *media[MAX_UNITS]; #endif /* These identify the driver base version and 
may not be removed. */ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" KERN_INFO " http://www.scyld.com/network/sundance.html\n"; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 1ef9fd39a79a..0e3fdf7c6dd3 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void) { if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || (idprom->id_machtype == (SM_SUN4|SM_4_470))) { - memset(&sun4_sdev, 0, sizeof(sdev)); + memset(&sun4_sdev, 0, sizeof(struct sbus_dev)); sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; sun4_sdev.irqs[0] = 6; return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); @@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void) static int __exit sunlance_sun4_remove(void) { - struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); + struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); struct net_device *net_dev = lp->dev; unregister_netdevice(net_dev); - lance_free_hwresources(root_lance_dev); + lance_free_hwresources(lp); free_netdev(net_dev); - dev_set_drvdata(&sun4_sdev->dev, NULL); + dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL); return 0; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 1b8138f641e3..eafabb253f08 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -68,8 +68,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.63" -#define DRV_MODULE_RELDATE "July 25, 2006" +#define DRV_MODULE_VERSION "3.65" +#define DRV_MODULE_RELDATE "August 07, 2006" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -123,9 +123,6 @@ TG3_RX_RCB_RING_SIZE(tp)) #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ TG3_TX_RING_SIZE) -#define TX_BUFFS_AVAIL(TP) \ - ((TP)->tx_pending - \ - (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1))) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) @@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp) spin_unlock(&tp->lock); } +static inline u32 tg3_tx_avail(struct tg3 *tp) +{ + smp_mb(); + return (tp->tx_pending - + ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); +} + /* Tigon3 never reports partial packet sends. So we do not * need special logic to handle SKBs that have not had all * of their frags sent yet, like SunGEM does. @@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp) tp->tx_cons = sw_idx; - if (unlikely(netif_queue_stopped(tp->dev))) { - spin_lock(&tp->tx_lock); + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); + + if (unlikely(netif_queue_stopped(tp->dev) && + (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) { + netif_tx_lock(tp->dev); if (netif_queue_stopped(tp->dev) && - (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) + (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); + netif_tx_unlock(tp->dev); } } @@ -3097,11 +3109,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, * Callers depend upon this behavior and assume that * we leave everything unchanged if we fail. 
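The new tg3_tx_avail() helper above replaces the open-coded TX_BUFFS_AVAIL() macro: the free descriptor count is tx_pending minus the number still in flight, and the mask relies on TG3_TX_RING_SIZE being a power of two so the subtraction comes out right even after tx_prod wraps past tx_cons. A small standalone userspace sketch of the same arithmetic; the ring size and index values below are made up for illustration only:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 512	/* stand-in for TG3_TX_RING_SIZE; must be a power of two */

/* Same shape as tg3_tx_avail(): pending descriptors minus those in flight. */
static uint32_t tx_avail(uint32_t tx_pending, uint32_t prod, uint32_t cons)
{
	return tx_pending - ((prod - cons) & (RING_SIZE - 1));
}

int main(void)
{
	/* hypothetical snapshot where the producer index has already wrapped */
	uint32_t prod = 10, cons = 500, pending = RING_SIZE - 1;

	/* (10 - 500) & 511 == 22 in flight, so 489 descriptors are free */
	printf("%u descriptors available\n", (unsigned)tx_avail(pending, prod, cons));
	return 0;
}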
*/ - skb = dev_alloc_skb(skb_size); + skb = netdev_alloc_skb(tp->dev, skb_size); if (skb == NULL) return -ENOMEM; - skb->dev = tp->dev; skb_reserve(skb, tp->rx_offset); mapping = pci_map_single(tp->pdev, skb->data, @@ -3270,11 +3281,10 @@ static int tg3_rx(struct tg3 *tp, int budget) tg3_recycle_rx(tp, opaque_key, desc_idx, *post_ptr); - copy_skb = dev_alloc_skb(len + 2); + copy_skb = netdev_alloc_skb(tp->dev, len + 2); if (copy_skb == NULL) goto drop_it_no_recycle; - copy_skb->dev = tp->dev; skb_reserve(copy_skb, 2); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); @@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tp->tx_prod = entry; - if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { - spin_lock(&tp->tx_lock); + if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); } out_unlock: @@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) struct sk_buff *segs, *nskb; /* Estimate the number of fragments in the worst case */ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { netif_stop_queue(tp->dev); return NETDEV_TX_BUSY; } @@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! 
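The RX hunks above swap dev_alloc_skb() for netdev_alloc_skb(), which takes the owning net_device and sets skb->dev itself, which is why the explicit assignment after the allocation is dropped. A minimal sketch of the resulting allocation pattern; the helper name is illustrative and not taken from the driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Allocate and align an RX buffer for @dev; returns NULL on failure. */
static struct sk_buff *rx_refill_skb(struct net_device *dev, unsigned int size)
{
	/* netdev_alloc_skb() associates the skb with @dev, so no separate
	 * "skb->dev = dev" is needed afterwards. */
	struct sk_buff *skb = netdev_alloc_skb(dev, size + NET_IP_ALIGN);

	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);	/* keep the IP header aligned */
	return skb;
}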
*/ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tp->tx_prod = entry; - if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { - spin_lock(&tp->tx_lock); + if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); } out_unlock: @@ -8618,7 +8624,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) err = -EIO; tx_len = 1514; - skb = dev_alloc_skb(tx_len); + skb = netdev_alloc_skb(tp->dev, tx_len); if (!skb) return -ENOMEM; @@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; #endif spin_lock_init(&tp->lock); - spin_lock_init(&tp->tx_lock); spin_lock_init(&tp->indirect_lock); INIT_WORK(&tp->reset_task, tg3_reset_task, tp); diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index ba2c98711c88..3ecf356cfb08 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -2079,9 +2079,9 @@ struct tg3 { * lock: Held during reset, PHY access, timer, and when * updating tg3_flags and tg3_flags2. * - * tx_lock: Held during tg3_start_xmit and tg3_tx only - * when calling netif_[start|stop]_queue. - * tg3_start_xmit is protected by netif_tx_lock. + * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds + * netif_tx_lock when it needs to call + * netif_wake_queue. * * Both of these locks are to be held with BH safety. 
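The tg3.h comment above documents the new locking rule: the private tx_lock is gone, tg3_start_xmit() relies on the netif_tx_lock already held by the core, and tg3_tx() only takes netif_tx_lock for the rare wake-up, using smp_mb() so its tx_cons update is visible before it tests the queue state. A condensed sketch of that stop/wake handshake; the struct, threshold and helper names are placeholders, not tg3's own:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#define SKETCH_RING_SIZE	512			/* power of two */
#define SKETCH_WAKE_THRESH	(MAX_SKB_FRAGS + 1)	/* placeholder threshold */

struct sketch_ring {
	u32 tx_prod;
	u32 tx_cons;
	u32 tx_pending;
};

static inline u32 sketch_tx_avail(struct sketch_ring *r)
{
	smp_mb();	/* pairs with the barrier in the completion path */
	return r->tx_pending -
	       ((r->tx_prod - r->tx_cons) & (SKETCH_RING_SIZE - 1));
}

/* xmit tail: already runs under netif_tx_lock, taken by the networking core */
static void sketch_xmit_tail(struct net_device *dev, struct sketch_ring *r)
{
	if (unlikely(sketch_tx_avail(r) <= MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
		/* completions may have freed slots since the check above */
		if (sketch_tx_avail(r) > SKETCH_WAKE_THRESH)
			netif_wake_queue(dev);
	}
}

/* completion path: lockless, except when the queue must be woken */
static void sketch_tx_complete(struct net_device *dev, struct sketch_ring *r,
			       u32 new_cons)
{
	r->tx_cons = new_cons;
	smp_mb();	/* publish tx_cons before looking at the queue state */

	if (unlikely(netif_queue_stopped(dev) &&
		     sketch_tx_avail(r) > SKETCH_WAKE_THRESH)) {
		netif_tx_lock(dev);
		if (netif_queue_stopped(dev) &&
		    sketch_tx_avail(r) > SKETCH_WAKE_THRESH)
			netif_wake_queue(dev);
		netif_tx_unlock(dev);
	}
}

The double check under netif_tx_lock mirrors the patch: without it, a wake-up racing with another CPU stopping the queue could leave the queue stopped with free descriptors available.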
* @@ -2118,8 +2118,6 @@ struct tg3 { u32 tx_cons; u32 tx_pending; - spinlock_t tx_lock; - struct tg3_tx_buffer_desc *tx_ring; struct tx_ring_info *tx_buffers; dma_addr_t tx_desc_mapping; diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 9f491563944e..4470025ff7f8 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */ /* version and credits */ #ifndef PCMCIA -static char version[] __initdata = +static char version[] __devinitdata = "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" " v2.1.125 10/20/98 Paul Norton \n" " v2.2.0 12/30/98 Joel Sloan \n" @@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0}; static int __devinitdata turbo_searched = 0; #ifndef PCMCIA -static __u32 ibmtr_mem_base __initdata = 0xd0000; +static __u32 ibmtr_mem_base __devinitdata = 0xd0000; #endif static void __devinit PrtChanID(char *pcid, short stride) diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index cd2e0251e2bc..85a7f797d343 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param(ringspeed, int, 0); -static struct net_device *setup_card(int n) +static struct net_device * __init setup_card(int n) { struct net_device *dev = alloc_trdev(sizeof(struct net_local)); int err; @@ -5696,9 +5696,8 @@ out: free_netdev(dev); return ERR_PTR(err); } - -int init_module(void) +int __init init_module(void) { int i, found = 0; struct net_device *dev; diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 7f414815cc62..eba9083da146 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -138,7 +138,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; #include /* These identify the driver base version and may not be removed. */ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker \n" KERN_INFO " http://www.scyld.com/network/drivers.html\n"; diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index f874e4f6ccf6..cf43390d2c80 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p static int __init xircom_init(void) { - pci_register_driver(&xircom_ops); - return 0; + return pci_register_driver(&xircom_ops); } static void __exit xircom_exit(void) diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c new file mode 100644 index 000000000000..47f49ef72bdc --- /dev/null +++ b/drivers/net/ucc_geth.c @@ -0,0 +1,4278 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish + * + * Description: + * QE UCC Gigabit Ethernet Driver + * + * Changelog: + * Jul 6, 2006 Li Yang + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
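Among the hunks above, xircom_init() stops throwing away the status of pci_register_driver() and returns it instead, so a failed registration now fails the module load cleanly. The same pattern in a minimal, self-contained form; the driver object and function names here are placeholders:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver;	/* placeholder driver object */

static int __init example_init(void)
{
	/* propagate the error: module load fails if registration fails */
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");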
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ucc_geth.h" +#include "ucc_geth_phy.h" + +#undef DEBUG + +#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:June 20, 2006" +#define DRV_NAME "ucc_geth" + +#define ugeth_printk(level, format, arg...) \ + printk(level format "\n", ## arg) + +#define ugeth_dbg(format, arg...) \ + ugeth_printk(KERN_DEBUG , format , ## arg) +#define ugeth_err(format, arg...) \ + ugeth_printk(KERN_ERR , format , ## arg) +#define ugeth_info(format, arg...) \ + ugeth_printk(KERN_INFO , format , ## arg) +#define ugeth_warn(format, arg...) \ + ugeth_printk(KERN_WARNING , format , ## arg) + +#ifdef UGETH_VERBOSE_DEBUG +#define ugeth_vdbg ugeth_dbg +#else +#define ugeth_vdbg(fmt, args...) do { } while (0) +#endif /* UGETH_VERBOSE_DEBUG */ + +static DEFINE_SPINLOCK(ugeth_lock); + +static ucc_geth_info_t ugeth_primary_info = { + .uf_info = { + .bd_mem_part = MEM_PART_SYSTEM, + .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, + .max_rx_buf_length = 1536, +/* FIXME: should be changed in run time for 1G and 100M */ +#ifdef CONFIG_UGETH_HAS_GIGA + .urfs = UCC_GETH_URFS_GIGA_INIT, + .urfet = UCC_GETH_URFET_GIGA_INIT, + .urfset = UCC_GETH_URFSET_GIGA_INIT, + .utfs = UCC_GETH_UTFS_GIGA_INIT, + .utfet = UCC_GETH_UTFET_GIGA_INIT, + .utftt = UCC_GETH_UTFTT_GIGA_INIT, +#else + .urfs = UCC_GETH_URFS_INIT, + .urfet = UCC_GETH_URFET_INIT, + .urfset = UCC_GETH_URFSET_INIT, + .utfs = UCC_GETH_UTFS_INIT, + .utfet = UCC_GETH_UTFET_INIT, + .utftt = UCC_GETH_UTFTT_INIT, +#endif + .ufpt = 256, + .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, + .tenc = UCC_FAST_TX_ENCODING_NRZ, + .renc = UCC_FAST_RX_ENCODING_NRZ, + .tcrc = UCC_FAST_16_BIT_CRC, + .synl = UCC_FAST_SYNC_LEN_NOT_USED, + }, + .numQueuesTx = 1, + .numQueuesRx = 1, + .extendedFilteringChainPointer = ((uint32_t) NULL), + .typeorlen = 3072 /*1536 */ , + .nonBackToBackIfgPart1 = 0x40, + .nonBackToBackIfgPart2 = 0x60, + .miminumInterFrameGapEnforcement = 0x50, + .backToBackInterFrameGap = 0x60, + .mblinterval = 128, + .nortsrbytetime = 5, + .fracsiz = 1, + .strictpriorityq = 0xff, + .altBebTruncation = 0xa, + .excessDefer = 1, + .maxRetransmission = 0xf, + .collisionWindow = 0x37, + .receiveFlowControl = 1, + .maxGroupAddrInHash = 4, + .maxIndAddrInHash = 4, + .prel = 7, + .maxFrameLength = 1518, + .minFrameLength = 64, + .maxD1Length = 1520, + .maxD2Length = 1520, + .vlantype = 0x8100, + .ecamptr = ((uint32_t) NULL), + .eventRegMask = UCCE_OTHER, + .pausePeriod = 0xf000, + .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, + .bdRingLenTx = { + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN}, + + .bdRingLenRx = { + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN}, + + .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, + .largestexternallookupkeysize = + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, + .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE, + .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, + .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, + .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, + .aufc = 
UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, + .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, + .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4, + .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4, + .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, + .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, +}; + +static ucc_geth_info_t ugeth_info[8]; + +#ifdef DEBUG +static void mem_disp(u8 *addr, int size) +{ + u8 *i; + int size16Aling = (size >> 4) << 4; + int size4Aling = (size >> 2) << 2; + int notAlign = 0; + if (size % 16) + notAlign = 1; + + for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) + printk("0x%08x: %08x %08x %08x %08x\r\n", + (u32) i, + *((u32 *) (i)), + *((u32 *) (i + 4)), + *((u32 *) (i + 8)), *((u32 *) (i + 12))); + if (notAlign == 1) + printk("0x%08x: ", (u32) i); + for (; (u32) i < (u32) addr + size4Aling; i += 4) + printk("%08x ", *((u32 *) (i))); + for (; (u32) i < (u32) addr + size; i++) + printk("%02x", *((u8 *) (i))); + if (notAlign == 1) + printk("\r\n"); +} +#endif /* DEBUG */ + +#ifdef CONFIG_UGETH_FILTERING +static void enqueue(struct list_head *node, struct list_head *lh) +{ + unsigned long flags; + + spin_lock_irqsave(ugeth_lock, flags); + list_add_tail(node, lh); + spin_unlock_irqrestore(ugeth_lock, flags); +} +#endif /* CONFIG_UGETH_FILTERING */ + +static struct list_head *dequeue(struct list_head *lh) +{ + unsigned long flags; + + spin_lock_irqsave(ugeth_lock, flags); + if (!list_empty(lh)) { + struct list_head *node = lh->next; + list_del(node); + spin_unlock_irqrestore(ugeth_lock, flags); + return node; + } else { + spin_unlock_irqrestore(ugeth_lock, flags); + return NULL; + } +} + +static int get_interface_details(enet_interface_e enet_interface, + enet_speed_e *speed, + int *r10m, + int *rmm, + int *rpm, + int *tbi, int *limited_to_full_duplex) +{ + /* Analyze enet_interface according to Interface Mode + Configuration table */ + switch (enet_interface) { + case ENET_10_MII: + *speed = ENET_SPEED_10BT; + break; + case ENET_10_RMII: + *speed = ENET_SPEED_10BT; + *r10m = 1; + *rmm = 1; + break; + case ENET_10_RGMII: + *speed = ENET_SPEED_10BT; + *rpm = 1; + *r10m = 1; + *limited_to_full_duplex = 1; + break; + case ENET_100_MII: + *speed = ENET_SPEED_100BT; + break; + case ENET_100_RMII: + *speed = ENET_SPEED_100BT; + *rmm = 1; + break; + case ENET_100_RGMII: + *speed = ENET_SPEED_100BT; + *rpm = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_GMII: + *speed = ENET_SPEED_1000BT; + *limited_to_full_duplex = 1; + break; + case ENET_1000_RGMII: + *speed = ENET_SPEED_1000BT; + *rpm = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_TBI: + *speed = ENET_SPEED_1000BT; + *tbi = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_RTBI: + *speed = ENET_SPEED_1000BT; + *rpm = 1; + *tbi = 1; + *limited_to_full_duplex = 1; + break; + default: + return -EINVAL; + break; + } + + return 0; +} + +static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd) +{ + struct sk_buff *skb = NULL; + + skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT); + + if (skb == NULL) + return NULL; + + /* We need the data buffer to be aligned properly. 
We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, + UCC_GETH_RX_DATA_BUF_ALIGNMENT - + (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - + 1))); + + skb->dev = ugeth->dev; + + BD_BUFFER_SET(bd, + dma_map_single(NULL, + skb->data, + ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE)); + + BD_STATUS_AND_LENGTH_SET(bd, + (R_E | R_I | + (BD_STATUS_AND_LENGTH(bd) & R_W))); + + return skb; +} + +static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ) +{ + u8 *bd; + u32 bd_status; + struct sk_buff *skb; + int i; + + bd = ugeth->p_rx_bd_ring[rxQ]; + i = 0; + + do { + bd_status = BD_STATUS_AND_LENGTH(bd); + skb = get_new_skb(ugeth, bd); + + if (!skb) /* If can not allocate data buffer, + abort. Cleanup will be elsewhere */ + return -ENOMEM; + + ugeth->rx_skbuff[rxQ][i] = skb; + + /* advance the BD pointer */ + bd += UCC_GETH_SIZE_OF_BD; + i++; + } while (!(bd_status & R_W)); + + return 0; +} + +static int fill_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + u32 thread_size, + u32 thread_alignment, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + if ((snum = qe_get_snum()) < 0) { + ugeth_err("fill_init_enet_entries: Can not get SNUM."); + return snum; + } + if ((i == 0) && skip_page_for_first_entry) + /* First entry of Rx does not have page */ + init_enet_offset = 0; + else { + init_enet_offset = + qe_muram_alloc(thread_size, thread_alignment); + if (IS_MURAM_ERR(init_enet_offset)) { + ugeth_err + ("fill_init_enet_entries: Can not allocate DPRAM memory."); + qe_put_snum((u8) snum); + return -ENOMEM; + } + } + *(p_start++) = + ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset + | risc; + } + + return 0; +} + +static int return_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + /* Check that this entry was actually valid -- + needed in case failed in allocations */ + if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + snum = + (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + ENET_INIT_PARAM_SNUM_SHIFT; + qe_put_snum((u8) snum); + if (!((i == 0) && skip_page_for_first_entry)) { + /* First entry of Rx does not have page */ + init_enet_offset = + (in_be32(p_start) & + ENET_INIT_PARAM_PTR_MASK); + qe_muram_free(init_enet_offset); + } + *(p_start++) = 0; /* Just for cosmetics */ + } + } + + return 0; +} + +#ifdef DEBUG +static int dump_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + u32 thread_size, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + /* Check that this entry was actually valid -- + needed in case failed in allocations */ + if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + snum = + (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + ENET_INIT_PARAM_SNUM_SHIFT; + qe_put_snum((u8) snum); + if (!((i == 0) && skip_page_for_first_entry)) { + /* First entry of Rx does not have page */ + init_enet_offset = + (in_be32(p_start) & + ENET_INIT_PARAM_PTR_MASK); + ugeth_info("Init enet entry %d:", i); + ugeth_info("Base address: 0x%08x", + (u32) + qe_muram_addr(init_enet_offset)); + 
mem_disp(qe_muram_addr(init_enet_offset), + thread_size); + } + p_start++; + } + } + + return 0; +} +#endif + +#ifdef CONFIG_UGETH_FILTERING +static enet_addr_container_t *get_enet_addr_container(void) +{ + enet_addr_container_t *enet_addr_cont; + + /* allocate memory */ + enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL); + if (!enet_addr_cont) { + ugeth_err("%s: No memory for enet_addr_container_t object.", + __FUNCTION__); + return NULL; + } + + return enet_addr_cont; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont) +{ + kfree(enet_addr_cont); +} + +#ifdef CONFIG_UGETH_FILTERING +static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr, u8 paddr_num) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + + if (!(paddr_num < NUM_OF_PADDRS)) { + ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); + return -EINVAL; + } + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + /* Ethernet frames are defined in Little Endian mode, */ + /* therefore to insert the address we reverse the bytes. */ + out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, + (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | + (u16) (*p_enet_addr)[4])); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, + (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | + (u16) (*p_enet_addr)[2])); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, + (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | + (u16) (*p_enet_addr)[0])); + + return 0; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + + if (!(paddr_num < NUM_OF_PADDRS)) { + ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); + return -EINVAL; + } + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + /* Writing address ff.ff.ff.ff.ff.ff disables address + recognition for this register */ + out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); + + return 0; +} + +static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + u32 cecr_subblock; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + + /* Ethernet frames are defined in Little Endian mode, + therefor to insert */ + /* the address to the hash (Big Endian mode), we reverse the bytes.*/ + out_be16(&p_82xx_addr_filt->taddr.h, + (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | + (u16) (*p_enet_addr)[4])); + out_be16(&p_82xx_addr_filt->taddr.m, + (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | + (u16) (*p_enet_addr)[2])); + out_be16(&p_82xx_addr_filt->taddr.l, + (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | + (u16) (*p_enet_addr)[0])); + + qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); +} + +#ifdef CONFIG_UGETH_MAGIC_PACKET +static void magic_packet_detection_enable(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + ucc_geth_t *ug_regs; + u32 maccfg2, uccm; + + uccf = ugeth->uccf; + ug_regs = ugeth->ug_regs; + + /* Enable interrupts for magic 
packet detection */ + uccm = in_be32(uccf->p_uccm); + uccm |= UCCE_MPD; + out_be32(uccf->p_uccm, uccm); + + /* Enable magic packet detection */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 |= MACCFG2_MPE; + out_be32(&ug_regs->maccfg2, maccfg2); +} + +static void magic_packet_detection_disable(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + ucc_geth_t *ug_regs; + u32 maccfg2, uccm; + + uccf = ugeth->uccf; + ug_regs = ugeth->ug_regs; + + /* Disable interrupts for magic packet detection */ + uccm = in_be32(uccf->p_uccm); + uccm &= ~UCCE_MPD; + out_be32(uccf->p_uccm, uccm); + + /* Disable magic packet detection */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 &= ~MACCFG2_MPE; + out_be32(&ug_regs->maccfg2, maccfg2); +} +#endif /* MAGIC_PACKET */ + +static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2) +{ + return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); +} + +#ifdef DEBUG +static void get_statistics(ucc_geth_private_t *ugeth, + ucc_geth_tx_firmware_statistics_t * + tx_firmware_statistics, + ucc_geth_rx_firmware_statistics_t * + rx_firmware_statistics, + ucc_geth_hardware_statistics_t *hardware_statistics) +{ + ucc_fast_t *uf_regs; + ucc_geth_t *ug_regs; + ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; + ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; + + ug_regs = ugeth->ug_regs; + uf_regs = (ucc_fast_t *) ug_regs; + p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; + p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; + + /* Tx firmware only if user handed pointer and driver actually + gathers Tx firmware statistics */ + if (tx_firmware_statistics && p_tx_fw_statistics_pram) { + tx_firmware_statistics->sicoltx = + in_be32(&p_tx_fw_statistics_pram->sicoltx); + tx_firmware_statistics->mulcoltx = + in_be32(&p_tx_fw_statistics_pram->mulcoltx); + tx_firmware_statistics->latecoltxfr = + in_be32(&p_tx_fw_statistics_pram->latecoltxfr); + tx_firmware_statistics->frabortduecol = + in_be32(&p_tx_fw_statistics_pram->frabortduecol); + tx_firmware_statistics->frlostinmactxer = + in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); + tx_firmware_statistics->carriersenseertx = + in_be32(&p_tx_fw_statistics_pram->carriersenseertx); + tx_firmware_statistics->frtxok = + in_be32(&p_tx_fw_statistics_pram->frtxok); + tx_firmware_statistics->txfrexcessivedefer = + in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); + tx_firmware_statistics->txpkts256 = + in_be32(&p_tx_fw_statistics_pram->txpkts256); + tx_firmware_statistics->txpkts512 = + in_be32(&p_tx_fw_statistics_pram->txpkts512); + tx_firmware_statistics->txpkts1024 = + in_be32(&p_tx_fw_statistics_pram->txpkts1024); + tx_firmware_statistics->txpktsjumbo = + in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); + } + + /* Rx firmware only if user handed pointer and driver actually + * gathers Rx firmware statistics */ + if (rx_firmware_statistics && p_rx_fw_statistics_pram) { + int i; + rx_firmware_statistics->frrxfcser = + in_be32(&p_rx_fw_statistics_pram->frrxfcser); + rx_firmware_statistics->fraligner = + in_be32(&p_rx_fw_statistics_pram->fraligner); + rx_firmware_statistics->inrangelenrxer = + in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); + rx_firmware_statistics->outrangelenrxer = + in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); + rx_firmware_statistics->frtoolong = + in_be32(&p_rx_fw_statistics_pram->frtoolong); + rx_firmware_statistics->runt = + in_be32(&p_rx_fw_statistics_pram->runt); + rx_firmware_statistics->verylongevent = + 
in_be32(&p_rx_fw_statistics_pram->verylongevent); + rx_firmware_statistics->symbolerror = + in_be32(&p_rx_fw_statistics_pram->symbolerror); + rx_firmware_statistics->dropbsy = + in_be32(&p_rx_fw_statistics_pram->dropbsy); + for (i = 0; i < 0x8; i++) + rx_firmware_statistics->res0[i] = + p_rx_fw_statistics_pram->res0[i]; + rx_firmware_statistics->mismatchdrop = + in_be32(&p_rx_fw_statistics_pram->mismatchdrop); + rx_firmware_statistics->underpkts = + in_be32(&p_rx_fw_statistics_pram->underpkts); + rx_firmware_statistics->pkts256 = + in_be32(&p_rx_fw_statistics_pram->pkts256); + rx_firmware_statistics->pkts512 = + in_be32(&p_rx_fw_statistics_pram->pkts512); + rx_firmware_statistics->pkts1024 = + in_be32(&p_rx_fw_statistics_pram->pkts1024); + rx_firmware_statistics->pktsjumbo = + in_be32(&p_rx_fw_statistics_pram->pktsjumbo); + rx_firmware_statistics->frlossinmacer = + in_be32(&p_rx_fw_statistics_pram->frlossinmacer); + rx_firmware_statistics->pausefr = + in_be32(&p_rx_fw_statistics_pram->pausefr); + for (i = 0; i < 0x4; i++) + rx_firmware_statistics->res1[i] = + p_rx_fw_statistics_pram->res1[i]; + rx_firmware_statistics->removevlan = + in_be32(&p_rx_fw_statistics_pram->removevlan); + rx_firmware_statistics->replacevlan = + in_be32(&p_rx_fw_statistics_pram->replacevlan); + rx_firmware_statistics->insertvlan = + in_be32(&p_rx_fw_statistics_pram->insertvlan); + } + + /* Hardware only if user handed pointer and driver actually + gathers hardware statistics */ + if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) { + hardware_statistics->tx64 = in_be32(&ug_regs->tx64); + hardware_statistics->tx127 = in_be32(&ug_regs->tx127); + hardware_statistics->tx255 = in_be32(&ug_regs->tx255); + hardware_statistics->rx64 = in_be32(&ug_regs->rx64); + hardware_statistics->rx127 = in_be32(&ug_regs->rx127); + hardware_statistics->rx255 = in_be32(&ug_regs->rx255); + hardware_statistics->txok = in_be32(&ug_regs->txok); + hardware_statistics->txcf = in_be16(&ug_regs->txcf); + hardware_statistics->tmca = in_be32(&ug_regs->tmca); + hardware_statistics->tbca = in_be32(&ug_regs->tbca); + hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); + hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); + hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); + hardware_statistics->rmca = in_be32(&ug_regs->rmca); + hardware_statistics->rbca = in_be32(&ug_regs->rbca); + } +} + +static void dump_bds(ucc_geth_private_t *ugeth) +{ + int i; + int length; + + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + if (ugeth->p_tx_bd_ring[i]) { + length = + (ugeth->ug_info->bdRingLenTx[i] * + UCC_GETH_SIZE_OF_BD); + ugeth_info("TX BDs[%d]", i); + mem_disp(ugeth->p_tx_bd_ring[i], length); + } + } + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + length = + (ugeth->ug_info->bdRingLenRx[i] * + UCC_GETH_SIZE_OF_BD); + ugeth_info("RX BDs[%d]", i); + mem_disp(ugeth->p_rx_bd_ring[i], length); + } + } +} + +static void dump_regs(ucc_geth_private_t *ugeth) +{ + int i; + + ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); + ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); + + ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->maccfg1, + in_be32(&ugeth->ug_regs->maccfg1)); + ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->maccfg2, + in_be32(&ugeth->ug_regs->maccfg2)); + ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ipgifg, + in_be32(&ugeth->ug_regs->ipgifg)); + ugeth_info("hafdup : 
addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->hafdup, + in_be32(&ugeth->ug_regs->hafdup)); + ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcfg, + in_be32(&ugeth->ug_regs->miimng.miimcfg)); + ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcom, + in_be32(&ugeth->ug_regs->miimng.miimcom)); + ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimadd, + in_be32(&ugeth->ug_regs->miimng.miimadd)); + ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcon, + in_be32(&ugeth->ug_regs->miimng.miimcon)); + ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimstat, + in_be32(&ugeth->ug_regs->miimng.miimstat)); + ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimind, + in_be32(&ugeth->ug_regs->miimng.miimind)); + ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ifctl, + in_be32(&ugeth->ug_regs->ifctl)); + ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ifstat, + in_be32(&ugeth->ug_regs->ifstat)); + ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->macstnaddr1, + in_be32(&ugeth->ug_regs->macstnaddr1)); + ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->macstnaddr2, + in_be32(&ugeth->ug_regs->macstnaddr2)); + ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->uempr, + in_be32(&ugeth->ug_regs->uempr)); + ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->utbipar, + in_be32(&ugeth->ug_regs->utbipar)); + ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->ug_regs->uescr, + in_be16(&ugeth->ug_regs->uescr)); + ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx64, + in_be32(&ugeth->ug_regs->tx64)); + ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx127, + in_be32(&ugeth->ug_regs->tx127)); + ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx255, + in_be32(&ugeth->ug_regs->tx255)); + ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx64, + in_be32(&ugeth->ug_regs->rx64)); + ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx127, + in_be32(&ugeth->ug_regs->rx127)); + ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx255, + in_be32(&ugeth->ug_regs->rx255)); + ugeth_info("txok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->txok, + in_be32(&ugeth->ug_regs->txok)); + ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->ug_regs->txcf, + in_be16(&ugeth->ug_regs->txcf)); + ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tmca, + in_be32(&ugeth->ug_regs->tmca)); + ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tbca, + in_be32(&ugeth->ug_regs->tbca)); + ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rxfok, + in_be32(&ugeth->ug_regs->rxfok)); + ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rxbok, + in_be32(&ugeth->ug_regs->rxbok)); + ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rbyt, + in_be32(&ugeth->ug_regs->rbyt)); + ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rmca, + in_be32(&ugeth->ug_regs->rmca)); + ugeth_info("rbca : addr - 0x%08x, val - 
0x%08x", + (u32) & ugeth->ug_regs->rbca, + in_be32(&ugeth->ug_regs->rbca)); + ugeth_info("scar : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->scar, + in_be32(&ugeth->ug_regs->scar)); + ugeth_info("scam : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->scam, + in_be32(&ugeth->ug_regs->scam)); + + if (ugeth->p_thread_data_tx) { + int numThreadsTxNumerical; + switch (ugeth->ug_info->numThreadsTx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsTxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsTxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsTxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsTxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsTxNumerical = 8; + break; + default: + numThreadsTxNumerical = 0; + break; + } + + ugeth_info("Thread data TXs:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_thread_data_tx); + for (i = 0; i < numThreadsTxNumerical; i++) { + ugeth_info("Thread data TX[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_thread_data_tx[i]); + mem_disp((u8 *) & ugeth->p_thread_data_tx[i], + sizeof(ucc_geth_thread_data_tx_t)); + } + } + if (ugeth->p_thread_data_rx) { + int numThreadsRxNumerical; + switch (ugeth->ug_info->numThreadsRx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsRxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsRxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsRxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsRxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsRxNumerical = 8; + break; + default: + numThreadsRxNumerical = 0; + break; + } + + ugeth_info("Thread data RX:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_thread_data_rx); + for (i = 0; i < numThreadsRxNumerical; i++) { + ugeth_info("Thread data RX[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_thread_data_rx[i]); + mem_disp((u8 *) & ugeth->p_thread_data_rx[i], + sizeof(ucc_geth_thread_data_rx_t)); + } + } + if (ugeth->p_exf_glbl_param) { + ugeth_info("EXF global param:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_exf_glbl_param); + mem_disp((u8 *) ugeth->p_exf_glbl_param, + sizeof(*ugeth->p_exf_glbl_param)); + } + if (ugeth->p_tx_glbl_pram) { + ugeth_info("TX global param:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); + ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_tx_glbl_pram->temoder, + in_be16(&ugeth->p_tx_glbl_pram->temoder)); + ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->sqptr, + in_be32(&ugeth->p_tx_glbl_pram->sqptr)); + ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, + in_be32(&ugeth->p_tx_glbl_pram-> + schedulerbasepointer)); + ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, + in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); + ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->tstate, + in_be32(&ugeth->p_tx_glbl_pram->tstate)); + ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], + ugeth->p_tx_glbl_pram->iphoffset[0]); + ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], + ugeth->p_tx_glbl_pram->iphoffset[1]); + ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", + (u32) & 
ugeth->p_tx_glbl_pram->iphoffset[2], + ugeth->p_tx_glbl_pram->iphoffset[2]); + ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], + ugeth->p_tx_glbl_pram->iphoffset[3]); + ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], + ugeth->p_tx_glbl_pram->iphoffset[4]); + ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], + ugeth->p_tx_glbl_pram->iphoffset[5]); + ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], + ugeth->p_tx_glbl_pram->iphoffset[6]); + ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], + ugeth->p_tx_glbl_pram->iphoffset[7]); + ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); + ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); + ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); + ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); + ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); + ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); + ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); + ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); + ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->tqptr, + in_be32(&ugeth->p_tx_glbl_pram->tqptr)); + } + if (ugeth->p_rx_glbl_pram) { + ugeth_info("RX global param:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); + ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->remoder, + in_be32(&ugeth->p_rx_glbl_pram->remoder)); + ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->rqptr, + in_be32(&ugeth->p_rx_glbl_pram->rqptr)); + ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->typeorlen, + in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); + ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_rx_glbl_pram->rxgstpack, + ugeth->p_rx_glbl_pram->rxgstpack); + ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, + in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); + ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, + in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); + ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_rx_glbl_pram->rstate, + ugeth->p_rx_glbl_pram->rstate); + ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->mrblr, + in_be16(&ugeth->p_rx_glbl_pram->mrblr)); + ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", + (u32) & 
ugeth->p_rx_glbl_pram->rbdqptr, + in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); + ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->mflr, + in_be16(&ugeth->p_rx_glbl_pram->mflr)); + ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->minflr, + in_be16(&ugeth->p_rx_glbl_pram->minflr)); + ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->maxd1, + in_be16(&ugeth->p_rx_glbl_pram->maxd1)); + ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->maxd2, + in_be16(&ugeth->p_rx_glbl_pram->maxd2)); + ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->ecamptr, + in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); + ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l2qt, + in_be32(&ugeth->p_rx_glbl_pram->l2qt)); + ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[0], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); + ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[1], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); + ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[2], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); + ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[3], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); + ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[4], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); + ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[5], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); + ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[6], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); + ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[7], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); + ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->vlantype, + in_be16(&ugeth->p_rx_glbl_pram->vlantype)); + ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->vlantci, + in_be16(&ugeth->p_rx_glbl_pram->vlantci)); + for (i = 0; i < 64; i++) + ugeth_info + ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", + i, + (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], + ugeth->p_rx_glbl_pram->addressfiltering[i]); + ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, + in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); + } + if (ugeth->p_send_q_mem_reg) { + ugeth_info("Send Q memory registers:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_send_q_mem_reg); + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + ugeth_info("SQQD[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); + mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], + sizeof(ucc_geth_send_queue_qd_t)); + } + } + if (ugeth->p_scheduler) { + ugeth_info("Scheduler:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); + mem_disp((u8 *) ugeth->p_scheduler, + sizeof(*ugeth->p_scheduler)); + } + if (ugeth->p_tx_fw_statistics_pram) { + ugeth_info("TX FW statistics pram:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_tx_fw_statistics_pram); + mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, + sizeof(*ugeth->p_tx_fw_statistics_pram)); + } + if 
(ugeth->p_rx_fw_statistics_pram) { + ugeth_info("RX FW statistics pram:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_rx_fw_statistics_pram); + mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, + sizeof(*ugeth->p_rx_fw_statistics_pram)); + } + if (ugeth->p_rx_irq_coalescing_tbl) { + ugeth_info("RX IRQ coalescing tables:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_rx_irq_coalescing_tbl); + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + ugeth_info("RX IRQ coalescing table entry[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]); + ugeth_info + ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingmaxvalue, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. + interruptcoalescingmaxvalue)); + ugeth_info + ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingcounter, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. + interruptcoalescingcounter)); + } + } + if (ugeth->p_rx_bd_qs_tbl) { + ugeth_info("RX BD QS tables:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + ugeth_info("RX BD QS table[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i]); + ugeth_info + ("bdbaseptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); + ugeth_info + ("bdptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); + ugeth_info + ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i]. + externalbdbaseptr)); + ugeth_info + ("externalbdptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); + ugeth_info("ucode RX Prefetched BDs:"); + ugeth_info("Base address: 0x%08x", + (u32) + qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. + bdbaseptr))); + mem_disp((u8 *) + qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. 
+ bdbaseptr)), + sizeof(ucc_geth_rx_prefetched_bds_t)); + } + } + if (ugeth->p_init_enet_param_shadow) { + int size; + ugeth_info("Init enet param shadow:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_init_enet_param_shadow); + mem_disp((u8 *) ugeth->p_init_enet_param_shadow, + sizeof(*ugeth->p_init_enet_param_shadow)); + + size = sizeof(ucc_geth_thread_rx_pram_t); + if (ugeth->ug_info->rxExtendedFiltering) { + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; + if (ugeth->ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; + if (ugeth->ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; + } + + dump_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_TX, + sizeof(ucc_geth_thread_tx_pram_t), + ugeth->ug_info->riscTx, 0); + dump_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + rxthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_RX, size, + ugeth->ug_info->riscRx, 1); + } +} +#endif /* DEBUG */ + +static void init_default_reg_vals(volatile u32 *upsmr_register, + volatile u32 *maccfg1_register, + volatile u32 *maccfg2_register) +{ + out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); + out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); + out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); +} + +static int init_half_duplex_params(int alt_beb, + int back_pressure_no_backoff, + int no_backoff, + int excess_defer, + u8 alt_beb_truncation, + u8 max_retransmissions, + u8 collision_window, + volatile u32 *hafdup_register) +{ + u32 value = 0; + + if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || + (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || + (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) + return -EINVAL; + + value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); + + if (alt_beb) + value |= HALFDUP_ALT_BEB; + if (back_pressure_no_backoff) + value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; + if (no_backoff) + value |= HALFDUP_NO_BACKOFF; + if (excess_defer) + value |= HALFDUP_EXCESSIVE_DEFER; + + value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); + + value |= collision_window; + + out_be32(hafdup_register, value); + return 0; +} + +static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, + u8 non_btb_ipg, + u8 min_ifg, + u8 btb_ipg, + volatile u32 *ipgifg_register) +{ + u32 value = 0; + + /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back + IPG part 2 */ + if (non_btb_cs_ipg > non_btb_ipg) + return -EINVAL; + + if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || + (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || + /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ + (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) + return -EINVAL; + + value |= + ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & + IPGIFG_NBTB_CS_IPG_MASK); + value |= + ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & + IPGIFG_NBTB_IPG_MASK); + value |= + ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & + IPGIFG_MIN_IFG_MASK); + value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); + + out_be32(ipgifg_register, value); + return 0; +} + +static int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, + int tx_flow_control_enable, + u16 pause_period, + u16 extension_field, + volatile u32 *upsmr_register, + volatile u32 *uempr_register, + 
volatile u32 *maccfg1_register) +{ + u32 value = 0; + + /* Set UEMPR register */ + value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; + value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; + out_be32(uempr_register, value); + + /* Set UPSMR register */ + value = in_be32(upsmr_register); + value |= automatic_flow_control_mode; + out_be32(upsmr_register, value); + + value = in_be32(maccfg1_register); + if (rx_flow_control_enable) + value |= MACCFG1_FLOW_RX; + if (tx_flow_control_enable) + value |= MACCFG1_FLOW_TX; + out_be32(maccfg1_register, value); + + return 0; +} + +static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, + int auto_zero_hardware_statistics, + volatile u32 *upsmr_register, + volatile u16 *uescr_register) +{ + u32 upsmr_value = 0; + u16 uescr_value = 0; + /* Enable hardware statistics gathering if requested */ + if (enable_hardware_statistics) { + upsmr_value = in_be32(upsmr_register); + upsmr_value |= UPSMR_HSE; + out_be32(upsmr_register, upsmr_value); + } + + /* Clear hardware statistics counters */ + uescr_value = in_be16(uescr_register); + uescr_value |= UESCR_CLRCNT; + /* Automatically zero hardware statistics counters on read, + if requested */ + if (auto_zero_hardware_statistics) + uescr_value |= UESCR_AUTOZ; + out_be16(uescr_register, uescr_value); + + return 0; +} + +static int init_firmware_statistics_gathering_mode(int + enable_tx_firmware_statistics, + int enable_rx_firmware_statistics, + volatile u32 *tx_rmon_base_ptr, + u32 tx_firmware_statistics_structure_address, + volatile u32 *rx_rmon_base_ptr, + u32 rx_firmware_statistics_structure_address, + volatile u16 *temoder_register, + volatile u32 *remoder_register) +{ + /* Note: this function does not check if */ + /* the parameters it receives are NULL */ + u16 temoder_value; + u32 remoder_value; + + if (enable_tx_firmware_statistics) { + out_be32(tx_rmon_base_ptr, + tx_firmware_statistics_structure_address); + temoder_value = in_be16(temoder_register); + temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE; + out_be16(temoder_register, temoder_value); + } + + if (enable_rx_firmware_statistics) { + out_be32(rx_rmon_base_ptr, + rx_firmware_statistics_structure_address); + remoder_value = in_be32(remoder_register); + remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE; + out_be32(remoder_register, remoder_value); + } + + return 0; +} + +static int init_mac_station_addr_regs(u8 address_byte_0, + u8 address_byte_1, + u8 address_byte_2, + u8 address_byte_3, + u8 address_byte_4, + u8 address_byte_5, + volatile u32 *macstnaddr1_register, + volatile u32 *macstnaddr2_register) +{ + u32 value = 0; + + /* Example: for a station address of 0x12345678ABCD, */ + /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ + + /* MACSTNADDR1 Register: */ + + /* 0 7 8 15 */ + /* station address byte 5 station address byte 4 */ + /* 16 23 24 31 */ + /* station address byte 3 station address byte 2 */ + value |= (u32) ((address_byte_2 << 0) & 0x000000FF); + value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); + value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); + value |= (u32) ((address_byte_5 << 24) & 0xFF000000); + + out_be32(macstnaddr1_register, value); + + /* MACSTNADDR2 Register: */ + + /* 0 7 8 15 */ + /* station address byte 1 station address byte 0 */ + /* 16 23 24 31 */ + /* reserved reserved */ + value = 0; + value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); + value |= (u32) ((address_byte_1 << 24) & 0xFF000000); + + out_be32(macstnaddr2_register, value); + + 
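/* Worked example for the station address 0x12345678ABCD quoted above: + * bytes 2..5 are 0x56, 0x78, 0xAB and 0xCD, so MACSTNADDR1 is written as + * 0xCDAB7856; bytes 0 and 1 are 0x12 and 0x34, so MACSTNADDR2 is written as + * 0x34120000. */ + 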
return 0; +} + +static int init_mac_duplex_mode(int full_duplex, + int limited_to_full_duplex, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + /* some interfaces must work in full duplex mode */ + if ((full_duplex == 0) && (limited_to_full_duplex == 1)) + return -EINVAL; + + value = in_be32(maccfg2_register); + + if (full_duplex) + value |= MACCFG2_FDX; + else + value &= ~MACCFG2_FDX; + + out_be32(maccfg2_register, value); + return 0; +} + +static int init_check_frame_length_mode(int length_check, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + value = in_be32(maccfg2_register); + + if (length_check) + value |= MACCFG2_LC; + else + value &= ~MACCFG2_LC; + + out_be32(maccfg2_register, value); + return 0; +} + +static int init_preamble_length(u8 preamble_length, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + if ((preamble_length < 3) || (preamble_length > 7)) + return -EINVAL; + + value = in_be32(maccfg2_register); + value &= ~MACCFG2_PREL_MASK; + value |= (preamble_length << MACCFG2_PREL_SHIFT); + out_be32(maccfg2_register, value); + return 0; +} + +static int init_mii_management_configuration(int reset_mgmt, + int preamble_supress, + volatile u32 *miimcfg_register, + volatile u32 *miimind_register) +{ + unsigned int timeout = PHY_INIT_TIMEOUT; + u32 value = 0; + + value = in_be32(miimcfg_register); + if (reset_mgmt) { + value |= MIIMCFG_RESET_MANAGEMENT; + out_be32(miimcfg_register, value); + } + + value = 0; + + if (preamble_supress) + value |= MIIMCFG_NO_PREAMBLE; + + value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT; + out_be32(miimcfg_register, value); + + /* Wait until the bus is free */ + while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--) + cpu_relax(); + + if (timeout <= 0) { + ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__); + return -ETIMEDOUT; + } + + return 0; +} + +static int init_rx_parameters(int reject_broadcast, + int receive_short_frames, + int promiscuous, volatile u32 *upsmr_register) +{ + u32 value = 0; + + value = in_be32(upsmr_register); + + if (reject_broadcast) + value |= UPSMR_BRO; + else + value &= ~UPSMR_BRO; + + if (receive_short_frames) + value |= UPSMR_RSH; + else + value &= ~UPSMR_RSH; + + if (promiscuous) + value |= UPSMR_PRO; + else + value &= ~UPSMR_PRO; + + out_be32(upsmr_register, value); + + return 0; +} + +static int init_max_rx_buff_len(u16 max_rx_buf_len, + volatile u16 *mrblr_register) +{ + /* max_rx_buf_len value must be a multiple of 128 */ + if ((max_rx_buf_len == 0) + || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) + return -EINVAL; + + out_be16(mrblr_register, max_rx_buf_len); + return 0; +} + +static int init_min_frame_len(u16 min_frame_length, + volatile u16 *minflr_register, + volatile u16 *mrblr_register) +{ + u16 mrblr_value = 0; + + mrblr_value = in_be16(mrblr_register); + if (min_frame_length >= (mrblr_value - 4)) + return -EINVAL; + + out_be16(minflr_register, min_frame_length); + return 0; +} + +static int adjust_enet_interface(ucc_geth_private_t *ugeth) +{ + ucc_geth_info_t *ug_info; + ucc_geth_t *ug_regs; + ucc_fast_t *uf_regs; + enet_speed_e speed; + int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm = + 0, limited_to_full_duplex = 0; + u32 upsmr, maccfg2, utbipar, tbiBaseAddress; + u16 value; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_info = ugeth->ug_info; + ug_regs = ugeth->ug_regs; + uf_regs = ugeth->uccf->uf_regs; + + /* Analyze enet_interface according to Interface Mode Configuration + table */ + ret_val = + get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm, + &rpm, 
&tbi, &limited_to_full_duplex); + if (ret_val != 0) { + ugeth_err + ("%s: half duplex not supported in requested configuration.", + __FUNCTION__); + return ret_val; + } + + /* Set MACCFG2 */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; + if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT)) + maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; + else if (speed == ENET_SPEED_1000BT) + maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; + maccfg2 |= ug_info->padAndCrc; + out_be32(&ug_regs->maccfg2, maccfg2); + + /* Set UPSMR */ + upsmr = in_be32(&uf_regs->upsmr); + upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM); + if (rpm) + upsmr |= UPSMR_RPM; + if (r10m) + upsmr |= UPSMR_R10M; + if (tbi) + upsmr |= UPSMR_TBIM; + if (rmm) + upsmr |= UPSMR_RMM; + out_be32(&uf_regs->upsmr, upsmr); + + /* Set UTBIPAR */ + utbipar = in_be32(&ug_regs->utbipar); + utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK; + if (tbi) + utbipar |= + (ug_info->phy_address + + ugeth->ug_info->uf_info. + ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT; + else + utbipar |= + (0x10 + + ugeth->ug_info->uf_info. + ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT; + out_be32(&ug_regs->utbipar, utbipar); + + /* Disable autonegotiation in tbi mode, because by default it + comes up in autonegotiation mode. */ + /* Note that this depends on proper setting in utbipar register. */ + if (tbi) { + tbiBaseAddress = in_be32(&ug_regs->utbipar); + tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; + tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; + value = + ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress, + ENET_TBI_MII_CR); + value &= ~0x1000; /* Turn off autonegotiation */ + ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress, + ENET_TBI_MII_CR, value); + } + + ret_val = init_mac_duplex_mode(1, + limited_to_full_duplex, + &ug_regs->maccfg2); + if (ret_val != 0) { + ugeth_err + ("%s: half duplex not supported in requested configuration.", + __FUNCTION__); + return ret_val; + } + + init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); + + ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); + if (ret_val != 0) { + ugeth_err + ("%s: Preamble length must be between 3 and 7 inclusive.", + __FUNCTION__); + return ret_val; + } + + return 0; +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the ugeth structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. + */ +static void adjust_link(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_geth_t *ug_regs; + u32 tempval; + struct ugeth_mii_info *mii_info = ugeth->mii_info; + + ug_regs = ugeth->ug_regs; + + if (mii_info->link) { + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. 
*/ + if (mii_info->duplex != ugeth->oldduplex) { + if (!(mii_info->duplex)) { + tempval = in_be32(&ug_regs->maccfg2); + tempval &= ~(MACCFG2_FDX); + out_be32(&ug_regs->maccfg2, tempval); + + ugeth_info("%s: Half Duplex", dev->name); + } else { + tempval = in_be32(&ug_regs->maccfg2); + tempval |= MACCFG2_FDX; + out_be32(&ug_regs->maccfg2, tempval); + + ugeth_info("%s: Full Duplex", dev->name); + } + + ugeth->oldduplex = mii_info->duplex; + } + + if (mii_info->speed != ugeth->oldspeed) { + switch (mii_info->speed) { + case 1000: +#ifdef CONFIG_MPC836x +/* FIXME: This code is for 100Mbs BUG fixing, +remove this when it is fixed!!! */ + if (ugeth->ug_info->enet_interface == + ENET_1000_GMII) + /* Run the commands which initialize the PHY */ + { + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, 0x1b); + tempval |= 0x000f; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | BMCR_RESET)); + } else if (ugeth->ug_info->enet_interface == + ENET_1000_RGMII) + /* Run the commands which initialize the PHY */ + { + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, 0x1b); + tempval = (tempval & ~0x000f) | 0x000b; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | BMCR_RESET)); + } + msleep(4000); +#endif /* CONFIG_MPC836x */ + adjust_enet_interface(ugeth); + break; + case 100: + case 10: +#ifdef CONFIG_MPC836x +/* FIXME: This code is for 100Mbs BUG fixing, +remove these lines when it is fixed!!! */ + ugeth->ug_info->enet_interface = ENET_100_RGMII; + tempval = + (u32) mii_info->mdio_read(ugeth->dev, + mii_info->mii_id, + 0x1b); + tempval = (tempval & ~0x000f) | 0x000b; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth->dev, + mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | + BMCR_RESET)); + msleep(4000); +#endif /* CONFIG_MPC836x */ + adjust_enet_interface(ugeth); + break; + default: + ugeth_warn + ("%s: Ack! Speed (%d) is not 10/100/1000!", + dev->name, mii_info->speed); + break; + } + + ugeth_info("%s: Speed %dBT", dev->name, + mii_info->speed); + + ugeth->oldspeed = mii_info->speed; + } + + if (!ugeth->oldlink) { + ugeth_info("%s: Link is up", dev->name); + ugeth->oldlink = 1; + netif_carrier_on(dev); + netif_schedule(dev); + } + } else { + if (ugeth->oldlink) { + ugeth_info("%s: Link is down", dev->name); + ugeth->oldlink = 0; + ugeth->oldspeed = 0; + ugeth->oldduplex = -1; + netif_carrier_off(dev); + } + } +} + +/* Configure the PHY for dev. + * returns 0 if success.
-1 if failure + */ +static int init_phy(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + struct phy_info *curphy; + ucc_mii_mng_t *mii_regs; + struct ugeth_mii_info *mii_info; + int err; + + mii_regs = &ugeth->ug_regs->miimng; + + ugeth->oldlink = 0; + ugeth->oldspeed = 0; + ugeth->oldduplex = -1; + + mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL); + + if (NULL == mii_info) { + ugeth_err("%s: Could not allocate mii_info", dev->name); + return -ENOMEM; + } + + mii_info->mii_regs = mii_regs; + mii_info->speed = SPEED_1000; + mii_info->duplex = DUPLEX_FULL; + mii_info->pause = 0; + mii_info->link = 0; + + mii_info->advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + mii_info->autoneg = 1; + + mii_info->mii_id = ugeth->ug_info->phy_address; + + mii_info->dev = dev; + + mii_info->mdio_read = &read_phy_reg; + mii_info->mdio_write = &write_phy_reg; + + ugeth->mii_info = mii_info; + + spin_lock_irq(&ugeth->lock); + + /* Set this UCC to be the master of the MII managment */ + ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num); + + if (init_mii_management_configuration(1, + ugeth->ug_info-> + miiPreambleSupress, + &mii_regs->miimcfg, + &mii_regs->miimind)) { + ugeth_err("%s: The MII Bus is stuck!", dev->name); + err = -1; + goto bus_fail; + } + + spin_unlock_irq(&ugeth->lock); + + /* get info for this PHY */ + curphy = get_phy_info(ugeth->mii_info); + + if (curphy == NULL) { + ugeth_err("%s: No PHY found", dev->name); + err = -1; + goto no_phy; + } + + mii_info->phyinfo = curphy; + + /* Run the commands which initialize the PHY */ + if (curphy->init) { + err = curphy->init(ugeth->mii_info); + if (err) + goto phy_init_fail; + } + + return 0; + + phy_init_fail: + no_phy: + bus_fail: + kfree(mii_info); + + return err; +} + +#ifdef CONFIG_UGETH_TX_ON_DEMOND +static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth) +{ + ucc_fast_transmit_on_demand(ugeth->uccf); + + return 0; +} +#endif + +static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + u32 temp; + + uccf = ugeth->uccf; + + /* Mask GRACEFUL STOP TX interrupt bit and clear it */ + temp = in_be32(uccf->p_uccm); + temp &= ~UCCE_GRA; + out_be32(uccf->p_uccm, temp); + out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */ + + /* Issue host command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); + + /* Wait for command to complete */ + do { + temp = in_be32(uccf->p_ucce); + } while (!(temp & UCCE_GRA)); + + uccf->stopped_tx = 1; + + return 0; +} + +static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + u8 temp; + + uccf = ugeth->uccf; + + /* Clear acknowledge bit */ + temp = ugeth->p_rx_glbl_pram->rxgstpack; + temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; + ugeth->p_rx_glbl_pram->rxgstpack = temp; + + /* Keep issuing command and checking acknowledge bit until + it is asserted, according to spec */ + do { + /* Issue host command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. 
+ ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); + + temp = ugeth->p_rx_glbl_pram->rxgstpack; + } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); + + uccf->stopped_rx = 1; + + return 0; +} + +static int ugeth_restart_tx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + + uccf = ugeth->uccf; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + 0); + uccf->stopped_tx = 0; + + return 0; +} + +static int ugeth_restart_rx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + + uccf = ugeth->uccf; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + 0); + uccf->stopped_rx = 0; + + return 0; +} + +static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode) +{ + ucc_fast_private_t *uccf; + int enabled_tx, enabled_rx; + + uccf = ugeth->uccf; + + /* check if the UCC number is in range. */ + if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { + ugeth_err("%s: ucc_num out of range.", __FUNCTION__); + return -EINVAL; + } + + enabled_tx = uccf->enabled_tx; + enabled_rx = uccf->enabled_rx; + + /* Get Tx and Rx going again, in case this channel was actively + disabled. */ + if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) + ugeth_restart_tx(ugeth); + if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) + ugeth_restart_rx(ugeth); + + ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ + + return 0; + +} + +static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode) +{ + ucc_fast_private_t *uccf; + + uccf = ugeth->uccf; + + /* check if the UCC number is in range. 
*/ + if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { + ugeth_err("%s: ucc_num out of range.", __FUNCTION__); + return -EINVAL; + } + + /* Stop any transmissions */ + if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) + ugeth_graceful_stop_tx(ugeth); + + /* Stop any receptions */ + if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) + ugeth_graceful_stop_rx(ugeth); + + ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ + + return 0; +} + +static void ugeth_dump_regs(ucc_geth_private_t *ugeth) +{ +#ifdef DEBUG + ucc_fast_dump_regs(ugeth->uccf); + dump_regs(ugeth); + dump_bds(ugeth); +#endif +} + +#ifdef CONFIG_UGETH_FILTERING +static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t * + p_UccGethTadParams, + qe_fltr_tad_t *qe_fltr_tad) +{ + u16 temp; + + /* Zero serialized TAD */ + memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE); + + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */ + if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode || + (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) + || (p_UccGethTadParams->vnontag_op != + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP) + ) + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF; + if (p_UccGethTadParams->reject_frame) + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ; + temp = + (u16) (((u16) p_UccGethTadParams-> + vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT); + qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */ + + qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */ + if (p_UccGethTadParams->vnontag_op == + UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT) + qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP; + qe_fltr_tad->serialized[1] |= + p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT; + + qe_fltr_tad->serialized[2] |= + p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT; + /* upper bits */ + qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8); + /* lower bits */ + qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff); + + return 0; +} + +static enet_addr_container_t + *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + enet_addr_container_t *enet_addr_cont; + struct list_head *p_lh; + u16 i, num; + int32_t j; + u8 *p_counter; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } + + if (!p_lh) + return NULL; + + num = *p_counter; + + for (i = 0; i < num; i++) { + enet_addr_cont = + (enet_addr_container_t *) + ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); + for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) { + if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j]) + break; + if (j == 0) + return enet_addr_cont; /* Found */ + } + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + } + return NULL; +} + +static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_enet_address_recognition_location_e location; + enet_addr_container_t *enet_addr_cont; + struct list_head *p_lh; + u8 i; + u32 limit; + u8 *p_counter; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + p_lh = &ugeth->group_hash_q; + limit = ugeth->ug_info->maxGroupAddrInHash; + location = + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + p_lh = &ugeth->ind_hash_q; + limit = 
ugeth->ug_info->maxIndAddrInHash; + location = + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH; + p_counter = &(ugeth->numIndAddrInHash); + } + + if ((enet_addr_cont = + ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) { + list_add(p_lh, &enet_addr_cont->node); /* Put it back */ + return 0; + } + if ((!p_lh) || (!(*p_counter < limit))) + return -EBUSY; + if (!(enet_addr_cont = get_enet_addr_container())) + return -ENOMEM; + for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) + (enet_addr_cont->address)[i] = (*p_enet_addr)[i]; + enet_addr_cont->location = location; + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + ++(*p_counter); + + hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); + + return 0; +} + +static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + enet_addr_container_t *enet_addr_cont; + ucc_fast_private_t *uccf; + comm_dir_e comm_dir; + u16 i, num; + struct list_head *p_lh; + u32 *addr_h, *addr_l; + u8 *p_counter; + + uccf = ugeth->uccf; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + if (! + (enet_addr_cont = + ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) + return -ENOENT; + + /* It's been found and removed from the CQ. */ + /* Now destroy its container */ + put_enet_addr_container(enet_addr_cont); + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + addr_h = &(p_82xx_addr_filt->gaddr_h); + addr_l = &(p_82xx_addr_filt->gaddr_l); + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + addr_h = &(p_82xx_addr_filt->iaddr_h); + addr_l = &(p_82xx_addr_filt->iaddr_l); + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } + + comm_dir = 0; + if (uccf->enabled_tx) + comm_dir |= COMM_DIR_TX; + if (uccf->enabled_rx) + comm_dir |= COMM_DIR_RX; + if (comm_dir) + ugeth_disable(ugeth, comm_dir); + + /* Clear the hash table. 
*/ + out_be32(addr_h, 0x00000000); + out_be32(addr_l, 0x00000000); + + /* Add all remaining CQ elements back into hash */ + num = --(*p_counter); + for (i = 0; i < num; i++) { + enet_addr_cont = + (enet_addr_container_t *) + ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); + hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + } + + if (comm_dir) + ugeth_enable(ugeth, comm_dir); + + return 0; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t * + ugeth, + enet_addr_type_e + enet_addr_type) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + ucc_fast_private_t *uccf; + comm_dir_e comm_dir; + struct list_head *p_lh; + u16 i, num; + u32 *addr_h, *addr_l; + u8 *p_counter; + + uccf = ugeth->uccf; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { + addr_h = &(p_82xx_addr_filt->gaddr_h); + addr_l = &(p_82xx_addr_filt->gaddr_l); + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { + addr_h = &(p_82xx_addr_filt->iaddr_h); + addr_l = &(p_82xx_addr_filt->iaddr_l); + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } else + return -EINVAL; + + comm_dir = 0; + if (uccf->enabled_tx) + comm_dir |= COMM_DIR_TX; + if (uccf->enabled_rx) + comm_dir |= COMM_DIR_RX; + if (comm_dir) + ugeth_disable(ugeth, comm_dir); + + /* Clear the hash table. */ + out_be32(addr_h, 0x00000000); + out_be32(addr_l, 0x00000000); + + if (!p_lh) + return 0; + + num = *p_counter; + + /* Delete all remaining CQ elements */ + for (i = 0; i < num; i++) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); + + *p_counter = 0; + + if (comm_dir) + ugeth_enable(ugeth, comm_dir); + + return 0; +} + +#ifdef CONFIG_UGETH_FILTERING +static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr, + u8 paddr_num) +{ + int i; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) + ugeth_warn + ("%s: multicast address added to paddr will have no " + "effect - is this what you wanted?", + __FUNCTION__); + + ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ + /* store address in our database */ + for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) + ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i]; + /* put in hardware */ + return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num); +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth, + u8 paddr_num) +{ + ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ + return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ +} + +static void ucc_geth_memclean(ucc_geth_private_t *ugeth) +{ + u16 i, j; + u8 *bd; + + if (!ugeth) + return; + + if (ugeth->uccf) + ucc_fast_free(ugeth->uccf); + + if (ugeth->p_thread_data_tx) { + qe_muram_free(ugeth->thread_dat_tx_offset); + ugeth->p_thread_data_tx = NULL; + } + if (ugeth->p_thread_data_rx) { + qe_muram_free(ugeth->thread_dat_rx_offset); + ugeth->p_thread_data_rx = NULL; + } + if (ugeth->p_exf_glbl_param) { + qe_muram_free(ugeth->exf_glbl_param_offset); + ugeth->p_exf_glbl_param = NULL; + } + if (ugeth->p_rx_glbl_pram) { + qe_muram_free(ugeth->rx_glbl_pram_offset); + ugeth->p_rx_glbl_pram = NULL; + } + if (ugeth->p_tx_glbl_pram) { + 
qe_muram_free(ugeth->tx_glbl_pram_offset); + ugeth->p_tx_glbl_pram = NULL; + } + if (ugeth->p_send_q_mem_reg) { + qe_muram_free(ugeth->send_q_mem_reg_offset); + ugeth->p_send_q_mem_reg = NULL; + } + if (ugeth->p_scheduler) { + qe_muram_free(ugeth->scheduler_offset); + ugeth->p_scheduler = NULL; + } + if (ugeth->p_tx_fw_statistics_pram) { + qe_muram_free(ugeth->tx_fw_statistics_pram_offset); + ugeth->p_tx_fw_statistics_pram = NULL; + } + if (ugeth->p_rx_fw_statistics_pram) { + qe_muram_free(ugeth->rx_fw_statistics_pram_offset); + ugeth->p_rx_fw_statistics_pram = NULL; + } + if (ugeth->p_rx_irq_coalescing_tbl) { + qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); + ugeth->p_rx_irq_coalescing_tbl = NULL; + } + if (ugeth->p_rx_bd_qs_tbl) { + qe_muram_free(ugeth->rx_bd_qs_tbl_offset); + ugeth->p_rx_bd_qs_tbl = NULL; + } + if (ugeth->p_init_enet_param_shadow) { + return_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + rxthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_RX, + ugeth->ug_info->riscRx, 1); + return_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_TX, + ugeth->ug_info->riscTx, 0); + kfree(ugeth->p_init_enet_param_shadow); + ugeth->p_init_enet_param_shadow = NULL; + } + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + bd = ugeth->p_tx_bd_ring[i]; + for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { + if (ugeth->tx_skbuff[i][j]) { + dma_unmap_single(NULL, + BD_BUFFER_ARG(bd), + (BD_STATUS_AND_LENGTH(bd) & + BD_LENGTH_MASK), + DMA_TO_DEVICE); + dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); + ugeth->tx_skbuff[i][j] = NULL; + } + } + + kfree(ugeth->tx_skbuff[i]); + + if (ugeth->p_tx_bd_ring[i]) { + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->tx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->tx_bd_ring_offset[i]); + ugeth->p_tx_bd_ring[i] = NULL; + } + } + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + /* Return existing data buffers in ring */ + bd = ugeth->p_rx_bd_ring[i]; + for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { + if (ugeth->rx_skbuff[i][j]) { + dma_unmap_single(NULL, BD_BUFFER(bd), + ugeth->ug_info-> + uf_info. + max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(ugeth-> + rx_skbuff[i][j]); + ugeth->rx_skbuff[i][j] = NULL; + } + bd += UCC_GETH_SIZE_OF_BD; + } + + kfree(ugeth->rx_skbuff[i]); + + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->rx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->rx_bd_ring_offset[i]); + ugeth->p_rx_bd_ring[i] = NULL; + } + } + while (!list_empty(&ugeth->group_hash_q)) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY + (dequeue(&ugeth->group_hash_q))); + while (!list_empty(&ugeth->ind_hash_q)) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY + (dequeue(&ugeth->ind_hash_q))); + +} + +static void ucc_geth_set_multi(struct net_device *dev) +{ + ucc_geth_private_t *ugeth; + struct dev_mc_list *dmi; + ucc_fast_t *uf_regs; + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + enet_addr_t tempaddr; + u8 *mcptr, *tdptr; + int i, j; + + ugeth = netdev_priv(dev); + + uf_regs = ugeth->uccf->uf_regs; + + if (dev->flags & IFF_PROMISC) { + + /* Log any net taps. 
*/ + printk("%s: Promiscuous mode enabled.\n", dev->name); + uf_regs->upsmr |= UPSMR_PRO; + + } else { + + uf_regs->upsmr &= ~UPSMR_PRO; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> + p_rx_glbl_pram->addressfiltering; + + if (dev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's. + */ + out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); + out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); + } else { + /* Clear filter and add the addresses in the list. + */ + out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); + out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); + + dmi = dev->mc_list; + + for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { + + /* Only support group multicast for now. + */ + if (!(dmi->dmi_addr[0] & 1)) + continue; + + /* The address in dmi_addr is LSB first, + * and taddr is MSB first. We have to + * copy bytes MSB first from dmi_addr. + */ + mcptr = (u8 *) dmi->dmi_addr + 5; + tdptr = (u8 *) & tempaddr; + for (j = 0; j < 6; j++) + *tdptr++ = *mcptr--; + + /* Ask CPM to run CRC and set bit in + * filter mask. + */ + hw_add_addr_in_hash(ugeth, &tempaddr); + + } + } + } +} + +static void ucc_geth_stop(ucc_geth_private_t *ugeth) +{ + ucc_geth_t *ug_regs = ugeth->ug_regs; + u32 tempval; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Disable the controller */ + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + /* Tell the kernel the link is down */ + ugeth->mii_info->link = 0; + adjust_link(ugeth->dev); + + /* Mask all interrupts */ + out_be32(ugeth->uccf->p_ucce, 0x00000000); + + /* Clear all interrupts */ + out_be32(ugeth->uccf->p_ucce, 0xffffffff); + + /* Disable Rx and Tx */ + tempval = in_be32(&ug_regs->maccfg1); + tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); + out_be32(&ug_regs->maccfg1, tempval); + + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + /* Clear any pending interrupts */ + mii_clear_phy_interrupt(ugeth->mii_info); + + /* Disable PHY Interrupts */ + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_DISABLED); + } + + free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); + + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev); + } else { + del_timer_sync(&ugeth->phy_info_timer); + } + + ucc_geth_memclean(ugeth); +} + +static int ucc_geth_startup(ucc_geth_private_t *ugeth) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + ucc_geth_init_pram_t *p_init_enet_pram; + ucc_fast_private_t *uccf; + ucc_geth_info_t *ug_info; + ucc_fast_info_t *uf_info; + ucc_fast_t *uf_regs; + ucc_geth_t *ug_regs; + int ret_val = -EINVAL; + u32 remoder = UCC_GETH_REMODER_INIT; + u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; + u32 ifstat, i, j, size, l2qt, l3qt, length; + u16 temoder = UCC_GETH_TEMODER_INIT; + u16 test; + u8 function_code = 0; + u8 *bd, *endOfRing; + u8 numThreadsRxNumerical, numThreadsTxNumerical; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || + (uf_info->bd_mem_part == MEM_PART_MURAM))) { + ugeth_err("%s: Bad memory partition value.", __FUNCTION__); + return -EINVAL; + } + + /* Rx BD lengths */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || + (ug_info->bdRingLenRx[i] % + UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { + ugeth_err + ("%s: Rx BD ring length must be multiple of 4," + " no smaller than 8.", __FUNCTION__); + return 
-EINVAL; + } + } + + /* Tx BD lengths */ + for (i = 0; i < ug_info->numQueuesTx; i++) { + if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { + ugeth_err + ("%s: Tx BD ring length must be no smaller than 2.", + __FUNCTION__); + return -EINVAL; + } + } + + /* mrblr */ + if ((uf_info->max_rx_buf_length == 0) || + (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { + ugeth_err + ("%s: max_rx_buf_length must be non-zero multiple of 128.", + __FUNCTION__); + return -EINVAL; + } + + /* num Tx queues */ + if (ug_info->numQueuesTx > NUM_TX_QUEUES) { + ugeth_err("%s: number of tx queues too large.", __FUNCTION__); + return -EINVAL; + } + + /* num Rx queues */ + if (ug_info->numQueuesRx > NUM_RX_QUEUES) { + ugeth_err("%s: number of rx queues too large.", __FUNCTION__); + return -EINVAL; + } + + /* l2qt */ + for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { + if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { + ugeth_err + ("%s: VLAN priority table entry must not be" + " larger than number of Rx queues.", + __FUNCTION__); + return -EINVAL; + } + } + + /* l3qt */ + for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { + if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { + ugeth_err + ("%s: IP priority table entry must not be" + " larger than number of Rx queues.", + __FUNCTION__); + return -EINVAL; + } + } + + if (ug_info->cam && !ug_info->ecamptr) { + ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", + __FUNCTION__); + return -EINVAL; + } + + if ((ug_info->numStationAddresses != + UCC_GETH_NUM_OF_STATION_ADDRESSES_1) + && ug_info->rxExtendedFiltering) { + ugeth_err("%s: Number of station addresses greater than 1 " + "not allowed in extended parsing mode.", + __FUNCTION__); + return -EINVAL; + } + + /* Generate uccm_mask for receive */ + uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ + for (i = 0; i < ug_info->numQueuesRx; i++) + uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i); + + for (i = 0; i < ug_info->numQueuesTx; i++) + uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); + /* Initialize the general fast UCC block. 
*/ + if (ucc_fast_init(uf_info, &uccf)) { + ugeth_err("%s: Failed to init uccf.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->uccf = uccf; + + switch (ug_info->numThreadsRx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsRxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsRxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsRxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsRxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsRxNumerical = 8; + break; + default: + ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + break; + } + + switch (ug_info->numThreadsTx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsTxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsTxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsTxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsTxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsTxNumerical = 8; + break; + default: + ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + break; + } + + /* Calculate rx_extended_features */ + ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || + ug_info->ipAddressAlignment || + (ug_info->numStationAddresses != + UCC_GETH_NUM_OF_STATION_ADDRESSES_1); + + ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || + (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) + || (ug_info->vlanOperationNonTagged != + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); + + uf_regs = uccf->uf_regs; + ug_regs = (ucc_geth_t *) (uccf->uf_regs); + ugeth->ug_regs = ug_regs; + + init_default_reg_vals(&uf_regs->upsmr, + &ug_regs->maccfg1, &ug_regs->maccfg2); + + /* Set UPSMR */ + /* For more details see the hardware spec. */ + init_rx_parameters(ug_info->bro, + ug_info->rsh, ug_info->pro, &uf_regs->upsmr); + + /* We're going to ignore other registers for now, */ + /* except as needed to get up and running */ + + /* Set MACCFG1 */ + /* For more details see the hardware spec. */ + init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + 1, + ug_info->pausePeriod, + ug_info->extensionField, + &uf_regs->upsmr, + &ug_regs->uempr, &ug_regs->maccfg1); + + maccfg1 = in_be32(&ug_regs->maccfg1); + maccfg1 |= MACCFG1_ENABLE_RX; + maccfg1 |= MACCFG1_ENABLE_TX; + out_be32(&ug_regs->maccfg1, maccfg1); + + /* Set IPGIFG */ + /* For more details see the hardware spec. */ + ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, + ug_info->nonBackToBackIfgPart2, + ug_info-> + miminumInterFrameGapEnforcement, + ug_info->backToBackInterFrameGap, + &ug_regs->ipgifg); + if (ret_val != 0) { + ugeth_err("%s: IPGIFG initialization parameter too large.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Set HAFDUP */ + /* For more details see the hardware spec. */ + ret_val = init_half_duplex_params(ug_info->altBeb, + ug_info->backPressureNoBackoff, + ug_info->noBackoff, + ug_info->excessDefer, + ug_info->altBebTruncation, + ug_info->maxRetransmission, + ug_info->collisionWindow, + &ug_regs->hafdup); + if (ret_val != 0) { + ugeth_err("%s: Half Duplex initialization parameter too large.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Set IFSTAT */ + /* For more details see the hardware spec. 
*/ + /* Read only - resets upon read */ + ifstat = in_be32(&ug_regs->ifstat); + + /* Clear UEMPR */ + /* For more details see the hardware spec. */ + out_be32(&ug_regs->uempr, 0); + + /* Set UESCR */ + /* For more details see the hardware spec. */ + init_hw_statistics_gathering_mode((ug_info->statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), + 0, &uf_regs->upsmr, &ug_regs->uescr); + + /* Allocate Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Allocate in multiple of + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, + according to spec */ + length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) + / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) % + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_TX_BD_RING_ALIGNMENT; + ugeth->tx_bd_ring_offset[j] = + (u32) (kmalloc((u32) (length + align), + GFP_KERNEL)); + if (ugeth->tx_bd_ring_offset[j] != 0) + ugeth->p_tx_bd_ring[j] = + (void*)((ugeth->tx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->tx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_TX_BD_RING_ALIGNMENT); + if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j])) + ugeth->p_tx_bd_ring[j] = + (u8 *) qe_muram_addr(ugeth-> + tx_bd_ring_offset[j]); + } + if (!ugeth->p_tx_bd_ring[j]) { + ugeth_err + ("%s: Can not allocate memory for Tx bd rings.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + /* Zero unused end of bd ring, according to spec */ + memset(ugeth->p_tx_bd_ring[j] + + ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0, + length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD); + } + + /* Allocate Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD; + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_RX_BD_RING_ALIGNMENT; + ugeth->rx_bd_ring_offset[j] = + (u32) (kmalloc((u32) (length + align), GFP_KERNEL)); + if (ugeth->rx_bd_ring_offset[j] != 0) + ugeth->p_rx_bd_ring[j] = + (void*)((ugeth->rx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->rx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_RX_BD_RING_ALIGNMENT); + if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j])) + ugeth->p_rx_bd_ring[j] = + (u8 *) qe_muram_addr(ugeth-> + rx_bd_ring_offset[j]); + } + if (!ugeth->p_rx_bd_ring[j]) { + ugeth_err + ("%s: Can not allocate memory for Rx bd rings.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + } + + /* Init Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Setup the skbuff rings */ + ugeth->tx_skbuff[j] = + (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenTx[j], + GFP_KERNEL); + + if (ugeth->tx_skbuff[j] == NULL) { + ugeth_err("%s: Could not allocate tx_skbuff", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) + ugeth->tx_skbuff[j][i] = NULL; + + ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; + bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { + BD_BUFFER_CLEAR(bd); + BD_STATUS_AND_LENGTH_SET(bd, 0); + bd 
+= UCC_GETH_SIZE_OF_BD; + } + bd -= UCC_GETH_SIZE_OF_BD; + BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */ + } + + /* Init Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + /* Setup the skbuff rings */ + ugeth->rx_skbuff[j] = + (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenRx[j], + GFP_KERNEL); + + if (ugeth->rx_skbuff[j] == NULL) { + ugeth_err("%s: Could not allocate rx_skbuff", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) + ugeth->rx_skbuff[j][i] = NULL; + + ugeth->skb_currx[j] = 0; + bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { + BD_STATUS_AND_LENGTH_SET(bd, R_I); + BD_BUFFER_CLEAR(bd); + bd += UCC_GETH_SIZE_OF_BD; + } + bd -= UCC_GETH_SIZE_OF_BD; + BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */ + } + + /* + * Global PRAM + */ + /* Tx global PRAM */ + /* Allocate global tx parameter RAM page */ + ugeth->tx_glbl_pram_offset = + qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t), + UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_tx_glbl_pram = + (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth-> + tx_glbl_pram_offset); + /* Zero out p_tx_glbl_pram */ + memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t)); + + /* Fill global PRAM */ + + /* TQPTR */ + /* Size varies with number of Tx threads */ + ugeth->thread_dat_tx_offset = + qe_muram_alloc(numThreadsTxNumerical * + sizeof(ucc_geth_thread_data_tx_t) + + 32 * (numThreadsTxNumerical == 1), + UCC_GETH_THREAD_DATA_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_thread_data_tx = + (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth-> + thread_dat_tx_offset); + out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); + + /* vtagtable */ + for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) + out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], + ug_info->vtagtable[i]); + + /* iphoffset */ + for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) + ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; + + /* SQPTR */ + /* Size varies with number of Tx queues */ + ugeth->send_q_mem_reg_offset = + qe_muram_alloc(ug_info->numQueuesTx * + sizeof(ucc_geth_send_queue_qd_t), + UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_send_q_mem_reg = + (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth-> + send_q_mem_reg_offset); + out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); + + /* Setup the table */ + /* Assume BD rings are already established */ + for (i = 0; i < ug_info->numQueuesTx; i++) { + endOfRing = + ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - + 1) * UCC_GETH_SIZE_OF_BD; + if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, + (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 
+ last_bd_completed_address, + (u32) virt_to_phys(endOfRing)); + } else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) { + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, + (u32) immrbar_virt_to_phys(ugeth-> + p_tx_bd_ring[i])); + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. + last_bd_completed_address, + (u32) immrbar_virt_to_phys(endOfRing)); + } + } + + /* schedulerbasepointer */ + + if (ug_info->numQueuesTx > 1) { + /* scheduler exists only if more than 1 tx queue */ + ugeth->scheduler_offset = + qe_muram_alloc(sizeof(ucc_geth_scheduler_t), + UCC_GETH_SCHEDULER_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->scheduler_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_scheduler.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_scheduler = + (ucc_geth_scheduler_t *) qe_muram_addr(ugeth-> + scheduler_offset); + out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, + ugeth->scheduler_offset); + /* Zero out p_scheduler */ + memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t)); + + /* Set values in scheduler */ + out_be32(&ugeth->p_scheduler->mblinterval, + ug_info->mblinterval); + out_be16(&ugeth->p_scheduler->nortsrbytetime, + ug_info->nortsrbytetime); + ugeth->p_scheduler->fracsiz = ug_info->fracsiz; + ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; + ugeth->p_scheduler->txasap = ug_info->txasap; + ugeth->p_scheduler->extrabw = ug_info->extrabw; + for (i = 0; i < NUM_TX_QUEUES; i++) + ugeth->p_scheduler->weightfactor[i] = + ug_info->weightfactor[i]; + + /* Set pointers to cpucount registers in scheduler */ + ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); + ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); + ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); + ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); + ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); + ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); + ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); + ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); + } + + /* schedulerbasepointer */ + /* TxRMON_PTR (statistics) */ + if (ug_info-> + statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { + ugeth->tx_fw_statistics_pram_offset = + qe_muram_alloc(sizeof + (ucc_geth_tx_firmware_statistics_pram_t), + UCC_GETH_TX_STATISTICS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_tx_fw_statistics_pram.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_tx_fw_statistics_pram = + (ucc_geth_tx_firmware_statistics_pram_t *) + qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); + /* Zero out p_tx_fw_statistics_pram */ + memset(ugeth->p_tx_fw_statistics_pram, + 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t)); + } + + /* temoder */ + /* Already has speed set */ + + if (ug_info->numQueuesTx > 1) + temoder |= TEMODER_SCHEDULER_ENABLE; + if (ug_info->ipCheckSumGenerate) + temoder |= TEMODER_IP_CHECKSUM_GENERATE; + temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); + out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); + + test = in_be16(&ugeth->p_tx_glbl_pram->temoder); + + /* Function code register value to be used later */ + function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL; + /* Required for QE */ + + /* function code register */ + out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); + + /* Rx global PRAM */ + /* Allocate 
global rx parameter RAM page */ + ugeth->rx_glbl_pram_offset = + qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t), + UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_rx_glbl_pram = + (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth-> + rx_glbl_pram_offset); + /* Zero out p_rx_glbl_pram */ + memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t)); + + /* Fill global PRAM */ + + /* RQPTR */ + /* Size varies with number of Rx threads */ + ugeth->thread_dat_rx_offset = + qe_muram_alloc(numThreadsRxNumerical * + sizeof(ucc_geth_thread_data_rx_t), + UCC_GETH_THREAD_DATA_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_thread_data_rx = + (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth-> + thread_dat_rx_offset); + out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); + + /* typeorlen */ + out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); + + /* rxrmonbaseptr (statistics) */ + if (ug_info-> + statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { + ugeth->rx_fw_statistics_pram_offset = + qe_muram_alloc(sizeof + (ucc_geth_rx_firmware_statistics_pram_t), + UCC_GETH_RX_STATISTICS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_rx_fw_statistics_pram.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_rx_fw_statistics_pram = + (ucc_geth_rx_firmware_statistics_pram_t *) + qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); + /* Zero out p_rx_fw_statistics_pram */ + memset(ugeth->p_rx_fw_statistics_pram, 0, + sizeof(ucc_geth_rx_firmware_statistics_pram_t)); + } + + /* intCoalescingPtr */ + + /* Size varies with number of Rx queues */ + ugeth->rx_irq_coalescing_tbl_offset = + qe_muram_alloc(ug_info->numQueuesRx * + sizeof(ucc_geth_rx_interrupt_coalescing_entry_t), + UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_rx_irq_coalescing_tbl.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_rx_irq_coalescing_tbl = + (ucc_geth_rx_interrupt_coalescing_table_t *) + qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); + out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, + ugeth->rx_irq_coalescing_tbl_offset); + + /* Fill interrupt coalescing table */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. + interruptcoalescingmaxvalue, + ug_info->interruptcoalescingmaxvalue[i]); + out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 
+ interruptcoalescingcounter, + ug_info->interruptcoalescingmaxvalue[i]); + } + + /* MRBLR */ + init_max_rx_buff_len(uf_info->max_rx_buf_length, + &ugeth->p_rx_glbl_pram->mrblr); + /* MFLR */ + out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); + /* MINFLR */ + init_min_frame_len(ug_info->minFrameLength, + &ugeth->p_rx_glbl_pram->minflr, + &ugeth->p_rx_glbl_pram->mrblr); + /* MAXD1 */ + out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); + /* MAXD2 */ + out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); + + /* l2qt */ + l2qt = 0; + for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) + l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); + out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); + + /* l3qt */ + for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { + l3qt = 0; + for (i = 0; i < 8; i++) + l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); + out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt); + } + + /* vlantype */ + out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); + + /* vlantci */ + out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); + + /* ecamptr */ + out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); + + /* RBDQPTR */ + /* Size varies with number of Rx queues */ + ugeth->rx_bd_qs_tbl_offset = + qe_muram_alloc(ug_info->numQueuesRx * + (sizeof(ucc_geth_rx_bd_queues_entry_t) + + sizeof(ucc_geth_rx_prefetched_bds_t)), + UCC_GETH_RX_BD_QUEUES_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_rx_bd_qs_tbl = + (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth-> + rx_bd_qs_tbl_offset); + out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); + /* Zero out p_rx_bd_qs_tbl */ + memset(ugeth->p_rx_bd_qs_tbl, + 0, + ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) + + sizeof(ucc_geth_rx_prefetched_bds_t))); + + /* Setup the table */ + /* Assume BD rings are already established */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { + out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); + } else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) { + out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + (u32) immrbar_virt_to_phys(ugeth-> + p_rx_bd_ring[i])); + } + /* rest of fields handled by QE */ + } + + /* remoder */ + /* Already has speed set */ + + if (ugeth->rx_extended_features) + remoder |= REMODER_RX_EXTENDED_FEATURES; + if (ug_info->rxExtendedFiltering) + remoder |= REMODER_RX_EXTENDED_FILTERING; + if (ug_info->dynamicMaxFrameLength) + remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; + if (ug_info->dynamicMinFrameLength) + remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; + remoder |= + ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; + remoder |= + ug_info-> + vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; + remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; + remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); + if (ug_info->ipCheckSumCheck) + remoder |= REMODER_IP_CHECKSUM_CHECK; + if (ug_info->ipAddressAlignment) + remoder |= REMODER_IP_ADDRESS_ALIGNMENT; + out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); + + /* Note that this function must be called */ + /* ONLY AFTER p_tx_fw_statistics_pram */ + /* andp_UccGethRxFirmwareStatisticsPram are allocated ! 
*/ + init_firmware_statistics_gathering_mode((ug_info-> + statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), + (ug_info->statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), + &ugeth->p_tx_glbl_pram->txrmonbaseptr, + ugeth->tx_fw_statistics_pram_offset, + &ugeth->p_rx_glbl_pram->rxrmonbaseptr, + ugeth->rx_fw_statistics_pram_offset, + &ugeth->p_tx_glbl_pram->temoder, + &ugeth->p_rx_glbl_pram->remoder); + + /* function code register */ + ugeth->p_rx_glbl_pram->rstate = function_code; + + /* initialize extended filtering */ + if (ug_info->rxExtendedFiltering) { + if (!ug_info->extendedFilteringChainPointer) { + ugeth_err("%s: Null Extended Filtering Chain Pointer.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + } + + /* Allocate memory for extended filtering Mode Global + Parameters */ + ugeth->exf_glbl_param_offset = + qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t), + UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_exf_glbl_param.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_exf_glbl_param = + (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth-> + exf_glbl_param_offset); + out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, + ugeth->exf_glbl_param_offset); + out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, + (u32) ug_info->extendedFilteringChainPointer); + + } else { /* initialize 82xx style address filtering */ + + /* Init individual address recognition registers to disabled */ + + for (j = 0; j < NUM_OF_PADDRS; j++) + ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); + + /* Create CQs for hash tables */ + if (ug_info->maxGroupAddrInHash > 0) { + INIT_LIST_HEAD(&ugeth->group_hash_q); + } + if (ug_info->maxIndAddrInHash > 0) { + INIT_LIST_HEAD(&ugeth->ind_hash_q); + } + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> + p_rx_glbl_pram->addressfiltering; + + ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, + ENET_ADDR_TYPE_GROUP); + ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, + ENET_ADDR_TYPE_INDIVIDUAL); + } + + /* + * Initialize UCC at QE level + */ + + command = QE_INIT_TX_RX; + + /* Allocate shadow InitEnet command parameter structure. + * This is needed because after the InitEnet command is executed, + * the structure in DPRAM is released, because DPRAM is a premium + * resource. + * This shadow structure keeps a copy of what was done so that the + * allocated resources can be released when the channel is freed. 
+ */ + if (!(ugeth->p_init_enet_param_shadow = + (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t), + GFP_KERNEL))) { + ugeth_err + ("%s: Can not allocate memory for" + " p_UccInitEnetParamShadows.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + /* Zero out *p_init_enet_param_shadow */ + memset((char *)ugeth->p_init_enet_param_shadow, + 0, sizeof(ucc_geth_init_pram_t)); + + /* Fill shadow InitEnet command parameter structure */ + + ugeth->p_init_enet_param_shadow->resinit1 = + ENET_INIT_PARAM_MAGIC_RES_INIT1; + ugeth->p_init_enet_param_shadow->resinit2 = + ENET_INIT_PARAM_MAGIC_RES_INIT2; + ugeth->p_init_enet_param_shadow->resinit3 = + ENET_INIT_PARAM_MAGIC_RES_INIT3; + ugeth->p_init_enet_param_shadow->resinit4 = + ENET_INIT_PARAM_MAGIC_RES_INIT4; + ugeth->p_init_enet_param_shadow->resinit5 = + ENET_INIT_PARAM_MAGIC_RES_INIT5; + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; + + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ugeth->rx_glbl_pram_offset | ug_info->riscRx; + if ((ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) + && (ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + && (ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { + ugeth_err("%s: Invalid largest External Lookup Key Size.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + } + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = + ug_info->largestexternallookupkeysize; + size = sizeof(ucc_geth_thread_rx_pram_t); + if (ug_info->rxExtendedFiltering) { + size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; + if (ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; + if (ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; + } + + if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> + p_init_enet_param_shadow->rxthread[0]), + (u8) (numThreadsRxNumerical + 1) + /* Rx needs one extra for terminator */ + , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, + ug_info->riscRx, 1)) != 0) { + ugeth_err("%s: Can not fill p_init_enet_param_shadow.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + ugeth->p_init_enet_param_shadow->txglobal = + ugeth->tx_glbl_pram_offset | ug_info->riscTx; + if ((ret_val = + fill_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), numThreadsTxNumerical, + sizeof(ucc_geth_thread_tx_pram_t), + UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, + ug_info->riscTx, 0)) != 0) { + ugeth_err("%s: Can not fill p_init_enet_param_shadow.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Load Rx bds with buffers */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { + ugeth_err("%s: Can not fill Rx bds with buffers.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + } + + /* Allocate InitEnet command parameter structure */ + init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4); + if (IS_MURAM_ERR(init_enet_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 
+ __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + p_init_enet_pram = + (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset); + + /* Copy shadow InitEnet command parameter structure into PRAM */ + p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; + p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; + p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; + p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; + out_be16(&p_init_enet_pram->resinit5, + ugeth->p_init_enet_param_shadow->resinit5); + p_init_enet_pram->largestexternallookupkeysize = + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; + out_be32(&p_init_enet_pram->rgftgfrxglobal, + ugeth->p_init_enet_param_shadow->rgftgfrxglobal); + for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) + out_be32(&p_init_enet_pram->rxthread[i], + ugeth->p_init_enet_param_shadow->rxthread[i]); + out_be32(&p_init_enet_pram->txglobal, + ugeth->p_init_enet_param_shadow->txglobal); + for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) + out_be32(&p_init_enet_pram->txthread[i], + ugeth->p_init_enet_param_shadow->txthread[i]); + + /* Issue QE command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + init_enet_pram_offset); + + /* Free InitEnet command parameter */ + qe_muram_free(init_enet_pram_offset); + + return 0; +} + +/* returns a net_device_stats structure pointer */ +static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + return &(ugeth->stats); +} + +/* ucc_geth_timeout gets called when a packet has not been + * transmitted after a set amount of time. + * For now, assume that clearing out all the structures, and + * starting over will fix the problem. */ +static void ucc_geth_timeout(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ugeth->stats.tx_errors++; + + ugeth_dump_regs(ugeth); + + if (dev->flags & IFF_UP) { + ucc_geth_stop(ugeth); + ucc_geth_startup(ugeth); + } + + netif_schedule(dev); +} + +/* This is called by the kernel when a frame is ready for transmission. 
*/ +/* It is pointed to by the dev->hard_start_xmit function pointer */ +static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + u8 *bd; /* BD pointer */ + u32 bd_status; + u8 txQ = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irq(&ugeth->lock); + + ugeth->stats.tx_bytes += skb->len; + + /* Start from the next BD that should be filled */ + bd = ugeth->txBd[txQ]; + bd_status = BD_STATUS_AND_LENGTH(bd); + /* Save the skb pointer so we can free it later */ + ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; + + /* Update the current skb pointer (wrapping if this was the last) */ + ugeth->skb_curtx[txQ] = + (ugeth->skb_curtx[txQ] + + 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); + + /* set up the buffer descriptor */ + BD_BUFFER_SET(bd, + dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); + + //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); + + bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; + + BD_STATUS_AND_LENGTH_SET(bd, bd_status); + + dev->trans_start = jiffies; + + /* Move to next BD in the ring */ + if (!(bd_status & T_W)) + ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD; + else + ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ]; + + /* If the next BD still needs to be cleaned up, then the bds + are full. We need to tell the kernel to stop sending us stuff. */ + if (bd == ugeth->confBd[txQ]) { + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + } + + if (ugeth->p_scheduler) { + ugeth->cpucount[txQ]++; + /* Indicate to QE that there are more Tx bds ready for + transmission */ + /* This is done by writing a running counter of the bd + count to the scheduler PRAM. */ + out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); + } + + spin_unlock_irq(&ugeth->lock); + + return 0; +} + +static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit) +{ + struct sk_buff *skb; + u8 *bd; + u16 length, howmany = 0; + u32 bd_status; + u8 *bdBuffer; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + spin_lock(&ugeth->lock); + /* collect received buffers */ + bd = ugeth->rxBd[rxQ]; + + bd_status = BD_STATUS_AND_LENGTH(bd); + + /* while there are received buffers and BD is full (~R_E) */ + while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { + bdBuffer = (u8 *) BD_BUFFER(bd); + length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); + skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; + + /* determine whether buffer is first, last, first and last + (single buffer frame) or middle (not first and not last) */ + if (!skb || + (!(bd_status & (R_F | R_L))) || + (bd_status & R_ERRORS_FATAL)) { + ugeth_vdbg("%s, %d: ERROR!!! 
skb - 0x%08x", + __FUNCTION__, __LINE__, (u32) skb); + if (skb) + dev_kfree_skb_any(skb); + + ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; + ugeth->stats.rx_dropped++; + } else { + ugeth->stats.rx_packets++; + howmany++; + + /* Prep the skb for the packet */ + skb_put(skb, length); + + /* Tell the skb what kind of packet this is */ + skb->protocol = eth_type_trans(skb, ugeth->dev); + + ugeth->stats.rx_bytes += length; + /* Send the packet up the stack */ +#ifdef CONFIG_UGETH_NAPI + netif_receive_skb(skb); +#else + netif_rx(skb); +#endif /* CONFIG_UGETH_NAPI */ + } + + ugeth->dev->last_rx = jiffies; + + skb = get_new_skb(ugeth, bd); + if (!skb) { + ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); + spin_unlock(&ugeth->lock); + ugeth->stats.rx_dropped++; + break; + } + + ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; + + /* update to point at the next skb */ + ugeth->skb_currx[rxQ] = + (ugeth->skb_currx[rxQ] + + 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); + + if (bd_status & R_W) + bd = ugeth->p_rx_bd_ring[rxQ]; + else + bd += UCC_GETH_SIZE_OF_BD; + + bd_status = BD_STATUS_AND_LENGTH(bd); + } + + ugeth->rxBd[rxQ] = bd; + spin_unlock(&ugeth->lock); + return howmany; +} + +static int ucc_geth_tx(struct net_device *dev, u8 txQ) +{ + /* Start from the next BD that should be filled */ + ucc_geth_private_t *ugeth = netdev_priv(dev); + u8 *bd; /* BD pointer */ + u32 bd_status; + + bd = ugeth->confBd[txQ]; + bd_status = BD_STATUS_AND_LENGTH(bd); + + /* Normal processing. */ + while ((bd_status & T_R) == 0) { + /* BD contains already transmitted buffer. */ + /* Handle the transmitted buffer and release */ + /* the BD to be used with the current frame */ + + if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) + break; + + ugeth->stats.tx_packets++; + + /* Free the sk buffer associated with this TxBD */ + dev_kfree_skb_irq(ugeth-> + tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]); + ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; + ugeth->skb_dirtytx[txQ] = + (ugeth->skb_dirtytx[txQ] + + 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + /* Advance the confirmation BD pointer */ + if (!(bd_status & T_W)) + ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD; + else + ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ]; + } + return 0; +} + +#ifdef CONFIG_UGETH_NAPI +static int ucc_geth_poll(struct net_device *dev, int *budget) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + int howmany; + int rx_work_limit = *budget; + u8 rxQ = 0; + + if (rx_work_limit > dev->quota) + rx_work_limit = dev->quota; + + howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit); + + dev->quota -= howmany; + rx_work_limit -= howmany; + *budget -= howmany; + + if (rx_work_limit >= 0) + netif_rx_complete(dev); + + return (rx_work_limit < 0) ? 
1 : 0; +} +#endif /* CONFIG_UGETH_NAPI */ + +static irqreturn_t ucc_geth_irq_handler(int irq, void *info, + struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *)info; + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_fast_private_t *uccf; + ucc_geth_info_t *ug_info; + register u32 ucce = 0; + register u32 bit_mask = UCCE_RXBF_SINGLE_MASK; + register u32 tx_mask = UCCE_TXBF_SINGLE_MASK; + register u8 i; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + if (!ugeth) + return IRQ_NONE; + + uccf = ugeth->uccf; + ug_info = ugeth->ug_info; + + do { + ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm)); + + /* clear event bits for next time */ + /* Side effect here is to mask ucce variable + for future processing below. */ + out_be32(uccf->p_ucce, ucce); /* Clear with ones, + but only bits in UCCM */ + + /* We ignore Tx interrupts because Tx confirmation is + done inside Tx routine */ + + for (i = 0; i < ug_info->numQueuesRx; i++) { + if (ucce & bit_mask) + ucc_geth_rx(ugeth, i, + (int)ugeth->ug_info-> + bdRingLenRx[i]); + ucce &= ~bit_mask; + bit_mask <<= 1; + } + + for (i = 0; i < ug_info->numQueuesTx; i++) { + if (ucce & tx_mask) + ucc_geth_tx(dev, i); + ucce &= ~tx_mask; + tx_mask <<= 1; + } + + /* Exceptions */ + if (ucce & UCCE_BSY) { + ugeth_vdbg("Got BUSY irq!!!!"); + ugeth->stats.rx_errors++; + ucce &= ~UCCE_BSY; + } + if (ucce & UCCE_OTHER) { + ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!", + ucce); + ugeth->stats.rx_errors++; + ucce &= ~ucce; + } + } + while (ucce); + + return IRQ_HANDLED; +} + +static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *)dev_id; + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupt */ + mii_clear_phy_interrupt(ugeth->mii_info); + + /* Disable PHY interrupts */ + mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED); + + /* Schedule the phy change */ + schedule_work(&ugeth->tq); + + return IRQ_HANDLED; +} + +/* Scheduled by the phy_interrupt/timer to handle PHY changes */ +static void ugeth_phy_change(void *data) +{ + struct net_device *dev = (struct net_device *)data; + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_geth_t *ug_regs; + int result = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_regs = ugeth->ug_regs; + + /* Delay to give the PHY a chance to change the + * register state */ + msleep(1); + + /* Update the link, speed, duplex */ + result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info); + + /* Adjust the known status as long as the link + * isn't still coming up */ + if ((0 == result) || (ugeth->mii_info->link == 0)) + adjust_link(dev); + + /* Reenable interrupts, if needed */ + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_ENABLED); +} + +/* Called every so often on systems that don't interrupt + * the core for PHY changes */ +static void ugeth_phy_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + ucc_geth_private_t *ugeth = netdev_priv(dev); + + schedule_work(&ugeth->tq); + + mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ); +} + +/* Keep trying aneg for some time + * If, after GFAR_AN_TIMEOUT seconds, it has not + * finished, we switch to forced. 
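Two notes on the interrupt handler above. Events are acknowledged by writing ones back to UCCE (masked by UCCM), and the per-queue Rx/Tx completion bits are walked with a mask that starts at the queue-0 bit and shifts left once per queue; a minimal restatement of that pattern (not part of the patch), using the UCCE_* masks defined in ucc_geth.h below:

	u32 bit_mask = UCCE_RXBF_SINGLE_MASK;	/* UCCE_RXF0, i.e. Rx queue 0 */

	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if (ucce & bit_mask)
			ucc_geth_rx(ugeth, i, (int)ug_info->bdRingLenRx[i]);
		ucce &= ~bit_mask;
		bit_mask <<= 1;
	}

Also, in the UCCE_OTHER branch "ucce &= ~ucce" always yields zero, so it clears every remaining bit and ends the do/while loop; clearing only UCCE_OTHER may have been the intent. The comment that follows refers to GFAR_AN_TIMEOUT while the code below uses UGETH_AN_TIMEOUT, a name apparently carried over from the gianfar driver.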
+ * Either way, once the process has completed, we either + * request the interrupt, or switch the timer over to + * using ugeth_phy_timer to check status */ +static void ugeth_phy_startup_timer(unsigned long data) +{ + struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; + ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev); + static int secondary = UGETH_AN_TIMEOUT; + int result; + + /* Configure the Auto-negotiation */ + result = mii_info->phyinfo->config_aneg(mii_info); + + /* If autonegotiation failed to start, and + * we haven't timed out, reset the timer, and return */ + if (result && secondary--) { + mod_timer(&ugeth->phy_info_timer, jiffies + HZ); + return; + } else if (result) { + /* Couldn't start autonegotiation. + * Try switching to forced */ + mii_info->autoneg = 0; + result = mii_info->phyinfo->config_aneg(mii_info); + + /* Forcing failed! Give up */ + if (result) { + ugeth_err("%s: Forcing failed!", mii_info->dev->name); + return; + } + } + + /* Kill the timer so it can be restarted */ + del_timer_sync(&ugeth->phy_info_timer); + + /* Grab the PHY interrupt, if necessary/possible */ + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + if (request_irq(ugeth->ug_info->phy_interrupt, + phy_interrupt, + SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) { + ugeth_err("%s: Can't get IRQ %d (PHY)", + mii_info->dev->name, + ugeth->ug_info->phy_interrupt); + } else { + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_ENABLED); + return; + } + } + + /* Start the timer again, this time in order to + * handle a change in status */ + init_timer(&ugeth->phy_info_timer); + ugeth->phy_info_timer.function = &ugeth_phy_timer; + ugeth->phy_info_timer.data = (unsigned long)mii_info->dev; + mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ); +} + +/* Called when something needs to use the ethernet device */ +/* Returns 0 for success. */ +static int ucc_geth_open(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + int err; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Test station address */ + if (dev->dev_addr[0] & ENET_GROUP_ADDR) { + ugeth_err("%s: Multicast address used for station address" + " - is this what you wanted?", __FUNCTION__); + return -EINVAL; + } + + err = ucc_geth_startup(ugeth); + if (err) { + ugeth_err("%s: Cannot configure net device, aborting.", + dev->name); + return err; + } + + err = adjust_enet_interface(ugeth); + if (err) { + ugeth_err("%s: Cannot configure net device, aborting.", + dev->name); + return err; + } + + /* Set MACSTNADDR1, MACSTNADDR2 */ + /* For more details see the hardware spec. 
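The station-address test at the top of ucc_geth_open() above relies on the Ethernet individual/group bit being the least-significant bit of the first address octet; ENET_GROUP_ADDR is defined as 0x01 in ucc_geth.h below, so a multicast or broadcast address (for example 01:00:5e:xx:xx:xx) is rejected as a station address while an ordinary unicast address passes. In short (illustration only, not part of the patch):

	if (dev->dev_addr[0] & ENET_GROUP_ADDR)	/* I/G bit set: group address */
		return -EINVAL;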
*/ + init_mac_station_addr_regs(dev->dev_addr[0], + dev->dev_addr[1], + dev->dev_addr[2], + dev->dev_addr[3], + dev->dev_addr[4], + dev->dev_addr[5], + &ugeth->ug_regs->macstnaddr1, + &ugeth->ug_regs->macstnaddr2); + + err = init_phy(dev); + if (err) { + ugeth_err("%s: Cannot initialzie PHY, aborting.", dev->name); + return err; + } +#ifndef CONFIG_UGETH_NAPI + err = + request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0, + "UCC Geth", dev); + if (err) { + ugeth_err("%s: Cannot get IRQ for net device, aborting.", + dev->name); + ucc_geth_stop(ugeth); + return err; + } +#endif /* CONFIG_UGETH_NAPI */ + + /* Set up the PHY change work queue */ + INIT_WORK(&ugeth->tq, ugeth_phy_change, dev); + + init_timer(&ugeth->phy_info_timer); + ugeth->phy_info_timer.function = &ugeth_phy_startup_timer; + ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info; + mod_timer(&ugeth->phy_info_timer, jiffies + HZ); + + err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + if (err) { + ugeth_err("%s: Cannot enable net device, aborting.", dev->name); + ucc_geth_stop(ugeth); + return err; + } + + netif_start_queue(dev); + + return err; +} + +/* Stops the kernel queue, and halts the controller */ +static int ucc_geth_close(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ucc_geth_stop(ugeth); + + /* Shutdown the PHY */ + if (ugeth->mii_info->phyinfo->close) + ugeth->mii_info->phyinfo->close(ugeth->mii_info); + + kfree(ugeth->mii_info); + + netif_stop_queue(dev); + + return 0; +} + +struct ethtool_ops ucc_geth_ethtool_ops = { + .get_settings = NULL, + .get_drvinfo = NULL, + .get_regs_len = NULL, + .get_regs = NULL, + .get_link = NULL, + .get_coalesce = NULL, + .set_coalesce = NULL, + .get_ringparam = NULL, + .set_ringparam = NULL, + .get_strings = NULL, + .get_stats_count = NULL, + .get_ethtool_stats = NULL, +}; + +static int ucc_geth_probe(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct ucc_geth_platform_data *ugeth_pdata; + struct net_device *dev = NULL; + struct ucc_geth_private *ugeth = NULL; + struct ucc_geth_info *ug_info; + int err; + static int mii_mng_configured = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data; + + ug_info = &ugeth_info[pdev->id]; + ug_info->uf_info.ucc_num = pdev->id; + ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock; + ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock; + ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr; + ug_info->uf_info.irq = platform_get_irq(pdev, 0); + ug_info->phy_address = ugeth_pdata->phy_id; + ug_info->enet_interface = ugeth_pdata->phy_interface; + ug_info->board_flags = ugeth_pdata->board_flags; + ug_info->phy_interrupt = ugeth_pdata->phy_interrupt; + + printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", + ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, + ug_info->uf_info.irq); + + if (ug_info == NULL) { + ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__, + pdev->id); + return -ENODEV; + } + + if (!mii_mng_configured) { + ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num); + mii_mng_configured = 1; + } + + /* Create an ethernet device instance */ + dev = alloc_etherdev(sizeof(*ugeth)); + + if (dev == NULL) + return -ENOMEM; + + ugeth = netdev_priv(dev); + spin_lock_init(&ugeth->lock); + + dev_set_drvdata(device, dev); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long)(ug_info->uf_info.regs); + + 
SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, device); + + /* Fill in the dev structure */ + dev->open = ucc_geth_open; + dev->hard_start_xmit = ucc_geth_start_xmit; + dev->tx_timeout = ucc_geth_timeout; + dev->watchdog_timeo = TX_TIMEOUT; +#ifdef CONFIG_UGETH_NAPI + dev->poll = ucc_geth_poll; + dev->weight = UCC_GETH_DEV_WEIGHT; +#endif /* CONFIG_UGETH_NAPI */ + dev->stop = ucc_geth_close; + dev->get_stats = ucc_geth_get_stats; +// dev->change_mtu = ucc_geth_change_mtu; + dev->mtu = 1500; + dev->set_multicast_list = ucc_geth_set_multi; + dev->ethtool_ops = &ucc_geth_ethtool_ops; + + err = register_netdev(dev); + if (err) { + ugeth_err("%s: Cannot register net device, aborting.", + dev->name); + free_netdev(dev); + return err; + } + + ugeth->ug_info = ug_info; + ugeth->dev = dev; + memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6); + + return 0; +} + +static int ucc_geth_remove(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct ucc_geth_private *ugeth = netdev_priv(dev); + + dev_set_drvdata(device, NULL); + ucc_geth_memclean(ugeth); + free_netdev(dev); + + return 0; +} + +/* Structure for a device driver */ +static struct device_driver ucc_geth_driver = { + .name = DRV_NAME, + .bus = &platform_bus_type, + .probe = ucc_geth_probe, + .remove = ucc_geth_remove, +}; + +static int __init ucc_geth_init(void) +{ + int i; + printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); + for (i = 0; i < 8; i++) + memcpy(&(ugeth_info[i]), &ugeth_primary_info, + sizeof(ugeth_primary_info)); + + return driver_register(&ucc_geth_driver); +} + +static void __exit ucc_geth_exit(void) +{ + driver_unregister(&ucc_geth_driver); +} + +module_init(ucc_geth_init); +module_exit(ucc_geth_exit); + +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h new file mode 100644 index 000000000000..005965f5dd9b --- /dev/null +++ b/drivers/net/ucc_geth.h @@ -0,0 +1,1339 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish + * + * Description: + * Internal header file for UCC Gigabit Ethernet unit routines. + * + * Changelog: + * Jun 28, 2006 Li Yang + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __UCC_GETH_H__ +#define __UCC_GETH_H__ + +#include +#include +#include + +#include +#include + +#include +#include + +#define NUM_TX_QUEUES 8 +#define NUM_RX_QUEUES 8 +#define NUM_BDS_IN_PREFETCHED_BDS 4 +#define TX_IP_OFFSET_ENTRY_MAX 8 +#define NUM_OF_PADDRS 4 +#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9 +#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8 + +typedef struct ucc_mii_mng { + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ +} __attribute__ ((packed)) ucc_mii_mng_t; + +typedef struct ucc_geth { + ucc_fast_t uccf; + + u32 maccfg1; /* mac configuration reg. 1 */ + u32 maccfg2; /* mac configuration reg. 2 */ + u32 ipgifg; /* interframe gap reg. */ + u32 hafdup; /* half-duplex reg. 
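The ucc_geth structure being declared here is a byte-exact overlay of the UCC Ethernet register file: __attribute__((packed)) keeps the compiler from inserting padding, and the res* byte arrays cover reserved ranges so that each named register stays at its hardware offset (sizes written as differences, such as 0x180 - 0x15A, read as "pad from the current offset up to the next documented offset"). A minimal illustration of the idiom with a hypothetical two-register block, not taken from this hardware:

	struct example_regs {
		u32 ctrl;			/* offset 0x00 */
		u8  res0[0x10 - 0x04];		/* reserved; keeps status at 0x10 */
		u32 status;			/* offset 0x10 */
	} __attribute__ ((packed));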
 */
+	u8 res1[0x10];
+	ucc_mii_mng_t miimng;	/* MII management structure */
+	u32 ifctl;		/* interface control reg */
+	u32 ifstat;		/* interface status reg */
+	u32 macstnaddr1;	/* mac station address part 1 reg */
+	u32 macstnaddr2;	/* mac station address part 2 reg */
+	u8 res2[0x8];
+	u32 uempr;		/* UCC Ethernet Mac parameter reg */
+	u32 utbipar;		/* UCC tbi address reg */
+	u16 uescr;		/* UCC Ethernet statistics control reg */
+	u8 res3[0x180 - 0x15A];
+	u32 tx64;		/* Total number of frames (including bad frames)
+				   transmitted that were exactly of the minimal
+				   length (64 for untagged, 68 for tagged, or with
+				   length exactly equal to the parameter MINLength) */
+	u32 tx127;		/* Total number of frames (including bad frames)
+				   transmitted that were between MINLength
+				   (including FCS length==4) and 127 octets */
+	u32 tx255;		/* Total number of frames (including bad frames)
+				   transmitted that were between 128 (including
+				   FCS length==4) and 255 octets */
+	u32 rx64;		/* Total number of frames received, including bad
+				   frames, that were exactly of the minimal
+				   length (64 bytes) */
+	u32 rx127;		/* Total number of frames (including bad frames)
+				   received that were between MINLength (including
+				   FCS length==4) and 127 octets */
+	u32 rx255;		/* Total number of frames (including bad frames)
+				   received that were between 128 (including FCS
+				   length==4) and 255 octets */
+	u32 txok;		/* Total number of octets residing in frames that
+				   were involved in successful transmission */
+	u16 txcf;		/* Total number of PAUSE control frames
+				   transmitted by this MAC */
+	u8 res4[0x2];
+	u32 tmca;		/* Total number of frames that were transmitted
+				   successfully with the group address bit set
+				   that are not broadcast frames */
+	u32 tbca;		/* Total number of frames transmitted successfully
+				   that had destination address field equal to
+				   the broadcast address */
+	u32 rxfok;		/* Total number of frames received OK */
+	u32 rxbok;		/* Total number of octets received OK */
+	u32 rbyt;		/* Total number of octets received including
+				   octets in bad frames.
Must be implemented in + HW because it includes octets in frames that + never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + succesfully with the group address bit set + that are not broadcast frames */ + u32 rbca; /* Total number of frames received succesfully + that had destination address equal to the + broadcast address */ + u32 scar; /* Statistics carry register */ + u32 scam; /* Statistics caryy mask register */ + u8 res5[0x200 - 0x1c4]; +} __attribute__ ((packed)) ucc_geth_t; + +/* UCC GETH TEMODR Register */ +#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics + */ +#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */ +#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4 + checksums */ +#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance + optimization + enhancement (mode1) */ +#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics + */ +#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues << + shift */ + +/* UCC GETH TEMODR Register */ +#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx + statistics */ +#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable + extended + features */ +#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation + tagged << shift */ +#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non + tagged << shift */ +#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift + */ +#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx + statistics */ +#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended + filtering + vs. + mpc82xx-like + filtering */ +#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues << + shift */ +#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable + dynamic max + frame length + */ +#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable + dynamic min + frame length + */ +#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4 + checksums */ +#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip + address to + 4-byte + boundary */ + +/* UCC GETH Event Register */ +#define UCCE_MPD 0x80000000 /* Magic packet + detection */ +#define UCCE_SCAR 0x40000000 +#define UCCE_GRA 0x20000000 /* Tx graceful + stop + complete */ +#define UCCE_CBPR 0x10000000 +#define UCCE_BSY 0x08000000 +#define UCCE_RXC 0x04000000 +#define UCCE_TXC 0x02000000 +#define UCCE_TXE 0x01000000 +#define UCCE_TXB7 0x00800000 +#define UCCE_TXB6 0x00400000 +#define UCCE_TXB5 0x00200000 +#define UCCE_TXB4 0x00100000 +#define UCCE_TXB3 0x00080000 +#define UCCE_TXB2 0x00040000 +#define UCCE_TXB1 0x00020000 +#define UCCE_TXB0 0x00010000 +#define UCCE_RXB7 0x00008000 +#define UCCE_RXB6 0x00004000 +#define UCCE_RXB5 0x00002000 +#define UCCE_RXB4 0x00001000 +#define UCCE_RXB3 0x00000800 +#define UCCE_RXB2 0x00000400 +#define UCCE_RXB1 0x00000200 +#define UCCE_RXB0 0x00000100 +#define UCCE_RXF7 0x00000080 +#define UCCE_RXF6 0x00000040 +#define UCCE_RXF5 0x00000020 +#define UCCE_RXF4 0x00000010 +#define UCCE_RXF3 0x00000008 +#define UCCE_RXF2 0x00000004 +#define UCCE_RXF1 0x00000002 +#define UCCE_RXF0 0x00000001 + +#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0) +#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0) + +#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\ + UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0) +#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\ + UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0) +#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | 
UCCE_RXF5 | UCCE_RXF4 |\ + UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0) +#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\ + UCCE_RXC | UCCE_TXC | UCCE_TXE) + +/* UCC GETH UPSMR (Protocol Specific Mode Register) */ +#define UPSMR_ECM 0x04000000 /* Enable CAM + Miss or + Enable + Filtering + Miss */ +#define UPSMR_HSE 0x02000000 /* Hardware + Statistics + Enable */ +#define UPSMR_PRO 0x00400000 /* Promiscuous*/ +#define UPSMR_CAP 0x00200000 /* CAM polarity + */ +#define UPSMR_RSH 0x00100000 /* Receive + Short Frames + */ +#define UPSMR_RPM 0x00080000 /* Reduced Pin + Mode + interfaces */ +#define UPSMR_R10M 0x00040000 /* RGMII/RMII + 10 Mode */ +#define UPSMR_RLPB 0x00020000 /* RMII + Loopback + Mode */ +#define UPSMR_TBIM 0x00010000 /* Ten-bit + Interface + Mode */ +#define UPSMR_RMM 0x00001000 /* RMII/RGMII + Mode */ +#define UPSMR_CAM 0x00000400 /* CAM Address + Matching */ +#define UPSMR_BRO 0x00000200 /* Broadcast + Address */ +#define UPSMR_RES1 0x00002000 /* Reserved + feild - must + be 1 */ + +/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ +#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control + Rx */ +#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control + Tx */ +#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable + synchronized + to Rx stream + */ +#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */ +#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable + synchronized + to Tx stream + */ +#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */ + +/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */ +#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble + Length << + shift */ +#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble + Length mask */ +#define MACCFG2_SRP 0x00000080 /* Soft Receive + Preamble */ +#define MACCFG2_STP 0x00000040 /* Soft + Transmit + Preamble */ +#define MACCFG2_RESERVED_1 0x00000020 /* Reserved - + must be set + to 1 */ +#define MACCFG2_LC 0x00000010 /* Length Check + */ +#define MACCFG2_MPE 0x00000008 /* Magic packet + detect */ +#define MACCFG2_FDX 0x00000001 /* Full Duplex */ +#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex + mask */ +#define MACCFG2_PAD_CRC 0x00000004 +#define MACCFG2_CRC_EN 0x00000002 +#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither + Padding + short frames + nor CRC */ +#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC + only */ +#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004 +#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode + (MII/RMII/RGMII + 10/100bps) */ +#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode + (GMII/TBI/RTB/RGMII + 1000bps ) */ +#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask + covering all + relevant + bits */ + +/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non + back-to-back + inter frame + gap part 1. + << shift */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non + back-to-back + inter frame + gap part 2. + << shift */ +#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Mimimum IFG + Enforcement + << shift */ +#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back + inter frame + gap << shift + */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back + inter frame gap part + 1. max val */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back + inter frame gap part + 2. 
max val */ +#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Mimimum IFG + Enforcement max val */ +#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter + frame gap max val */ +#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000 +#define IPGIFG_NBTB_IPG_MASK 0x007F0000 +#define IPGIFG_MIN_IFG_MASK 0x0000FF00 +#define IPGIFG_BTB_IPG_MASK 0x0000007F + +/* UCC GETH HAFDUP (Half Duplex Register) */ +#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate + Binary + Exponential + Backoff + Truncation + << shift */ +#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary + Exponential Backoff + Truncation max val */ +#define HALFDUP_ALT_BEB 0x00080000 /* Alternate + Binary + Exponential + Backoff */ +#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back + pressure no + backoff */ +#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */ +#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive + Defer */ +#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum + Retransmission + << shift */ +#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum + Retransmission max + val */ +#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision + Window << + shift */ +#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max + val */ +#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000 +#define HALFDUP_RETRANS_MASK 0x0000F000 +#define HALFDUP_COL_WINDOW_MASK 0x0000003F + +/* UCC GETH UCCS (Ethernet Status Register) */ +#define UCCS_BPR 0x02 /* Back pressure (in + half duplex mode) */ +#define UCCS_PAU 0x02 /* Pause state (in full + duplex mode) */ +#define UCCS_MPD 0x01 /* Magic Packet + Detected */ + +/* UCC GETH MIIMCFG (MII Management Configuration Register) */ +#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset + management */ +#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble + suppress */ +#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide + << shift */ +#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by + 112 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by + 160 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by + 224 */ + +/* UCC GETH MIIMCOM (MII Management Command Register) */ +#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */ +#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */ + +/* UCC GETH MIIMADD (MII Management Address Register) */ +#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address + << shift */ +#define 
MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register + << shift */ + +/* UCC GETH MIIMCON (MII Management Control Register) */ +#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control + << shift */ +#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status + << shift */ + +/* UCC GETH MIIMIND (MII Management Indicator Register) */ +#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */ +#define MIIMIND_SCAN 0x00000002 /* Scan in + progress */ +#define MIIMIND_BUSY 0x00000001 + +/* UCC GETH IFSTAT (Interface Status Register) */ +#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive + transmission + defer */ + +/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */ +#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station + address 6th + octet << + shift */ +#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station + address 5th + octet << + shift */ +#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station + address 4th + octet << + shift */ +#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station + address 3rd + octet << + shift */ + +/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */ +#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station + address 2nd + octet << + shift */ +#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station + address 1st + octet << + shift */ + +/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */ +#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time + value << + shift */ +#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended + pause time + value << + shift */ + +/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */ +#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address + << shift */ +#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address + mask */ + +/* UCC GETH UESCR (Ethernet Statistics Control Register) */ +#define UESCR_AUTOZ 0x8000 /* Automatically zero + addressed + statistical counter + values */ +#define UESCR_CLRCNT 0x4000 /* Clear all statistics + counters */ +#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max + Coalescing + Value << + shift */ +#define UESCR_SCOV_SHIFT (15 - 15) /* Status + Coalescing + Value << + shift */ + +/* UCC GETH UDSR (Data Synchronization Register) */ +#define UDSR_MAGIC 0x067E + +typedef struct ucc_geth_thread_data_tx { + u8 res0[104]; +} __attribute__ ((packed)) ucc_geth_thread_data_tx_t; + +typedef struct ucc_geth_thread_data_rx { + u8 res0[40]; +} __attribute__ ((packed)) ucc_geth_thread_data_rx_t; + +/* Send Queue Queue-Descriptor */ +typedef struct ucc_geth_send_queue_qd { + u32 bd_ring_base; /* pointer to BD ring base address */ + u8 res0[0x8]; + u32 last_bd_completed_address;/* initialize to last entry in BD ring */ + u8 res1[0x30]; +} __attribute__ ((packed)) ucc_geth_send_queue_qd_t; + +typedef struct ucc_geth_send_queue_mem_region { + ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES]; +} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t; + +typedef struct ucc_geth_thread_tx_pram { + u8 res0[64]; +} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t; + +typedef struct ucc_geth_thread_rx_pram { + u8 res0[128]; +} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t; + +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96 + +typedef struct ucc_geth_scheduler { + u16 cpucount0; /* CPU packet counter */ + u16 cpucount1; /* CPU packet counter */ + u16 cecount0; /* QE packet counter */ + u16 cecount1; /* QE packet counter */ + u16 cpucount2; /* CPU 
packet counter */ + u16 cpucount3; /* CPU packet counter */ + u16 cecount2; /* QE packet counter */ + u16 cecount3; /* QE packet counter */ + u16 cpucount4; /* CPU packet counter */ + u16 cpucount5; /* CPU packet counter */ + u16 cecount4; /* QE packet counter */ + u16 cecount5; /* QE packet counter */ + u16 cpucount6; /* CPU packet counter */ + u16 cpucount7; /* CPU packet counter */ + u16 cecount6; /* QE packet counter */ + u16 cecount7; /* QE packet counter */ + u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */ + u32 rtsrshadow; /* temporary variable handled by QE */ + u32 time; /* temporary variable handled by QE */ + u32 ttl; /* temporary variable handled by QE */ + u32 mblinterval; /* max burst length interval */ + u16 nortsrbytetime; /* normalized value of byte time in tsr units */ + u8 fracsiz; /* radix 2 log value of denom. of + NorTSRByteTime */ + u8 res0[1]; + u8 strictpriorityq; /* Strict Priority Mask register */ + u8 txasap; /* Transmit ASAP register */ + u8 extrabw; /* Extra BandWidth register */ + u8 oldwfqmask; /* temporary variable handled by QE */ + u8 weightfactor[NUM_TX_QUEUES]; + /**< weight factor for queues */ + u32 minw; /* temporary variable handled by QE */ + u8 res1[0x70 - 0x64]; +} __attribute__ ((packed)) ucc_geth_scheduler_t; + +typedef struct ucc_geth_tx_firmware_statistics_pram { + u32 sicoltx; /* single collision */ + u32 mulcoltx; /* multiple collision */ + u32 latecoltxfr; /* late collision */ + u32 frabortduecol; /* frames aborted due to transmit collision */ + u32 frlostinmactxer; /* frames lost due to internal MAC error + transmission that are not counted on any + other counter */ + u32 carriersenseertx; /* carrier sense error */ + u32 frtxok; /* frames transmitted OK */ + u32 txfrexcessivedefer; /* frames with defferal time greater than + specified threshold */ + u32 txpkts256; /* total packets (including bad) between 256 + and 511 octets */ + u32 txpkts512; /* total packets (including bad) between 512 + and 1023 octets */ + u32 txpkts1024; /* total packets (including bad) between 1024 + and 1518 octets */ + u32 txpktsjumbo; /* total packets (including bad) between 1024 + and MAXLength octets */ +} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t; + +typedef struct ucc_geth_rx_firmware_statistics_pram { + u32 frrxfcser; /* frames with crc error */ + u32 fraligner; /* frames with alignment error */ + u32 inrangelenrxer; /* in range length error */ + u32 outrangelenrxer; /* out of range length error */ + u32 frtoolong; /* frame too long */ + u32 runt; /* runt */ + u32 verylongevent; /* very long event */ + u32 symbolerror; /* symbol error */ + u32 dropbsy; /* drop because of BD not ready */ + u8 res0[0x8]; + u32 mismatchdrop; /* drop because of MAC filtering (e.g. 
address + or type mismatch) */ + u32 underpkts; /* total frames less than 64 octets */ + u32 pkts256; /* total frames (including bad) between 256 and + 511 octets */ + u32 pkts512; /* total frames (including bad) between 512 and + 1023 octets */ + u32 pkts1024; /* total frames (including bad) between 1024 + and 1518 octets */ + u32 pktsjumbo; /* total frames (including bad) between 1024 + and MAXLength octets */ + u32 frlossinmacer; /* frames lost because of internal MAC error + that is not counted in any other counter */ + u32 pausefr; /* pause frames */ + u8 res1[0x4]; + u32 removevlan; /* total frames that had their VLAN tag removed + */ + u32 replacevlan; /* total frames that had their VLAN tag + replaced */ + u32 insertvlan; /* total frames that had their VLAN tag + inserted */ +} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t; + +typedef struct ucc_geth_rx_interrupt_coalescing_entry { + u32 interruptcoalescingmaxvalue; /* interrupt coalescing max + value */ + u32 interruptcoalescingcounter; /* interrupt coalescing counter, + initialize to + interruptcoalescingmaxvalue */ +} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t; + +typedef struct ucc_geth_rx_interrupt_coalescing_table { + ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES]; + /**< interrupt coalescing entry */ +} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t; + +typedef struct ucc_geth_rx_prefetched_bds { + qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ +} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t; + +typedef struct ucc_geth_rx_bd_queues_entry { + u32 bdbaseptr; /* BD base pointer */ + u32 bdptr; /* BD pointer */ + u32 externalbdbaseptr; /* external BD base pointer */ + u32 externalbdptr; /* external BD pointer */ +} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t; + +typedef struct ucc_geth_tx_global_pram { + u16 temoder; + u8 res0[0x38 - 0x02]; + u32 sqptr; /* a base pointer to send queue memory region */ + u32 schedulerbasepointer; /* a base pointer to scheduler memory + region */ + u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */ + u32 tstate; /* tx internal state. High byte contains + function code */ + u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; + u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ + u32 tqptr; /* a base pointer to the Tx Queues Memory + Region */ + u8 res2[0x80 - 0x74]; +} __attribute__ ((packed)) ucc_geth_tx_global_pram_t; + +/* structure representing Extended Filtering Global Parameters in PRAM */ +typedef struct ucc_geth_exf_global_pram { + u32 l2pcdptr; /* individual address filter, high */ + u8 res0[0x10 - 0x04]; +} __attribute__ ((packed)) ucc_geth_exf_global_pram_t; + +typedef struct ucc_geth_rx_global_pram { + u32 remoder; /* ethernet mode reg. */ + u32 rqptr; /* base pointer to the Rx Queues Memory Region*/ + u32 res0[0x1]; + u8 res1[0x20 - 0xC]; + u16 typeorlen; /* cutoff point less than which, type/len field + is considered length */ + u8 res2[0x1]; + u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/ + u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */ + u8 res3[0x30 - 0x28]; + u32 intcoalescingptr; /* Interrupt coalescing table pointer */ + u8 res4[0x36 - 0x34]; + u8 rstate; /* rx internal state. High byte contains + function code */ + u8 res5[0x46 - 0x37]; + u16 mrblr; /* max receive buffer length reg. */ + u32 rbdqptr; /* base pointer to RxBD parameter table + description */ + u16 mflr; /* max frame length reg. 
*/ + u16 minflr; /* min frame length reg. */ + u16 maxd1; /* max dma1 length reg. */ + u16 maxd2; /* max dma2 length reg. */ + u32 ecamptr; /* external CAM address */ + u32 l2qt; /* VLAN priority mapping table. */ + u32 l3qt[0x8]; /* IP priority mapping table. */ + u16 vlantype; /* vlan type */ + u16 vlantci; /* default vlan tci */ + u8 addressfiltering[64]; /* address filtering data structure */ + u32 exfGlobalParam; /* base address for extended filtering global + parameters */ + u8 res6[0x100 - 0xC4]; /* Initialize to zero */ +} __attribute__ ((packed)) ucc_geth_rx_global_pram_t; + +#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 + +/* structure representing InitEnet command */ +typedef struct ucc_geth_init_pram { + u8 resinit1; + u8 resinit2; + u8 resinit3; + u8 resinit4; + u16 resinit5; + u8 res1[0x1]; + u8 largestexternallookupkeysize; + u32 rgftgfrxglobal; + u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */ + u8 res2[0x38 - 0x30]; + u32 txglobal; /* tx global */ + u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ + u8 res3[0x1]; +} __attribute__ ((packed)) ucc_geth_init_pram_t; + +#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) +#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) + +#define ENET_INIT_PARAM_RISC_MASK 0x0000003f +#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0 +#define ENET_INIT_PARAM_SNUM_MASK 0xff000000 +#define ENET_INIT_PARAM_SNUM_SHIFT 24 + +#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06 +#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30 +#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff +#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00 +#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400 + +/* structure representing 82xx Address Filtering Enet Address in PRAM */ +typedef struct ucc_geth_82xx_enet_address { + u8 res1[0x2]; + u16 h; /* address (MSB) */ + u16 m; /* address */ + u16 l; /* address (LSB) */ +} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t; + +/* structure representing 82xx Address Filtering PRAM */ +typedef struct ucc_geth_82xx_address_filtering_pram { + u32 iaddr_h; /* individual address filter, high */ + u32 iaddr_l; /* individual address filter, low */ + u32 gaddr_h; /* group address filter, high */ + u32 gaddr_l; /* group address filter, low */ + ucc_geth_82xx_enet_address_t taddr; + ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS]; + u8 res0[0x40 - 0x38]; +} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t; + +/* GETH Tx firmware statistics structure, used when calling + UCC_GETH_GetStatistics. */ +typedef struct ucc_geth_tx_firmware_statistics { + u32 sicoltx; /* single collision */ + u32 mulcoltx; /* multiple collision */ + u32 latecoltxfr; /* late collision */ + u32 frabortduecol; /* frames aborted due to transmit collision */ + u32 frlostinmactxer; /* frames lost due to internal MAC error + transmission that are not counted on any + other counter */ + u32 carriersenseertx; /* carrier sense error */ + u32 frtxok; /* frames transmitted OK */ + u32 txfrexcessivedefer; /* frames with defferal time greater than + specified threshold */ + u32 txpkts256; /* total packets (including bad) between 256 + and 511 octets */ + u32 txpkts512; /* total packets (including bad) between 512 + and 1023 octets */ + u32 txpkts1024; /* total packets (including bad) between 1024 + and 1518 octets */ + u32 txpktsjumbo; /* total packets (including bad) between 1024 + and MAXLength octets */ +} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t; + +/* GETH Rx firmware statistics structure, used when calling + UCC_GETH_GetStatistics. 
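This InitEnet parameter-RAM layout is the one ucc_geth_startup() fills through its shadow copy earlier in ucc_geth.c: resinit1..resinit5 take the fixed ENET_INIT_PARAM_MAGIC_RES_INIT* values expected by the QE microcode, and rgftgfrxglobal packs the Rx/Tx thread counts together with the Rx global parameter-RAM offset and the RISC allocation. Restating that packing (sketch only; "shadow" stands for ugeth->p_init_enet_param_shadow):

	shadow->rgftgfrxglobal |= (u32) ug_info->numThreadsRx << ENET_INIT_PARAM_RGF_SHIFT;
	shadow->rgftgfrxglobal |= (u32) ug_info->numThreadsTx << ENET_INIT_PARAM_TGF_SHIFT;
	shadow->rgftgfrxglobal |= ugeth->rx_glbl_pram_offset | ug_info->riscRx;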
 */
+typedef struct ucc_geth_rx_firmware_statistics {
+	u32 frrxfcser;		/* frames with crc error */
+	u32 fraligner;		/* frames with alignment error */
+	u32 inrangelenrxer;	/* in range length error */
+	u32 outrangelenrxer;	/* out of range length error */
+	u32 frtoolong;		/* frame too long */
+	u32 runt;		/* runt */
+	u32 verylongevent;	/* very long event */
+	u32 symbolerror;	/* symbol error */
+	u32 dropbsy;		/* drop because of BD not ready */
+	u8 res0[0x8];
+	u32 mismatchdrop;	/* drop because of MAC filtering (e.g. address
+				   or type mismatch) */
+	u32 underpkts;		/* total frames less than 64 octets */
+	u32 pkts256;		/* total frames (including bad) between 256 and
+				   511 octets */
+	u32 pkts512;		/* total frames (including bad) between 512 and
+				   1023 octets */
+	u32 pkts1024;		/* total frames (including bad) between 1024
+				   and 1518 octets */
+	u32 pktsjumbo;		/* total frames (including bad) between 1024
+				   and MAXLength octets */
+	u32 frlossinmacer;	/* frames lost because of internal MAC error
+				   that is not counted in any other counter */
+	u32 pausefr;		/* pause frames */
+	u8 res1[0x4];
+	u32 removevlan;		/* total frames that had their VLAN tag removed */
+	u32 replacevlan;	/* total frames that had their VLAN tag replaced */
+	u32 insertvlan;		/* total frames that had their VLAN tag inserted */
+} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t;
+
+/* GETH hardware statistics structure, used when calling
+   UCC_GETH_GetStatistics. */
+typedef struct ucc_geth_hardware_statistics {
+	u32 tx64;		/* Total number of frames (including bad frames)
+				   transmitted that were exactly of the minimal
+				   length (64 for untagged, 68 for tagged, or with
+				   length exactly equal to the parameter MINLength) */
+	u32 tx127;		/* Total number of frames (including bad frames)
+				   transmitted that were between MINLength
+				   (including FCS length==4) and 127 octets */
+	u32 tx255;		/* Total number of frames (including bad frames)
+				   transmitted that were between 128 (including
+				   FCS length==4) and 255 octets */
+	u32 rx64;		/* Total number of frames received, including bad
+				   frames, that were exactly of the minimal
+				   length (64 bytes) */
+	u32 rx127;		/* Total number of frames (including bad frames)
+				   received that were between MINLength (including
+				   FCS length==4) and 127 octets */
+	u32 rx255;		/* Total number of frames (including bad frames)
+				   received that were between 128 (including FCS
+				   length==4) and 255 octets */
+	u32 txok;		/* Total number of octets residing in frames that
+				   were involved in successful transmission */
+	u16 txcf;		/* Total number of PAUSE control frames
+				   transmitted by this MAC */
+	u32 tmca;		/* Total number of frames that were transmitted
+				   successfully with the group address bit set
+				   that are not broadcast frames */
+	u32 tbca;		/* Total number of frames transmitted successfully
+				   that had destination address field equal to
+				   the broadcast address */
+	u32 rxfok;		/* Total number of frames received OK */
+	u32 rxbok;		/* Total number of octets received OK */
+	u32 rbyt;		/* Total number of octets received including
+				   octets in bad frames.
Must be implemented in + HW because it includes octets in frames that + never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + succesfully with the group address bit set + that are not broadcast frames */ + u32 rbca; /* Total number of frames received succesfully + that had destination address equal to the + broadcast address */ +} __attribute__ ((packed)) ucc_geth_hardware_statistics_t; + +/* UCC GETH Tx errors returned via TxConf callback */ +#define TX_ERRORS_DEF 0x0200 +#define TX_ERRORS_EXDEF 0x0100 +#define TX_ERRORS_LC 0x0080 +#define TX_ERRORS_RL 0x0040 +#define TX_ERRORS_RC_MASK 0x003C +#define TX_ERRORS_RC_SHIFT 2 +#define TX_ERRORS_UN 0x0002 +#define TX_ERRORS_CSL 0x0001 + +/* UCC GETH Rx errors returned via RxStore callback */ +#define RX_ERRORS_CMR 0x0200 +#define RX_ERRORS_M 0x0100 +#define RX_ERRORS_BC 0x0080 +#define RX_ERRORS_MC 0x0040 + +/* Transmit BD. These are in addition to values defined in uccf. */ +#define T_VID 0x003c0000 /* insert VLAN id index mask. */ +#define T_DEF (((u32) TX_ERRORS_DEF ) << 16) +#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16) +#define T_LC (((u32) TX_ERRORS_LC ) << 16) +#define T_RL (((u32) TX_ERRORS_RL ) << 16) +#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16) +#define T_UN (((u32) TX_ERRORS_UN ) << 16) +#define T_CSL (((u32) TX_ERRORS_CSL ) << 16) +#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \ + | T_UN | T_CSL) /* transmit errors to report */ + +/* Receive BD. These are in addition to values defined in uccf. */ +#define R_LG 0x00200000 /* Frame length violation. */ +#define R_NO 0x00100000 /* Non-octet aligned frame. */ +#define R_SH 0x00080000 /* Short frame. */ +#define R_CR 0x00040000 /* CRC error. */ +#define R_OV 0x00020000 /* Overrun. */ +#define R_IPCH 0x00010000 /* IP checksum check failed. 
*/ +#define R_CMR (((u32) RX_ERRORS_CMR ) << 16) +#define R_M (((u32) RX_ERRORS_M ) << 16) +#define R_BC (((u32) RX_ERRORS_BC ) << 16) +#define R_MC (((u32) RX_ERRORS_MC ) << 16) +#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to + report */ +#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \ + R_OV | R_IPCH) /* receive errors to discard */ + +/* Alignments */ +#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256 +#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128 +#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128 +#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64 +#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values + based on num of + threads, but always + using the maximum is + easier */ +#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 +#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a + guess */ +#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ +#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ +#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This + is a + guess + */ +#define UCC_GETH_RX_BD_RING_ALIGNMENT 32 +#define UCC_GETH_TX_BD_RING_ALIGNMENT 32 +#define UCC_GETH_MRBLR_ALIGNMENT 128 +#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4 +#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32 +#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64 + +#define UCC_GETH_TAD_EF 0x80 +#define UCC_GETH_TAD_V 0x40 +#define UCC_GETH_TAD_REJ 0x20 +#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2 +#define UCC_GETH_TAD_VTAG_OP_SHIFT 6 +#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20 +#define UCC_GETH_TAD_RQOS_SHIFT 0 +#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5 +#define UCC_GETH_TAD_CFI 0x10 + +#define UCC_GETH_VLAN_PRIORITY_MAX 8 +#define UCC_GETH_IP_PRIORITY_MAX 64 +#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8 +#define UCC_GETH_RX_BD_RING_SIZE_MIN 8 +#define UCC_GETH_TX_BD_RING_SIZE_MIN 2 + +#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD + +/* Driver definitions */ +#define TX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x10 +#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN + +#define TX_RING_MOD_MASK(size) (size-1) +#define RX_RING_MOD_MASK(size) (size-1) + +#define ENET_NUM_OCTETS_PER_ADDRESS 6 +#define ENET_GROUP_ADDR 0x01 /* Group address mask + for ethernet + addresses */ + +#define TX_TIMEOUT (1*HZ) +#define SKB_ALLOC_TIMEOUT 100000 +#define PHY_INIT_TIMEOUT 100000 +#define PHY_CHANGE_TIME 2 + +/* Fast Ethernet (10/100 Mbps) */ +#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size + */ +#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */ +#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */ +#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size + */ +#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ +#define UCC_GETH_UTFTT_INIT 128 +/* Gigabit Ethernet (1000 Mbps) */ +#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual + FIFO size */ +#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ +#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ +#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual + FIFO size */ +#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */ +#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */ + +#define UCC_GETH_REMODER_INIT 0 /* bits that must be + set */ +#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must */ +#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value + for this + register */ +#define 
UCC_GETH_MACCFG1_INIT 0 +#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) +#define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \ + (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112) + +/* Ethernet speed */ +typedef enum enet_speed { + ENET_SPEED_10BT, /* 10 Base T */ + ENET_SPEED_100BT, /* 100 Base T */ + ENET_SPEED_1000BT /* 1000 Base T */ +} enet_speed_e; + +/* Ethernet Address Type. */ +typedef enum enet_addr_type { + ENET_ADDR_TYPE_INDIVIDUAL, + ENET_ADDR_TYPE_GROUP, + ENET_ADDR_TYPE_BROADCAST +} enet_addr_type_e; + +/* TBI / MII Set Register */ +typedef enum enet_tbi_mii_reg { + ENET_TBI_MII_CR = 0x00, /* Control (CR ) */ + ENET_TBI_MII_SR = 0x01, /* Status (SR ) */ + ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */ + ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability + (ANLPBPA) */ + ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */ + ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */ + ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page + (ANLPANP) */ + ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */ + ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */ + ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */ +} enet_tbi_mii_reg_e; + +/* UCC GETH 82xx Ethernet Address Recognition Location */ +typedef enum ucc_geth_enet_address_recognition_location { + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station + address */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional + station + address + paddr1 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional + station + address + paddr2 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional + station + address + paddr3 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional + station + address + paddr4 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual + hash */ +} ucc_geth_enet_address_recognition_location_e; + +/* UCC GETH vlan operation tagged */ +typedef enum ucc_geth_vlan_operation_tagged { + UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */ + UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG + = 0x1, /* Tagged - replace vid portion of q tag */ + UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE + = 0x2, /* Tagged - if vid0 replace vid with default value */ + UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME + = 0x3 /* Tagged - extract q tag from frame */ +} ucc_geth_vlan_operation_tagged_e; + +/* UCC GETH vlan operation non-tagged */ +typedef enum ucc_geth_vlan_operation_non_tagged { + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */ + UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged - + q tag insert + */ +} ucc_geth_vlan_operation_non_tagged_e; + +/* UCC GETH Rx Quality of Service Mode */ +typedef enum ucc_geth_qos_mode { + UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */ + UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue + determined + by L2 + criteria */ + UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue + determined + by L3 + criteria */ +} ucc_geth_qos_mode_e; + +/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together + for combined functionality */ +typedef enum ucc_geth_statistics_gathering_mode { + UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No + statistics + gathering */ + UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable + hardware + 
statistics + gathering + */ + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable + firmware + tx + statistics + gathering + */ + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable + firmware + rx + statistics + gathering + */ +} ucc_geth_statistics_gathering_mode_e; + +/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */ +typedef enum ucc_geth_maccfg2_pad_and_crc_mode { + UCC_GETH_PAD_AND_CRC_MODE_NONE + = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding + short frames + nor CRC */ + UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY + = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append + CRC only */ + UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC = + MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC +} ucc_geth_maccfg2_pad_and_crc_mode_e; + +/* UCC GETH upsmr Flow Control Mode */ +typedef enum ucc_geth_flow_control_mode { + UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic + flow control + */ + UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY + = 0x00004000 /* Send pause frame when RxFIFO reaches its + emergency threshold */ +} ucc_geth_flow_control_mode_e; + +/* UCC GETH number of threads */ +typedef enum ucc_geth_num_of_threads { + UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */ + UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */ + UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */ + UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */ + UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */ +} ucc_geth_num_of_threads_e; + +/* UCC GETH number of station addresses */ +typedef enum ucc_geth_num_of_station_addresses { + UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */ + UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */ +} ucc_geth_num_of_station_addresses_e; + +typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS]; + +/* UCC GETH 82xx Ethernet Address Container */ +typedef struct enet_addr_container { + enet_addr_t address; /* ethernet address */ + ucc_geth_enet_address_recognition_location_e location; /* location in + 82xx address + recognition + hardware */ + struct list_head node; +} enet_addr_container_t; + +#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node) + +/* UCC GETH Termination Action Descriptor (TAD) structure. 
*/ +typedef struct ucc_geth_tad_params { + int rx_non_dynamic_extended_features_mode; + int reject_frame; + ucc_geth_vlan_operation_tagged_e vtag_op; + ucc_geth_vlan_operation_non_tagged_e vnontag_op; + ucc_geth_qos_mode_e rqos; + u8 vpri; + u16 vid; +} ucc_geth_tad_params_t; + +/* GETH protocol initialization structure */ +typedef struct ucc_geth_info { + ucc_fast_info_t uf_info; + u8 numQueuesTx; + u8 numQueuesRx; + int ipCheckSumCheck; + int ipCheckSumGenerate; + int rxExtendedFiltering; + u32 extendedFilteringChainPointer; + u16 typeorlen; + int dynamicMaxFrameLength; + int dynamicMinFrameLength; + u8 nonBackToBackIfgPart1; + u8 nonBackToBackIfgPart2; + u8 miminumInterFrameGapEnforcement; + u8 backToBackInterFrameGap; + int ipAddressAlignment; + int lengthCheckRx; + u32 mblinterval; + u16 nortsrbytetime; + u8 fracsiz; + u8 strictpriorityq; + u8 txasap; + u8 extrabw; + int miiPreambleSupress; + u8 altBebTruncation; + int altBeb; + int backPressureNoBackoff; + int noBackoff; + int excessDefer; + u8 maxRetransmission; + u8 collisionWindow; + int pro; + int cap; + int rsh; + int rlpb; + int cam; + int bro; + int ecm; + int receiveFlowControl; + u8 maxGroupAddrInHash; + u8 maxIndAddrInHash; + u8 prel; + u16 maxFrameLength; + u16 minFrameLength; + u16 maxD1Length; + u16 maxD2Length; + u16 vlantype; + u16 vlantci; + u32 ecamptr; + u32 eventRegMask; + u16 pausePeriod; + u16 extensionField; + u8 phy_address; + u32 board_flags; + u32 phy_interrupt; + u8 weightfactor[NUM_TX_QUEUES]; + u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; + u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; + u8 l3qt[UCC_GETH_IP_PRIORITY_MAX]; + u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX]; + u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; + u16 bdRingLenTx[NUM_TX_QUEUES]; + u16 bdRingLenRx[NUM_RX_QUEUES]; + enet_interface_e enet_interface; + ucc_geth_num_of_station_addresses_e numStationAddresses; + qe_fltr_largest_external_tbl_lookup_key_size_e + largestexternallookupkeysize; + ucc_geth_statistics_gathering_mode_e statisticsMode; + ucc_geth_vlan_operation_tagged_e vlanOperationTagged; + ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged; + ucc_geth_qos_mode_e rxQoSMode; + ucc_geth_flow_control_mode_e aufc; + ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc; + ucc_geth_num_of_threads_e numThreadsTx; + ucc_geth_num_of_threads_e numThreadsRx; + qe_risc_allocation_e riscTx; + qe_risc_allocation_e riscRx; +} ucc_geth_info_t; + +/* structure representing UCC GETH */ +typedef struct ucc_geth_private { + ucc_geth_info_t *ug_info; + ucc_fast_private_t *uccf; + struct net_device *dev; + struct net_device_stats stats; /* linux network statistics */ + ucc_geth_t *ug_regs; + ucc_geth_init_pram_t *p_init_enet_param_shadow; + ucc_geth_exf_global_pram_t *p_exf_glbl_param; + u32 exf_glbl_param_offset; + ucc_geth_rx_global_pram_t *p_rx_glbl_pram; + u32 rx_glbl_pram_offset; + ucc_geth_tx_global_pram_t *p_tx_glbl_pram; + u32 tx_glbl_pram_offset; + ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg; + u32 send_q_mem_reg_offset; + ucc_geth_thread_data_tx_t *p_thread_data_tx; + u32 thread_dat_tx_offset; + ucc_geth_thread_data_rx_t *p_thread_data_rx; + u32 thread_dat_rx_offset; + ucc_geth_scheduler_t *p_scheduler; + u32 scheduler_offset; + ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; + u32 tx_fw_statistics_pram_offset; + ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; + u32 rx_fw_statistics_pram_offset; + ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl; + u32 rx_irq_coalescing_tbl_offset; + 
ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl; + u32 rx_bd_qs_tbl_offset; + u8 *p_tx_bd_ring[NUM_TX_QUEUES]; + u32 tx_bd_ring_offset[NUM_TX_QUEUES]; + u8 *p_rx_bd_ring[NUM_RX_QUEUES]; + u32 rx_bd_ring_offset[NUM_RX_QUEUES]; + u8 *confBd[NUM_TX_QUEUES]; + u8 *txBd[NUM_TX_QUEUES]; + u8 *rxBd[NUM_RX_QUEUES]; + int badFrame[NUM_RX_QUEUES]; + u16 cpucount[NUM_TX_QUEUES]; + volatile u16 *p_cpucount[NUM_TX_QUEUES]; + int indAddrRegUsed[NUM_OF_PADDRS]; + enet_addr_t paddr[NUM_OF_PADDRS]; + u8 numGroupAddrInHash; + u8 numIndAddrInHash; + u8 numIndAddrInReg; + int rx_extended_features; + int rx_non_dynamic_extended_features; + struct list_head conf_skbs; + struct list_head group_hash_q; + struct list_head ind_hash_q; + u32 saved_uccm; + spinlock_t lock; + /* pointers to arrays of skbuffs for tx and rx */ + struct sk_buff **tx_skbuff[NUM_TX_QUEUES]; + struct sk_buff **rx_skbuff[NUM_RX_QUEUES]; + /* indices pointing to the next free sbk in skb arrays */ + u16 skb_curtx[NUM_TX_QUEUES]; + u16 skb_currx[NUM_RX_QUEUES]; + /* index of the first skb which hasn't been transmitted yet. */ + u16 skb_dirtytx[NUM_TX_QUEUES]; + + struct work_struct tq; + struct timer_list phy_info_timer; + struct ugeth_mii_info *mii_info; + int oldspeed; + int oldduplex; + int oldlink; +} ucc_geth_private_t; + +#endif /* __UCC_GETH_H__ */ diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c new file mode 100644 index 000000000000..f91028c5386d --- /dev/null +++ b/drivers/net/ucc_geth_phy.c @@ -0,0 +1,801 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish + * + * Description: + * UCC GETH Driver -- PHY handling + * + * Changelog: + * Jun 28, 2006 Li Yang + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ucc_geth.h" +#include "ucc_geth_phy.h" +#include + +#define ugphy_printk(level, format, arg...) \ + printk(level format "\n", ## arg) + +#define ugphy_dbg(format, arg...) \ + ugphy_printk(KERN_DEBUG, format , ## arg) +#define ugphy_err(format, arg...) \ + ugphy_printk(KERN_ERR, format , ## arg) +#define ugphy_info(format, arg...) \ + ugphy_printk(KERN_INFO, format , ## arg) +#define ugphy_warn(format, arg...) \ + ugphy_printk(KERN_WARNING, format , ## arg) + +#ifdef UGETH_VERBOSE_DEBUG +#define ugphy_vdbg ugphy_dbg +#else +#define ugphy_vdbg(fmt, args...) 
do { } while (0)
+#endif /* UGETH_VERBOSE_DEBUG */
+
+static void config_genmii_advert(struct ugeth_mii_info *mii_info);
+static void genmii_setup_forced(struct ugeth_mii_info *mii_info);
+static void genmii_restart_aneg(struct ugeth_mii_info *mii_info);
+static int gbit_config_aneg(struct ugeth_mii_info *mii_info);
+static int genmii_config_aneg(struct ugeth_mii_info *mii_info);
+static int genmii_update_link(struct ugeth_mii_info *mii_info);
+static int genmii_read_status(struct ugeth_mii_info *mii_info);
+u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum);
+void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val);
+
+static u8 *bcsr_regs = NULL;
+
+/* Write value to the PHY for this device to the register at regnum, */
+/* waiting until the write is done before it returns. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_mii_mng_t *mii_regs;
+ enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ u32 tmp_reg;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irq(&ugeth->lock);
+
+ mii_regs = ugeth->mii_info->mii_regs;
+
+ /* Set this UCC to be the master of the MII management */
+ ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
+
+ /* Stop the MII management read cycle */
+ out_be32(&mii_regs->miimcom, 0);
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ out_be32(&mii_regs->miimadd, tmp_reg);
+
+ /* Setting up the MII Management Control Register with the value */
+ out_be32(&mii_regs->miimcon, (u32) value);
+
+ /* Wait till MII management write is complete */
+ while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
+ cpu_relax();
+
+ spin_unlock_irq(&ugeth->lock);
+
+ udelay(10000);
+}
+
+/* Reads from register regnum in the PHY for device dev, */
+/* returning the value. Clears miimcom first.
All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +int read_phy_reg(struct net_device *dev, int mii_id, int regnum) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_mii_mng_t *mii_regs; + enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; + u32 tmp_reg; + u16 value; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irq(&ugeth->lock); + + mii_regs = ugeth->mii_info->mii_regs; + + /* Setting up the MII Mangement Address Register */ + tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg; + out_be32(&mii_regs->miimadd, tmp_reg); + + /* Perform an MII management read cycle */ + out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE); + + /* Wait till MII management write is complete */ + while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY) + cpu_relax(); + + udelay(10000); + + /* Read MII management status */ + value = (u16) in_be32(&mii_regs->miimstat); + out_be32(&mii_regs->miimcom, 0); + if (value == 0xffff) + ugphy_warn("read wrong value : mii_id %d,mii_reg %d, base %08x", + mii_id, mii_reg, (u32) & (mii_regs->miimcfg)); + + spin_unlock_irq(&ugeth->lock); + + return (value); +} + +void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->phyinfo->ack_interrupt) + mii_info->phyinfo->ack_interrupt(mii_info); +} + +void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info, + u32 interrupts) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + mii_info->interrupts = interrupts; + if (mii_info->phyinfo->config_intr) + mii_info->phyinfo->config_intr(mii_info); +} + +/* Writes MII_ADVERTISE with the appropriate values, after + * sanitizing advertise to make sure only supported features + * are advertised + */ +static void config_genmii_advert(struct ugeth_mii_info *mii_info) +{ + u32 advertise; + u16 adv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Only allow advertising what this PHY supports */ + mii_info->advertising &= mii_info->phyinfo->features; + advertise = mii_info->advertising; + + /* Setup standard advertisement */ + adv = phy_read(mii_info, MII_ADVERTISE); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + phy_write(mii_info, MII_ADVERTISE, adv); +} + +static void genmii_setup_forced(struct ugeth_mii_info *mii_info) +{ + u16 ctrl; + u32 features = mii_info->phyinfo->features; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + ctrl = phy_read(mii_info, MII_BMCR); + + ctrl &= + ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + ctrl |= BMCR_RESET; + + switch (mii_info->speed) { + case SPEED_1000: + if (features & (SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full)) { + ctrl |= BMCR_SPEED1000; + break; + } + mii_info->speed = SPEED_100; + case SPEED_100: + if (features & (SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full)) { + ctrl |= BMCR_SPEED100; + break; + } + mii_info->speed = SPEED_10; + case SPEED_10: + if (features & (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full)) + break; + default: /* Unsupported speed! 
*/ + ugphy_err("%s: Bad speed!", mii_info->dev->name); + break; + } + + phy_write(mii_info, MII_BMCR, ctrl); +} + +/* Enable and Restart Autonegotiation */ +static void genmii_restart_aneg(struct ugeth_mii_info *mii_info) +{ + u16 ctl; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + ctl = phy_read(mii_info, MII_BMCR); + ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(mii_info, MII_BMCR, ctl); +} + +static int gbit_config_aneg(struct ugeth_mii_info *mii_info) +{ + u16 adv; + u32 advertise; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->autoneg) { + /* Configure the ADVERTISE register */ + config_genmii_advert(mii_info); + advertise = mii_info->advertising; + + adv = phy_read(mii_info, MII_1000BASETCONTROL); + adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | + MII_1000BASETCONTROL_HALFDUPLEXCAP); + if (advertise & SUPPORTED_1000baseT_Half) + adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; + if (advertise & SUPPORTED_1000baseT_Full) + adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; + phy_write(mii_info, MII_1000BASETCONTROL, adv); + + /* Start/Restart aneg */ + genmii_restart_aneg(mii_info); + } else + genmii_setup_forced(mii_info); + + return 0; +} + +static int genmii_config_aneg(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->autoneg) { + config_genmii_advert(mii_info); + genmii_restart_aneg(mii_info); + } else + genmii_setup_forced(mii_info); + + return 0; +} + +static int genmii_update_link(struct ugeth_mii_info *mii_info) +{ + u16 status; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Do a fake read */ + phy_read(mii_info, MII_BMSR); + + /* Read link and autonegotiation status */ + status = phy_read(mii_info, MII_BMSR); + if ((status & BMSR_LSTATUS) == 0) + mii_info->link = 0; + else + mii_info->link = 1; + + /* If we are autonegotiating, and not done, + * return an error */ + if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE)) + return -EAGAIN; + + return 0; +} + +static int genmii_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + if (mii_info->autoneg) { + status = phy_read(mii_info, MII_LPA); + + if (status & (LPA_10FULL | LPA_100FULL)) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + if (status & (LPA_100FULL | LPA_100HALF)) + mii_info->speed = SPEED_100; + else + mii_info->speed = SPEED_10; + mii_info->pause = 0; + } + /* On non-aneg, we assume what we put in BMCR is the speed, + * though magic-aneg shouldn't prevent this case from occurring + */ + + return 0; +} + +static int marvell_init(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_write(mii_info, 0x14, 0x0cd2); + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) | BMCR_RESET); + msleep(4000); + + return 0; +} + +static int marvell_config_aneg(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* The Marvell PHY has an errata which requires + * that certain registers get written in order + * to restart autonegotiation */ + phy_write(mii_info, MII_BMCR, BMCR_RESET); + + phy_write(mii_info, 0x1d, 0x1f); + phy_write(mii_info, 0x1e, 0x200c); + phy_write(mii_info, 0x1d, 0x5); + phy_write(mii_info, 0x1e, 0); + phy_write(mii_info, 0x1e, 0x100); + + gbit_config_aneg(mii_info); + + return 0; +} + +static int marvell_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + 
ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + int speed; + status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS); + + /* Get the duplexity */ + if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + + /* Get the speed */ + speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK; + switch (speed) { + case MII_M1011_PHY_SPEC_STATUS_1000: + mii_info->speed = SPEED_1000; + break; + case MII_M1011_PHY_SPEC_STATUS_100: + mii_info->speed = SPEED_100; + break; + default: + mii_info->speed = SPEED_10; + break; + } + mii_info->pause = 0; + } + + return 0; +} + +static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupts by reading the reg */ + phy_read(mii_info, MII_M1011_IEVENT); + + return 0; +} + +static int marvell_config_intr(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT); + else + phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR); + + return 0; +} + +static int cis820x_init(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_write(mii_info, MII_CIS8201_AUX_CONSTAT, + MII_CIS8201_AUXCONSTAT_INIT); + phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT); + + return 0; +} + +static int cis820x_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + int speed; + + status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT); + if (status & MII_CIS8201_AUXCONSTAT_DUPLEX) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + + speed = status & MII_CIS8201_AUXCONSTAT_SPEED; + + switch (speed) { + case MII_CIS8201_AUXCONSTAT_GBIT: + mii_info->speed = SPEED_1000; + break; + case MII_CIS8201_AUXCONSTAT_100: + mii_info->speed = SPEED_100; + break; + default: + mii_info->speed = SPEED_10; + break; + } + } + + return 0; +} + +static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_read(mii_info, MII_CIS8201_ISTAT); + + return 0; +} + +static int cis820x_config_intr(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK); + else + phy_write(mii_info, MII_CIS8201_IMASK, 0); + + return 0; +} + +#define DM9161_DELAY 10 + +static int dm9161_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + 
status = phy_read(mii_info, MII_DM9161_SCSR); + if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H)) + mii_info->speed = SPEED_100; + else + mii_info->speed = SPEED_10; + + if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F)) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + } + + return 0; +} + +static int dm9161_config_aneg(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv = mii_info->priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (0 == priv->resetdone) + return -EAGAIN; + + return 0; +} + +static void dm9161_timer(unsigned long data) +{ + struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; + struct dm9161_private *priv = mii_info->priv; + u16 status = phy_read(mii_info, MII_BMSR); + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (status & BMSR_ANEGCOMPLETE) { + priv->resetdone = 1; + } else + mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); +} + +static int dm9161_init(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Allocate the private data structure */ + priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL); + + if (NULL == priv) + return -ENOMEM; + + mii_info->priv = priv; + + /* Reset is not done yet */ + priv->resetdone = 0; + + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) | BMCR_RESET); + + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE); + + config_genmii_advert(mii_info); + /* Start/Restart aneg */ + genmii_config_aneg(mii_info); + + /* Start a timer for DM9161_DELAY seconds to wait + * for the PHY to be ready */ + init_timer(&priv->timer); + priv->timer.function = &dm9161_timer; + priv->timer.data = (unsigned long)mii_info; + mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); + + return 0; +} + +static void dm9161_close(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv = mii_info->priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + del_timer_sync(&priv->timer); + kfree(priv); +} + +static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info) +{ +/* FIXME: This lines are for BUG fixing in the mpc8325. +Remove this from here when it's fixed */ + if (bcsr_regs == NULL) + bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); + bcsr_regs[14] |= 0x40; + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupts by reading the reg */ + phy_read(mii_info, MII_DM9161_INTR); + + + return 0; +} + +static int dm9161_config_intr(struct ugeth_mii_info *mii_info) +{ +/* FIXME: This lines are for BUG fixing in the mpc8325. 
+Remove this from here when it's fixed */ + if (bcsr_regs == NULL) { + bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); + bcsr_regs[14] &= ~0x40; + } + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT); + else + phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP); + + return 0; +} + +/* Cicada 820x */ +static struct phy_info phy_info_cis820x = { + .phy_id = 0x000fc440, + .name = "Cicada Cis8204", + .phy_id_mask = 0x000fffc0, + .features = MII_GBIT_FEATURES, + .init = &cis820x_init, + .config_aneg = &gbit_config_aneg, + .read_status = &cis820x_read_status, + .ack_interrupt = &cis820x_ack_interrupt, + .config_intr = &cis820x_config_intr, +}; + +static struct phy_info phy_info_dm9161 = { + .phy_id = 0x0181b880, + .phy_id_mask = 0x0ffffff0, + .name = "Davicom DM9161E", + .init = dm9161_init, + .config_aneg = dm9161_config_aneg, + .read_status = dm9161_read_status, + .close = dm9161_close, +}; + +static struct phy_info phy_info_dm9161a = { + .phy_id = 0x0181b8a0, + .phy_id_mask = 0x0ffffff0, + .name = "Davicom DM9161A", + .features = MII_BASIC_FEATURES, + .init = dm9161_init, + .config_aneg = dm9161_config_aneg, + .read_status = dm9161_read_status, + .ack_interrupt = dm9161_ack_interrupt, + .config_intr = dm9161_config_intr, + .close = dm9161_close, +}; + +static struct phy_info phy_info_marvell = { + .phy_id = 0x01410c00, + .phy_id_mask = 0xffffff00, + .name = "Marvell 88E11x1", + .features = MII_GBIT_FEATURES, + .init = &marvell_init, + .config_aneg = &marvell_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, +}; + +static struct phy_info phy_info_genmii = { + .phy_id = 0x00000000, + .phy_id_mask = 0x00000000, + .name = "Generic MII", + .features = MII_BASIC_FEATURES, + .config_aneg = genmii_config_aneg, + .read_status = genmii_read_status, +}; + +static struct phy_info *phy_info[] = { + &phy_info_cis820x, + &phy_info_marvell, + &phy_info_dm9161, + &phy_info_dm9161a, + &phy_info_genmii, + NULL +}; + +u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum) +{ + u16 retval; + unsigned long flags; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irqsave(&mii_info->mdio_lock, flags); + retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum); + spin_unlock_irqrestore(&mii_info->mdio_lock, flags); + + return retval; +} + +void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val) +{ + unsigned long flags; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irqsave(&mii_info->mdio_lock, flags); + mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val); + spin_unlock_irqrestore(&mii_info->mdio_lock, flags); +} + +/* Use the PHY ID registers to determine what type of PHY is attached + * to device dev. 
Return a struct phy_info structure describing that PHY
+ */
+struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info)
+{
+ u16 phy_reg;
+ u32 phy_ID;
+ int i;
+ struct phy_info *theInfo = NULL;
+ struct net_device *dev = mii_info->dev;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Grab the bits from PHYIR1, and put them in the upper half */
+ phy_reg = phy_read(mii_info, MII_PHYSID1);
+ phy_ID = (phy_reg & 0xffff) << 16;
+
+ /* Grab the bits from PHYIR2, and put them in the lower half */
+ phy_reg = phy_read(mii_info, MII_PHYSID2);
+ phy_ID |= (phy_reg & 0xffff);
+
+ /* loop through all the known PHY types, and find one that */
+ /* matches the ID we read from the PHY. */
+ for (i = 0; phy_info[i]; i++)
+ if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){
+ theInfo = phy_info[i];
+ break;
+ }
+
+ /* This shouldn't happen, as we have generic PHY support */
+ if (theInfo == NULL) {
+ ugphy_info("%s: PHY id %x is not supported!", dev->name,
+ phy_ID);
+ return NULL;
+ } else {
+ ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name,
+ phy_ID);
+ }
+
+ return theInfo;
+}
diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h
new file mode 100644
index 000000000000..2f98b8f1bb0a
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish
+ *
+ * Description:
+ * UCC GETH Driver -- PHY handling
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ * + */ +#ifndef __UCC_GETH_PHY_H__ +#define __UCC_GETH_PHY_H__ + +#define MII_end ((u32)-2) +#define MII_read ((u32)-1) + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 + +#define UGETH_AN_TIMEOUT 2000 + +/* 1000BT control (Marvell & BCM54xx at least) */ +#define MII_1000BASETCONTROL 0x09 +#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 +#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 + +/* Cicada Extended Control Register 1 */ +#define MII_CIS8201_EXT_CON1 0x17 +#define MII_CIS8201_EXTCON1_INIT 0x0000 + +/* Cicada Interrupt Mask Register */ +#define MII_CIS8201_IMASK 0x19 +#define MII_CIS8201_IMASK_IEN 0x8000 +#define MII_CIS8201_IMASK_SPEED 0x4000 +#define MII_CIS8201_IMASK_LINK 0x2000 +#define MII_CIS8201_IMASK_DUPLEX 0x1000 +#define MII_CIS8201_IMASK_MASK 0xf000 + +/* Cicada Interrupt Status Register */ +#define MII_CIS8201_ISTAT 0x1a +#define MII_CIS8201_ISTAT_STATUS 0x8000 +#define MII_CIS8201_ISTAT_SPEED 0x4000 +#define MII_CIS8201_ISTAT_LINK 0x2000 +#define MII_CIS8201_ISTAT_DUPLEX 0x1000 + +/* Cicada Auxiliary Control/Status Register */ +#define MII_CIS8201_AUX_CONSTAT 0x1c +#define MII_CIS8201_AUXCONSTAT_INIT 0x0004 +#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020 +#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018 +#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010 +#define MII_CIS8201_AUXCONSTAT_100 0x0008 + +/* 88E1011 PHY Status Register */ +#define MII_M1011_PHY_SPEC_STATUS 0x11 +#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 +#define MII_M1011_PHY_SPEC_STATUS_100 0x4000 +#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 +#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 +#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 +#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400 + +#define MII_M1011_IEVENT 0x13 +#define MII_M1011_IEVENT_CLEAR 0x0000 + +#define MII_M1011_IMASK 0x12 +#define MII_M1011_IMASK_INIT 0x6400 +#define MII_M1011_IMASK_CLEAR 0x0000 + +#define MII_DM9161_SCR 0x10 +#define MII_DM9161_SCR_INIT 0x0610 + +/* DM9161 Specified Configuration and Status Register */ +#define MII_DM9161_SCSR 0x11 +#define MII_DM9161_SCSR_100F 0x8000 +#define MII_DM9161_SCSR_100H 0x4000 +#define MII_DM9161_SCSR_10F 0x2000 +#define MII_DM9161_SCSR_10H 0x1000 + +/* DM9161 Interrupt Register */ +#define MII_DM9161_INTR 0x15 +#define MII_DM9161_INTR_PEND 0x8000 +#define MII_DM9161_INTR_DPLX_MASK 0x0800 +#define MII_DM9161_INTR_SPD_MASK 0x0400 +#define MII_DM9161_INTR_LINK_MASK 0x0200 +#define MII_DM9161_INTR_MASK 0x0100 +#define MII_DM9161_INTR_DPLX_CHANGE 0x0010 +#define MII_DM9161_INTR_SPD_CHANGE 0x0008 +#define MII_DM9161_INTR_LINK_CHANGE 0x0004 +#define MII_DM9161_INTR_INIT 0x0000 +#define MII_DM9161_INTR_STOP \ +(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \ + | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK) + +/* DM9161 10BT Configuration/Status */ +#define MII_DM9161_10BTCSR 0x12 +#define MII_DM9161_10BTCSR_INIT 0x7800 + +#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full | \ + SUPPORTED_Autoneg | \ + SUPPORTED_TP | \ + SUPPORTED_MII) + +#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ + SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full) + +#define MII_READ_COMMAND 0x00000001 + +#define MII_INTERRUPT_DISABLED 0x0 +#define MII_INTERRUPT_ENABLED 0x1 +/* Taken from mii_if_info and sungem_phy.h */ +struct ugeth_mii_info { + /* Information about the PHY type */ + /* And management functions */ + struct phy_info *phyinfo; + + ucc_mii_mng_t *mii_regs; + + /* forced speed & 
duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + + /* The most recently read link state */ + int link; + + /* Enabled Interrupts */ + u32 interrupts; + + u32 advertising; + int autoneg; + int mii_id; + + /* private data pointer */ + /* For use by PHYs to maintain extra state */ + void *priv; + + /* Provided by host chip */ + struct net_device *dev; + + /* A lock to ensure that only one thing can read/write + * the MDIO bus at a time */ + spinlock_t mdio_lock; + + /* Provided by ethernet driver */ + int (*mdio_read) (struct net_device * dev, int mii_id, int reg); + void (*mdio_write) (struct net_device * dev, int mii_id, int reg, + int val); +}; + +/* struct phy_info: a structure which defines attributes for a PHY + * + * id will contain a number which represents the PHY. During + * startup, the driver will poll the PHY to find out what its + * UID--as defined by registers 2 and 3--is. The 32-bit result + * gotten from the PHY will be ANDed with phy_id_mask to + * discard any bits which may change based on revision numbers + * unimportant to functionality + * + * There are 6 commands which take a ugeth_mii_info structure. + * Each PHY must declare config_aneg, and read_status. + */ +struct phy_info { + u32 phy_id; + char *name; + unsigned int phy_id_mask; + u32 features; + + /* Called to initialize the PHY */ + int (*init) (struct ugeth_mii_info * mii_info); + + /* Called to suspend the PHY for power */ + int (*suspend) (struct ugeth_mii_info * mii_info); + + /* Reconfigures autonegotiation (or disables it) */ + int (*config_aneg) (struct ugeth_mii_info * mii_info); + + /* Determines the negotiated speed and duplex */ + int (*read_status) (struct ugeth_mii_info * mii_info); + + /* Clears any pending interrupts */ + int (*ack_interrupt) (struct ugeth_mii_info * mii_info); + + /* Enables or disables interrupts */ + int (*config_intr) (struct ugeth_mii_info * mii_info); + + /* Clears up any memory if needed */ + void (*close) (struct ugeth_mii_info * mii_info); +}; + +struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info); +void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value); +int read_phy_reg(struct net_device *dev, int mii_id, int regnum); +void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info); +void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info, + u32 interrupts); + +struct dm9161_private { + struct timer_list timer; + int resetdone; +}; + +#endif /* __UCC_GETH_PHY_H__ */ diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index d3d0ec970318..ae971080e2e4 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -30,8 +30,8 @@ */ #define DRV_NAME "via-rhine" -#define DRV_VERSION "1.4.0" -#define DRV_RELDATE "June-27-2006" +#define DRV_VERSION "1.4.1" +#define DRV_RELDATE "July-24-2006" /* A few user-configurable values. @@ -44,6 +44,10 @@ static int max_interrupt_work = 20; Setting to > 1518 effectively disables this feature. */ static int rx_copybreak; +/* Work-around for broken BIOSes: they are unable to get the chip back out of + power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ +static int avoid_D3; + /* * In case you are looking for 'options[]' or 'full_duplex[]', they * are gone. Use ethtool(8) instead. @@ -63,7 +67,11 @@ static const int multicast_filter_limit = 32; There are no ill effects from too-large receive rings. */ #define TX_RING_SIZE 16 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. 
*/ +#ifdef CONFIG_VIA_RHINE_NAPI +#define RX_RING_SIZE 64 +#else #define RX_RING_SIZE 16 +#endif /* Operational parameters that usually are not changed. */ @@ -116,9 +124,11 @@ MODULE_LICENSE("GPL"); module_param(max_interrupt_work, int, 0); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); +module_param(avoid_D3, bool, 0); MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); /* Theory of Operation @@ -396,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev); static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); static void rhine_tx(struct net_device *dev); -static void rhine_rx(struct net_device *dev); +static int rhine_rx(struct net_device *dev, int limit); static void rhine_error(struct net_device *dev, int intr_status); static void rhine_set_rx_mode(struct net_device *dev); static struct net_device_stats *rhine_get_stats(struct net_device *dev); @@ -564,6 +574,32 @@ static void rhine_poll(struct net_device *dev) } #endif +#ifdef CONFIG_VIA_RHINE_NAPI +static int rhine_napipoll(struct net_device *dev, int *budget) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + int done, limit = min(dev->quota, *budget); + + done = rhine_rx(dev, limit); + *budget -= done; + dev->quota -= done; + + if (done < limit) { + netif_rx_complete(dev); + + iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | + IntrRxDropped | IntrRxNoBuf | IntrTxAborted | + IntrTxDone | IntrTxError | IntrTxUnderrun | + IntrPCIErr | IntrStatsMax | IntrLinkChange, + ioaddr + IntrEnable); + return 0; + } + else + return 1; +} +#endif + static void rhine_hw_init(struct net_device *dev, long pioaddr) { struct rhine_private *rp = netdev_priv(dev); @@ -743,6 +779,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = rhine_poll; +#endif +#ifdef CONFIG_VIA_RHINE_NAPI + dev->poll = rhine_napipoll; + dev->weight = 64; #endif if (rp->quirks & rqRhineI) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; @@ -789,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, } } rp->mii_if.phy_id = phy_id; + if (debug > 1 && avoid_D3) + printk(KERN_INFO "%s: No D3 power state at shutdown.\n", + dev->name); return 0; @@ -1014,6 +1057,8 @@ static void init_registers(struct net_device *dev) rhine_set_rx_mode(dev); + netif_poll_enable(dev); + /* Enable interrupts by setting the interrupt mask. 
*/ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | @@ -1268,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs * dev->name, intr_status); if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | - IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) - rhine_rx(dev); + IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { +#ifdef CONFIG_VIA_RHINE_NAPI + iowrite16(IntrTxAborted | + IntrTxDone | IntrTxError | IntrTxUnderrun | + IntrPCIErr | IntrStatsMax | IntrLinkChange, + ioaddr + IntrEnable); + + netif_rx_schedule(dev); +#else + rhine_rx(dev, RX_RING_SIZE); +#endif + } if (intr_status & (IntrTxErrSummary | IntrTxDone)) { if (intr_status & IntrTxErrSummary) { @@ -1367,13 +1422,12 @@ static void rhine_tx(struct net_device *dev) spin_unlock(&rp->lock); } -/* This routine is logically part of the interrupt handler, but isolated - for clarity and better register allocation. */ -static void rhine_rx(struct net_device *dev) +/* Process up to limit frames from receive ring */ +static int rhine_rx(struct net_device *dev, int limit) { struct rhine_private *rp = netdev_priv(dev); + int count; int entry = rp->cur_rx % RX_RING_SIZE; - int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx; if (debug > 4) { printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", @@ -1382,16 +1436,18 @@ static void rhine_rx(struct net_device *dev) } /* If EOP is set on the next entry, it's a new packet. Send it up. */ - while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) { + for (count = 0; count < limit; ++count) { struct rx_desc *desc = rp->rx_head_desc; u32 desc_status = le32_to_cpu(desc->rx_status); int data_size = desc_status >> 16; + if (desc_status & DescOwn) + break; + if (debug > 4) printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", desc_status); - if (--boguscnt < 0) - break; + if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { if ((desc_status & RxWholePkt) != RxWholePkt) { printk(KERN_WARNING "%s: Oversized Ethernet " @@ -1460,7 +1516,11 @@ static void rhine_rx(struct net_device *dev) PCI_DMA_FROMDEVICE); } skb->protocol = eth_type_trans(skb, dev); +#ifdef CONFIG_VIA_RHINE_NAPI + netif_receive_skb(skb); +#else netif_rx(skb); +#endif dev->last_rx = jiffies; rp->stats.rx_bytes += pkt_len; rp->stats.rx_packets++; @@ -1487,6 +1547,8 @@ static void rhine_rx(struct net_device *dev) } rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); } + + return count; } /* @@ -1776,6 +1838,7 @@ static int rhine_close(struct net_device *dev) spin_lock_irq(&rp->lock); netif_stop_queue(dev); + netif_poll_disable(dev); if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, " @@ -1857,7 +1920,8 @@ static void rhine_shutdown (struct pci_dev *pdev) } /* Hit power state D3 (sleep) */ - iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); + if (!avoid_D3) + iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); /* TODO: Check use of pci_enable_wake() */ diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index f5b0078eb4ad..aa9cd92f46b2 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs) if (PHYSR0 & PHYSR0_SPDG) status |= VELOCITY_SPEED_1000; - if (PHYSR0 & PHYSR0_SPD10) + else if (PHYSR0 & PHYSR0_SPD10) status |= VELOCITY_SPEED_10; else status |= VELOCITY_SPEED_100; @@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd u32 
status; status = check_connection_type(vptr->mac_regs); - cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; - if (status & VELOCITY_SPEED_100) + cmd->supported = SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + if (status & VELOCITY_SPEED_1000) + cmd->speed = SPEED_1000; + else if (status & VELOCITY_SPEED_100) cmd->speed = SPEED_100; else cmd->speed = SPEED_10; @@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev) { struct velocity_info *vptr = netdev_priv(dev); struct mac_regs __iomem * regs = vptr->mac_regs; - return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 0 : 1; + return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0; } static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 435e91ec4620..6b63b350cd52 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -118,7 +118,7 @@ static inline void openwin(card_t *card, u8 page) static inline void set_carrier(port_t *port) { - if (!sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD) + if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)) netif_carrier_on(port_to_dev(port)); else netif_carrier_off(port_to_dev(port)); @@ -127,10 +127,10 @@ static inline void set_carrier(port_t *port) static void sca_msci_intr(port_t *port) { - u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */ + u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */ - /* Reset MSCI TX underrun status bit */ - sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port); + /* Reset MSCI TX underrun and CDCD (ignored) status bit */ + sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port); if (stat & ST1_UDRN) { struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); @@ -138,6 +138,7 @@ static void sca_msci_intr(port_t *port) stats->tx_fifo_errors++; } + stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */ /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); diff --git a/drivers/net/wd.c b/drivers/net/wd.c index 7caa8dc88a58..b1ba1872f315 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c @@ -500,8 +500,8 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. */ -int -init_module(void) + +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index fa9d2c4edc93..2e8ac995d56f 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -447,6 +447,7 @@ config AIRO_CS tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) select CRYPTO + select CRYPTO_AES ---help--- This is the standard Linux driver to support Cisco/Aironet PCMCIA 802.11 wireless cards. 
This driver is the same as the Aironet diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 3889f79e7128..df317c1e12a8 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev, } if (sec->flags & SEC_AUTH_MODE) { secinfo->auth_mode = sec->auth_mode; - dprintk(", .auth_mode = %d\n", sec->auth_mode); + dprintk(", .auth_mode = %d", sec->auth_mode); } dprintk("\n"); if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index dafaa5ff5aa6..d500012fdc7a 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev) dev->name, local->fragm_threshold); } + /* Some firmwares lose antenna selection settings on reset */ + (void) hostap_set_antsel(local); + return res; } diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index d6ed5781b93a..317ace7f9aae 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, if (orinoco_lock(priv, &flags) != 0) return -EBUSY; - if (erq->pointer) { + if (erq->length > 0) { if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) index = priv->tx_key; @@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, if (erq->flags & IW_ENCODE_RESTRICTED) restricted = 1; - if (erq->pointer) { + if (erq->pointer && erq->length > 0) { priv->keys[index].len = cpu_to_le16(xlen); memset(priv->keys[index].data, 0, sizeof(priv->keys[index].data)); diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index 7f78b7801fb3..bcc7038130f6 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c @@ -242,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle) u_int save_cor; /* Doing it if hardware is gone is guaranteed crash */ - if (pcmcia_dev_present(link)) + if (!pcmcia_dev_present(link)) return -ENODEV; /* Save original COR value */ diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 662ecc8a33ff..c52e9bcf8d02 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface, zd->dev->name); usb_set_intfdata(interface, zd); + zd1201_enable(zd); /* zd1201 likes to startup enabled, */ + zd1201_disable(zd); /* interfering with all the wifis in range */ return 0; err_net: diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index efc9c4bd826f..da9d06bdb818 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -797,7 +797,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip) { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, { CR_ZD1211_RETRY_MAX, 0x2 }, { CR_SNIFFER_ON, 0 }, - { CR_RX_FILTER, AP_RX_FILTER }, + { CR_RX_FILTER, STA_RX_FILTER }, { CR_GROUP_HASH_P1, 0x00 }, { CR_GROUP_HASH_P2, 0x80000000 }, { CR_REG1, 0xa4 }, @@ -844,7 +844,7 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip) { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, { CR_ZD1211B_TXOP, 0x01800824 }, { CR_SNIFFER_ON, 0 }, - { CR_RX_FILTER, AP_RX_FILTER }, + { CR_RX_FILTER, STA_RX_FILTER }, { CR_GROUP_HASH_P1, 0x00 
}, { CR_GROUP_HASH_P2, 0x80000000 }, { CR_REG1, 0xa4 }, diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index 805121093ab5..069d2b467339 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h @@ -461,10 +461,15 @@ #define CR_RX_FILTER CTL_REG(0x068c) #define RX_FILTER_ASSOC_RESPONSE 0x0002 +#define RX_FILTER_REASSOC_RESPONSE 0x0008 #define RX_FILTER_PROBE_RESPONSE 0x0020 #define RX_FILTER_BEACON 0x0100 +#define RX_FILTER_DISASSOC 0x0400 #define RX_FILTER_AUTH 0x0800 -/* Sniff modus sets filter to 0xfffff */ +#define AP_RX_FILTER 0x0400feff +#define STA_RX_FILTER 0x0000ffff + +/* Monitor mode sets filter to 0xfffff */ #define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) #define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) @@ -546,9 +551,6 @@ #define CR_ZD1211B_TXOP CTL_REG(0x0b20) #define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) -#define AP_RX_FILTER 0x0400feff -#define STA_RX_FILTER 0x0000ffff - #define CWIN_SIZE 0x007f043f diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 3bdc54d128d0..d6f3e02a0b54 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -108,7 +108,9 @@ int zd_mac_init_hw(struct zd_mac *mac, u8 device_type) if (r) goto disable_int; - r = zd_set_encryption_type(chip, NO_WEP); + /* We must inform the device that we are doing encryption/decryption in + * software at the moment. */ + r = zd_set_encryption_type(chip, ENC_SNIFFER); if (r) goto disable_int; @@ -136,10 +138,8 @@ static int reset_mode(struct zd_mac *mac) { struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); struct zd_ioreq32 ioreqs[3] = { - { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE| - RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE }, + { CR_RX_FILTER, STA_RX_FILTER }, { CR_SNIFFER_ON, 0U }, - { CR_ENCRYPTION_TYPE, NO_WEP }, }; if (ieee->iw_mode == IW_MODE_MONITOR) { @@ -713,10 +713,10 @@ static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri) struct zd_rt_hdr { struct ieee80211_radiotap_header rt_hdr; u8 rt_flags; + u8 rt_rate; u16 rt_channel; u16 rt_chbitmask; - u16 rt_rate; -}; +} __attribute__((packed)); static void fill_rt_header(void *buffer, struct zd_mac *mac, const struct ieee80211_rx_stats *stats, @@ -735,14 +735,14 @@ static void fill_rt_header(void *buffer, struct zd_mac *mac, if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; + hdr->rt_rate = stats->rate / 5; + /* FIXME: 802.11a */ hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( _zd_chip_get_channel(&mac->chip))); hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); - - hdr->rt_rate = stats->rate / 5; } /* Returns 1 if the data packet is for us and 0 otherwise. */ diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 72f90525bf68..6320984126c7 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -323,7 +323,6 @@ static void disable_read_regs_int(struct zd_usb *usb) { struct zd_usb_interrupt *intr = &usb->intr; - ZD_ASSERT(in_interrupt()); spin_lock(&intr->lock); intr->read_regs_enabled = 0; spin_unlock(&intr->lock); @@ -545,11 +544,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, * be padded. 
Unaligned access might also happen if the length_info * structure is not present. */ - if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) { + if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) + { unsigned int l, k, n; for (i = 0, l = 0;; i++) { - k = le16_to_cpu(get_unaligned( - &length_info->length[i])); + k = le16_to_cpu(get_unaligned(&length_info->length[i])); n = l+k; if (n > length) return; diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index 3fae77ffb2fa..8a60f391ffcf 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -76,7 +76,7 @@ config HOTPLUG_PCI_IBM config HOTPLUG_PCI_ACPI tristate "ACPI PCI Hotplug driver" - depends on ACPI_DOCK && HOTPLUG_PCI + depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI) help Say Y here if you have a system that supports PCI Hotplug using ACPI. @@ -153,13 +153,6 @@ config HOTPLUG_PCI_SHPC_POLL_EVENT_MODE When in doubt, say N. -config HOTPLUG_PCI_SHPC_PHPRM_LEGACY - bool "For AMD SHPC only: Use $HRT for resource/configuration" - depends on HOTPLUG_PCI_SHPC && !ACPI - help - Say Y here for AMD SHPC. You have to select this option if you are - using this driver on platform with AMD SHPC. - config HOTPLUG_PCI_RPA tristate "RPA PCI Hotplug driver" depends on HOTPLUG_PCI && PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 34de5697983d..e2fef60c2d06 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -27,8 +27,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * - * Send feedback to , - * + * Send feedback to * */ diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index ef95d12fb32c..ae67a8f55ba1 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -26,7 +26,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* - * Send feedback to + * Send feedback to * */ diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 02be74caa89f..4afcaffd031c 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -254,8 +254,8 @@ int cpci_led_off(struct slot* slot) int cpci_configure_slot(struct slot* slot) { - unsigned char busnr; - struct pci_bus *child; + struct pci_bus *parent; + int fn; dbg("%s - enter", __FUNCTION__); @@ -276,23 +276,53 @@ int cpci_configure_slot(struct slot* slot) */ n = pci_scan_slot(slot->bus, slot->devfn); dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n); - if (n > 0) - pci_bus_add_devices(slot->bus); slot->dev = pci_get_slot(slot->bus, slot->devfn); if (slot->dev == NULL) { err("Could not find PCI device for slot %02x", slot->number); - return 1; + return -ENODEV; } } + parent = slot->dev->bus; - if (slot->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { - pci_read_config_byte(slot->dev, PCI_SECONDARY_BUS, &busnr); - child = pci_add_new_bus(slot->dev->bus, slot->dev, busnr); - pci_do_scan_bus(child); - pci_bus_size_bridges(child); + for (fn = 0; fn < 8; fn++) { + struct pci_dev *dev; + + dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn)); + if (!dev) + continue; + if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || + (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { + /* Find an unused bus number for the new bridge */ + struct pci_bus *child; + unsigned char busnr, start = parent->secondary; + unsigned char end = parent->subordinate; + + for (busnr = start; busnr <= end; busnr++) { + if (!pci_find_bus(pci_domain_nr(parent), + busnr)) + break; + } + if (busnr >= end) { + err("No free bus for hot-added bridge\n"); + pci_dev_put(dev); + continue; + } + child = pci_add_new_bus(parent, dev, busnr); + if (!child) { + err("Cannot add new bus for %s\n", + pci_name(dev)); + pci_dev_put(dev); + continue; + } + child->subordinate = pci_do_scan_bus(child); + pci_bus_size_bridges(child); + } + pci_dev_put(dev); } - pci_bus_assign_resources(slot->dev->bus); + pci_bus_assign_resources(parent); + pci_bus_add_devices(parent); + pci_enable_bridges(parent); dbg("%s - exit", __FUNCTION__); return 0; diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index ce89f5815861..eaea9d36a1bb 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -279,6 +279,11 @@ struct hpc_ops { #ifdef CONFIG_ACPI +#include +#include +#include +#include + #define pciehp_get_hp_hw_control_from_firmware(dev) \ pciehp_acpi_get_hp_hw_control_from_firmware(dev) static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 0d8fb6e607a1..6ab3b6cd2b54 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -38,10 +38,6 @@ #include "../pci.h" #include "pciehp.h" -#include -#include -#include -#include #ifdef DEBUG #define DBG_K_TRACE_ENTRY ((unsigned int)0x00000001) /* On function entry */ #define DBG_K_TRACE_EXIT ((unsigned int)0x00000002) /* On function exit */ diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 10e1a905c144..474e9cd0e9e4 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -139,9 +139,8 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, /** * pci_match_device - Tell if a PCI device structure has a matching * PCI device id structure - * @ids: array of PCI device id structures to search in - * 
@dev: the PCI device structure to match against * @drv: the PCI driver to match against + * @dev: the PCI device structure to match against * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. Returns the matching diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 50bfc1b2f3bf..478d0d28f7ad 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -30,23 +30,6 @@ MODULE_LICENSE("GPL"); /* global data */ static const char device_name[] = "pcieport-driver"; -static int pcie_portdrv_save_config(struct pci_dev *dev) -{ - return pci_save_state(dev); -} - -static int pcie_portdrv_restore_config(struct pci_dev *dev) -{ - int retval; - - pci_restore_state(dev); - retval = pci_enable_device(dev); - if (retval) - return retval; - pci_set_master(dev); - return 0; -} - /* * pcie_portdrv_probe - Probe PCI-Express port devices * @dev: PCI-Express port device being probed @@ -73,8 +56,10 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, "%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n", __FUNCTION__, dev->device, dev->vendor); } - if (pcie_port_device_register(dev)) + if (pcie_port_device_register(dev)) { + pci_disable_device(dev); return -ENOMEM; + } return 0; } @@ -86,6 +71,23 @@ static void pcie_portdrv_remove (struct pci_dev *dev) } #ifdef CONFIG_PM +static int pcie_portdrv_save_config(struct pci_dev *dev) +{ + return pci_save_state(dev); +} + +static int pcie_portdrv_restore_config(struct pci_dev *dev) +{ + int retval; + + pci_restore_state(dev); + retval = pci_enable_device(dev); + if (retval) + return retval; + pci_set_master(dev); + return 0; +} + static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state) { int ret = pcie_port_device_suspend(dev, state); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e3c78c39b7e4..73177429fe74 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -438,6 +438,7 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) pci_read_config_dword(dev, 0x48, ®ion); quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); } +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi ); /* @@ -990,6 +991,11 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) case 0x8070: /* P4G8X Deluxe */ asus_hides_smbus = 1; } + if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) + switch (dev->subsystem_device) { + case 0x80c9: /* PU-DLS */ + asus_hides_smbus = 1; + } if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) switch (dev->subsystem_device) { case 0x1751: /* M2N notebook */ @@ -1058,6 +1064,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asu DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge ); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge ); 
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge ); @@ -1081,10 +1088,10 @@ static void __init asus_hides_smbus_lpc(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc ); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc ); static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) { @@ -1511,6 +1518,63 @@ static void __devinit quirk_netmos(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); +static void __devinit quirk_e100_interrupt(struct pci_dev *dev) +{ + u16 command; + u32 bar; + u8 __iomem *csr; + u8 cmd_hi; + + switch (dev->device) { + /* PCI IDs taken from drivers/net/e100.c */ + case 0x1029: + case 0x1030 ... 0x1034: + case 0x1038 ... 0x103E: + case 0x1050 ... 0x1057: + case 0x1059: + case 0x1064 ... 0x106B: + case 0x1091 ... 0x1095: + case 0x1209: + case 0x1229: + case 0x2449: + case 0x2459: + case 0x245D: + case 0x27DC: + break; + default: + return; + } + + /* + * Some firmware hands off the e100 with interrupts enabled, + * which can cause a flood of interrupts if packets are + * received before the driver attaches to the device. So + * disable all e100 interrupts here. The driver will + * re-enable them when it's ready. + */ + pci_read_config_word(dev, PCI_COMMAND, &command); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar); + + if (!(command & PCI_COMMAND_MEMORY) || !bar) + return; + + csr = ioremap(bar, 8); + if (!csr) { + printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", + pci_name(dev)); + return; + } + + cmd_hi = readb(csr + 3); + if (cmd_hi == 0) { + printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts " + "enabled, disabling\n", pci_name(dev)); + writeb(1, csr + 3); + } + + iounmap(csr); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt); static void __devinit fixup_rev1_53c810(struct pci_dev* dev) { diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 622b3f8ba820..d529462d1b53 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -41,7 +41,7 @@ pci_do_find_bus(struct pci_bus* bus, unsigned char busnr) * in the global list of PCI buses. If the bus is found, a pointer to its * data structure is returned. If no bus is found, %NULL is returned. */ -struct pci_bus * __devinit pci_find_bus(int domain, int busnr) +struct pci_bus * pci_find_bus(int domain, int busnr) { struct pci_bus *bus = NULL; struct pci_bus *tmp_bus; @@ -61,7 +61,7 @@ struct pci_bus * __devinit pci_find_bus(int domain, int busnr) * @from: Previous PCI bus found, or %NULL for new search. * * Iterates through the list of known PCI busses. A new search is - * initiated by passing %NULL to the @from argument. Otherwise if + * initiated by passing %NULL as the @from argument. Otherwise if * @from is not %NULL, searches continue from next device on the * global list. 
*/ @@ -148,13 +148,14 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. If a PCI device is - * found with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its - * device structure is returned. Otherwise, %NULL is returned. - * A new search is initiated by passing %NULL to the @from argument. - * Otherwise if @from is not %NULL, searches continue from next device on the global list. + * found with a matching @vendor, @device, @ss_vendor and @ss_device, a + * pointer to its device structure is returned. Otherwise, %NULL is returned. + * A new search is initiated by passing %NULL as the @from argument. + * Otherwise if @from is not %NULL, searches continue from next device + * on the global list. * - * NOTE: Do not use this function anymore, use pci_get_subsys() instead, as - * the pci device returned by this function can disappear at any moment in + * NOTE: Do not use this function any more; use pci_get_subsys() instead, as + * the PCI device returned by this function can disappear at any moment in * time. */ static struct pci_dev * pci_find_subsys(unsigned int vendor, @@ -191,14 +192,15 @@ exit: * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids * @from: Previous PCI device found in search, or %NULL for new search. * - * Iterates through the list of known PCI devices. If a PCI device is - * found with a matching @vendor and @device, a pointer to its device structure is + * Iterates through the list of known PCI devices. If a PCI device is found + * with a matching @vendor and @device, a pointer to its device structure is * returned. Otherwise, %NULL is returned. - * A new search is initiated by passing %NULL to the @from argument. - * Otherwise if @from is not %NULL, searches continue from next device on the global list. + * A new search is initiated by passing %NULL as the @from argument. + * Otherwise if @from is not %NULL, searches continue from next device + * on the global list. * - * NOTE: Do not use this function anymore, use pci_get_device() instead, as - * the pci device returned by this function can disappear at any moment in + * NOTE: Do not use this function any more; use pci_get_device() instead, as + * the PCI device returned by this function can disappear at any moment in * time. */ struct pci_dev * @@ -215,11 +217,11 @@ pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev * * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids * @from: Previous PCI device found in search, or %NULL for new search. * - * Iterates through the list of known PCI devices. If a PCI device is - * found with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its + * Iterates through the list of known PCI devices. If a PCI device is found + * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its * device structure is returned, and the reference count to the device is * incremented. Otherwise, %NULL is returned. A new search is initiated by - * passing %NULL to the @from argument. Otherwise if @from is not %NULL, + * passing %NULL as the @from argument. Otherwise if @from is not %NULL, * searches continue from next device on the global list. * The reference count for @from is always decremented if it is not %NULL. 
*/ @@ -262,7 +264,7 @@ exit: * found with a matching @vendor and @device, the reference count to the * device is incremented and a pointer to its device structure is returned. * Otherwise, %NULL is returned. A new search is initiated by passing %NULL - * to the @from argument. Otherwise if @from is not %NULL, searches continue + * as the @from argument. Otherwise if @from is not %NULL, searches continue * from next device on the global list. The reference count for @from is * always decremented if it is not %NULL. */ @@ -279,11 +281,13 @@ pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids * @from: Previous PCI device found in search, or %NULL for new search. * - * Iterates through the list of known PCI devices in the reverse order of pci_find_device(). + * Iterates through the list of known PCI devices in the reverse order of + * pci_find_device(). * If a PCI device is found with a matching @vendor and @device, a pointer to * its device structure is returned. Otherwise, %NULL is returned. - * A new search is initiated by passing %NULL to the @from argument. - * Otherwise if @from is not %NULL, searches continue from previous device on the global list. + * A new search is initiated by passing %NULL as the @from argument. + * Otherwise if @from is not %NULL, searches continue from previous device + * on the global list. */ struct pci_dev * pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct pci_dev *from) @@ -317,7 +321,7 @@ exit: * found with a matching @class, the reference count to the device is * incremented and a pointer to its device structure is returned. * Otherwise, %NULL is returned. - * A new search is initiated by passing %NULL to the @from argument. + * A new search is initiated by passing %NULL as the @from argument. * Otherwise if @from is not %NULL, searches continue from next device * on the global list. The reference count for @from is always decremented * if it is not %NULL. diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index 738b1ef595a3..9ad18e62658d 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c @@ -601,12 +601,8 @@ static int ds_ioctl(struct inode * inode, struct file * file, ret = CS_BAD_ARGS; else { struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function); - if (p_dev == NULL) - ret = CS_BAD_ARGS; - else { - ret = pccard_get_configuration_info(s, p_dev, &buf->config); - pcmcia_put_dev(p_dev); - } + ret = pccard_get_configuration_info(s, p_dev, &buf->config); + pcmcia_put_dev(p_dev); } break; case DS_GET_FIRST_TUPLE: @@ -636,12 +632,8 @@ static int ds_ioctl(struct inode * inode, struct file * file, ret = CS_BAD_ARGS; else { struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function); - if (p_dev == NULL) - ret = CS_BAD_ARGS; - else { - ret = pccard_get_status(s, p_dev, &buf->status); - pcmcia_put_dev(p_dev); - } + ret = pccard_get_status(s, p_dev, &buf->status); + pcmcia_put_dev(p_dev); } break; case DS_VALIDATE_CIS: diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index 7bf25b88ea31..c8323399e9e4 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -245,10 +245,17 @@ int pccard_get_configuration_info(struct pcmcia_socket *s, return CS_SUCCESS; } - /* !!! This is a hack !!! 
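The pci_get_*() documentation updated above all follows the same convention: passing %NULL as @from starts a new search, each successful call returns a referenced device, and the reference held on @from is dropped automatically. A minimal usage sketch of that idiom (the vendor/device IDs are placeholders):

	struct pci_dev *pdev = NULL;

	/* pci_get_device() references the device it returns and releases
	 * the reference on the previous one, so no pci_dev_put() is needed
	 * inside the loop; call pci_dev_put(pdev) only when breaking out
	 * early. */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev))) {
		/* inspect pdev here */
	}
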
*/ - memcpy(&config->Attributes, &c->Attributes, sizeof(config_t)); - config->Attributes |= CONF_VALID_CLIENT; - config->CardValues = c->CardValues; + config->Attributes = c->Attributes | CONF_VALID_CLIENT; + config->Vcc = s->socket.Vcc; + config->Vpp1 = config->Vpp2 = s->socket.Vpp; + config->IntType = c->IntType; + config->ConfigBase = c->ConfigBase; + config->Status = c->Status; + config->Pin = c->Pin; + config->Copy = c->Copy; + config->Option = c->Option; + config->ExtStatus = c->ExtStatus; + config->Present = config->CardValues = c->CardValues; config->IRQAttributes = c->irq.Attributes; config->AssignedIRQ = s->irq.AssignedIRQ; config->BasePort1 = c->io.BasePort1; diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c index 3163e3d73da1..9d8b415eca79 100644 --- a/drivers/pnp/interface.c +++ b/drivers/pnp/interface.c @@ -265,8 +265,8 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at pnp_printf(buffer," disabled\n"); else pnp_printf(buffer," 0x%llx-0x%llx\n", - pnp_port_start(dev, i), - pnp_port_end(dev, i)); + (unsigned long long)pnp_port_start(dev, i), + (unsigned long long)pnp_port_end(dev, i)); } } for (i = 0; i < PNP_MAX_MEM; i++) { @@ -276,8 +276,8 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at pnp_printf(buffer," disabled\n"); else pnp_printf(buffer," 0x%llx-0x%llx\n", - pnp_mem_start(dev, i), - pnp_mem_end(dev, i)); + (unsigned long long)pnp_mem_start(dev, i), + (unsigned long long)pnp_mem_end(dev, i)); } } for (i = 0; i < PNP_MAX_IRQ; i++) { @@ -287,7 +287,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at pnp_printf(buffer," disabled\n"); else pnp_printf(buffer," %lld\n", - pnp_irq(dev, i)); + (unsigned long long)pnp_irq(dev, i)); } } for (i = 0; i < PNP_MAX_DMA; i++) { @@ -297,7 +297,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at pnp_printf(buffer," disabled\n"); else pnp_printf(buffer," %lld\n", - pnp_dma(dev, i)); + (unsigned long long)pnp_dma(dev, i)); } } ret = (buffer->curr - buf); diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 212268881857..dc79b0a0059f 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -173,6 +173,9 @@ pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table, return; } + if (p->producer_consumer == ACPI_PRODUCER) + return; + if (p->resource_type == ACPI_MEMORY_RANGE) pnpacpi_parse_allocated_memresource(res_table, p->minimum, p->address_length); @@ -252,9 +255,14 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, break; case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: + if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER) + return AE_OK; break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: + if (res->data.extended_irq.producer_consumer == ACPI_PRODUCER) + return AE_OK; + for (i = 0; i < res->data.extended_irq.interrupt_count; i++) { pnpacpi_parse_allocated_irqresource(res_table, res->data.extended_irq.interrupts[i], diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index b154b3f52cbe..551f58e29810 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -346,7 +346,7 @@ static int insert_device(struct pnp_dev *dev, struct pnp_bios_node * node) dev->flags = node->flags; if (!(dev->flags & PNPBIOS_NO_CONFIG)) dev->capabilities |= PNP_CONFIGURABLE; - if (!(dev->flags & PNPBIOS_NO_DISABLE)) + if (!(dev->flags & PNPBIOS_NO_DISABLE) && pnpbios_is_dynamic(dev)) 
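The pnp/interface.c hunk above adds unsigned long long casts in front of every %llx/%lld conversion. The resource accessors may be narrower than 64 bits depending on configuration, so without the cast the varargs would not match the 64-bit format specifier on 32-bit builds. A one-line illustration of the pattern:

	/* the cast keeps the argument width in step with %llx regardless of
	 * how wide the underlying resource type happens to be */
	printk(KERN_INFO "io 0x%llx-0x%llx\n",
	       (unsigned long long)pnp_port_start(dev, 0),
	       (unsigned long long)pnp_port_end(dev, 0));
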
dev->capabilities |= PNP_DISABLE; dev->capabilities |= PNP_READ; if (pnpbios_is_dynamic(dev)) diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index d6d1bff52b8e..2c7de79c83b9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -69,12 +69,12 @@ static void s3c_rtc_setaie(int to) pr_debug("%s: aie=%d\n", __FUNCTION__, to); - tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; + tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; if (to) tmp |= S3C2410_RTCALM_ALMEN; - writeb(tmp, S3C2410_RTCALM); + writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); } static void s3c_rtc_setpie(int to) @@ -84,12 +84,12 @@ static void s3c_rtc_setpie(int to) pr_debug("%s: pie=%d\n", __FUNCTION__, to); spin_lock_irq(&s3c_rtc_pie_lock); - tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; + tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; if (to) tmp |= S3C2410_TICNT_ENABLE; - writeb(tmp, S3C2410_TICNT); + writeb(tmp, s3c_rtc_base + S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); } @@ -98,13 +98,13 @@ static void s3c_rtc_setfreq(int freq) unsigned int tmp; spin_lock_irq(&s3c_rtc_pie_lock); - tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE; + tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE; s3c_rtc_freq = freq; tmp |= (128 / freq)-1; - writeb(tmp, S3C2410_TICNT); + writeb(tmp, s3c_rtc_base + S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); } @@ -113,14 +113,15 @@ static void s3c_rtc_setfreq(int freq) static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) { unsigned int have_retried = 0; + void __iomem *base = s3c_rtc_base; retry_get_time: - rtc_tm->tm_min = readb(S3C2410_RTCMIN); - rtc_tm->tm_hour = readb(S3C2410_RTCHOUR); - rtc_tm->tm_mday = readb(S3C2410_RTCDATE); - rtc_tm->tm_mon = readb(S3C2410_RTCMON); - rtc_tm->tm_year = readb(S3C2410_RTCYEAR); - rtc_tm->tm_sec = readb(S3C2410_RTCSEC); + rtc_tm->tm_min = readb(base + S3C2410_RTCMIN); + rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR); + rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE); + rtc_tm->tm_mon = readb(base + S3C2410_RTCMON); + rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR); + rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC); /* the only way to work out wether the system was mid-update * when we read it is to check the second counter, and if it @@ -151,17 +152,26 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) { - /* the rtc gets round the y2k problem by just not supporting it */ + void __iomem *base = s3c_rtc_base; + int year = tm->tm_year - 100; - if (tm->tm_year < 100) + pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n", + tm->tm_year, tm->tm_mon, tm->tm_mday, + tm->tm_hour, tm->tm_min, tm->tm_sec); + + /* we get around y2k by simply not supporting it */ + + if (year < 0 || year >= 100) { + dev_err(dev, "rtc only supports 100 years\n"); return -EINVAL; + } - writeb(BIN2BCD(tm->tm_sec), S3C2410_RTCSEC); - writeb(BIN2BCD(tm->tm_min), S3C2410_RTCMIN); - writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR); - writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE); - writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON); - writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR); + writeb(BIN2BCD(tm->tm_sec), base + S3C2410_RTCSEC); + writeb(BIN2BCD(tm->tm_min), base + S3C2410_RTCMIN); + writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR); + writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE); + writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON); + writeb(BIN2BCD(year), base + 
S3C2410_RTCYEAR); return 0; } @@ -169,16 +179,17 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_time *alm_tm = &alrm->time; + void __iomem *base = s3c_rtc_base; unsigned int alm_en; - alm_tm->tm_sec = readb(S3C2410_ALMSEC); - alm_tm->tm_min = readb(S3C2410_ALMMIN); - alm_tm->tm_hour = readb(S3C2410_ALMHOUR); - alm_tm->tm_mon = readb(S3C2410_ALMMON); - alm_tm->tm_mday = readb(S3C2410_ALMDATE); - alm_tm->tm_year = readb(S3C2410_ALMYEAR); + alm_tm->tm_sec = readb(base + S3C2410_ALMSEC); + alm_tm->tm_min = readb(base + S3C2410_ALMMIN); + alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR); + alm_tm->tm_mon = readb(base + S3C2410_ALMMON); + alm_tm->tm_mday = readb(base + S3C2410_ALMDATE); + alm_tm->tm_year = readb(base + S3C2410_ALMYEAR); - alm_en = readb(S3C2410_RTCALM); + alm_en = readb(base + S3C2410_RTCALM); pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", alm_en, @@ -226,6 +237,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_time *tm = &alrm->time; + void __iomem *base = s3c_rtc_base; unsigned int alrm_en; pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n", @@ -234,32 +246,32 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec); - alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; - writeb(0x00, S3C2410_RTCALM); + alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; + writeb(0x00, base + S3C2410_RTCALM); if (tm->tm_sec < 60 && tm->tm_sec >= 0) { alrm_en |= S3C2410_RTCALM_SECEN; - writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC); + writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC); } if (tm->tm_min < 60 && tm->tm_min >= 0) { alrm_en |= S3C2410_RTCALM_MINEN; - writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN); + writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN); } if (tm->tm_hour < 24 && tm->tm_hour >= 0) { alrm_en |= S3C2410_RTCALM_HOUREN; - writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR); + writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR); } pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en); - writeb(alrm_en, S3C2410_RTCALM); + writeb(alrm_en, base + S3C2410_RTCALM); if (0) { - alrm_en = readb(S3C2410_RTCALM); + alrm_en = readb(base + S3C2410_RTCALM); alrm_en &= ~S3C2410_RTCALM_ALMEN; - writeb(alrm_en, S3C2410_RTCALM); + writeb(alrm_en, base + S3C2410_RTCALM); disable_irq_wake(s3c_rtc_alarmno); } @@ -319,8 +331,8 @@ static int s3c_rtc_ioctl(struct device *dev, static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) { - unsigned int rtcalm = readb(S3C2410_RTCALM); - unsigned int ticnt = readb (S3C2410_TICNT); + unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM); + unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); seq_printf(seq, "alarm_IRQ\t: %s\n", (rtcalm & S3C2410_RTCALM_ALMEN) ? 
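s3c_rtc_settime() above stores each field through BIN2BCD() because the S3C2410 RTC registers hold BCD-encoded values; the read paths return the raw BCD bytes, which the driver converts back elsewhere. As a reminder of the encoding (a standalone sketch, not driver code):

	unsigned int sec;

	writeb(BIN2BCD(59), base + S3C2410_RTCSEC);	/* stores 0x59 */
	sec = BCD2BIN(readb(base + S3C2410_RTCSEC));	/* back to 59 */
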
"yes" : "no" ); @@ -387,39 +399,40 @@ static struct rtc_class_ops s3c_rtcops = { static void s3c_rtc_enable(struct platform_device *pdev, int en) { + void __iomem *base = s3c_rtc_base; unsigned int tmp; if (s3c_rtc_base == NULL) return; if (!en) { - tmp = readb(S3C2410_RTCCON); - writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON); + tmp = readb(base + S3C2410_RTCCON); + writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON); - tmp = readb(S3C2410_TICNT); - writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT); + tmp = readb(base + S3C2410_TICNT); + writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT); } else { /* re-enable the device, and check it is ok */ - if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ + if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); - tmp = readb(S3C2410_RTCCON); - writeb(tmp | S3C2410_RTCCON_RTCEN , S3C2410_RTCCON); + tmp = readb(base + S3C2410_RTCCON); + writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON); } - if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ + if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n"); - tmp = readb(S3C2410_RTCCON); - writeb(tmp& ~S3C2410_RTCCON_CNTSEL , S3C2410_RTCCON); + tmp = readb(base + S3C2410_RTCCON); + writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON); } - if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ + if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ dev_info(&pdev->dev, "removing RTCCON_CLKRST\n"); - tmp = readb(S3C2410_RTCCON); - writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON); + tmp = readb(base + S3C2410_RTCCON); + writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON); } } } @@ -475,8 +488,8 @@ static int s3c_rtc_probe(struct platform_device *pdev) } s3c_rtc_mem = request_mem_region(res->start, - res->end-res->start+1, - pdev->name); + res->end-res->start+1, + pdev->name); if (s3c_rtc_mem == NULL) { dev_err(&pdev->dev, "failed to reserve memory region\n"); @@ -495,7 +508,8 @@ static int s3c_rtc_probe(struct platform_device *pdev) s3c_rtc_enable(pdev, 1); - pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON)); + pr_debug("s3c2410_rtc: RTCCON=%02x\n", + readb(s3c_rtc_base + S3C2410_RTCCON)); s3c_rtc_setfreq(s3c_rtc_freq); @@ -543,7 +557,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) /* save TICNT for anyone using periodic interrupts */ - ticnt_save = readb(S3C2410_TICNT); + ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); /* calculate time delta for suspend */ @@ -567,7 +581,7 @@ static int s3c_rtc_resume(struct platform_device *pdev) rtc_tm_to_time(&tm, &time.tv_sec); restore_time_delta(&s3c_rtc_delta, &time); - writeb(ticnt_save, S3C2410_TICNT); + writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); return 0; } #else diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 4bf03fb67f8d..d8e9b95f0a1a 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1730,8 +1730,8 @@ dasd_flush_request_queue(struct dasd_device * device) req = elv_next_request(device->request_queue); if (req == NULL) break; - dasd_end_request(req, 0); blkdev_dequeue_request(req); + dasd_end_request(req, 0); } spin_unlock_irq(&device->request_queue_lock); } diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 7f6fdac74706..9af02c79ce8a 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -48,18 +48,20 @@ struct dasd_devmap { }; /* 
- * dasd_servermap is used to store the server_id of all storage servers - * accessed by DASD device driver. + * dasd_server_ssid_map contains a globally unique storage server subsystem ID. + * dasd_server_ssid_list contains the list of all subsystem IDs accessed by + * the DASD device driver. */ -struct dasd_servermap { +struct dasd_server_ssid_map { struct list_head list; - struct server_id { + struct system_id { char vendor[4]; char serial[15]; + __u16 ssid; } sid; }; -static struct list_head dasd_serverlist; +static struct list_head dasd_server_ssid_list; /* * Parameter parsing functions for dasd= parameter. The syntax is: @@ -89,7 +91,7 @@ static char *dasd[256]; module_param_array(dasd, charp, NULL, 0); /* - * Single spinlock to protect devmap structures and lists. + * Single spinlock to protect devmap and servermap structures and lists. */ static DEFINE_SPINLOCK(dasd_devmap_lock); @@ -264,8 +266,9 @@ dasd_parse_keyword( char *parsestring ) { if (dasd_page_cache) return residual_str; dasd_page_cache = - kmem_cache_create("dasd_page_cache", PAGE_SIZE, 0, - SLAB_CACHE_DMA, NULL, NULL ); + kmem_cache_create("dasd_page_cache", PAGE_SIZE, + PAGE_SIZE, SLAB_CACHE_DMA, + NULL, NULL ); if (!dasd_page_cache) MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " "fixed buffer mode disabled."); @@ -858,39 +861,6 @@ static struct attribute_group dasd_attr_group = { .attrs = dasd_attrs, }; -/* - * Check if the related storage server is already contained in the - * dasd_serverlist. If server is not contained, create new entry. - * Return 0 if server was already in serverlist, - * 1 if the server was added successfully - * <0 in case of error. - */ -static int -dasd_add_server(struct dasd_uid *uid) -{ - struct dasd_servermap *new, *tmp; - - /* check if server is already contained */ - list_for_each_entry(tmp, &dasd_serverlist, list) - // normale cmp? - if (strncmp(tmp->sid.vendor, uid->vendor, - sizeof(tmp->sid.vendor)) == 0 - && strncmp(tmp->sid.serial, uid->serial, - sizeof(tmp->sid.serial)) == 0) - return 0; - - new = (struct dasd_servermap *) - kzalloc(sizeof(struct dasd_servermap), GFP_KERNEL); - if (!new) - return -ENOMEM; - - strncpy(new->sid.vendor, uid->vendor, sizeof(new->sid.vendor)); - strncpy(new->sid.serial, uid->serial, sizeof(new->sid.serial)); - list_add(&new->list, &dasd_serverlist); - return 1; -} - - /* * Return copy of the device unique identifier. */ @@ -910,6 +880,9 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid) /* * Register the given device unique identifier into devmap struct. + * In addition check if the related storage server subsystem ID is already + * contained in the dasd_server_ssid_list. If subsystem ID is not contained, + * create new entry. * Return 0 if server was already in serverlist, * 1 if the server was added successful * <0 in case of error. @@ -918,16 +891,39 @@ int dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) { struct dasd_devmap *devmap; - int rc; + struct dasd_server_ssid_map *srv, *tmp; devmap = dasd_find_busid(cdev->dev.bus_id); if (IS_ERR(devmap)) return PTR_ERR(devmap); + + /* generate entry for server_ssid_map */ + srv = (struct dasd_server_ssid_map *) + kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL); + if (!srv) + return -ENOMEM; + strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1); + strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1); + srv->sid.ssid = uid->ssid; + + /* server is already contained ? 
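The dasd_page_cache hunk above changes the align argument of kmem_cache_create() from 0 to PAGE_SIZE. With the six-argument signature of this kernel generation, kmem_cache_create(name, size, align, flags, ctor, dtor), that yields page-sized and page-aligned objects, presumably so the fixed-buffer mode mentioned in the warning gets properly aligned buffers. Roughly:

	/* page-sized, page-aligned, DMA-capable slab objects */
	cache = kmem_cache_create("dasd_page_cache", PAGE_SIZE, PAGE_SIZE,
				  SLAB_CACHE_DMA, NULL, NULL);
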
*/ spin_lock(&dasd_devmap_lock); devmap->uid = *uid; - rc = dasd_add_server(uid); + list_for_each_entry(tmp, &dasd_server_ssid_list, list) { + if (!memcmp(&srv->sid, &tmp->sid, + sizeof(struct system_id))) { + kfree(srv); + srv = NULL; + break; + } + } + + /* add servermap to serverlist */ + if (srv) + list_add(&srv->list, &dasd_server_ssid_list); spin_unlock(&dasd_devmap_lock); - return rc; + + return (srv ? 1 : 0); } EXPORT_SYMBOL_GPL(dasd_set_uid); @@ -995,7 +991,7 @@ dasd_devmap_init(void) INIT_LIST_HEAD(&dasd_hashlists[i]); /* Initialize servermap structure. */ - INIT_LIST_HEAD(&dasd_serverlist); + INIT_LIST_HEAD(&dasd_server_ssid_list); return 0; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 39c2281371b5..b7a7fac3f7c3 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -468,11 +468,11 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid) return -ENODEV; memset(uid, 0, sizeof(struct dasd_uid)); - strncpy(uid->vendor, confdata->ned1.HDA_manufacturer, - sizeof(uid->vendor) - 1); + memcpy(uid->vendor, confdata->ned1.HDA_manufacturer, + sizeof(uid->vendor) - 1); EBCASC(uid->vendor, sizeof(uid->vendor) - 1); - strncpy(uid->serial, confdata->ned1.HDA_location, - sizeof(uid->serial) - 1); + memcpy(uid->serial, confdata->ned1.HDA_location, + sizeof(uid->serial) - 1); EBCASC(uid->serial, sizeof(uid->serial) - 1); uid->ssid = confdata->neq.subsystemID; if (confdata->ned2.sneq.flags == 0x40) { @@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device) * Valide storage server of current device. */ static int -dasd_eckd_validate_server(struct dasd_device *device) +dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid) { int rc; @@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device) return 0; rc = dasd_eckd_psf_ssc(device); - if (rc) - /* may be requested feature is not available on server, - * therefore just report error and go ahead */ - DEV_MESSAGE(KERN_INFO, device, - "Perform Subsystem Function returned rc=%d", rc); + /* may be requested feature is not available on server, + * therefore just report error and go ahead */ + DEV_MESSAGE(KERN_INFO, device, + "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", + uid->vendor, uid->serial, uid->ssid, rc); /* RE-Read Configuration Data */ return dasd_eckd_read_conf(device); } @@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device) return rc; rc = dasd_set_uid(device->cdev, &uid); if (rc == 1) /* new server found */ - rc = dasd_eckd_validate_server(device); + rc = dasd_eckd_validate_server(device, &uid); if (rc) return rc; diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 1140302ff11d..ca7d51f7eccc 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -48,15 +48,6 @@ #define PRINT_ERR(x...) 
printk(KERN_ERR XPRAM_NAME " error:" x) -static struct sysdev_class xpram_sysclass = { - set_kset_name("xpram"), -}; - -static struct sys_device xpram_sys_device = { - .id = 0, - .cls = &xpram_sysclass, -}; - typedef struct { unsigned int size; /* size of xpram segment in pages */ unsigned int offset; /* start page of xpram segment */ @@ -451,8 +442,6 @@ static void __exit xpram_exit(void) } unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); blk_cleanup_queue(xpram_queue); - sysdev_unregister(&xpram_sys_device); - sysdev_class_unregister(&xpram_sysclass); } static int __init xpram_init(void) @@ -470,19 +459,7 @@ static int __init xpram_init(void) rc = xpram_setup_sizes(xpram_pages); if (rc) return rc; - rc = sysdev_class_register(&xpram_sysclass); - if (rc) - return rc; - - rc = sysdev_register(&xpram_sys_device); - if (rc) { - sysdev_class_unregister(&xpram_sysclass); - return rc; - } - rc = xpram_setup_blkdev(); - if (rc) - sysdev_unregister(&xpram_sys_device); - return rc; + return xpram_setup_blkdev(); } module_init(xpram_init); diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index 643b6d078563..56b87618b100 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -76,7 +76,7 @@ struct tape_class_device *register_tape_dev( device, "%s", tcd->device_name ); - rc = PTR_ERR(tcd->class_device); + rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0; if (rc) goto fail_with_cdev; rc = sysfs_create_link( diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index f26a2ee3aad8..3cba6c9fab11 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -152,7 +152,6 @@ ccwgroup_create(struct device *root, struct ccwgroup_device *gdev; int i; int rc; - int del_drvdata; if (argc > 256) /* disallow dumb users */ return -EINVAL; @@ -163,7 +162,6 @@ ccwgroup_create(struct device *root, atomic_set(&gdev->onoff, 0); - del_drvdata = 0; for (i = 0; i < argc; i++) { gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); @@ -180,10 +178,8 @@ ccwgroup_create(struct device *root, rc = -EINVAL; goto free_dev; } - } - for (i = 0; i < argc; i++) gdev->cdev[i]->dev.driver_data = gdev; - del_drvdata = 1; + } gdev->creator_id = creator_id; gdev->count = argc; @@ -226,9 +222,9 @@ error: free_dev: for (i = 0; i < argc; i++) if (gdev->cdev[i]) { - put_device(&gdev->cdev[i]->dev); - if (del_drvdata) + if (gdev->cdev[i]->dev.driver_data == gdev) gdev->cdev[i]->dev.driver_data = NULL; + put_device(&gdev->cdev[i]->dev); } kfree(gdev); return rc; diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index ac6e0c7e43d9..6d91c2eb205b 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) if (cdev->private->iretry) { cdev->private->iretry--; ret = cio_halt(sch); - return (ret == 0) ? -EBUSY : ret; + if (ret != -EBUSY) + return (ret == 0) ? -EBUSY : ret; } /* halt io unsuccessful. */ cdev->private->iretry = 255; /* 255 clear retries. 
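The tape_class fix above only converts the class_device pointer into an error code when it actually is one. class_device_create() signals failure with ERR_PTR(-errno) rather than NULL, so unconditionally taking PTR_ERR() of a valid pointer produced a bogus non-zero rc and the success path was treated as a failure. The general pattern (a sketch; the constructor name is illustrative):

	void *obj = create_object();	/* hypothetical constructor that
					 * returns ERR_PTR() on failure */
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* negative errno */
	/* obj is a valid pointer from here on */
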
*/ @@ -771,6 +772,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) stsch(sch->schid, &sch->schib); if (sch->schib.scsw.actl != 0 || + (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { /* * No final status yet or final status not yet delivered diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index a60124264bee..9e3de0bd59b5 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -263,6 +263,9 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) /* Abuse intparm for error reporting. */ if (IS_ERR(irb)) cdev->private->intparm = -EIO; + else if (irb->scsw.cc == 1) + /* Retry for deferred condition code. */ + cdev->private->intparm = -EAGAIN; else if ((irb->scsw.dstat != (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || (irb->scsw.cstat != 0)) { diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 5fff1f93973a..e1327b8fce00 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -8510,9 +8510,9 @@ static int qeth_ipv6_init(void) { qeth_old_arp_constructor = arp_tbl.constructor; - write_lock(&arp_tbl.lock); + write_lock_bh(&arp_tbl.lock); arp_tbl.constructor = qeth_arp_constructor; - write_unlock(&arp_tbl.lock); + write_unlock_bh(&arp_tbl.lock); arp_direct_ops = (struct neigh_ops*) kmalloc(sizeof(struct neigh_ops), GFP_KERNEL); @@ -8528,9 +8528,9 @@ qeth_ipv6_init(void) static void qeth_ipv6_uninit(void) { - write_lock(&arp_tbl.lock); + write_lock_bh(&arp_tbl.lock); arp_tbl.constructor = qeth_old_arp_constructor; - write_unlock(&arp_tbl.lock); + write_unlock_bh(&arp_tbl.lock); kfree(arp_direct_ops); } #endif /* CONFIG_QETH_IPV6 */ diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 9cd789b8acd4..adc9d8f2c28f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count) printk("\n"); } + +/****************************************************************/ +/****** Functions to handle the request ID hash table ********/ +/****************************************************************/ + +#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF + +static int zfcp_reqlist_init(struct zfcp_adapter *adapter) +{ + int i; + + adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head), + GFP_KERNEL); + + if (!adapter->req_list) + return -ENOMEM; + + for (i=0; ireq_list[i]); + + return 0; +} + +static void zfcp_reqlist_free(struct zfcp_adapter *adapter) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i; + + for (i=0; ireq_list[i])) + continue; + + list_for_each_entry_safe(request, tmp, + &adapter->req_list[i], list) + list_del(&request->list); + } + + kfree(adapter->req_list); +} + +void zfcp_reqlist_add(struct zfcp_adapter *adapter, + struct zfcp_fsf_req *fsf_req) +{ + unsigned int i; + + i = fsf_req->req_id % REQUEST_LIST_SIZE; + list_add_tail(&fsf_req->list, &adapter->req_list[i]); +} + +void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i, counter; + u64 dbg_tmp[2]; + + i = req_id % REQUEST_LIST_SIZE; + BUG_ON(list_empty(&adapter->req_list[i])); + + counter = 0; + list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) { + if (request->req_id == req_id) { + dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); + dbg_tmp[1] = (u64) counter; + debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 
16); + list_del(&request->list); + break; + } + counter++; + } +} + +struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter, + unsigned long req_id) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i; + + i = req_id % REQUEST_LIST_SIZE; + + list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) + if (request->req_id == req_id) + return request; + + return NULL; +} + +int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) +{ + unsigned int i; + + for (i=0; ireq_list[i])) + return 0; + + return 1; +} + +#undef ZFCP_LOG_AREA + /****************************************************************/ /************** Uncategorised Functions *************************/ /****************************************************************/ @@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) INIT_LIST_HEAD(&adapter->port_remove_lh); /* initialize list of fsf requests */ - spin_lock_init(&adapter->fsf_req_list_lock); - INIT_LIST_HEAD(&adapter->fsf_req_list_head); + spin_lock_init(&adapter->req_list_lock); + retval = zfcp_reqlist_init(adapter); + if (retval) { + ZFCP_LOG_INFO("request list initialization failed\n"); + goto failed_low_mem_buffers; + } /* initialize debug locks */ @@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) * !0 - struct zfcp_adapter data structure could not be removed * (e.g. still used) * locks: adapter list write lock is assumed to be held by caller - * adapter->fsf_req_list_lock is taken and released within this - * function and must not be held on entry */ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) @@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); dev_set_drvdata(&adapter->ccw_device->dev, NULL); /* sanity check: no pending FSF requests */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - retval = !list_empty(&adapter->fsf_req_list_head); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - if (retval) { + spin_lock_irqsave(&adapter->req_list_lock, flags); + retval = zfcp_reqlist_isempty(adapter); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + if (!retval) { ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " "%i requests outstanding\n", zfcp_get_busid_by_adapter(adapter), adapter, - atomic_read(&adapter->fsf_reqs_active)); + atomic_read(&adapter->reqs_active)); retval = -EBUSY; goto out; } @@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_free_low_mem_buffers(adapter); /* free memory of adapter data structure and queues */ zfcp_qdio_free_queues(adapter); + zfcp_reqlist_free(adapter); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); ZFCP_LOG_TRACE("freeing adapter structure\n"); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 57d8e4bfb8d9..fdabadeaa9ee 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device) retval = zfcp_adapter_scsi_register(adapter); if (retval) goto out_scsi_register; + + /* initialize request counter */ + BUG_ON(!zfcp_reqlist_isempty(adapter)); + adapter->req_no = 0; + zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 2df512a18e2c..94d1b74db356 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ 
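The request-ID hash introduced in zfcp_aux.c above spreads pending FSF requests over REQUEST_LIST_SIZE buckets keyed by the unique req_id. Condensed to its core, the bucket handling in these helpers follows this pattern:

	struct zfcp_fsf_req *request;
	unsigned int i;

	/* one list head per hash bucket */
	for (i = 0; i < REQUEST_LIST_SIZE; i++)
		INIT_LIST_HEAD(&adapter->req_list[i]);

	/* each request is filed in the bucket selected by its unique id */
	i = fsf_req->req_id % REQUEST_LIST_SIZE;
	list_add_tail(&fsf_req->list, &adapter->req_list[i]);

	/* and found there again when the response arrives */
	list_for_each_entry(request, &adapter->req_list[i], list)
		if (request->req_id == fsf_req->req_id)
			break;
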
-52,7 +52,7 @@ /********************* GENERAL DEFINES *********************************/ /* zfcp version number, it consists of major, minor, and patch-level number */ -#define ZFCP_VERSION "4.7.0" +#define ZFCP_VERSION "4.8.0" /** * zfcp_sg_to_address - determine kernel address from struct scatterlist @@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list) #define REQUEST_LIST_SIZE 128 /********************* SCSI SPECIFIC DEFINES *********************************/ -#define ZFCP_SCSI_ER_TIMEOUT (100*HZ) +#define ZFCP_SCSI_ER_TIMEOUT (10*HZ) /********************* CIO/QDIO SPECIFIC DEFINES *****************************/ @@ -886,11 +886,11 @@ struct zfcp_adapter { struct list_head port_remove_lh; /* head of ports to be removed */ u32 ports; /* number of remote ports */ - struct timer_list scsi_er_timer; /* SCSI err recovery watch */ - struct list_head fsf_req_list_head; /* head of FSF req list */ - spinlock_t fsf_req_list_lock; /* lock for ops on list of - FSF requests */ - atomic_t fsf_reqs_active; /* # active FSF reqs */ + struct timer_list scsi_er_timer; /* SCSI err recovery watch */ + atomic_t reqs_active; /* # active FSF reqs */ + unsigned long req_no; /* unique FSF req number */ + struct list_head *req_list; /* list of pending reqs */ + spinlock_t req_list_lock; /* request list lock */ struct zfcp_qdio_queue request_queue; /* request queue */ u32 fsf_req_seq_no; /* FSF cmnd seq number */ wait_queue_head_t request_wq; /* can be used to wait for @@ -986,6 +986,7 @@ struct zfcp_unit { /* FSF request */ struct zfcp_fsf_req { struct list_head list; /* list of FSF requests */ + unsigned long req_id; /* unique request ID */ struct zfcp_adapter *adapter; /* adapter request belongs to */ u8 sbal_number; /* nr of SBALs free for use */ u8 sbal_first; /* first SBAL for this request */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 8ec8da0beaa8..7f60b6fdf724 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int); static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); +static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); +static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); @@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *); static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); -static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); -static int zfcp_erp_action_dismiss_port(struct zfcp_port *); -static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *); -static int zfcp_erp_action_dismiss(struct zfcp_erp_action *); +static void zfcp_erp_action_dismiss_port(struct zfcp_port *); +static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); +static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); static int zfcp_erp_action_enqueue(int, struct 
zfcp_adapter *, struct zfcp_port *, struct zfcp_unit *); @@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data) zfcp_erp_adapter_reopen(adapter, 0); } -/* - * function: zfcp_fsf_scsi_er_timeout_handler +/** + * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks * - * purpose: This function needs to be called whenever a SCSI error recovery - * action (abort/reset) does not return. - * Re-opening the adapter means that the command can be returned - * by zfcp (it is guarranteed that it does not return via the - * adapter anymore). The buffer can then be used again. - * - * returns: sod all + * This function needs to be called whenever a SCSI error recovery + * action (abort/reset) does not return. Re-opening the adapter means + * that the abort/reset command can be returned by zfcp. It won't complete + * via the adapter anymore (because qdio queues are closed). If ERP is + * already running on this adapter it will be stopped. */ -void -zfcp_fsf_scsi_er_timeout_handler(unsigned long data) +void zfcp_fsf_scsi_er_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; + unsigned long flags; ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " "Restarting all operations on the adapter %s\n", zfcp_get_busid_by_adapter(adapter)); debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); - zfcp_erp_adapter_reopen(adapter, 0); - return; + write_lock_irqsave(&adapter->erp_lock, flags); + if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, + &adapter->status)) { + zfcp_erp_modify_adapter_status(adapter, + ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, + ZFCP_CLEAR); + zfcp_erp_action_dismiss_adapter(adapter); + write_unlock_irqrestore(&adapter->erp_lock, flags); + /* dismiss all pending requests including requests for ERP */ + zfcp_fsf_req_dismiss_all(adapter); + adapter->fsf_req_seq_no = 0; + } else + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_erp_adapter_reopen(adapter, 0); } /* @@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask) return retval; } -/* - * function: - * - * purpose: disable I/O, - * return any open requests and clean them up, - * aim: no pending and incoming I/O - * - * returns: +/** + * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests */ -static void -zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) +static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) { debug_text_event(adapter->erp_dbf, 6, "a_bl"); zfcp_erp_modify_adapter_status(adapter, @@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) clear_mask, ZFCP_CLEAR); } -/* - * function: - * - * purpose: enable I/O - * - * returns: +/** + * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests */ -static void -zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) +static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { debug_text_event(adapter->erp_dbf, 6, "a_ubl"); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); @@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) struct zfcp_adapter *adapter = erp_action->adapter; if (erp_action->fsf_req) { - /* take lock to ensure that request is not being deleted meanwhile */ - spin_lock(&adapter->fsf_req_list_lock); - /* check whether fsf req does still exist */ - list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) - if (fsf_req == erp_action->fsf_req) - 
break; - if (fsf_req && (fsf_req->erp_action == erp_action)) { + /* take lock to ensure that request is not deleted meanwhile */ + spin_lock(&adapter->req_list_lock); + if ((!zfcp_reqlist_ismember(adapter, + erp_action->fsf_req->req_id)) && + (fsf_req->erp_action == erp_action)) { /* fsf_req still exists */ debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); debug_event(adapter->erp_dbf, 3, &fsf_req, sizeof (unsigned long)); - /* dismiss fsf_req of timed out or dismissed erp_action */ + /* dismiss fsf_req of timed out/dismissed erp_action */ if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | ZFCP_STATUS_ERP_TIMEDOUT)) { debug_text_event(adapter->erp_dbf, 3, @@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) */ erp_action->fsf_req = NULL; } - spin_unlock(&adapter->fsf_req_list_lock); + spin_unlock(&adapter->req_list_lock); } else debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); return retval; } -/* - * purpose: generic handler for asynchronous events related to erp_action events - * (normal completion, time-out, dismissing, retry after - * low memory condition) +/** + * zfcp_erp_async_handler_nolock - complete erp_action * - * note: deletion of timer is not required (e.g. in case of a time-out), - * but a second try does no harm, - * we leave it in here to allow for greater simplification - * - * returns: 0 - there was an action to handle - * !0 - otherwise + * Used for normal completion, time-out, dismissal and failure after + * low memory condition. */ -static int -zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, + unsigned long set_mask) { - int retval; struct zfcp_adapter *adapter = erp_action->adapter; if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { @@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, del_timer(&erp_action->timer); erp_action->status |= set_mask; zfcp_erp_action_ready(erp_action); - retval = 0; } else { /* action is ready or gone - nothing to do */ debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int)); - retval = 1; } - - return retval; } -/* - * purpose: generic handler for asynchronous events related to erp_action - * events (normal completion, time-out, dismissing, retry after - * low memory condition) - * - * note: deletion of timer is not required (e.g. 
in case of a time-out), - * but a second try does no harm, - * we leave it in here to allow for greater simplification - * - * returns: 0 - there was an action to handle - * !0 - otherwise +/** + * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking */ -int -zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, + unsigned long set_mask) { struct zfcp_adapter *adapter = erp_action->adapter; unsigned long flags; - int retval; write_lock_irqsave(&adapter->erp_lock, flags); - retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); + zfcp_erp_async_handler_nolock(erp_action, set_mask); write_unlock_irqrestore(&adapter->erp_lock, flags); - - return retval; } /* @@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data) zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); } -/* - * purpose: is called for an erp_action which needs to be ended - * though not being done, - * this is usually required if an higher is generated, - * action gets an appropriate flag and will be processed - * accordingly +/** + * zfcp_erp_action_dismiss - dismiss an erp_action * - * locks: erp_lock held (thus we need to call another handler variant) + * adapter->erp_lock must be held + * + * Dismissal of an erp_action is usually required if an erp_action of + * higher priority is generated. */ -static int -zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) +static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; @@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); - - return 0; } int @@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) return retval; } -/* - * function: zfcp_qdio_cleanup - * - * purpose: cleans up QDIO operation for the specified adapter - * - * returns: 0 - successful cleanup - * !0 - failed cleanup +/** + * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter */ -int +static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_SUCCEEDED; int first_used; int used_count; struct zfcp_adapter *adapter = erp_action->adapter; @@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " "queues on adapter %s\n", zfcp_get_busid_by_adapter(adapter)); - retval = ZFCP_ERP_FAILED; - goto out; + return; } /* * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that * do_QDIO won't be called while qdio_shutdown is in progress. 
*/ - write_lock_irq(&adapter->request_queue.queue_lock); atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); write_unlock_irq(&adapter->request_queue.queue_lock); @@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) adapter->request_queue.free_index = 0; atomic_set(&adapter->request_queue.free_count, 0); adapter->request_queue.distance_from_int = 0; - out: - return retval; } static int @@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) "%s)\n", zfcp_get_busid_by_adapter(adapter)); ret = ZFCP_ERP_FAILED; } - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { - ZFCP_LOG_INFO("error: exchange port data failed (adapter " + + /* don't treat as error for the sake of compatibility */ + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) + ZFCP_LOG_INFO("warning: exchange port data failed (adapter " "%s\n", zfcp_get_busid_by_adapter(adapter)); - ret = ZFCP_ERP_FAILED; - } return ret; } @@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action return retval; } -/* - * function: zfcp_fsf_cleanup - * - * purpose: cleanup FSF operation for specified adapter - * - * returns: 0 - FSF operation successfully cleaned up - * !0 - failed to cleanup FSF operation for this adapter +/** + * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter */ -static int +static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_SUCCEEDED; struct zfcp_adapter *adapter = erp_action->adapter; /* @@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) /* all ports and units are closed */ zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); - - return retval; } /* @@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, } -static int -zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) +void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) { - int retval = 0; struct zfcp_port *port; debug_text_event(adapter->erp_dbf, 5, "a_actab"); @@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) else list_for_each_entry(port, &adapter->port_list_head, list) zfcp_erp_action_dismiss_port(port); - - return retval; } -static int -zfcp_erp_action_dismiss_port(struct zfcp_port *port) +static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) { - int retval = 0; struct zfcp_unit *unit; struct zfcp_adapter *adapter = port->adapter; @@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port) else list_for_each_entry(unit, &port->unit_list_head, list) zfcp_erp_action_dismiss_unit(unit); - - return retval; } -static int -zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) +static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) { - int retval = 0; struct zfcp_adapter *adapter = unit->port->adapter; debug_text_event(adapter->erp_dbf, 5, "u_actab"); debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) zfcp_erp_action_dismiss(&unit->erp_action); - - return retval; } static inline void diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index d02366004cdd..146d7a2b4c4a 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *); 
extern void zfcp_qdio_free_queues(struct zfcp_adapter *); extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, struct zfcp_fsf_req *); -extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *); extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req (struct zfcp_fsf_req *, int, int); @@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int); extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); +extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); extern int zfcp_erp_port_reopen(struct zfcp_port *, int); @@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *); extern int zfcp_erp_thread_setup(struct zfcp_adapter *); extern int zfcp_erp_thread_kill(struct zfcp_adapter *); extern int zfcp_erp_wait(struct zfcp_adapter *); -extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); +extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); extern int zfcp_test_link(struct zfcp_port *); @@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, struct zfcp_fsf_req *); extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, struct scsi_cmnd *); +extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *); +extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long); +extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *, + unsigned long); +extern int zfcp_reqlist_isempty(struct zfcp_adapter *); #endif /* ZFCP_EXT_H */ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 31db2b06faba..ff2eacf5ec8c 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, struct fsf_link_down_info *); static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); -static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); /* association between FSF command and FSF QTCB type */ static u32 fsf_qtcb_type[] = { @@ -146,49 +145,50 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) kfree(fsf_req); } -/* - * function: - * - * purpose: - * - * returns: - * - * note: qdio queues shall be down (no ongoing inbound processing) +/** + * zfcp_fsf_req_dismiss - dismiss a single fsf request */ -int -zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) +static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter, + struct zfcp_fsf_req *fsf_req, + unsigned int counter) { - struct zfcp_fsf_req *fsf_req, *tmp; - unsigned long flags; - LIST_HEAD(remove_queue); + u64 dbg_tmp[2]; - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_splice_init(&adapter->fsf_req_list_head, &remove_queue); - atomic_set(&adapter->fsf_reqs_active, 0); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - - list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { - list_del(&fsf_req->list); - zfcp_fsf_req_dismiss(fsf_req); - } - - return 0; -} - -/* - * function: - * - * purpose: - * - * returns: - */ -static void -zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req) -{ + dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); + dbg_tmp[1] = (u64) counter; + debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); + 
list_del(&fsf_req->list); fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; zfcp_fsf_req_complete(fsf_req); } +/** + * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests + */ +int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned long flags; + unsigned int i, counter; + + spin_lock_irqsave(&adapter->req_list_lock, flags); + atomic_set(&adapter->reqs_active, 0); + for (i=0; ireq_list[i])) + continue; + + counter = 0; + list_for_each_entry_safe(request, tmp, + &adapter->req_list[i], list) { + zfcp_fsf_req_dismiss(adapter, request, counter); + counter++; + } + } + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + return 0; +} + /* * function: zfcp_fsf_req_complete * @@ -4592,12 +4592,14 @@ static inline void zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) { if (likely(fsf_req->qtcb != NULL)) { - fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; - fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; + fsf_req->qtcb->prefix.req_seq_no = + fsf_req->adapter->fsf_req_seq_no; + fsf_req->qtcb->prefix.req_id = fsf_req->req_id; fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; - fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; + fsf_req->qtcb->prefix.qtcb_type = + fsf_qtcb_type[fsf_req->fsf_command]; fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; - fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; + fsf_req->qtcb->header.req_handle = fsf_req->req_id; fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; } } @@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, { volatile struct qdio_buffer_element *sbale; struct zfcp_fsf_req *fsf_req = NULL; + unsigned long flags; int ret = 0; struct zfcp_qdio_queue *req_queue = &adapter->request_queue; @@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, fsf_req->adapter = adapter; fsf_req->fsf_command = fsf_cmd; + INIT_LIST_HEAD(&fsf_req->list); + + /* unique request id */ + spin_lock_irqsave(&adapter->req_list_lock, flags); + fsf_req->req_id = adapter->req_no++; + spin_unlock_irqrestore(&adapter->req_list_lock, flags); zfcp_fsf_req_qtcb_init(fsf_req); @@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); /* setup common SBALE fields */ - sbale[0].addr = fsf_req; + sbale[0].addr = (void *) fsf_req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; if (likely(fsf_req->qtcb != NULL)) { sbale[1].addr = (void *) fsf_req->qtcb; @@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) volatile struct qdio_buffer_element *sbale; int inc_seq_no; int new_distance_from_int; - unsigned long flags; + u64 dbg_tmp[2]; int retval = 0; adapter = fsf_req->adapter; @@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, sbale[1].length); - /* put allocated FSF request at list tail */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); + /* put allocated FSF request into hash table */ + spin_lock(&adapter->req_list_lock); + zfcp_reqlist_add(adapter, fsf_req); + spin_unlock(&adapter->req_list_lock); inc_seq_no = (fsf_req->qtcb != NULL); @@ -4803,6 +4812,10 @@ 
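The zfcp_reqlist_add()/zfcp_reqlist_remove()/zfcp_reqlist_ismember() helpers used above are only declared by this series (see the new externs in zfcp_ext.h); their bodies are not part of this hunk. The idea is a small table of list heads indexed by a hash of the numeric req_id, protected by req_list_lock. A minimal sketch of what such helpers could look like follows; REQLIST_BUCKETS, the modulo hash and the sketch_* names are illustrative assumptions, not the driver's actual implementation.

/* Sketch only: request lookup keyed by the numeric request id that is now
 * stored in sbale[0].addr and in the QTCB request handle. */
#define REQLIST_BUCKETS 128			/* assumed table size */

static inline unsigned int reqlist_hash(unsigned long req_id)
{
	return req_id % REQLIST_BUCKETS;
}

/* caller holds adapter->req_list_lock */
static void sketch_reqlist_add(struct zfcp_adapter *adapter,
			       struct zfcp_fsf_req *fsf_req)
{
	list_add_tail(&fsf_req->list,
		      &adapter->req_list[reqlist_hash(fsf_req->req_id)]);
}

/* caller holds adapter->req_list_lock; NULL means the id is unknown */
static struct zfcp_fsf_req *sketch_reqlist_find(struct zfcp_adapter *adapter,
						unsigned long req_id)
{
	struct zfcp_fsf_req *fsf_req;

	list_for_each_entry(fsf_req,
			    &adapter->req_list[reqlist_hash(req_id)], list)
		if (fsf_req->req_id == req_id)
			return fsf_req;
	return NULL;
}

Handing the hardware an opaque numeric id instead of a kernel pointer is what makes the sanity check in zfcp_qdio_reqid_check() (further down) possible: an id that is not in the table can be rejected safely and trigger adapter recovery instead of being dereferenced.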
zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) QDIO_FLAG_SYNC_OUTPUT, 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); + dbg_tmp[0] = (unsigned long) sbale[0].addr; + dbg_tmp[1] = (u64) retval; + debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); + if (unlikely(retval)) { /* Queues are down..... */ retval = -EIO; @@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) */ if (timer) del_timer(timer); - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_del(&fsf_req->list); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - /* - * adjust the number of free SBALs in request queue as well as - * position of first one - */ + spin_lock(&adapter->req_list_lock); + zfcp_reqlist_remove(adapter, fsf_req->req_id); + spin_unlock(&adapter->req_list_lock); + /* undo changes in request queue made for this request */ zfcp_qdio_zero_sbals(req_queue->buffer, fsf_req->sbal_first, fsf_req->sbal_number); atomic_add(fsf_req->sbal_number, &req_queue->free_count); - req_queue->free_index -= fsf_req->sbal_number; /* increase */ + req_queue->free_index -= fsf_req->sbal_number; req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ - ZFCP_LOG_DEBUG - ("error: do_QDIO failed. Buffers could not be enqueued " - "to request queue.\n"); + zfcp_erp_adapter_reopen(adapter, 0); } else { req_queue->distance_from_int = new_distance_from_int; /* @@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) adapter->fsf_req_seq_no++; /* count FSF requests pending */ - atomic_inc(&adapter->fsf_reqs_active); + atomic_inc(&adapter->reqs_active); } return retval; } diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 49ea5add4abc..dbd9f48e863e 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device, return; } +/** + * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status + */ +static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, + unsigned long req_id) +{ + struct zfcp_fsf_req *fsf_req; + unsigned long flags; + + debug_long_event(adapter->erp_dbf, 4, req_id); + + spin_lock_irqsave(&adapter->req_list_lock, flags); + fsf_req = zfcp_reqlist_ismember(adapter, req_id); + + if (!fsf_req) { + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id); + zfcp_erp_adapter_reopen(adapter, 0); + return -EINVAL; + } + + zfcp_reqlist_remove(adapter, req_id); + atomic_dec(&adapter->reqs_active); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + /* finish the FSF request */ + zfcp_fsf_req_complete(fsf_req); + + return 0; +} + /* * function: zfcp_qdio_response_handler * @@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, /* look for QDIO request identifiers in SB */ buffere = &buffer->element[buffere_index]; retval = zfcp_qdio_reqid_check(adapter, - (void *) buffere->addr); + (unsigned long) buffere->addr); if (retval) { ZFCP_LOG_NORMAL("bug: unexpected inbound " @@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, return; } -/* - * function: zfcp_qdio_reqid_check - * - * purpose: checks for valid reqids or unsolicited status - * - * returns: 0 - valid request id or unsolicited status - * !0 - otherwise - */ -int -zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr) -{ - 
struct zfcp_fsf_req *fsf_req; - unsigned long flags; - - /* invalid (per convention used in this driver) */ - if (unlikely(!sbale_addr)) { - ZFCP_LOG_NORMAL("bug: invalid reqid\n"); - return -EINVAL; - } - - /* valid request id and thus (hopefully :) valid fsf_req address */ - fsf_req = (struct zfcp_fsf_req *) sbale_addr; - - /* serialize with zfcp_fsf_req_dismiss_all */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - if (list_empty(&adapter->fsf_req_list_head)) { - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - return 0; - } - list_del(&fsf_req->list); - atomic_dec(&adapter->fsf_reqs_active); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - - if (unlikely(adapter != fsf_req->adapter)) { - ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, " - "fsf_req->adapter=%p, adapter=%p)\n", - fsf_req, fsf_req->adapter, adapter); - return -EINVAL; - } - - /* finish the FSF request */ - zfcp_fsf_req_complete(fsf_req); - - return 0; -} - /** * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue * @queue: queue from which SBALE should be returned diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 671f4a6a5d18..1bb55086db9f 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *, void (*done) (struct scsi_cmnd *)); static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); -static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); static int zfcp_task_management_function(struct zfcp_unit *, u8, struct scsi_cmnd *); @@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = { .scsi_host_template = { .name = ZFCP_NAME, .proc_name = "zfcp", - .proc_info = NULL, - .detect = NULL, .slave_alloc = zfcp_scsi_slave_alloc, .slave_configure = zfcp_scsi_slave_configure, .slave_destroy = zfcp_scsi_slave_destroy, .queuecommand = zfcp_scsi_queuecommand, .eh_abort_handler = zfcp_scsi_eh_abort_handler, .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, - .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, + .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler, .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, .can_queue = 4096, .this_id = -1, - /* - * FIXME: - * one less? can zfcp_create_sbale cope with it? - */ .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, .cmd_per_lun = 1, - .unchecked_isa_dma = 0, .use_clustering = 1, .sdev_attrs = zfcp_sysfs_sdev_attrs, }, .driver_version = ZFCP_VERSION, - /* rest initialised with zeros */ }; /* Find start of Response Information in FCP response unit*/ @@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp) return retval; } -static void -zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) +/** + * zfcp_scsi_slave_destroy - called when scsi device is removed + * + * Remove reference to associated scsi device for an zfcp_unit. + * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs + * or a scan for this device might have failed. 
+ */ +static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) { struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; @@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); sdpnt->hostdata = NULL; unit->device = NULL; + zfcp_erp_unit_failed(unit); zfcp_unit_put(unit); } else { ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " @@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, } /** - * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) + * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset + * + * If ERP is already running it will be stopped. */ -int -zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) +int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { - struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_unit *unit; + struct zfcp_adapter *adapter; + unsigned long flags; - ZFCP_LOG_NORMAL("bus reset because of problems with " + unit = (struct zfcp_unit*) scpnt->device->hostdata; + adapter = unit->port->adapter; + + ZFCP_LOG_NORMAL("host/bus reset because of problems with " "unit 0x%016Lx\n", unit->fcp_lun); - zfcp_erp_adapter_reopen(adapter, 0); - zfcp_erp_wait(adapter); - return SUCCESS; -} - -/** - * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) - */ -int -zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) -{ - struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; - - ZFCP_LOG_NORMAL("host reset because of problems with " - "unit 0x%016Lx\n", unit->fcp_lun); - zfcp_erp_adapter_reopen(adapter, 0); - zfcp_erp_wait(adapter); + write_lock_irqsave(&adapter->erp_lock, flags); + if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, + &adapter->status)) { + zfcp_erp_modify_adapter_status(adapter, + ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, + ZFCP_CLEAR); + zfcp_erp_action_dismiss_adapter(adapter); + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_fsf_req_dismiss_all(adapter); + adapter->fsf_req_seq_no = 0; + zfcp_erp_adapter_reopen(adapter, 0); + } else { + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_erp_adapter_reopen(adapter, 0); + zfcp_erp_wait(adapter); + } return SUCCESS; } diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index 77e7202a0eba..904c25fb4ba4 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c @@ -940,14 +940,8 @@ static void ahci_host_intr(struct ata_port *ap) return; /* ignore interim PIO setup fis interrupts */ - if (ata_tag_valid(ap->active_tag)) { - struct ata_queued_cmd *qc = - ata_qc_from_tag(ap, ap->active_tag); - - if (qc && qc->tf.protocol == ATA_PROT_PIO && - (status & PORT_IRQ_PIOS_FIS)) - return; - } + if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS)) + return; if (ata_ratelimit()) ata_port_printk(ap, KERN_INFO, "spurious interrupt " diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile index 8c91fda6482c..b98c5c1056c3 100644 --- a/drivers/scsi/aic7xxx/aicasm/Makefile +++ b/drivers/scsi/aic7xxx/aicasm/Makefile @@ -14,6 +14,8 @@ LIBS= -ldb clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) # Override default kernel CFLAGS. This is a userland app. AICASM_CFLAGS:= -I/usr/include -I. 
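Stripped of logging and the unit lookup, the reworked host/bus reset handler above comes down to the branch sketched below. Note that the ERP lock is dropped before zfcp_fsf_req_dismiss_all() runs, presumably because dismissing completes requests and must not happen under erp_lock, and that zfcp_erp_wait() is only used on the path where no ERP was pending. This is a condensed sketch, not the literal patch.

/* Condensed sketch of the new handler's control flow. */
static int sketch_host_reset(struct zfcp_adapter *adapter)
{
	unsigned long flags;

	write_lock_irqsave(&adapter->erp_lock, flags);
	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
			     &adapter->status)) {
		/* ERP already running: stop it and start over */
		zfcp_erp_modify_adapter_status(adapter,
				ZFCP_STATUS_COMMON_UNBLOCKED |
				ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
		zfcp_erp_action_dismiss_adapter(adapter);
		write_unlock_irqrestore(&adapter->erp_lock, flags);

		zfcp_fsf_req_dismiss_all(adapter);	/* outside erp_lock */
		adapter->fsf_req_seq_no = 0;
		zfcp_erp_adapter_reopen(adapter, 0);
	} else {
		write_unlock_irqrestore(&adapter->erp_lock, flags);
		zfcp_erp_adapter_reopen(adapter, 0);
		zfcp_erp_wait(adapter);
	}
	return SUCCESS;
}

Since .eh_bus_reset_handler now points at the same function, bus and host resets both go through this single path.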
+LEX= flex +YACC= bison YFLAGS= -d NOMAN= noman diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig index 06d7601cdf56..d006a8cb4a74 100644 --- a/drivers/scsi/arm/Kconfig +++ b/drivers/scsi/arm/Kconfig @@ -69,6 +69,7 @@ comment "The following drivers are not fully supported" config SCSI_CUMANA_1 tristate "CumanaSCSI I support (EXPERIMENTAL)" depends on ARCH_ACORN && EXPERIMENTAL && SCSI + select SCSI_SPI_ATTRS help This enables support for the Cumana SCSI I card. If you have an Acorn system with one of these, say Y. If unsure, say N. @@ -76,6 +77,7 @@ config SCSI_CUMANA_1 config SCSI_ECOSCSI tristate "EcoScsi support (EXPERIMENTAL)" depends on ARCH_ACORN && EXPERIMENTAL && (ARCH_ARC || ARCH_A5K) && SCSI + select SCSI_SPI_ATTRS help This enables support for the EcoSCSI card -- a small card that sits in the Econet socket. If you have an Acorn system with one of these, @@ -84,6 +86,7 @@ config SCSI_ECOSCSI config SCSI_OAK1 tristate "Oak SCSI support (EXPERIMENTAL)" depends on ARCH_ACORN && EXPERIMENTAL && SCSI + select SCSI_SPI_ATTRS help This enables support for the Oak SCSI card. If you have an Acorn system with one of these, say Y. If unsure, say N. diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h index 6dd544a5eb56..8c2600ffc6af 100644 --- a/drivers/scsi/arm/scsi.h +++ b/drivers/scsi/arm/scsi.h @@ -74,7 +74,7 @@ static inline void init_SCp(Scsi_Cmnd *SCpnt) unsigned long len = 0; int buf; - SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->buffer; + SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer; SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; SCpnt->SCp.ptr = (char *) (page_address(SCpnt->SCp.buffer->page) + diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 19745a31072b..2d20caf377f5 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c @@ -390,7 +390,8 @@ static struct ata_port_info piix_port_info[] = { /* ich5_sata */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | + PIIX_FLAG_IGNORE_PCS, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ @@ -467,6 +468,11 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, piix_pci_tbl); MODULE_VERSION(DRV_VERSION); +static int force_pcs = 0; +module_param(force_pcs, int, 0444); +MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around " + "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)"); + /** * piix_pata_cbl_detect - Probe host controller cable detect info * @ap: Port for which cable detect info is desired @@ -531,27 +537,25 @@ static void piix_pata_error_handler(struct ata_port *ap) } /** - * piix_sata_prereset - prereset for SATA host controller + * piix_sata_present_mask - determine present mask for SATA host controller * @ap: Target port * - * Reads and configures SATA PCI device's PCI config register - * Port Configuration and Status (PCS) to determine port and - * device availability. Return -ENODEV to skip reset if no - * device is present. + * Reads SATA PCI device's PCI config register Port Configuration + * and Status (PCS) to determine port and device availability. * * LOCKING: * None (inherited from caller). * * RETURNS: - * 0 if device is present, -ENODEV otherwise. 
+ * determined present_mask */ -static int piix_sata_prereset(struct ata_port *ap) +static unsigned int piix_sata_present_mask(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); struct piix_host_priv *hpriv = ap->host_set->private_data; const unsigned int *map = hpriv->map; int base = 2 * ap->hard_port_no; - unsigned int present = 0; + unsigned int present_mask = 0; int port, i; u16 pcs; @@ -564,24 +568,52 @@ static int piix_sata_prereset(struct ata_port *ap) continue; if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || (pcs & 1 << (hpriv->map_db->present_shift + port))) - present = 1; + present_mask |= 1 << i; } DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", ap->id, pcs, present_mask); - if (!present) { - ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); - ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; - return 0; + return present_mask; +} + +/** + * piix_sata_softreset - reset SATA host port via ATA SRST + * @ap: port to reset + * @classes: resulting classes of attached devices + * + * Reset SATA host port via ATA SRST. On controllers with + * reliable PCS present bits, the bits are used to determine + * device presence. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes) +{ + unsigned int present_mask; + int i, rc; + + present_mask = piix_sata_present_mask(ap); + + rc = ata_std_softreset(ap, classes); + if (rc) + return rc; + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + if (!(present_mask & (1 << i))) + classes[i] = ATA_DEV_NONE; } - return ata_std_prereset(ap); + return 0; } static void piix_sata_error_handler(struct ata_port *ap) { - ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL, + ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL, ata_std_postreset); } @@ -785,6 +817,7 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) } static void __devinit piix_init_pcs(struct pci_dev *pdev, + struct ata_port_info *pinfo, const struct piix_map_db *map_db) { u16 pcs, new_pcs; @@ -798,6 +831,18 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev, pci_write_config_word(pdev, ICH5_PCS, new_pcs); msleep(150); } + + if (force_pcs == 1) { + dev_printk(KERN_INFO, &pdev->dev, + "force ignoring PCS (0x%x)\n", new_pcs); + pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS; + pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS; + } else if (force_pcs == 2) { + dev_printk(KERN_INFO, &pdev->dev, + "force honoring PCS (0x%x)\n", new_pcs); + pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS; + pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS; + } } static void __devinit piix_init_sata_map(struct pci_dev *pdev, @@ -828,6 +873,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev, case IDE: WARN_ON((i & 1) || map[i + 1] != IDE); pinfo[i / 2] = piix_port_info[ich5_pata]; + pinfo[i / 2].private_data = hpriv; i++; printk(" IDE IDE"); break; @@ -905,7 +951,8 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (host_flags & ATA_FLAG_SATA) { piix_init_sata_map(pdev, port_info, piix_map_db_table[ent->driver_data]); - piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]); + piix_init_pcs(pdev, port_info, + piix_map_db_table[ent->driver_data]); } /* On ICH5, some BIOSen disable the interrupt using the diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c index 98bd22714d0d..5630868c1b25 100644 --- a/drivers/scsi/esp.c +++ b/drivers/scsi/esp.c @@ -1146,7 +1146,7 @@ 
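The force_pcs parameter introduced above gives a load-time override for controllers whose PCS bits mis-report attached devices: force_pcs=1 makes both SATA port_info entries ignore PCS (every port gets probed), force_pcs=2 forces PCS to be honored even where PIIX_FLAG_IGNORE_PCS would normally apply, and the default of 0 keeps the per-chipset behaviour. When the driver is built as a module this would typically be set as, for example, "modprobe ata_piix force_pcs=1"; built in, the equivalent kernel command-line form would be ata_piix.force_pcs=1.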
static struct sbus_dev sun4_esp_dev; static int __init esp_sun4_probe(struct scsi_host_template *tpnt) { if (sun4_esp_physaddr) { - memset(&sun4_esp_dev, 0, sizeof(esp_dev)); + memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev)); sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; sun4_esp_dev.irqs[0] = 4; sun4_esp_dev.resource[0].start = sun4_esp_physaddr; @@ -1162,6 +1162,7 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt) static int __devexit esp_sun4_remove(void) { + struct of_device *dev = &sun4_esp_dev.ofdev; struct esp *esp = dev_get_drvdata(&dev->dev); return esp_remove_common(esp); diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 6b41c2ef6e21..28bfb8f9f81d 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -44,10 +44,6 @@ static char driver_name[] = "hptiop"; static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; static const char driver_ver[] = "v1.0 (060426)"; -static DEFINE_SPINLOCK(hptiop_hba_list_lock); -static LIST_HEAD(hptiop_hba_list); -static int hptiop_cdev_major = -1; - static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); @@ -576,7 +572,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) if (atomic_xchg(&hba->resetting, 1) == 0) { atomic_inc(&hba->reset_count); writel(IOPMU_INBOUND_MSG0_RESET, - &hba->iop->outbound_msgaddr0); + &hba->iop->inbound_msgaddr0); hptiop_pci_posting_flush(hba->iop); } @@ -619,532 +615,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, return queue_depth; } -struct hptiop_getinfo { - char __user *buffer; - loff_t buflength; - loff_t bufoffset; - loff_t buffillen; - loff_t filpos; -}; - -static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo, - char *data, int datalen) -{ - if (pinfo->filpos < pinfo->bufoffset) { - if (pinfo->filpos + datalen <= pinfo->bufoffset) { - pinfo->filpos += datalen; - return; - } else { - data += (pinfo->bufoffset - pinfo->filpos); - datalen -= (pinfo->bufoffset - pinfo->filpos); - pinfo->filpos = pinfo->bufoffset; - } - } - - pinfo->filpos += datalen; - if (pinfo->buffillen == pinfo->buflength) - return; - - if (pinfo->buflength - pinfo->buffillen < datalen) - datalen = pinfo->buflength - pinfo->buffillen; - - if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen)) - return; - - pinfo->buffillen += datalen; -} - -static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...) -{ - va_list args; - char buf[128]; - int len; - - va_start(args, fmt); - len = vsnprintf(buf, sizeof(buf), fmt, args); - va_end(args); - hptiop_copy_mem_info(pinfo, buf, len); - return len; -} - -static void hptiop_ioctl_done(struct hpt_ioctl_k *arg) -{ - arg->done = NULL; - wake_up(&arg->hba->ioctl_wq); -} - -static void hptiop_do_ioctl(struct hpt_ioctl_k *arg) -{ - struct hptiop_hba *hba = arg->hba; - u32 val; - struct hpt_iop_request_ioctl_command __iomem *req; - int ioctl_retry = 0; - - dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no); - - /* - * check (in + out) buff size from application. - * outbuf must be dword aligned. 
- */ - if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size > - hba->max_request_size - - sizeof(struct hpt_iop_request_header) - - 4 * sizeof(u32)) { - dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n", - hba->host->host_no, - arg->inbuf_size, arg->outbuf_size); - arg->result = HPT_IOCTL_RESULT_FAILED; - return; - } - -retry: - spin_lock_irq(hba->host->host_lock); - - val = readl(&hba->iop->inbound_queue); - if (val == IOPMU_QUEUE_EMPTY) { - spin_unlock_irq(hba->host->host_lock); - dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no); - arg->result = -1; - return; - } - - req = (struct hpt_iop_request_ioctl_command __iomem *) - ((unsigned long)hba->iop + val); - - writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code), - &req->ioctl_code); - writel(arg->inbuf_size, &req->inbuf_size); - writel(arg->outbuf_size, &req->outbuf_size); - - /* - * use the buffer on the IOP local memory first, then copy it - * back to host. - * the caller's request buffer shoudl be little-endian. - */ - if (arg->inbuf_size) - memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size); - - /* correct the controller ID for IOP */ - if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO || - arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 || - arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO) - && arg->inbuf_size >= sizeof(u32)) - writel(0, req->buf); - - writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type); - writel(0, &req->header.flags); - writel(offsetof(struct hpt_iop_request_ioctl_command, buf) - + arg->inbuf_size, &req->header.size); - writel((u32)(unsigned long)arg, &req->header.context); - writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0, - &req->header.context_hi32); - writel(IOP_RESULT_PENDING, &req->header.result); - - arg->result = HPT_IOCTL_RESULT_FAILED; - arg->done = hptiop_ioctl_done; - - writel(val, &hba->iop->inbound_queue); - hptiop_pci_posting_flush(hba->iop); - - spin_unlock_irq(hba->host->host_lock); - - wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ); - - if (arg->done != NULL) { - hptiop_reset_hba(hba); - if (ioctl_retry++ < 3) - goto retry; - } - - dprintk("hpt_iop_ioctl %x result %d\n", - arg->ioctl_code, arg->result); -} - -static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf, - u32 insize, void *outbuf, u32 outsize) -{ - struct hpt_ioctl_k arg; - arg.hba = hba; - arg.ioctl_code = code; - arg.inbuf = inbuf; - arg.outbuf = outbuf; - arg.inbuf_size = insize; - arg.outbuf_size = outsize; - arg.bytes_returned = NULL; - hptiop_do_ioctl(&arg); - return arg.result; -} - -static inline int hpt_id_valid(__le32 id) -{ - return id != 0 && id != cpu_to_le32(0xffffffff); -} - -static int hptiop_get_controller_info(struct hptiop_hba *hba, - struct hpt_controller_info *pinfo) -{ - int id = 0; - - return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO, - &id, sizeof(int), pinfo, sizeof(*pinfo)); -} - - -static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus, - struct hpt_channel_info *pinfo) -{ - u32 ids[2]; - - ids[0] = 0; - ids[1] = bus; - return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO, - ids, sizeof(ids), pinfo, sizeof(*pinfo)); - -} - -static int hptiop_get_logical_devices(struct hptiop_hba *hba, - __le32 *pids, int maxcount) -{ - int i; - u32 count = maxcount - 1; - - if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES, - &count, sizeof(u32), - pids, sizeof(u32) * maxcount)) - return -1; - - maxcount = le32_to_cpu(pids[0]); - for (i = 0; i < maxcount; i++) - pids[i] = pids[i+1]; - - return maxcount; -} - -static int 
hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id, - struct hpt_logical_device_info_v3 *pinfo) -{ - return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3, - &id, sizeof(u32), - pinfo, sizeof(*pinfo)); -} - -static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo) -{ - static char s[64]; - u32 flags = le32_to_cpu(devinfo->u.array.flags); - u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress); - u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress); - - if (flags & ARRAY_FLAG_DISABLED) - return "Disabled"; - else if (flags & ARRAY_FLAG_TRANSFORMING) - sprintf(s, "Expanding/Migrating %d.%d%%%s%s", - trans_prog / 100, - trans_prog % 100, - (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))? - ", Critical" : "", - ((flags & ARRAY_FLAG_NEEDINITIALIZING) && - !(flags & ARRAY_FLAG_REBUILDING) && - !(flags & ARRAY_FLAG_INITIALIZING))? - ", Unintialized" : ""); - else if ((flags & ARRAY_FLAG_BROKEN) && - devinfo->u.array.array_type != AT_RAID6) - return "Critical"; - else if (flags & ARRAY_FLAG_REBUILDING) - sprintf(s, - (flags & ARRAY_FLAG_NEEDINITIALIZING)? - "%sBackground initializing %d.%d%%" : - "%sRebuilding %d.%d%%", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", - reb_prog / 100, - reb_prog % 100); - else if (flags & ARRAY_FLAG_VERIFYING) - sprintf(s, "%sVerifying %d.%d%%", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", - reb_prog / 100, - reb_prog % 100); - else if (flags & ARRAY_FLAG_INITIALIZING) - sprintf(s, "%sForground initializing %d.%d%%", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", - reb_prog / 100, - reb_prog % 100); - else if (flags & ARRAY_FLAG_NEEDTRANSFORM) - sprintf(s,"%s%s%s", "Need Expanding/Migrating", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", - ((flags & ARRAY_FLAG_NEEDINITIALIZING) && - !(flags & ARRAY_FLAG_REBUILDING) && - !(flags & ARRAY_FLAG_INITIALIZING))? - ", Unintialized" : ""); - else if (flags & ARRAY_FLAG_NEEDINITIALIZING && - !(flags & ARRAY_FLAG_REBUILDING) && - !(flags & ARRAY_FLAG_INITIALIZING)) - sprintf(s,"%sUninitialized", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : ""); - else if ((flags & ARRAY_FLAG_NEEDBUILDING) || - (flags & ARRAY_FLAG_BROKEN)) - return "Critical"; - else - return "Normal"; - return s; -} - -static void hptiop_dump_devinfo(struct hptiop_hba *hba, - struct hptiop_getinfo *pinfo, __le32 id, int indent) -{ - struct hpt_logical_device_info_v3 devinfo; - int i; - u64 capacity; - - for (i = 0; i < indent; i++) - hptiop_copy_info(pinfo, "\t"); - - if (hptiop_get_device_info_v3(hba, id, &devinfo)) { - hptiop_copy_info(pinfo, "unknown\n"); - return; - } - - switch (devinfo.type) { - - case LDT_DEVICE: { - struct hd_driveid *driveid; - u32 flags = le32_to_cpu(devinfo.u.device.flags); - - driveid = (struct hd_driveid *)devinfo.u.device.ident; - /* model[] is 40 chars long, but we just want 20 chars here */ - driveid->model[20] = 0; - - if (indent) - if (flags & DEVICE_FLAG_DISABLED) - hptiop_copy_info(pinfo,"Missing\n"); - else - hptiop_copy_info(pinfo, "CH%d %s\n", - devinfo.u.device.path_id + 1, - driveid->model); - else { - capacity = le64_to_cpu(devinfo.capacity) * 512; - do_div(capacity, 1000000); - hptiop_copy_info(pinfo, - "CH%d %s, %lluMB, %s %s%s%s%s\n", - devinfo.u.device.path_id + 1, - driveid->model, - capacity, - (flags & DEVICE_FLAG_DISABLED)? - "Disabled" : "Normal", - devinfo.u.device.read_ahead_enabled? - "[RA]" : "", - devinfo.u.device.write_cache_enabled? - "[WC]" : "", - devinfo.u.device.TCQ_enabled? 
- "[TCQ]" : "", - devinfo.u.device.NCQ_enabled? - "[NCQ]" : "" - ); - } - break; - } - - case LDT_ARRAY: - if (devinfo.target_id != INVALID_TARGET_ID) - hptiop_copy_info(pinfo, "[DISK %d_%d] ", - devinfo.vbus_id, devinfo.target_id); - - capacity = le64_to_cpu(devinfo.capacity) * 512; - do_div(capacity, 1000000); - hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n", - devinfo.u.array.name, - devinfo.u.array.array_type==AT_RAID0? "RAID0" : - devinfo.u.array.array_type==AT_RAID1? "RAID1" : - devinfo.u.array.array_type==AT_RAID5? "RAID5" : - devinfo.u.array.array_type==AT_RAID6? "RAID6" : - devinfo.u.array.array_type==AT_JBOD? "JBOD" : - "unknown", - capacity, - get_array_status(&devinfo)); - for (i = 0; i < devinfo.u.array.ndisk; i++) { - if (hpt_id_valid(devinfo.u.array.members[i])) { - if (cpu_to_le16(1<private_data; - struct hptiop_getinfo info; - int i, j, ndev; - struct hpt_controller_info con_info; - struct hpt_channel_info chan_info; - __le32 ids[32]; - - info.buffer = buf; - info.buflength = count; - info.bufoffset = ppos ? *ppos : 0; - info.filpos = 0; - info.buffillen = 0; - - if (hptiop_get_controller_info(hba, &con_info)) - return -EIO; - - for (i = 0; i < con_info.num_buses; i++) { - if (hptiop_get_channel_info(hba, i, &chan_info) == 0) { - if (hpt_id_valid(chan_info.devices[0])) - hptiop_dump_devinfo(hba, &info, - chan_info.devices[0], 0); - if (hpt_id_valid(chan_info.devices[1])) - hptiop_dump_devinfo(hba, &info, - chan_info.devices[1], 0); - } - } - - ndev = hptiop_get_logical_devices(hba, ids, - sizeof(ids) / sizeof(ids[0])); - - /* - * if hptiop_get_logical_devices fails, ndev==-1 and it just - * output nothing here - */ - for (j = 0; j < ndev; j++) - hptiop_dump_devinfo(hba, &info, ids[j], 0); - - if (ppos) - *ppos += info.buffillen; - - return info.buffillen; -} - -static int hptiop_cdev_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct hptiop_hba *hba = file->private_data; - struct hpt_ioctl_u ioctl_u; - struct hpt_ioctl_k ioctl_k; - u32 bytes_returned; - int err = -EINVAL; - - if (copy_from_user(&ioctl_u, - (void __user *)arg, sizeof(struct hpt_ioctl_u))) - return -EINVAL; - - if (ioctl_u.magic != HPT_IOCTL_MAGIC) - return -EINVAL; - - ioctl_k.ioctl_code = ioctl_u.ioctl_code; - ioctl_k.inbuf = NULL; - ioctl_k.inbuf_size = ioctl_u.inbuf_size; - ioctl_k.outbuf = NULL; - ioctl_k.outbuf_size = ioctl_u.outbuf_size; - ioctl_k.hba = hba; - ioctl_k.bytes_returned = &bytes_returned; - - /* verify user buffer */ - if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ, - ioctl_u.inbuf, ioctl_k.inbuf_size)) || - (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE, - ioctl_u.outbuf, ioctl_k.outbuf_size)) || - (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE, - ioctl_u.bytes_returned, sizeof(u32))) || - ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) { - - dprintk("scsi%d: got bad user address\n", hba->host->host_no); - return -EINVAL; - } - - /* map buffer to kernel. 
*/ - if (ioctl_k.inbuf_size) { - ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL); - if (!ioctl_k.inbuf) { - dprintk("scsi%d: fail to alloc inbuf\n", - hba->host->host_no); - err = -ENOMEM; - goto err_exit; - } - - if (copy_from_user(ioctl_k.inbuf, - ioctl_u.inbuf, ioctl_k.inbuf_size)) { - goto err_exit; - } - } - - if (ioctl_k.outbuf_size) { - ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL); - if (!ioctl_k.outbuf) { - dprintk("scsi%d: fail to alloc outbuf\n", - hba->host->host_no); - err = -ENOMEM; - goto err_exit; - } - } - - hptiop_do_ioctl(&ioctl_k); - - if (ioctl_k.result == HPT_IOCTL_RESULT_OK) { - if (ioctl_k.outbuf_size && - copy_to_user(ioctl_u.outbuf, - ioctl_k.outbuf, ioctl_k.outbuf_size)) - goto err_exit; - - if (ioctl_u.bytes_returned && - copy_to_user(ioctl_u.bytes_returned, - &bytes_returned, sizeof(u32))) - goto err_exit; - - err = 0; - } - -err_exit: - kfree(ioctl_k.inbuf); - kfree(ioctl_k.outbuf); - - return err; -} - -static int hptiop_cdev_open(struct inode *inode, struct file *file) -{ - struct hptiop_hba *hba; - unsigned i = 0, minor = iminor(inode); - int ret = -ENODEV; - - spin_lock(&hptiop_hba_list_lock); - list_for_each_entry(hba, &hptiop_hba_list, link) { - if (i == minor) { - file->private_data = hba; - ret = 0; - goto out; - } - i++; - } - -out: - spin_unlock(&hptiop_hba_list_lock); - return ret; -} - -static struct file_operations hptiop_cdev_fops = { - .owner = THIS_MODULE, - .read = hptiop_cdev_read, - .ioctl = hptiop_cdev_ioctl, - .open = hptiop_cdev_open, -}; - static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) { struct Scsi_Host *host = class_to_shost(class_dev); @@ -1295,19 +770,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, goto unmap_pci_bar; } - if (scsi_add_host(host, &pcidev->dev)) { - printk(KERN_ERR "scsi%d: scsi_add_host failed\n", - hba->host->host_no); - goto unmap_pci_bar; - } - pci_set_drvdata(pcidev, host); if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, driver_name, hba)) { printk(KERN_ERR "scsi%d: request irq %d failed\n", hba->host->host_no, pcidev->irq); - goto remove_scsi_host; + goto unmap_pci_bar; } /* Allocate request mem */ @@ -1354,9 +823,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, if (hptiop_initialize_iop(hba)) goto free_request_mem; - spin_lock(&hptiop_hba_list_lock); - list_add_tail(&hba->link, &hptiop_hba_list); - spin_unlock(&hptiop_hba_list_lock); + if (scsi_add_host(host, &pcidev->dev)) { + printk(KERN_ERR "scsi%d: scsi_add_host failed\n", + hba->host->host_no); + goto free_request_mem; + } + scsi_scan_host(host); @@ -1371,9 +843,6 @@ free_request_mem: free_request_irq: free_irq(hba->pcidev->irq, hba); -remove_scsi_host: - scsi_remove_host(host); - unmap_pci_bar: iounmap(hba->iop); @@ -1421,10 +890,6 @@ static void hptiop_remove(struct pci_dev *pcidev) scsi_remove_host(host); - spin_lock(&hptiop_hba_list_lock); - list_del_init(&hba->link); - spin_unlock(&hptiop_hba_list_lock); - hptiop_shutdown(pcidev); free_irq(hba->pcidev->irq, hba); @@ -1461,27 +926,12 @@ static struct pci_driver hptiop_pci_driver = { static int __init hptiop_module_init(void) { - int error; - printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); - - error = pci_register_driver(&hptiop_pci_driver); - if (error < 0) - return error; - - hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops); - if (hptiop_cdev_major < 0) { - printk(KERN_WARNING "unable to register hptiop device.\n"); - return hptiop_cdev_major; - } - - return 0; + return 
pci_register_driver(&hptiop_pci_driver); } static void __exit hptiop_module_exit(void) { - dprintk("hptiop_module_exit\n"); - unregister_chrdev(hptiop_cdev_major, "hptiop"); pci_unregister_driver(&hptiop_pci_driver); } diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index f7b5d7372d26..94d1de55607f 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) /* No more interrupts */ if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); - local_irq_enable(); + local_irq_enable_in_hardirq(); if (status.b.check) rq->errors++; idescsi_end_request (drive, 1, 0); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 848fb2aa4ca3..058f094f945a 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -43,13 +43,10 @@ #include "iscsi_tcp.h" -#define ISCSI_TCP_VERSION "1.0-595" - MODULE_AUTHOR("Dmitry Yusupov , " "Alex Aizman "); MODULE_DESCRIPTION("iSCSI/TCP data-path"); MODULE_LICENSE("GPL"); -MODULE_VERSION(ISCSI_TCP_VERSION); /* #define DEBUG_TCP */ #define DEBUG_ASSERT @@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn) * must be called with session lock */ static void -__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_r2t_info *r2t; struct scsi_cmnd *sc; + /* flush ctask's r2t queues */ + while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { + __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); + } + sc = ctask->sc; if (unlikely(!sc)) return; @@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) spin_unlock(&session->lock); return 0; } + rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); BUG_ON(!rc); @@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->exp_r2tsn = r2tsn + 1; tcp_ctask->xmstate |= XMSTATE_SOL_HDR; __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); - __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); + list_move_tail(&ctask->running, &conn->xmitqueue); scsi_queue_work(session->host, &conn->xmitwork); conn->r2t_pdus_cnt++; @@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) case ISCSI_OP_SCSI_DATA_IN: tcp_conn->in.ctask = session->cmds[itt]; rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); + if (rc) + return rc; /* fall through */ case ISCSI_OP_SCSI_CMD_RSP: tcp_conn->in.ctask = session->cmds[itt]; @@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) goto copy_hdr; spin_lock(&session->lock); - __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); + iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); spin_unlock(&session->lock); break; @@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: - case ISCSI_OP_LOGOUT_RSP: - case ISCSI_OP_NOOP_IN: case ISCSI_OP_REJECT: case ISCSI_OP_ASYNC_EVENT: + /* + * It is possible that we could get a PDU with a buffer larger + * than 8K, but there are no targets that currently do this. 
+ * For now we fail until we find a vendor that needs it + */ + if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH < + tcp_conn->in.datalen) { + printk(KERN_ERR "iscsi_tcp: received buffer of len %u " + "but conn buffer is only %u (opcode %0x)\n", + tcp_conn->in.datalen, + DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode); + rc = ISCSI_ERR_PROTO; + break; + } + if (tcp_conn->in.datalen) goto copy_hdr; /* fall through */ + case ISCSI_OP_LOGOUT_RSP: + case ISCSI_OP_NOOP_IN: case ISCSI_OP_SCSI_TMFUNC_RSP: rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; @@ -523,7 +546,7 @@ copy_hdr: * skbs to complete the command then we have to copy the header * for later use */ - if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < + if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <= (tcp_conn->in.datalen + tcp_conn->in.padding + (conn->datadgst_en ? 4 : 0))) { debug_tcp("Copying header for later use. in.copy %d in.datalen" @@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask, * byte counters. **/ static inline int -iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) +iscsi_tcp_copy(struct iscsi_conn *conn) { - void *buf = tcp_conn->data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; int buf_size = tcp_conn->in.datalen; int buf_left = buf_size - tcp_conn->data_copied; int size = min(tcp_conn->in.copy, buf_left); @@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) BUG_ON(size <= 0); rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, - (char*)buf + tcp_conn->data_copied, size); + (char*)conn->data + tcp_conn->data_copied, size); BUG_ON(rc); tcp_conn->in.offset += size; @@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) done: /* check for non-exceptional status */ if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { - debug_scsi("done [sc %lx res %d itt 0x%x]\n", - (long)sc, sc->result, ctask->itt); + debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n", + (long)sc, sc->result, ctask->itt, + tcp_conn->in.hdr->flags); spin_lock(&conn->session->lock); - __iscsi_ctask_cleanup(conn, ctask); + iscsi_tcp_cleanup_ctask(conn, ctask); __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); spin_unlock(&conn->session->lock); } @@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn) break; case ISCSI_OP_SCSI_CMD_RSP: spin_lock(&conn->session->lock); - __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); + iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); spin_unlock(&conn->session->lock); case ISCSI_OP_TEXT_RSP: case ISCSI_OP_LOGIN_RSP: - case ISCSI_OP_NOOP_IN: case ISCSI_OP_ASYNC_EVENT: case ISCSI_OP_REJECT: /* * Collect data segment to the connection's data * placeholder */ - if (iscsi_tcp_copy(tcp_conn)) { + if (iscsi_tcp_copy(conn)) { rc = -EAGAIN; goto exit; } - rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, + rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data, tcp_conn->in.datalen); if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) - iscsi_recv_digest_update(tcp_conn, tcp_conn->data, + iscsi_recv_digest_update(tcp_conn, conn->data, tcp_conn->in.datalen); break; default: @@ -843,7 +866,7 @@ more: if (rc == -EAGAIN) goto nomore; else { - iscsi_conn_failure(conn, rc); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return 0; } } @@ -897,7 +920,7 @@ more: if (rc) { if (rc == -EAGAIN) goto again; - iscsi_conn_failure(conn, rc); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return 0; } tcp_conn->in.copy -= tcp_conn->in.padding; @@ -1028,9 +1051,8 @@ 
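One detail of the reworked ctask teardown further up is worth spelling out: any R2T descriptors still queued on the task's r2tqueue are pushed straight back onto the r2t pool before the command is completed, so a task torn down in the middle of a data-out sequence cannot leak R2T slots. The drain itself is the usual kfifo idiom; a generic sketch (the names here are illustrative, not the driver's):

/* Sketch: move every queued pointer from 'pending' back onto 'pool'. */
static void sketch_drain_pending_r2ts(struct kfifo *pending,
				      struct kfifo *pool)
{
	struct iscsi_r2t_info *r2t;

	while (__kfifo_get(pending, (void *)&r2t, sizeof(void *)))
		__kfifo_put(pool, (void *)&r2t, sizeof(void *));
}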
iscsi_conn_set_callbacks(struct iscsi_conn *conn) } static void -iscsi_conn_restore_callbacks(struct iscsi_conn *conn) +iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) { - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct sock *sk = tcp_conn->sock->sk; /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ @@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) ctask->imm_count - ctask->unsol_count; - debug_scsi("cmd [itt %x total %d imm %d imm_data %d " + debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d " "r2t_data %d]\n", ctask->itt, ctask->total_length, ctask->imm_count, ctask->unsol_count, tcp_ctask->r2t_data_count); @@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) } solicit_again: /* - * send Data-Out whitnin this R2T sequence. + * send Data-Out within this R2T sequence. */ if (!r2t->data_count) goto data_out_done; @@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_data_task *dtask = tcp_ctask->dtask; - int sent, rc; + int sent = 0, rc; tcp_ctask->xmstate &= ~XMSTATE_W_PAD; iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, @@ -1900,26 +1922,31 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; /* initial operational parameters */ tcp_conn->hdr_size = sizeof(struct iscsi_hdr); - tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; - - /* allocate initial PDU receive place holder */ - if (tcp_conn->data_size <= PAGE_SIZE) - tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL); - else - tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL, - get_order(tcp_conn->data_size)); - if (!tcp_conn->data) - goto max_recv_dlenght_alloc_fail; return cls_conn; -max_recv_dlenght_alloc_fail: - kfree(tcp_conn); tcp_conn_alloc_fail: iscsi_conn_teardown(cls_conn); return NULL; } +static void +iscsi_tcp_release_conn(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + + if (!tcp_conn->sock) + return; + + sock_hold(tcp_conn->sock->sk); + iscsi_conn_restore_callbacks(tcp_conn); + sock_put(tcp_conn->sock->sk); + + sock_release(tcp_conn->sock); + tcp_conn->sock = NULL; + conn->recv_lock = NULL; +} + static void iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) { @@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) if (conn->hdrdgst_en || conn->datadgst_en) digest = 1; + iscsi_tcp_release_conn(conn); iscsi_conn_teardown(cls_conn); /* now free tcp_conn */ @@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) crypto_free_tfm(tcp_conn->data_rx_tfm); } - /* free conn->data, size = MaxRecvDataSegmentLength */ - if (tcp_conn->data_size <= PAGE_SIZE) - kfree(tcp_conn->data); - else - free_pages((unsigned long)tcp_conn->data, - get_order(tcp_conn->data_size)); kfree(tcp_conn); } +static void +iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + iscsi_conn_stop(cls_conn, flag); + iscsi_tcp_release_conn(conn); +} + static int iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, @@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, return 0; } -static void -iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct 
iscsi_cmd_task *ctask) -{ - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_r2t_info *r2t; - - /* flush ctask's r2t queues */ - while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, - sizeof(void*)); - - __iscsi_ctask_cleanup(conn, ctask); -} - -static void -iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct sock *sk; - - if (!tcp_conn->sock) - return; - - sk = tcp_conn->sock->sk; - write_lock_bh(&sk->sk_callback_lock); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - write_unlock_bh(&sk->sk_callback_lock); -} - -static void -iscsi_tcp_terminate_conn(struct iscsi_conn *conn) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - - if (!tcp_conn->sock) - return; - - sock_hold(tcp_conn->sock->sk); - iscsi_conn_restore_callbacks(conn); - sock_put(tcp_conn->sock->sk); - - sock_release(tcp_conn->sock); - tcp_conn->sock = NULL; - conn->recv_lock = NULL; -} - /* called with host lock */ static void iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, @@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, sizeof(struct iscsi_hdr)); tcp_mtask->xmstate = XMSTATE_IMM_HDR; + tcp_mtask->sent = 0; if (mtask->data_count) iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, @@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, int value; switch(param) { - case ISCSI_PARAM_MAX_RECV_DLENGTH: { - char *saveptr = tcp_conn->data; - gfp_t flags = GFP_KERNEL; - - sscanf(buf, "%d", &value); - if (tcp_conn->data_size >= value) { - iscsi_set_param(cls_conn, param, buf, buflen); - break; - } - - spin_lock_bh(&session->lock); - if (conn->stop_stage == STOP_CONN_RECOVER) - flags = GFP_ATOMIC; - spin_unlock_bh(&session->lock); - - if (value <= PAGE_SIZE) - tcp_conn->data = kmalloc(value, flags); - else - tcp_conn->data = (void*)__get_free_pages(flags, - get_order(value)); - if (tcp_conn->data == NULL) { - tcp_conn->data = saveptr; - return -ENOMEM; - } - if (tcp_conn->data_size <= PAGE_SIZE) - kfree(saveptr); - else - free_pages((unsigned long)saveptr, - get_order(tcp_conn->data_size)); - iscsi_set_param(cls_conn, param, buf, buflen); - tcp_conn->data_size = value; - break; - } case ISCSI_PARAM_HDRDGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); tcp_conn->hdr_size = sizeof(struct iscsi_hdr); @@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) } static struct scsi_host_template iscsi_sht = { - .name = "iSCSI Initiator over TCP/IP, v" - ISCSI_TCP_VERSION, + .name = "iSCSI Initiator over TCP/IP", .queuecommand = iscsi_queuecommand, .change_queue_depth = iscsi_change_queue_depth, .can_queue = ISCSI_XMIT_CMDS_MAX - 1, @@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = { .get_conn_param = iscsi_tcp_conn_get_param, .get_session_param = iscsi_session_get_param, .start_conn = iscsi_conn_start, - .stop_conn = iscsi_conn_stop, - /* these are called as part of conn recovery */ - .suspend_conn_recv = iscsi_tcp_suspend_conn_rx, - .terminate_conn = iscsi_tcp_terminate_conn, + .stop_conn = iscsi_tcp_conn_stop, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_conn_get_stats, diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 808302832e68..6a4ee704e46e 100644 --- a/drivers/scsi/iscsi_tcp.h +++ 
b/drivers/scsi/iscsi_tcp.h @@ -78,8 +78,6 @@ struct iscsi_tcp_conn { char hdrext[4*sizeof(__u16) + sizeof(__u32)]; int data_copied; - char *data; /* data placeholder */ - int data_size; /* actual recv_dlength */ int stop_stage; /* conn_stop() flag: * * stop to recover, * * stop to terminate */ diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 386e5f21e191..73dd6c8deede 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -2746,7 +2746,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class) if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) return rc; - scontrol = (scontrol & 0x0f0) | 0x302; + scontrol = (scontrol & 0x0f0) | 0x304; if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) return rc; @@ -5185,28 +5185,6 @@ void ata_host_stop (struct ata_host_set *host_set) iounmap(host_set->mmio_base); } - -/** - * ata_host_remove - Unregister SCSI host structure with upper layers - * @ap: Port to unregister - * @do_unregister: 1 if we fully unregister, 0 to just stop the port - * - * LOCKING: - * Inherited from caller. - */ - -static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) -{ - struct Scsi_Host *sh = ap->host; - - DPRINTK("ENTER\n"); - - if (do_unregister) - scsi_remove_host(sh); - - ap->ops->port_stop(ap); -} - /** * ata_dev_init - Initialize an ata_device structure * @dev: Device structure to initialize @@ -5532,8 +5510,11 @@ int ata_device_add(const struct ata_probe_ent *ent) err_out: for (i = 0; i < count; i++) { - ata_host_remove(host_set->ports[i], 1); - scsi_host_put(host_set->ports[i]->host); + struct ata_port *ap = host_set->ports[i]; + if (ap) { + ap->ops->port_stop(ap); + scsi_host_put(ap->host); + } } err_free_ret: kfree(host_set); @@ -5558,7 +5539,7 @@ void ata_port_detach(struct ata_port *ap) int i; if (!ap->ops->error_handler) - return; + goto skip_eh; /* tell EH we're leaving & flush EH */ spin_lock_irqsave(ap->lock, flags); @@ -5594,6 +5575,7 @@ void ata_port_detach(struct ata_port *ap) cancel_delayed_work(&ap->hotplug_task); flush_workqueue(ata_aux_wq); + skip_eh: /* remove the associated SCSI host */ scsi_remove_host(ap->host); } @@ -5662,7 +5644,7 @@ int ata_scsi_release(struct Scsi_Host *host) DPRINTK("ENTER\n"); ap->ops->port_disable(ap); - ata_host_remove(ap, 0); + ap->ops->port_stop(ap); DPRINTK("EXIT\n"); return 1; diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c index b3095fd92863..2c34af99627d 100644 --- a/drivers/scsi/libata-eh.c +++ b/drivers/scsi/libata-eh.c @@ -763,12 +763,27 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, unsigned int action) { unsigned long flags; + struct ata_eh_info *ehi = &ap->eh_info; + struct ata_eh_context *ehc = &ap->eh_context; spin_lock_irqsave(ap->lock, flags); - ata_eh_clear_action(dev, &ap->eh_info, action); + /* Reset is represented by combination of actions and EHI + * flags. Suck in all related bits before clearing eh_info to + * avoid losing requested action. 
+ */ + if (action & ATA_EH_RESET_MASK) { + ehc->i.action |= ehi->action & ATA_EH_RESET_MASK; + ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK; - if (!(ap->eh_context.i.flags & ATA_EHI_QUIET)) + /* make sure all reset actions are cleared & clear EHI flags */ + action |= ATA_EH_RESET_MASK; + ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK; + } + + ata_eh_clear_action(dev, ehi, action); + + if (!(ehc->i.flags & ATA_EHI_QUIET)) ap->pflags |= ATA_PFLAG_RECOVERED; spin_unlock_irqrestore(ap->lock, flags); @@ -789,6 +804,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, unsigned int action) { + /* if reset is complete, clear all reset actions & reset modifier */ + if (action & ATA_EH_RESET_MASK) { + action |= ATA_EH_RESET_MASK; + ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK; + } + ata_eh_clear_action(dev, &ap->eh_context.i, action); } @@ -1275,8 +1296,6 @@ static int ata_eh_speed_down(struct ata_device *dev, int is_io, static void ata_eh_autopsy(struct ata_port *ap) { struct ata_eh_context *ehc = &ap->eh_context; - unsigned int action = ehc->i.action; - struct ata_device *failed_dev = NULL; unsigned int all_err_mask = 0; int tag, is_io = 0; u32 serror; @@ -1293,7 +1312,7 @@ static void ata_eh_autopsy(struct ata_port *ap) ehc->i.serror |= serror; ata_eh_analyze_serror(ap); } else if (rc != -EOPNOTSUPP) - action |= ATA_EH_HARDRESET; + ehc->i.action |= ATA_EH_HARDRESET; /* analyze NCQ failure */ ata_eh_analyze_ncq_error(ap); @@ -1314,7 +1333,7 @@ static void ata_eh_autopsy(struct ata_port *ap) qc->err_mask |= ehc->i.err_mask; /* analyze TF */ - action |= ata_eh_analyze_tf(qc, &qc->result_tf); + ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); /* DEV errors are probably spurious in case of ATA_BUS error */ if (qc->err_mask & AC_ERR_ATA_BUS) @@ -1328,11 +1347,11 @@ static void ata_eh_autopsy(struct ata_port *ap) /* SENSE_VALID trumps dev/unknown error and revalidation */ if (qc->flags & ATA_QCFLAG_SENSE_VALID) { qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); - action &= ~ATA_EH_REVALIDATE; + ehc->i.action &= ~ATA_EH_REVALIDATE; } /* accumulate error info */ - failed_dev = qc->dev; + ehc->i.dev = qc->dev; all_err_mask |= qc->err_mask; if (qc->flags & ATA_QCFLAG_IO) is_io = 1; @@ -1341,25 +1360,22 @@ static void ata_eh_autopsy(struct ata_port *ap) /* enforce default EH actions */ if (ap->pflags & ATA_PFLAG_FROZEN || all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) - action |= ATA_EH_SOFTRESET; + ehc->i.action |= ATA_EH_SOFTRESET; else if (all_err_mask) - action |= ATA_EH_REVALIDATE; + ehc->i.action |= ATA_EH_REVALIDATE; /* if we have offending qcs and the associated failed device */ - if (failed_dev) { + if (ehc->i.dev) { /* speed down */ - action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask); + ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io, + all_err_mask); /* perform per-dev EH action only on the offending device */ - ehc->i.dev_action[failed_dev->devno] |= - action & ATA_EH_PERDEV_MASK; - action &= ~ATA_EH_PERDEV_MASK; + ehc->i.dev_action[ehc->i.dev->devno] |= + ehc->i.action & ATA_EH_PERDEV_MASK; + ehc->i.action &= ~ATA_EH_PERDEV_MASK; } - /* record autopsy result */ - ehc->i.dev = failed_dev; - ehc->i.action |= action; - DPRINTK("EXIT\n"); } @@ -1482,6 +1498,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify, ata_reset_fn_t reset; int i, did_followup_srst, rc; + /* about to reset */ + ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); + /* 
Determine which reset to use and record in ehc->i.action. * prereset() may examine and modify it. */ @@ -1530,8 +1549,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, ata_port_printk(ap, KERN_INFO, "%s resetting port\n", reset == softreset ? "soft" : "hard"); - /* reset */ - ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); + /* mark that this EH session started with reset */ ehc->i.flags |= ATA_EHI_DID_RESET; rc = ata_do_reset(ap, reset, classes); @@ -1594,7 +1612,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify, postreset(ap, classes); /* reset successful, schedule revalidation */ - ata_eh_done(ap, NULL, ATA_EH_RESET_MASK); + ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); ehc->i.action |= ATA_EH_REVALIDATE; } @@ -1847,15 +1865,16 @@ static int ata_eh_skip_recovery(struct ata_port *ap) for (i = 0; i < ata_port_max_devices(ap); i++) { struct ata_device *dev = &ap->device[i]; - if (ata_dev_absent(dev) || ata_dev_ready(dev)) + if (!(dev->flags & ATA_DFLAG_SUSPENDED)) break; } if (i == ata_port_max_devices(ap)) return 1; - /* always thaw frozen port and recover failed devices */ - if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap)) + /* thaw frozen port, resume link and recover failed devices */ + if ((ap->pflags & ATA_PFLAG_FROZEN) || + (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap)) return 0; /* skip if class codes for all vacant slots are ATA_DEV_NONE */ diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index 7ced41ecde86..e92c31d698ff 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c @@ -2353,6 +2353,19 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) ata_gen_ata_desc_sense(qc); } + /* SCSI EH automatically locks door if sdev->locked is + * set. Sometimes door lock request continues to + * fail, for example, when no media is present. This + * creates a loop - SCSI EH issues door lock which + * fails and gets invoked again to acquire sense data + * for the failed command. + * + * If door lock fails, always clear sdev->locked to + * avoid this infinite loop. 
+ */ + if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) + qc->dev->sdev->locked = 0; + qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; qc->scsidone(cmd); ata_qc_free(qc); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 7e6e031cc41b..5884cd26d53a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session, { struct scsi_cmnd *sc = ctask->sc; + ctask->state = ISCSI_TASK_COMPLETED; ctask->sc = NULL; list_del_init(&ctask->running); __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); @@ -275,6 +276,25 @@ out: return rc; } +static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) +{ + struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; + + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + conn->tmfrsp_pdus_cnt++; + + if (conn->tmabort_state != TMABORT_INITIAL) + return; + + if (tmf->response == ISCSI_TMF_RSP_COMPLETE) + conn->tmabort_state = TMABORT_SUCCESS; + else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) + conn->tmabort_state = TMABORT_NOT_FOUND; + else + conn->tmabort_state = TMABORT_FAILED; + wake_up(&conn->ehwait); +} + /** * __iscsi_complete_pdu - complete pdu * @conn: iscsi conn @@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, switch(opcode) { case ISCSI_OP_LOGOUT_RSP: + if (datalen) { + rc = ISCSI_ERR_PROTO; + break; + } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; /* fall through */ case ISCSI_OP_LOGIN_RSP: @@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * login related PDU's exp_statsn is handled in * userspace */ - rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; list_del(&mtask->running); if (conn->login_mtask != mtask) __kfifo_put(session->mgmtpool.queue, @@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, break; } - conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - conn->tmfrsp_pdus_cnt++; - if (conn->tmabort_state == TMABORT_INITIAL) { - conn->tmabort_state = - ((struct iscsi_tm_rsp *)hdr)-> - response == ISCSI_TMF_RSP_COMPLETE ? 
- TMABORT_SUCCESS:TMABORT_FAILED; - /* unblock eh_abort() */ - wake_up(&conn->ehwait); - } + iscsi_tmf_rsp(conn, hdr); break; case ISCSI_OP_NOOP_IN: - if (hdr->ttt != ISCSI_RESERVED_TAG) { + if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) { rc = ISCSI_ERR_PROTO; break; } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; list_del(&mtask->running); if (conn->login_mtask != mtask) __kfifo_put(session->mgmtpool.queue, @@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } else if (itt == ISCSI_RESERVED_TAG) { switch(opcode) { case ISCSI_OP_NOOP_IN: - if (!datalen) { - rc = iscsi_check_assign_cmdsn(session, - (struct iscsi_nopin*)hdr); - if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) - rc = iscsi_recv_pdu(conn->cls_conn, - hdr, NULL, 0); - } else + if (datalen) { rc = ISCSI_ERR_PROTO; + break; + } + + rc = iscsi_check_assign_cmdsn(session, + (struct iscsi_nopin*)hdr); + if (rc) + break; + + if (hdr->ttt == ISCSI_RESERVED_TAG) + break; + + if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) + rc = ISCSI_ERR_CONN_FAILED; break; case ISCSI_OP_REJECT: /* we need sth like iscsi_reject_rsp()*/ @@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) } /* process command queue */ - while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, - sizeof(void*))) { + spin_lock_bh(&conn->session->lock); + while (!list_empty(&conn->xmitqueue)) { /* * iscsi tcp may readd the task to the xmitqueue to send * write data */ - spin_lock_bh(&conn->session->lock); - if (list_empty(&conn->ctask->running)) - list_add_tail(&conn->ctask->running, &conn->run_list); + conn->ctask = list_entry(conn->xmitqueue.next, + struct iscsi_cmd_task, running); + conn->ctask->state = ISCSI_TASK_RUNNING; + list_move_tail(conn->xmitqueue.next, &conn->run_list); spin_unlock_bh(&conn->session->lock); + rc = tt->xmit_cmd_task(conn, conn->ctask); if (rc) goto again; + spin_lock_bh(&conn->session->lock); } + spin_unlock_bh(&conn->session->lock); /* done with this ctask */ conn->ctask = NULL; @@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) sc->SCp.phase = session->age; sc->SCp.ptr = (char *)ctask; + ctask->state = ISCSI_TASK_PENDING; ctask->mtask = NULL; ctask->conn = conn; ctask->sc = sc; @@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) session->tt->init_cmd_task(ctask); - __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); + list_add_tail(&ctask->running, &conn->xmitqueue); debug_scsi( "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", sc->sc_data_direction == DMA_TO_DEVICE ? 
"write" : "read", @@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc, /* * xmit mutex and session lock must be held */ -#define iscsi_remove_task(tasktype) \ -static struct iscsi_##tasktype * \ -iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \ -{ \ - int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \ - struct iscsi_##tasktype *task; \ - \ - debug_scsi("searching %d tasks\n", nr_tasks); \ - \ - for (i = 0; i < nr_tasks; i++) { \ - __kfifo_get(fifo, (void*)&task, sizeof(void*)); \ - debug_scsi("check task %u\n", task->itt); \ - \ - if (task->itt == itt) { \ - debug_scsi("matched task\n"); \ - return task; \ - } \ - \ - __kfifo_put(fifo, (void*)&task, sizeof(void*)); \ - } \ - return NULL; \ -} +static struct iscsi_mgmt_task * +iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt) +{ + int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); + struct iscsi_mgmt_task *task; -iscsi_remove_task(mgmt_task); -iscsi_remove_task(cmd_task); + debug_scsi("searching %d tasks\n", nr_tasks); + + for (i = 0; i < nr_tasks; i++) { + __kfifo_get(fifo, (void*)&task, sizeof(void*)); + debug_scsi("check task %u\n", task->itt); + + if (task->itt == itt) { + debug_scsi("matched task\n"); + return task; + } + + __kfifo_put(fifo, (void*)&task, sizeof(void*)); + } + return NULL; +} static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) { @@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, { struct scsi_cmnd *sc; - conn->session->tt->cleanup_cmd_task(conn, ctask); - iscsi_ctask_mtask_cleanup(ctask); - sc = ctask->sc; if (!sc) return; + + conn->session->tt->cleanup_cmd_task(conn, ctask); + iscsi_ctask_mtask_cleanup(ctask); + sc->result = err; sc->resid = sc->request_bufflen; iscsi_complete_command(conn->session, ctask); @@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; struct iscsi_conn *conn = ctask->conn; struct iscsi_session *session = conn->session; - struct iscsi_cmd_task *pending_ctask; int rc; conn->eh_abort_cnt++; @@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed; /* ctask completed before time out */ - if (!ctask->sc) - goto success; + if (!ctask->sc) { + spin_unlock_bh(&session->lock); + debug_scsi("sc completed while abort in progress\n"); + goto success_rel_mutex; + } /* what should we do here ? 
*/ if (conn->ctask == ctask) { @@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed; } - /* check for the easy pending cmd abort */ - pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt); - if (pending_ctask) { - /* iscsi_tcp queues write transfers on the xmitqueue */ - if (list_empty(&pending_ctask->running)) { - debug_scsi("found pending task\n"); - goto success; - } else - __kfifo_put(conn->xmitqueue, (void*)&pending_ctask, - sizeof(void*)); - } + if (ctask->state == ISCSI_TASK_PENDING) + goto success_cleanup; conn->tmabort_state = TMABORT_INITIAL; @@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) rc = iscsi_exec_abort_task(sc, ctask); spin_lock_bh(&session->lock); - iscsi_ctask_mtask_cleanup(ctask); if (rc || sc->SCp.phase != session->age || session->state != ISCSI_STATE_LOGGED_IN) goto failed; + iscsi_ctask_mtask_cleanup(ctask); - /* ctask completed before tmf abort response */ - if (!ctask->sc) { - debug_scsi("sc completed while abort in progress\n"); - goto success; - } - - if (conn->tmabort_state != TMABORT_SUCCESS) { + switch (conn->tmabort_state) { + case TMABORT_SUCCESS: + goto success_cleanup; + case TMABORT_NOT_FOUND: + if (!ctask->sc) { + /* ctask completed before tmf abort response */ + spin_unlock_bh(&session->lock); + debug_scsi("sc completed while abort in progress\n"); + goto success_rel_mutex; + } + /* fall through */ + default: + /* timedout or failed */ spin_unlock_bh(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); spin_lock_bh(&session->lock); goto failed; } -success: +success_cleanup: debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); spin_unlock_bh(&session->lock); @@ -1121,6 +1146,7 @@ success: spin_unlock(&session->lock); write_unlock_bh(conn->recv_lock); +success_rel_mutex: mutex_unlock(&conn->xmitmutex); return SUCCESS; @@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, if (cmd_task_size) ctask->dd_data = &ctask[1]; ctask->itt = cmd_i; + INIT_LIST_HEAD(&ctask->running); } spin_lock_init(&session->lock); @@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, if (mgmt_task_size) mtask->dd_data = &mtask[1]; mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; + INIT_LIST_HEAD(&mtask->running); } if (scsi_add_host(shost, NULL)) @@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct iscsi_session *session = iscsi_hostdata(shost->hostdata); + struct module *owner = cls_session->transport->owner; scsi_remove_host(shost); iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); iscsi_pool_free(&session->cmdpool, (void**)session->cmds); + kfree(session->targetname); + iscsi_destroy_session(cls_session); scsi_host_put(shost); - module_put(cls_session->transport->owner); + module_put(owner); } EXPORT_SYMBOL_GPL(iscsi_session_teardown); @@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) conn->tmabort_state = TMABORT_INITIAL; INIT_LIST_HEAD(&conn->run_list); INIT_LIST_HEAD(&conn->mgmt_run_list); - - /* initialize general xmit PDU commands queue */ - conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), - GFP_KERNEL, NULL); - if (conn->xmitqueue == ERR_PTR(-ENOMEM)) - goto xmitqueue_alloc_fail; + INIT_LIST_HEAD(&conn->xmitqueue); /* initialize general immediate & non-immediate PDU commands queue */ conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), @@ -1394,7 +1420,7 
@@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); if (!data) goto login_mtask_data_alloc_fail; - conn->login_mtask->data = data; + conn->login_mtask->data = conn->data = data; init_timer(&conn->tmabort_timer); mutex_init(&conn->xmitmutex); @@ -1410,8 +1436,6 @@ login_mtask_alloc_fail: mgmtqueue_alloc_fail: kfifo_free(conn->immqueue); immqueue_alloc_fail: - kfifo_free(conn->xmitqueue); -xmitqueue_alloc_fail: iscsi_destroy_conn(cls_conn); return NULL; } @@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); mutex_lock(&conn->xmitmutex); - if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) { - if (session->tt->suspend_conn_recv) - session->tt->suspend_conn_recv(conn); - - session->tt->terminate_conn(conn); - } spin_lock_bh(&session->lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; @@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) } spin_lock_bh(&session->lock); - kfree(conn->login_mtask->data); + kfree(conn->data); + kfree(conn->persistent_address); __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, sizeof(void*)); list_del(&conn->item); @@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; spin_unlock_bh(&session->lock); - kfifo_free(conn->xmitqueue); kfifo_free(conn->immqueue); kfifo_free(conn->mgmtqueue); @@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn) struct iscsi_cmd_task *ctask, *tmp; /* flush pending */ - while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { + list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) { debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, ctask->itt); fail_command(conn, ctask, DID_BUS_BUSY << 16); @@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); spin_unlock_bh(&session->lock); - if (session->tt->suspend_conn_recv) - session->tt->suspend_conn_recv(conn); + write_lock_bh(conn->recv_lock); + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + write_unlock_bh(conn->recv_lock); mutex_lock(&conn->xmitmutex); /* @@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, } } - session->tt->terminate_conn(conn); /* * flush queues. 
*/ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5c68cdd8736f..d384c16f4a87 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -222,7 +222,7 @@ lpfc_issue_lip(struct Scsi_Host *host) pmboxq->mb.mbxCommand = MBX_DOWN_LINK; pmboxq->mb.mbxOwner = OWN_HOST; - mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); @@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count) phba->sysfs_mbox.mbox == NULL ) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EINVAL; + return -EAGAIN; } } @@ -1000,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) spin_unlock_irq(phba->host->host_lock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, - phba->fc_ratov * 2); + lpfc_mbox_tmo_val(phba, + phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); spin_lock_irq(phba->host->host_lock); } if (rc != MBX_SUCCESS) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -ENODEV; + return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; } phba->sysfs_mbox.state = SMBOX_READING; } @@ -1016,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) printk(KERN_WARNING "mbox_read: Bad State\n"); sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EINVAL; + return -EAGAIN; } memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); @@ -1210,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost) struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; struct lpfc_sli *psli = &phba->sli; struct fc_host_statistics *hs = &phba->link_stats; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; + unsigned long seconds; int rc = 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1272,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; hs->error_frames = pmb->un.varRdLnk.crcCnt; + hs->link_failure_count -= lso->link_failure_count; + hs->loss_of_sync_count -= lso->loss_of_sync_count; + hs->loss_of_signal_count -= lso->loss_of_signal_count; + hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; + hs->invalid_tx_word_count -= lso->invalid_tx_word_count; + hs->invalid_crc_count -= lso->invalid_crc_count; + hs->error_frames -= lso->error_frames; + if (phba->fc_topology == TOPOLOGY_LOOP) { hs->lip_count = (phba->fc_eventTag >> 1); + hs->lip_count -= lso->link_events; hs->nos_count = -1; } else { hs->lip_count = -1; hs->nos_count = (phba->fc_eventTag >> 1); + hs->nos_count -= lso->link_events; } hs->dumped_frames = -1; -/* FIX ME */ - /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ + seconds = get_seconds(); + if (seconds < psli->stats_start) + hs->seconds_since_last_reset = seconds + + ((unsigned long)-1 - psli->stats_start); + else + hs->seconds_since_last_reset = seconds - psli->stats_start; return hs; } +static void +lpfc_reset_stats(struct Scsi_Host *shost) +{ + struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *pmb; + int rc = 0; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return; + memset(pmboxq, 0, 
sizeof(LPFC_MBOXQ_t)); + + pmb = &pmboxq->mb; + pmb->mbxCommand = MBX_READ_STATUS; + pmb->mbxOwner = OWN_HOST; + pmb->un.varWords[0] = 0x1; /* reset request */ + pmboxq->context1 = NULL; + + if ((phba->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc == MBX_TIMEOUT) + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + else + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + pmb->mbxCommand = MBX_READ_LNK_STAT; + pmb->mbxOwner = OWN_HOST; + pmboxq->context1 = NULL; + + if ((phba->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc == MBX_TIMEOUT) + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + else + mempool_free( pmboxq, phba->mbox_mem_pool); + return; + } + + lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; + lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; + lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; + lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; + lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; + lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; + lso->error_frames = pmb->un.varRdLnk.crcCnt; + lso->link_events = (phba->fc_eventTag >> 1); + + psli->stats_start = get_seconds(); + + return; +} /* * The LPFC driver treats linkdown handling as target loss events so there @@ -1431,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = { */ .get_fc_host_stats = lpfc_get_stats, - - /* the LPFC driver doesn't support resetting stats yet */ + .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 517e9e4dd461..2a176467f71b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +int lpfc_mbox_tmo_val(struct lpfc_hba *, int); int lpfc_mem_alloc(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index b65ee57af53e..bbb7310210b0 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba, } ct_unsol_event_exit_piocbq: + list_del(&head); if (pmbuf) { list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { lpfc_mbuf_free(phba, matp->virt, matp->phys); @@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0239 NameServer Rsp " + "%d:0208 NameServer Rsp " "Data: x%x\n", phba->brd_no, phba->fc_flag); @@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) lpfc_decode_firmware_rev(phba, fwrev, 0); - if (phba->Port[0]) { - sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, - phba->Port, fwrev, lpfc_release_version); - } else { - 
sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, - fwrev, lpfc_release_version); - } + sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, + fwrev, lpfc_release_version); + return; } /* diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b89f6cb641e6..3567de613162 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1848,9 +1848,12 @@ static void lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb) { + IOCB_t *irsp; struct lpfc_nodelist *ndlp; LPFC_MBOXQ_t *mbox = NULL; + irsp = &rspiocb->iocb; + ndlp = (struct lpfc_nodelist *) cmdiocb->context1; if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; @@ -1893,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, mempool_free( mbox, phba->mbox_mem_pool); } else { mempool_free( mbox, phba->mbox_mem_pool); - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - ndlp = NULL; + /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ + if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || + (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || + (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { + if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + ndlp = NULL; + } } } } @@ -2839,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* Xmit ELS RPS ACC response tag */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS RPS ACC response tag x%x " + "%d:0118 Xmit ELS RPS ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -2948,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS RPL ACC response tag x%x " + "%d:0120 Xmit ELS RPL ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -3109,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist *ndlp, *next_ndlp; /* FAN received */ - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", phba->brd_no); icmd = &cmdiocb->iocb; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 4d6cf990c4fc..b2f1552f1848 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } + + spin_lock_irq(phba->host->host_lock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { @@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) mempool_free(mb, phba->mbox_mem_pool); } } + spin_unlock_irq(phba->host->host_lock); lpfc_els_abort(phba,ndlp,0); spin_lock_irq(phba->host->host_lock); @@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to REGLOGIN */ /* FIND node DID reglogin */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID reglogin" + "%d:0901 FIND node DID reglogin" " Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t 
did) /* LOG change to PRLI */ /* FIND node DID prli */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID prli " + "%d:0902 FIND node DID prli " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to NPR */ /* FIND node DID npr */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID npr " + "%d:0903 FIND node DID npr " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to UNUSED */ /* FIND node DID unused */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID unused " + "%d:0905 FIND node DID unused " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " + "%d:0206 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; @@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) if (!clearlambox) { clrlaerr = 1; lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " + "%d:0207 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ef47b824cbed..f6948ffe689a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1379,6 +1379,7 @@ lpfc_offline(struct lpfc_hba * phba) /* stop all timers associated with this hba */ lpfc_stop_timer(phba); phba->work_hba_events = 0; + phba->work_ha = 0; lpfc_printf_log(phba, KERN_WARNING, @@ -1616,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_iocbq; } - /* We can rely on a queue depth attribute only after SLI HBA setup */ + /* + * Set initial can_queue value since 0 is no longer supported and + * scsi_add_host will fail. This will be adjusted later based on the + * max xri value determined in hba setup. + */ host->can_queue = phba->cfg_hba_queue_depth - 10; /* Tell the midlayer we support 16 byte commands */ @@ -1656,6 +1661,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_irq; } + /* + * hba setup may have changed the hba_queue_depth so we need to adjust + * the value of can_queue. 
+ */ + host->can_queue = phba->cfg_hba_queue_depth - 10; + lpfc_discovery_wait(phba); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index e42f22aaf71b..4d016c2a1b26 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba) return mbq; } + +int +lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) +{ + switch (cmd) { + case MBX_WRITE_NV: /* 0x03 */ + case MBX_UPDATE_CFG: /* 0x1B */ + case MBX_DOWN_LOAD: /* 0x1C */ + case MBX_DEL_LD_ENTRY: /* 0x1D */ + case MBX_LOAD_AREA: /* 0x81 */ + case MBX_FLASH_WR_ULA: /* 0x98 */ + case MBX_LOAD_EXP_ROM: /* 0x9C */ + return LPFC_MBOX_TMO_FLASH_CMD; + } + return LPFC_MBOX_TMO; +} diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index bd0b0e293d63..20449a8dd53d 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, /* Abort outstanding I/O on NPort */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0201 Abort outstanding I/O on NPort x%x " + "%d:0205 Abort outstanding I/O on NPort x%x " "Data: x%x x%x x%x\n", phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, mbox->context2 = ndlp; ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); + /* + * If there is an outstanding PLOGI issued, abort it before + * sending ACC rsp for received PLOGI. If pending plogi + * is not canceled here, the plogi will be rejected by + * remote port and will be retried. On a configuration with + * single discovery thread, this will cause a huge delay in + * discovery. Also this will cause multiple state machines + * running in parallel for this node. + */ + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp, 1); + } + lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); return 1; @@ -1601,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, lpfc_rcv_padisc(phba, ndlp, cmdiocb); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + /* + * Do not start discovery if discovery is about to start + * or discovery in progress for this node. Starting discovery + * here will affect the counting of discovery threads. 
+ */ + if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && + (ndlp->nlp_flag & NLP_NPR_2B_DISC)){ if (ndlp->nlp_flag & NLP_NPR_ADISC) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; ndlp->nlp_state = NLP_STE_ADISC_ISSUE; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a760a44173df..a8816a8738f8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -841,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return 0; } +static void +lpfc_block_error_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + + spin_lock_irq(shost->host_lock); + while (rport->port_state == FC_PORTSTATE_BLOCKED) { + spin_unlock_irq(shost->host_lock); + msleep(1000); + spin_lock_irq(shost->host_lock); + } + spin_unlock_irq(shost->host_lock); + return; +} static int lpfc_abort_handler(struct scsi_cmnd *cmnd) @@ -855,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) unsigned int loop_count = 0; int ret = SUCCESS; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; @@ -957,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) int ret = FAILED; int cnt, loopcnt; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); /* * If target is not in a MAPPED state, delay the reset until @@ -1073,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) int cnt, loopcnt; struct lpfc_scsi_buf * lpfc_cmd; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = lpfc_get_scsi_buf(phba); @@ -1104,7 +1123,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0713 Bus Reset on target %d failed\n", + "%d:0700 Bus Reset on target %d failed\n", phba->brd_no, i); err_count++; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 350a625fa224..70f4d5a1348e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) kfree(old_arr); return iotag; } - } + } else + spin_unlock_irq(phba->host->host_lock); lpfc_printf_log(phba, KERN_ERR,LOG_SLI, "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", @@ -969,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - printk(KERN_INFO "%s: IOCB cmd 0x%x processed." - " Skipping completion\n", __FUNCTION__, - irsp->ulpCommand); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0314 IOCB cmd 0x%x" + " processed. Skipping" + " completion", phba->brd_no, + irsp->ulpCommand); break; } @@ -1104,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, if (unlikely(irsp->ulpStatus)) { /* Rsp ring error: IOCB */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "%d:0326 Rsp Ring %d error: IOCB Data: " + "%d:0336 Rsp Ring %d error: IOCB Data: " "x%x x%x x%x x%x x%x x%x x%x x%x\n", phba->brd_no, pring->ringno, irsp->un.ulpWord[0], irsp->un.ulpWord[1], @@ -1122,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - printk(KERN_INFO "%s: IOCB cmd 0x%x processed. 
" - "Skipping completion\n", __FUNCTION__, - irsp->ulpCommand); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0333 IOCB cmd 0x%x" + " processed. Skipping" + " completion\n", phba->brd_no, + irsp->ulpCommand); break; } @@ -1155,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, } else { /* Unknown IOCB command */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0321 Unknown IOCB command " + "%d:0334 Unknown IOCB command " "Data: x%x, x%x x%x x%x x%x\n", phba->brd_no, type, irsp->ulpCommand, irsp->ulpStatus, irsp->ulpIoTag, @@ -1238,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0312 Ring %d handler: portRspPut %d " + "%d:0303 Ring %d handler: portRspPut %d " "is bigger then rsp ring %d\n", phba->brd_no, pring->ringno, portRspPut, portRspMax); @@ -1383,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0321 Unknown IOCB command " + "%d:0335 Unknown IOCB command " "Data: x%x x%x x%x x%x\n", phba->brd_no, irsp->ulpCommand, @@ -1399,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, next_iocb, &saveq->list, list) { + list_del(&rspiocbp->list); lpfc_sli_release_iocbq(phba, rspiocbp); } } - lpfc_sli_release_iocbq(phba, saveq); } } @@ -1711,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) phba->fc_myDID = 0; phba->fc_prevDID = 0; - psli->sli_flag = 0; - /* Turn off parity checking and serr during the physical reset */ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); @@ -1760,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) /* Restart HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, + "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, phba->hba_state, psli->sli_flag); word0 = 0; @@ -1792,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) spin_unlock_irq(phba->host->host_lock); + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = get_seconds(); + if (skip_post) mdelay(100); else @@ -1902,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) } while (resetcount < 2 && !done) { + spin_lock_irq(phba->host->host_lock); + phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(phba->host->host_lock); phba->hba_state = LPFC_STATE_UNKNOWN; lpfc_sli_brdrestart(phba); msleep(2500); @@ -1909,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) if (rc) break; + spin_lock_irq(phba->host->host_lock); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(phba->host->host_lock); resetcount++; /* Call pre CONFIG_PORT mailbox command initialization. 
A value of 0 @@ -2194,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) return (MBX_NOT_FINISHED); } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); + mod_timer(&psli->mbox_tmo, (jiffies + + (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); } /* Mailbox cmd issue */ @@ -2254,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) break; case MBX_POLL: - i = 0; psli->mbox_active = NULL; if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First read mbox status word */ @@ -2268,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Read the HBA Host Attention Register */ ha_copy = readl(phba->HAregaddr); + i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); + i *= 1000; /* Convert to ms */ + /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || (!(ha_copy & HA_MBATT) && (phba->hba_state > LPFC_WARM_START))) { - if (i++ >= 100) { + if (i-- <= 0) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); @@ -2290,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Can be in interrupt context, do not sleep */ /* (or might be called with interrupts disabled) */ - mdelay(i); + mdelay(1); spin_lock_irqsave(phba->host->host_lock, drvr_flag); @@ -3005,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0329 IOCB wait timeout error - no " + "%d:0338 IOCB wait timeout error - no " "wake response Data x%x\n", phba->brd_no, timeout); retval = IOCB_TIMEDOUT; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index d8ef0d2894d4..e26de6809358 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -172,6 +172,18 @@ struct lpfc_sli_stat { uint32_t mbox_busy; /* Mailbox cmd busy */ }; +/* Structure to store link status values when port stats are reset */ +struct lpfc_lnk_stat { + uint32_t link_failure_count; + uint32_t loss_of_sync_count; + uint32_t loss_of_signal_count; + uint32_t prim_seq_protocol_err_count; + uint32_t invalid_tx_word_count; + uint32_t invalid_crc_count; + uint32_t error_frames; + uint32_t link_events; +}; + /* Structure used to hold SLI information */ struct lpfc_sli { uint32_t num_rings; @@ -201,6 +213,8 @@ struct lpfc_sli { struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ size_t iocbq_lookup_len; /* current lengs of the array */ uint16_t last_iotag; /* last allocated IOTAG */ + unsigned long stats_start; /* in seconds */ + struct lpfc_lnk_stat lnk_stat_offsets; }; /* Given a pointer to the start of the ring, and the slot number of @@ -211,3 +225,9 @@ struct lpfc_sli { #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ +#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write + * or erase cmds. This is especially + * long because of the potential of + * multiple flash erases that can be + * spawned. + */ diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 10e89c6ae823..c7091ea29f3f 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.7" +#define LPFC_DRIVER_VERSION "8.1.9" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h index 4675343228ad..8cd0bd1d0f7c 100644 --- a/drivers/scsi/megaraid/mega_common.h +++ b/drivers/scsi/megaraid/mega_common.h @@ -37,6 +37,12 @@ #define LSI_MAX_CHANNELS 16 #define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) +#define HBA_SIGNATURE_64_BIT 0x299 +#define PCI_CONF_AMISIG64 0xa4 + +#define MEGA_SCSI_INQ_EVPD 1 +#define MEGA_INVALID_FIELD_IN_CDB 0x24 + /** * scb_t - scsi command control block diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h index bdaee144a1c3..b8aa34202ec3 100644 --- a/drivers/scsi/megaraid/megaraid_ioctl.h +++ b/drivers/scsi/megaraid/megaraid_ioctl.h @@ -132,6 +132,10 @@ typedef struct uioc { /* Driver Data: */ void __user * user_data; uint32_t user_data_len; + + /* 64bit alignment */ + uint32_t pad_for_64bit_align; + mraid_passthru_t __user *user_pthru; mraid_passthru_t *pthru32; diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 7ae580f17e64..266b3910846b 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * FILE : megaraid_mbox.c - * Version : v2.20.4.8 (Apr 11 2006) + * Version : v2.20.4.9 (Jul 16 2006) * * Authors: * Atul Mukker @@ -736,6 +736,7 @@ megaraid_init_mbox(adapter_t *adapter) struct pci_dev *pdev; mraid_device_t *raid_dev; int i; + uint32_t magic64; adapter->ito = MBOX_TIMEOUT; @@ -879,12 +880,33 @@ megaraid_init_mbox(adapter_t *adapter) // Set the DMA mask to 64-bit. 
All supported controllers as capable of // DMA in this range - if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { + pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); - con_log(CL_ANN, (KERN_WARNING - "megaraid: could not set DMA mask for 64-bit.\n")); + if (((magic64 == HBA_SIGNATURE_64_BIT) && + ((adapter->pdev->subsystem_device != + PCI_SUBSYS_ID_MEGARAID_SATA_150_6) || + (adapter->pdev->subsystem_device != + PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_VERDE) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || + (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || + (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { + if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: DMA mask for 64-bit failed\n")); - goto out_free_sysfs_res; + if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: 32-bit DMA mask failed\n")); + goto out_free_sysfs_res; + } + } } // setup tasklet for DPC @@ -1638,6 +1660,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) rdev->last_disp |= (1L << SCP2CHANNEL(scp)); } + if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { + scp->sense_buffer[0] = 0x70; + scp->sense_buffer[2] = ILLEGAL_REQUEST; + scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB; + scp->result = CHECK_CONDITION << 1; + return NULL; + } + /* Fall through */ case READ_CAPACITY: diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 868fb0ec93e7..2b5a3285f799 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -21,8 +21,8 @@ #include "megaraid_ioctl.h" -#define MEGARAID_VERSION "2.20.4.8" -#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" +#define MEGARAID_VERSION "2.20.4.9" +#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" /* diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index e8f534fb336b..d85b9a8f1b8d 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. 
* * FILE : megaraid_mm.c - * Version : v2.20.2.6 (Mar 7 2005) + * Version : v2.20.2.7 (Jul 16 2006) * * Common management module */ diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h index 3d9e67d6849d..c8762b2b8ed1 100644 --- a/drivers/scsi/megaraid/megaraid_mm.h +++ b/drivers/scsi/megaraid/megaraid_mm.h @@ -27,9 +27,9 @@ #include "megaraid_ioctl.h" -#define LSI_COMMON_MOD_VERSION "2.20.2.6" +#define LSI_COMMON_MOD_VERSION "2.20.2.7" #define LSI_COMMON_MOD_EXT_VERSION \ - "(Release Date: Mon Mar 7 00:01:03 EST 2005)" + "(Release Date: Sun Jul 16 00:01:03 EST 2006)" #define LSI_DBGLVL dbglevel diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c index d1f38c32aa15..efc8fff1d250 100644 --- a/drivers/scsi/pdc_adma.c +++ b/drivers/scsi/pdc_adma.c @@ -183,7 +183,8 @@ static struct ata_port_info adma_port_info[] = { { .sht = &adma_ata_sht, .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | - ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, + ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | + ATA_FLAG_PIO_POLLING, .pio_mask = 0x10, /* pio4 */ .udma_mask = 0x1f, /* udma0-4 */ .port_ops = &adma_ata_ops, diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 139ea0e27fd7..0930260aec2c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -487,6 +487,7 @@ typedef struct { #define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ #define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ /* used. */ +#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */ #define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ #define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ #define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9758dba95542..859649160caa 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha) int qla2x00_abort_isp(scsi_qla_host_t *ha) { + int rval; unsigned long flags = 0; uint16_t cnt; srb_t *sp; @@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha) ha->isp_abort_cnt = 0; clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); + + if (ha->eft) { + rval = qla2x00_trace_control(ha, TC_ENABLE, + ha->eft_dma, EFT_NUM_BUFFERS); + if (rval) { + qla_printk(KERN_WARNING, ha, + "Unable to reinitialize EFT " + "(%d).\n", rval); + } + } } else { /* failed the ISP abort */ ha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 2b60a27eff0b..c5b3c610a32a 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, mrk24->nport_handle = cpu_to_le16(loop_id); mrk24->lun[1] = LSB(lun); mrk24->lun[2] = MSB(lun); + host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16(lun); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 795bf15b1b8f..de0613135f70 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); break; + + case MBA_TRACE_NOTIFICATION: + DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", + ha->host_no, mb[1], mb[2])); + break; } } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ec7ebb6037e6..65cbe2f5eea2 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, @@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, @@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, @@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data) next_loopid = 0; list_for_each_entry(fcport, 
&ha->fcports, list) { - if (fcport->port_type != FCT_TARGET) - continue; - /* * If the port is not ONLINE then try to login * to it if we haven't run out of retries. diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index d2d683440659..971259032ef7 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.01.05-k3" +#define QLA2XXX_VERSION "8.01.07-k1" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 1 -#define QLA_DRIVER_PATCH_VER 5 +#define QLA_DRIVER_PATCH_VER 7 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 64631bd38952..4776f4e55839 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c @@ -269,8 +269,15 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = { { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_20619 }, +/* TODO: remove all associated board_20771 code, as it completely + * duplicates board_2037x code, unless reason for separation can be + * divined. + */ +#if 0 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_20771 }, +#endif + { } /* terminate list */ }; diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c index 2e0f4a4076af..3f368c7d3ef9 100644 --- a/drivers/scsi/sata_sil24.c +++ b/drivers/scsi/sata_sil24.c @@ -1106,7 +1106,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) probe_ent->irq = pdev->irq; probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = port_base; probe_ent->private_data = hpriv; hpriv->host_base = host_base; diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c index 03baec2191bf..01d40369a8a5 100644 --- a/drivers/scsi/sata_via.c +++ b/drivers/scsi/sata_via.c @@ -74,6 +74,7 @@ enum { static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static void vt6420_error_handler(struct ata_port *ap); static const struct pci_device_id svia_pci_tbl[] = { { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, @@ -107,7 +108,38 @@ static struct scsi_host_template svia_sht = { .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations svia_sata_ops = { +static const struct ata_port_operations vt6420_sata_ops = { + .port_disable = ata_port_disable, + + .tf_load = ata_tf_load, + .tf_read = ata_tf_read, + .check_status = ata_check_status, + .exec_command = ata_exec_command, + .dev_select = ata_std_dev_select, + + .bmdma_setup = ata_bmdma_setup, + .bmdma_start = ata_bmdma_start, + .bmdma_stop = ata_bmdma_stop, + .bmdma_status = ata_bmdma_status, + + .qc_prep = ata_qc_prep, + .qc_issue = ata_qc_issue_prot, + .data_xfer = ata_pio_data_xfer, + + .freeze = ata_bmdma_freeze, + .thaw = ata_bmdma_thaw, + .error_handler = vt6420_error_handler, + .post_internal_cmd = ata_bmdma_post_internal_cmd, + + .irq_handler = ata_interrupt, + .irq_clear = ata_bmdma_irq_clear, + + .port_start = ata_port_start, + .port_stop = ata_port_stop, + .host_stop = ata_host_stop, +}; + +static const struct ata_port_operations vt6421_sata_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, @@ -141,13 +173,13 @@ static const struct ata_port_operations svia_sata_ops = { .host_stop = ata_host_stop, }; -static struct ata_port_info svia_port_info = { +static struct 
ata_port_info vt6420_port_info = { .sht = &svia_sht, .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = 0x7f, - .port_ops = &svia_sata_ops, + .port_ops = &vt6420_sata_ops, }; MODULE_AUTHOR("Jeff Garzik"); @@ -170,6 +202,81 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); } +/** + * vt6420_prereset - prereset for vt6420 + * @ap: target ATA port + * + * SCR registers on vt6420 are pieces of shit and may hang the + * whole machine completely if accessed with the wrong timing. + * To avoid such catastrophe, vt6420 doesn't provide generic SCR + * access operations, but uses SStatus and SControl only during + * boot probing in controlled way. + * + * As the old (pre EH update) probing code is proven to work, we + * strictly follow the access pattern. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +static int vt6420_prereset(struct ata_port *ap) +{ + struct ata_eh_context *ehc = &ap->eh_context; + unsigned long timeout = jiffies + (HZ * 5); + u32 sstatus, scontrol; + int online; + + /* don't do any SCR stuff if we're not loading */ + if (!(ap->pflags & ATA_PFLAG_LOADING)) + goto skip_scr; + + /* Resume phy. This is the old resume sequence from + * __sata_phy_reset(). + */ + svia_scr_write(ap, SCR_CONTROL, 0x300); + svia_scr_read(ap, SCR_CONTROL); /* flush */ + + /* wait for phy to become ready, if necessary */ + do { + msleep(200); + if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1) + break; + } while (time_before(jiffies, timeout)); + + /* open code sata_print_link_status() */ + sstatus = svia_scr_read(ap, SCR_STATUS); + scontrol = svia_scr_read(ap, SCR_CONTROL); + + online = (sstatus & 0xf) == 0x3; + + ata_port_printk(ap, KERN_INFO, + "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n", + online ? 
"up" : "down", sstatus, scontrol); + + /* SStatus is read one more time */ + svia_scr_read(ap, SCR_STATUS); + + if (!online) { + /* tell EH to bail */ + ehc->i.action &= ~ATA_EH_RESET_MASK; + return 0; + } + + skip_scr: + /* wait for !BSY */ + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + return 0; +} + +static void vt6420_error_handler(struct ata_port *ap) +{ + return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, + NULL, ata_std_postreset); +} + static const unsigned int svia_bar_sizes[] = { 8, 4, 8, 4, 16, 256 }; @@ -210,7 +317,7 @@ static void vt6421_init_addrs(struct ata_probe_ent *probe_ent, static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) { struct ata_probe_ent *probe_ent; - struct ata_port_info *ppi = &svia_port_info; + struct ata_port_info *ppi = &vt6420_port_info; probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); if (!probe_ent) @@ -239,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev) probe_ent->sht = &svia_sht; probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; - probe_ent->port_ops = &svia_sata_ops; + probe_ent->port_ops = &vt6421_sata_ops; probe_ent->n_ports = N_PORTS; probe_ent->irq = pdev->irq; probe_ent->irq_flags = IRQF_SHARED; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 6a5b731bd5ba..a8ed5a22009d 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -460,7 +460,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) * Return value: * SUCCESS or FAILED or NEEDS_RETRY **/ -static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense) +static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, + int cmnd_size, int timeout, int copy_sense) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; @@ -490,6 +491,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense old_cmd_len = scmd->cmd_len; old_use_sg = scmd->use_sg; + memset(scmd->cmnd, 0, sizeof(scmd->cmnd)); + memcpy(scmd->cmnd, cmnd, cmnd_size); + if (copy_sense) { int gfp_mask = GFP_ATOMIC; @@ -610,8 +614,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd) static unsigned char generic_sense[6] = {REQUEST_SENSE, 0, 0, 0, 252, 0}; - memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); - return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1); + return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1); } /** @@ -736,10 +739,7 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd) int retry_cnt = 1, rtn; retry_tur: - memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); - - - rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0); + rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", __FUNCTION__, scmd, rtn)); @@ -839,8 +839,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd) if (scmd->device->allow_restart) { int rtn; - memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); - rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0); + rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, + START_UNIT_TIMEOUT, 0); if (rtn == SUCCESS) return 0; } diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index a89c4115cfba..32293f451669 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, sshdr.asc, sshdr.ascq); break; case NOT_READY: /* This happens if there is no disc in 
drive */ - if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { - printk(KERN_INFO "Device not ready. Make sure" - " there is a disc in the drive.\n"); + if (sdev->removable) break; - } case UNIT_ATTENTION: if (sdev->removable) { sdev->changed = 1; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 7b9e8fa1a4e0..2ecd14188574 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -34,6 +34,7 @@ #define ISCSI_SESSION_ATTRS 11 #define ISCSI_CONN_ATTRS 11 #define ISCSI_HOST_ATTRS 0 +#define ISCSI_TRANSPORT_VERSION "1.1-646" struct iscsi_internal { int daemon_pid; @@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone) } static int -iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb) +iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp) { unsigned long flags; int rc; skb_get(skb); - rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL); + rc = netlink_broadcast(nls, skb, 0, 1, gfp); if (rc < 0) { mempool_free(skb, zone->pool); printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); @@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); - iscsi_broadcast_skb(conn->z_error, skb); + iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC); dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", error); @@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn) * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(conn->z_pdu, skb); + rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); if (rc < 0) dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " "session destruction event. Check iscsi daemon\n"); @@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn) * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(conn->z_pdu, skb); + rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); if (rc < 0) dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " "session creation event. Check iscsi daemon\n"); @@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void) { int err; + printk(KERN_INFO "Loading iSCSI transport class v%s.", + ISCSI_TRANSPORT_VERSION); + err = class_register(&iscsi_transport_class); if (err) return err; @@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie , " "Alex Aizman "); MODULE_DESCRIPTION("iSCSI Transport Interface"); MODULE_LICENSE("GPL"); +MODULE_VERSION(ISCSI_TRANSPORT_VERSION); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 65eef33846bb..34f9343ed0af 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -18,8 +18,8 @@ * */ -static int sg_version_num = 30533; /* 2 digits for each component */ -#define SG_VERSION_STR "3.5.33" +static int sg_version_num = 30534; /* 2 digits for each component */ +#define SG_VERSION_STR "3.5.34" /* * D. P. 
Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: @@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */ #ifdef CONFIG_SCSI_PROC_FS #include -static char *sg_version_date = "20050908"; +static char *sg_version_date = "20060818"; static int sg_proc_init(void); static void sg_proc_cleanup(void); @@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) len = vma->vm_end - sa; len = (len < sg->length) ? len : sg->length; if (offset < len) { - page = sg->page; + page = virt_to_page(page_address(sg->page) + offset); get_page(page); /* increment page count */ break; } diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 8c505076c0eb..739d3ef46a40 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = { { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index dc673e1b6fd9..cfe20f730436 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c @@ -886,6 +886,15 @@ static int sunsab_console_setup(struct console *con, char *options) unsigned long flags; unsigned int baud, quot; + /* + * The console framework calls us for each and every port + * registered. Defer the console setup until the requested + * port has been properly discovered. A bit of a hack, + * though... 
+ */ + if (up->port.type != PORT_SUNSAB) + return -1; + printk("Console: ttyS%d (SAB82532)\n", (sunsab_reg.minor - 64) + con->index); diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index 47bc3d57e019..d34f336d53d8 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c @@ -1146,6 +1146,9 @@ static int __init sunzilog_console_setup(struct console *con, char *options) unsigned long flags; int baud, brg; + if (up->port.type != PORT_SUNZILOG) + return -1; + printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n", (sunzilog_reg.minor - 64) + con->index, con->index); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 2ee742d40c43..005043197527 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -24,7 +24,7 @@ config USB_ARCH_HAS_OHCI default y if ARCH_S3C2410 default y if PXA27x default y if ARCH_EP93XX - default y if ARCH_AT91RM9200 + default y if (ARCH_AT91RM9200 || ARCH_AT91SAM9261) # PPC: default y if STB03xxx default y if PPC_MPC52xx diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index f7bdd94b3aa8..218621b9958e 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -517,19 +517,19 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, unsig static struct usb_device *usbdev_lookup_minor(int minor) { - struct device *device; - struct usb_device *udev = NULL; + struct class_device *class_dev; + struct usb_device *dev = NULL; down(&usb_device_class->sem); - list_for_each_entry(device, &usb_device_class->devices, node) { - if (device->devt == MKDEV(USB_DEVICE_MAJOR, minor)) { - udev = device->platform_data; + list_for_each_entry(class_dev, &usb_device_class->children, node) { + if (class_dev->devt == MKDEV(USB_DEVICE_MAJOR, minor)) { + dev = class_dev->class_data; break; } } up(&usb_device_class->sem); - return udev; + return dev; }; /* @@ -1580,16 +1580,16 @@ static void usbdev_add(struct usb_device *dev) { int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1); - dev->usbfs_dev = device_create(usb_device_class, &dev->dev, - MKDEV(USB_DEVICE_MAJOR, minor), + dev->class_dev = class_device_create(usb_device_class, NULL, + MKDEV(USB_DEVICE_MAJOR, minor), &dev->dev, "usbdev%d.%d", dev->bus->busnum, dev->devnum); - dev->usbfs_dev->platform_data = dev; + dev->class_dev->class_data = dev; } static void usbdev_remove(struct usb_device *dev) { - device_unregister(dev->usbfs_dev); + class_device_unregister(dev->class_dev); } static int usbdev_notify(struct notifier_block *self, unsigned long action, diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index abee0f5b6a66..8de4f8c99d61 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -194,13 +194,14 @@ int usb_register_dev(struct usb_interface *intf, ++temp; else temp = name; - intf->usb_dev = device_create(usb_class->class, &intf->dev, - MKDEV(USB_MAJOR, minor), "%s", temp); - if (IS_ERR(intf->usb_dev)) { + intf->class_dev = class_device_create(usb_class->class, NULL, + MKDEV(USB_MAJOR, minor), + &intf->dev, "%s", temp); + if (IS_ERR(intf->class_dev)) { spin_lock (&minor_lock); usb_minors[intf->minor] = NULL; spin_unlock (&minor_lock); - retval = PTR_ERR(intf->usb_dev); + retval = PTR_ERR(intf->class_dev); } exit: return retval; @@ -241,8 +242,8 @@ void usb_deregister_dev(struct usb_interface *intf, spin_unlock (&minor_lock); snprintf(name, BUS_ID_SIZE, class_driver->name, intf->minor - minor_base); - device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); - intf->usb_dev = NULL; + 
class_device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); + intf->class_dev = NULL; intf->minor = -1; destroy_usb_class(); } diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 363b2ad74ae6..1a32d96774b4 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -207,7 +207,7 @@ config USB_AT91 config USB_GADGET_DUMMY_HCD boolean "Dummy HCD (DEVELOPMENT)" - depends on USB && EXPERIMENTAL + depends on (USB=y || (USB=m && USB_GADGET=m)) && EXPERIMENTAL select USB_GADGET_DUALSPEED help This host controller driver emulates USB, looping all data transfer diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 1c459ff037ce..cfebca05ead5 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c @@ -57,19 +57,23 @@ /* * This controller is simple and PIO-only. It's used in many AT91-series - * ARMv4T controllers, including the at91rm9200 (arm920T, with MMU), - * at91sam9261 (arm926ejs, with MMU), and several no-mmu versions. + * full speed USB controllers, including the at91rm9200 (arm920T, with MMU), + * at91sam926x (arm926ejs, with MMU), and several no-mmu versions. * * This driver expects the board has been wired with two GPIOs suppporting * a VBUS sensing IRQ, and a D+ pullup. (They may be omitted, but the - * testing hasn't covered such cases.) The pullup is most important; it + * testing hasn't covered such cases.) + * + * The pullup is most important (so it's integrated on sam926x parts). It * provides software control over whether the host enumerates the device. + * * The VBUS sensing helps during enumeration, and allows both USB clocks * (and the transceiver) to stay gated off until they're necessary, saving - * power. During USB suspend, the 48 MHz clock is gated off. + * power. During USB suspend, the 48 MHz clock is gated off in hardware; + * it may also be gated off by software during some Linux sleep states. */ -#define DRIVER_VERSION "8 March 2005" +#define DRIVER_VERSION "3 May 2006" static const char driver_name [] = "at91_udc"; static const char ep0name[] = "ep0"; @@ -316,9 +320,15 @@ static void done(struct at91_ep *ep, struct at91_request *req, int status) * * There are also state bits like FORCESTALL, EPEDS, DIR, and EPTYPE * that shouldn't normally be changed. + * + * NOTE at91sam9260 docs mention synch between UDPCK and MCK clock domains, + * implying a need to wait for one write to complete (test relevant bits) + * before starting the next write. This shouldn't be an issue given how + * infrequently we write, except maybe for write-then-read idioms. */ #define SET_FX (AT91_UDP_TXPKTRDY) -#define CLR_FX (RX_DATA_READY | AT91_UDP_RXSETUP | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP) +#define CLR_FX (RX_DATA_READY | AT91_UDP_RXSETUP \ + | AT91_UDP_STALLSENT | AT91_UDP_TXCOMP) /* pull OUT packet data from the endpoint's fifo */ static int read_fifo (struct at91_ep *ep, struct at91_request *req) @@ -472,7 +482,8 @@ static void nuke(struct at91_ep *ep, int status) /*-------------------------------------------------------------------------*/ -static int at91_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) +static int at91_ep_enable(struct usb_ep *_ep, + const struct usb_endpoint_descriptor *desc) { struct at91_ep *ep = container_of(_ep, struct at91_ep, ep); struct at91_udc *dev = ep->udc; @@ -582,11 +593,12 @@ static int at91_ep_disable (struct usb_ep * _ep) * interesting for request or buffer allocation. 
*/ -static struct usb_request *at91_ep_alloc_request (struct usb_ep *_ep, unsigned int gfp_flags) +static struct usb_request * +at91_ep_alloc_request(struct usb_ep *_ep, unsigned int gfp_flags) { struct at91_request *req; - req = kcalloc(1, sizeof (struct at91_request), SLAB_KERNEL); + req = kcalloc(1, sizeof (struct at91_request), gfp_flags); if (!req) return NULL; @@ -862,6 +874,7 @@ static void stop_activity(struct at91_udc *udc) if (udc->gadget.speed == USB_SPEED_UNKNOWN) driver = NULL; udc->gadget.speed = USB_SPEED_UNKNOWN; + udc->suspended = 0; for (i = 0; i < NUM_ENDPOINTS; i++) { struct at91_ep *ep = &udc->ep[i]; @@ -889,8 +902,8 @@ static void clk_off(struct at91_udc *udc) return; udc->clocked = 0; udc->gadget.speed = USB_SPEED_UNKNOWN; - clk_disable(udc->iclk); clk_disable(udc->fclk); + clk_disable(udc->iclk); } /* @@ -911,9 +924,6 @@ static void pullup(struct at91_udc *udc, int is_on) at91_udp_write(AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); at91_set_gpio_value(udc->board.pullup_pin, 0); clk_off(udc); - - // REVISIT: with transceiver disabled, will D- float - // so that a host would falsely detect a device? } } @@ -1290,7 +1300,8 @@ static void handle_ep0(struct at91_udc *udc) if (udc->wait_for_addr_ack) { u32 tmp; - at91_udp_write(AT91_UDP_FADDR, AT91_UDP_FEN | udc->addr); + at91_udp_write(AT91_UDP_FADDR, + AT91_UDP_FEN | udc->addr); tmp = at91_udp_read(AT91_UDP_GLB_STAT); tmp &= ~AT91_UDP_FADDEN; if (udc->addr) @@ -1361,9 +1372,10 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc, struct pt_regs *r) u32 rescans = 5; while (rescans--) { - u32 status = at91_udp_read(AT91_UDP_ISR); + u32 status; - status &= at91_udp_read(AT91_UDP_IMR); + status = at91_udp_read(AT91_UDP_ISR) + & at91_udp_read(AT91_UDP_IMR); if (!status) break; @@ -1379,18 +1391,17 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc, struct pt_regs *r) stop_activity(udc); /* enable ep0 */ - at91_udp_write(AT91_UDP_CSR(0), AT91_UDP_EPEDS | AT91_UDP_EPTYPE_CTRL); + at91_udp_write(AT91_UDP_CSR(0), + AT91_UDP_EPEDS | AT91_UDP_EPTYPE_CTRL); udc->gadget.speed = USB_SPEED_FULL; udc->suspended = 0; at91_udp_write(AT91_UDP_IER, AT91_UDP_EP(0)); /* * NOTE: this driver keeps clocks off unless the - * USB host is present. That saves power, and also - * eliminates IRQs (reset, resume, suspend) that can - * otherwise flood from the controller. If your - * board doesn't support VBUS detection, suspend and - * resume irq logic may need more attention... + * USB host is present. That saves power, but for + * boards that don't support VBUS detection, both + * clocks need to be active most of the time. 
*/ /* host initiated suspend (3+ms bus idle) */ @@ -1452,13 +1463,19 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc, struct pt_regs *r) /*-------------------------------------------------------------------------*/ +static void nop_release(struct device *dev) +{ + /* nothing to free */ +} + static struct at91_udc controller = { .gadget = { - .ops = &at91_udc_ops, - .ep0 = &controller.ep[0].ep, - .name = driver_name, - .dev = { - .bus_id = "gadget" + .ops = &at91_udc_ops, + .ep0 = &controller.ep[0].ep, + .name = driver_name, + .dev = { + .bus_id = "gadget", + .release = nop_release, } }, .ep[0] = { @@ -1468,7 +1485,8 @@ static struct at91_udc controller = { }, .udc = &controller, .maxpacket = 8, - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(0)), + .creg = (void __iomem *)(AT91_VA_BASE_UDP + + AT91_UDP_CSR(0)), .int_mask = 1 << 0, }, .ep[1] = { @@ -1479,7 +1497,8 @@ static struct at91_udc controller = { .udc = &controller, .is_pingpong = 1, .maxpacket = 64, - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(1)), + .creg = (void __iomem *)(AT91_VA_BASE_UDP + + AT91_UDP_CSR(1)), .int_mask = 1 << 1, }, .ep[2] = { @@ -1490,7 +1509,8 @@ static struct at91_udc controller = { .udc = &controller, .is_pingpong = 1, .maxpacket = 64, - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(2)), + .creg = (void __iomem *)(AT91_VA_BASE_UDP + + AT91_UDP_CSR(2)), .int_mask = 1 << 2, }, .ep[3] = { @@ -1501,7 +1521,8 @@ static struct at91_udc controller = { }, .udc = &controller, .maxpacket = 8, - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(3)), + .creg = (void __iomem *)(AT91_VA_BASE_UDP + + AT91_UDP_CSR(3)), .int_mask = 1 << 3, }, .ep[4] = { @@ -1512,7 +1533,8 @@ static struct at91_udc controller = { .udc = &controller, .is_pingpong = 1, .maxpacket = 256, - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(4)), + .creg = (void __iomem *)(AT91_VA_BASE_UDP + + AT91_UDP_CSR(4)), .int_mask = 1 << 4, }, .ep[5] = { @@ -1523,10 +1545,11 @@ static struct at91_udc controller = { .udc = &controller, .is_pingpong = 1, .maxpacket = 256, - .creg = (void __iomem *)(AT91_VA_BASE_UDP + AT91_UDP_CSR(5)), + .creg = (void __iomem *)(AT91_VA_BASE_UDP + + AT91_UDP_CSR(5)), .int_mask = 1 << 5, }, - /* ep6 and ep7 are also reserved */ + /* ep6 and ep7 are also reserved (custom silicon might use them) */ }; static irqreturn_t at91_vbus_irq(int irq, void *_udc, struct pt_regs *r) @@ -1593,6 +1616,7 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver) local_irq_disable(); udc->enabled = 0; + at91_udp_write(AT91_UDP_IDR, ~0); pullup(udc, 0); local_irq_enable(); @@ -1624,6 +1648,16 @@ static int __devinit at91udc_probe(struct platform_device *pdev) return -ENODEV; } + if (pdev->num_resources != 2) { + DBG("invalid num_resources"); + return -ENODEV; + } + if ((pdev->resource[0].flags != IORESOURCE_MEM) + || (pdev->resource[1].flags != IORESOURCE_IRQ)) { + DBG("invalid resource type"); + return -ENODEV; + } + if (!request_mem_region(AT91_BASE_UDP, SZ_16K, driver_name)) { DBG("someone's using UDC memory\n"); return -EBUSY; @@ -1649,19 +1683,26 @@ static int __devinit at91udc_probe(struct platform_device *pdev) if (retval < 0) goto fail0; - /* disable everything until there's a gadget driver and vbus */ - pullup(udc, 0); + /* don't do anything until we have both gadget driver and VBUS */ + clk_enable(udc->iclk); + at91_udp_write(AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); + at91_udp_write(AT91_UDP_IDR, 0xffffffff); + clk_disable(udc->iclk); /* request UDC and maybe VBUS irqs 
*/ - if (request_irq(AT91_ID_UDP, at91_udc_irq, IRQF_DISABLED, driver_name, udc)) { - DBG("request irq %d failed\n", AT91_ID_UDP); + udc->udp_irq = platform_get_irq(pdev, 0); + if (request_irq(udc->udp_irq, at91_udc_irq, + IRQF_DISABLED, driver_name, udc)) { + DBG("request irq %d failed\n", udc->udp_irq); retval = -EBUSY; goto fail1; } if (udc->board.vbus_pin > 0) { - if (request_irq(udc->board.vbus_pin, at91_vbus_irq, IRQF_DISABLED, driver_name, udc)) { - DBG("request vbus irq %d failed\n", udc->board.vbus_pin); - free_irq(AT91_ID_UDP, udc); + if (request_irq(udc->board.vbus_pin, at91_vbus_irq, + IRQF_DISABLED, driver_name, udc)) { + DBG("request vbus irq %d failed\n", + udc->board.vbus_pin); + free_irq(udc->udp_irq, udc); retval = -EBUSY; goto fail1; } @@ -1670,6 +1711,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev) udc->vbus = 1; } dev_set_drvdata(dev, udc); + device_init_wakeup(dev, 1); create_debug_file(udc); INFO("%s version %s\n", driver_name, DRIVER_VERSION); @@ -1678,14 +1720,14 @@ static int __devinit at91udc_probe(struct platform_device *pdev) fail1: device_unregister(&udc->gadget.dev); fail0: - release_mem_region(AT91_VA_BASE_UDP, SZ_16K); + release_mem_region(AT91_BASE_UDP, SZ_16K); DBG("%s probe failed, %d\n", driver_name, retval); return retval; } -static int __devexit at91udc_remove(struct platform_device *dev) +static int __devexit at91udc_remove(struct platform_device *pdev) { - struct at91_udc *udc = platform_get_drvdata(dev); + struct at91_udc *udc = platform_get_drvdata(pdev); DBG("remove\n"); @@ -1694,10 +1736,11 @@ static int __devexit at91udc_remove(struct platform_device *dev) if (udc->driver != 0) usb_gadget_unregister_driver(udc->driver); + device_init_wakeup(&pdev->dev, 0); remove_debug_file(udc); if (udc->board.vbus_pin > 0) free_irq(udc->board.vbus_pin, udc); - free_irq(AT91_ID_UDP, udc); + free_irq(udc->udp_irq, udc); device_unregister(&udc->gadget.dev); release_mem_region(AT91_BASE_UDP, SZ_16K); @@ -1708,31 +1751,36 @@ static int __devexit at91udc_remove(struct platform_device *dev) } #ifdef CONFIG_PM -static int at91udc_suspend(struct platform_device *dev, pm_message_t mesg) +static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg) { - struct at91_udc *udc = platform_get_drvdata(dev); + struct at91_udc *udc = platform_get_drvdata(pdev); + int wake = udc->driver && device_may_wakeup(&pdev->dev); - /* - * The "safe" suspend transitions are opportunistic ... e.g. when - * the USB link is suspended (48MHz clock autogated off), or when - * it's disconnected (programmatically gated off, elsewhere). - * Then we can suspend, and the chip can enter slow clock mode. - * - * The problem case is some component (user mode?) suspending this - * device while it's active, with the 48 MHz clock in use. There - * are two basic approaches: (a) veto suspend levels involving slow - * clock mode, (b) disconnect, so 48 MHz will no longer be in use - * and we can enter slow clock mode. This uses (b) for now, since - * it's simplest until AT91 PM exists and supports the other option. + /* Unless we can act normally to the host (letting it wake us up + * whenever it has work for us) force disconnect. Wakeup requires + * PLLB for USB events (signaling for reset, wakeup, or incoming + * tokens) and VBUS irqs (on systems which support them). 
*/ - if (udc->vbus && !udc->suspended) + if ((!udc->suspended && udc->addr) + || !wake + || at91_suspend_entering_slow_clock()) { pullup(udc, 0); + disable_irq_wake(udc->udp_irq); + } else + enable_irq_wake(udc->udp_irq); + + if (udc->board.vbus_pin > 0) { + if (wake) + enable_irq_wake(udc->board.vbus_pin); + else + disable_irq_wake(udc->board.vbus_pin); + } return 0; } -static int at91udc_resume(struct platform_device *dev) +static int at91udc_resume(struct platform_device *pdev) { - struct at91_udc *udc = platform_get_drvdata(dev); + struct at91_udc *udc = platform_get_drvdata(pdev); /* maybe reconnect to host; if so, clocks on */ pullup(udc, 1); @@ -1748,7 +1796,7 @@ static struct platform_driver at91_udc = { .remove = __devexit_p(at91udc_remove), .shutdown = at91udc_shutdown, .suspend = at91udc_suspend, - .resume = at91udc_resume, + .resume = at91udc_resume, .driver = { .name = (char *) driver_name, .owner = THIS_MODULE, @@ -1767,6 +1815,6 @@ static void __devexit udc_exit_module(void) } module_exit(udc_exit_module); -MODULE_DESCRIPTION("AT91RM9200 udc driver"); +MODULE_DESCRIPTION("AT91 udc driver"); MODULE_AUTHOR("Thomas Rathbone, David Brownell"); MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/at91_udc.h b/drivers/usb/gadget/at91_udc.h index 5a4799cedd19..882af42e86cc 100644 --- a/drivers/usb/gadget/at91_udc.h +++ b/drivers/usb/gadget/at91_udc.h @@ -141,6 +141,7 @@ struct at91_udc { struct clk *iclk, *fclk; struct platform_device *pdev; struct proc_dir_entry *pde; + int udp_irq; }; static inline struct at91_udc *to_udc(struct usb_gadget *g) diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index 4be47195bd38..7d1c22c34957 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c @@ -609,7 +609,8 @@ static int dummy_dequeue (struct usb_ep *_ep, struct usb_request *_req) if (!dum->driver) return -ESHUTDOWN; - spin_lock_irqsave (&dum->lock, flags); + local_irq_save (flags); + spin_lock (&dum->lock); list_for_each_entry (req, &ep->queue, queue) { if (&req->req == _req) { list_del_init (&req->queue); @@ -618,7 +619,7 @@ static int dummy_dequeue (struct usb_ep *_ep, struct usb_request *_req) break; } } - spin_unlock_irqrestore (&dum->lock, flags); + spin_unlock (&dum->lock); if (retval == 0) { dev_dbg (udc_dev(dum), @@ -626,6 +627,7 @@ static int dummy_dequeue (struct usb_ep *_ep, struct usb_request *_req) req, _ep->name, _req->length, _req->buf); _req->complete (_ep, _req); } + local_irq_restore (flags); return retval; } diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 85b0b4ad4c16..d63177a8eaea 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -892,7 +892,7 @@ MODULE_LICENSE ("GPL"); #define PCI_DRIVER ehci_pci_driver #endif -#ifdef CONFIG_PPC_83xx +#ifdef CONFIG_MPC834x #include "ehci-fsl.c" #define PLATFORM_DRIVER ehci_fsl_driver #endif diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index cdbafb710000..85cc059705a6 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 SAN People (Pty) Ltd. * Copyright (C) 2005 Thibaut VARENE * - * AT91RM9200 Bus Glue + * AT91 Bus Glue * * Based on fragments of 2.4 driver by Rick Bronson. * Based on ohci-omap.c @@ -19,12 +19,13 @@ #include #include -#ifndef CONFIG_ARCH_AT91RM9200 -#error "CONFIG_ARCH_AT91RM9200 must be defined." +#ifndef CONFIG_ARCH_AT91 +#error "CONFIG_ARCH_AT91 must be defined." 
#endif /* interface and function clocks */ static struct clk *iclk, *fclk; +static int clocked; extern int usb_disabled(void); @@ -35,13 +36,14 @@ static void at91_start_hc(struct platform_device *pdev) struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ohci_regs __iomem *regs = hcd->regs; - dev_dbg(&pdev->dev, "starting AT91RM9200 OHCI USB Controller\n"); + dev_dbg(&pdev->dev, "start\n"); /* * Start the USB clocks. */ clk_enable(iclk); clk_enable(fclk); + clocked = 1; /* * The USB host controller must remain in reset. @@ -54,7 +56,7 @@ static void at91_stop_hc(struct platform_device *pdev) struct usb_hcd *hcd = platform_get_drvdata(pdev); struct ohci_regs __iomem *regs = hcd->regs; - dev_dbg(&pdev->dev, "stopping AT91RM9200 OHCI USB Controller\n"); + dev_dbg(&pdev->dev, "stop\n"); /* * Put the USB host controller into reset. @@ -66,6 +68,7 @@ static void at91_stop_hc(struct platform_device *pdev) */ clk_disable(fclk); clk_disable(iclk); + clocked = 0; } @@ -78,14 +81,15 @@ static int usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *); /** - * usb_hcd_at91_probe - initialize AT91RM9200-based HCDs + * usb_hcd_at91_probe - initialize AT91-based HCDs * Context: !in_interrupt() * * Allocates basic resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. */ -int usb_hcd_at91_probe (const struct hc_driver *driver, struct platform_device *pdev) +static int usb_hcd_at91_probe(const struct hc_driver *driver, + struct platform_device *pdev) { int retval; struct usb_hcd *hcd = NULL; @@ -95,12 +99,13 @@ int usb_hcd_at91_probe (const struct hc_driver *driver, struct platform_device * return -ENODEV; } - if ((pdev->resource[0].flags != IORESOURCE_MEM) || (pdev->resource[1].flags != IORESOURCE_IRQ)) { + if ((pdev->resource[0].flags != IORESOURCE_MEM) + || (pdev->resource[1].flags != IORESOURCE_IRQ)) { pr_debug("hcd probe: invalid resource type\n"); return -ENODEV; } - hcd = usb_create_hcd(driver, &pdev->dev, "at91rm9200"); + hcd = usb_create_hcd(driver, &pdev->dev, "at91"); if (!hcd) return -ENOMEM; hcd->rsrc_start = pdev->resource[0].start; @@ -149,21 +154,23 @@ int usb_hcd_at91_probe (const struct hc_driver *driver, struct platform_device * /* may be called with controller, bus, and devices active */ /** - * usb_hcd_at91_remove - shutdown processing for AT91RM9200-based HCDs + * usb_hcd_at91_remove - shutdown processing for AT91-based HCDs * @dev: USB Host Controller being removed * Context: !in_interrupt() * * Reverses the effect of usb_hcd_at91_probe(), first invoking * the HCD's stop() method. It is always called from a thread - * context, normally "rmmod", "apmd", or something similar. + * context, "rmmod" or something similar. 
* */ -static int usb_hcd_at91_remove (struct usb_hcd *hcd, struct platform_device *pdev) +static int usb_hcd_at91_remove(struct usb_hcd *hcd, + struct platform_device *pdev) { usb_remove_hcd(hcd); at91_stop_hc(pdev); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + disable_irq_wake(hcd->irq); clk_put(fclk); clk_put(iclk); @@ -178,19 +185,21 @@ static int usb_hcd_at91_remove (struct usb_hcd *hcd, struct platform_device *pde static int __devinit ohci_at91_start (struct usb_hcd *hcd) { -// struct at91_ohci_data *board = hcd->self.controller->platform_data; + struct at91_usbh_data *board = hcd->self.controller->platform_data; struct ohci_hcd *ohci = hcd_to_ohci (hcd); + struct usb_device *root = hcd->self.root_hub; int ret; if ((ret = ohci_init(ohci)) < 0) return ret; + root->maxchild = board->ports; + if ((ret = ohci_run(ohci)) < 0) { err("can't start %s", hcd->self.bus_name); ohci_stop(hcd); return ret; } -// hcd->self.root_hub->maxchild = board->ports; return 0; } @@ -198,7 +207,7 @@ ohci_at91_start (struct usb_hcd *hcd) static const struct hc_driver ohci_at91_hc_driver = { .description = hcd_name, - .product_desc = "AT91RM9200 OHCI", + .product_desc = "AT91 OHCI", .hcd_priv_size = sizeof(struct ohci_hcd), /* @@ -240,33 +249,54 @@ static const struct hc_driver ohci_at91_hc_driver = { /*-------------------------------------------------------------------------*/ -static int ohci_hcd_at91_drv_probe(struct platform_device *dev) +static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) { - return usb_hcd_at91_probe(&ohci_at91_hc_driver, dev); + device_init_wakeup(&pdev->dev, 1); + return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); } -static int ohci_hcd_at91_drv_remove(struct platform_device *dev) +static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) { - return usb_hcd_at91_remove(platform_get_drvdata(dev), dev); + device_init_wakeup(&pdev->dev, 0); + return usb_hcd_at91_remove(platform_get_drvdata(pdev), pdev); } #ifdef CONFIG_PM -/* REVISIT suspend/resume look "too" simple here */ - static int -ohci_hcd_at91_drv_suspend(struct platform_device *dev, pm_message_t mesg) +ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg) { - clk_disable(fclk); - clk_disable(iclk); + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + + if (device_may_wakeup(&pdev->dev)) + enable_irq_wake(hcd->irq); + else + disable_irq_wake(hcd->irq); + + /* + * The integrated transceivers seem unable to notice disconnect, + * reconnect, or wakeup without the 48 MHz clock active. so for + * correctness, always discard connection state (using reset). + * + * REVISIT: some boards will be able to turn VBUS off... 
+ */ + if (at91_suspend_entering_slow_clock()) { + ohci_usb_reset (ohci); + clk_disable(fclk); + clk_disable(iclk); + clocked = 0; + } return 0; } -static int ohci_hcd_at91_drv_resume(struct platform_device *dev) +static int ohci_hcd_at91_drv_resume(struct platform_device *pdev) { - clk_enable(iclk); - clk_enable(fclk); + if (!clocked) { + clk_enable(iclk); + clk_enable(fclk); + } return 0; } @@ -275,7 +305,7 @@ static int ohci_hcd_at91_drv_resume(struct platform_device *dev) #define ohci_hcd_at91_drv_resume NULL #endif -MODULE_ALIAS("at91rm9200-ohci"); +MODULE_ALIAS("at91_ohci"); static struct platform_driver ohci_hcd_at91_driver = { .probe = ohci_hcd_at91_drv_probe, @@ -283,7 +313,7 @@ static struct platform_driver ohci_hcd_at91_driver = { .suspend = ohci_hcd_at91_drv_suspend, .resume = ohci_hcd_at91_drv_resume, .driver = { - .name = "at91rm9200-ohci", + .name = "at91_ohci", .owner = THIS_MODULE, }, }; diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index 822914e2f43b..f7a975d5db09 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c @@ -110,7 +110,6 @@ static void au1xxx_start_ohc(struct platform_device *dev) printk(KERN_DEBUG __FILE__ ": Clock to USB host has been enabled \n"); -#endif } static void au1xxx_stop_ohc(struct platform_device *dev) diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index afef5ac35b4a..94d8cf4b36c1 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -913,7 +913,7 @@ MODULE_LICENSE ("GPL"); #include "ohci-ppc-soc.c" #endif -#ifdef CONFIG_ARCH_AT91RM9200 +#if defined(CONFIG_ARCH_AT91RM9200) || defined(CONFIG_ARCH_AT91SAM9261) #include "ohci-at91.c" #endif @@ -927,6 +927,7 @@ MODULE_LICENSE ("GPL"); || defined (CONFIG_SOC_AU1X00) \ || defined (CONFIG_USB_OHCI_HCD_PPC_SOC) \ || defined (CONFIG_ARCH_AT91RM9200) \ + || defined (CONFIG_ARCH_AT91SAM9261) \ ) #error "missing bus glue for ohci-hcd" #endif diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index c9d72ac0a1d7..66c3f61bc9d1 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -943,7 +943,9 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) /* We received a short packet */ if (urb->transfer_flags & URB_SHORT_NOT_OK) ret = -EREMOTEIO; - else if (ctrlstat & TD_CTRL_SPD) + + /* Fixup needed only if this isn't the URB's last TD */ + else if (&td->list != urbp->td_list.prev) ret = 1; } diff --git a/drivers/usb/input/appletouch.c b/drivers/usb/input/appletouch.c index 9e3f13903371..044faa07e297 100644 --- a/drivers/usb/input/appletouch.c +++ b/drivers/usb/input/appletouch.c @@ -597,9 +597,9 @@ static void atp_disconnect(struct usb_interface *iface) if (dev) { usb_kill_urb(dev->urb); input_unregister_device(dev->input); - usb_free_urb(dev->urb); usb_buffer_free(dev->udev, dev->datalen, dev->data, dev->urb->transfer_dma); + usb_free_urb(dev->urb); kfree(dev); } printk(KERN_INFO "input: appletouch disconnected\n"); diff --git a/drivers/usb/input/ati_remote.c b/drivers/usb/input/ati_remote.c index 05d2d6012eb2..3719fcb04b8f 100644 --- a/drivers/usb/input/ati_remote.c +++ b/drivers/usb/input/ati_remote.c @@ -111,14 +111,28 @@ #define NAME_BUFSIZE 80 /* size of product name, path buffers */ #define DATA_BUFSIZE 63 /* size of URB data buffers */ +/* + * Duplicate event filtering time. + * Sequential, identical KIND_FILTERED inputs with less than + * FILTER_TIME milliseconds between them are considered as repeat + * events. 
The hardware generates 5 events for the first keypress + * and we have to take this into account for an accurate repeat + * behaviour. + */ +#define FILTER_TIME 60 /* msec */ + static unsigned long channel_mask; -module_param(channel_mask, ulong, 0444); +module_param(channel_mask, ulong, 0644); MODULE_PARM_DESC(channel_mask, "Bitmask of remote control channels to ignore"); static int debug; -module_param(debug, int, 0444); +module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Enable extra debug messages and information"); +static int repeat_filter = FILTER_TIME; +module_param(repeat_filter, int, 0644); +MODULE_PARM_DESC(repeat_filter, "Repeat filter time, default = 60 msec"); + #define dbginfo(dev, format, arg...) do { if (debug) dev_info(dev , format , ## arg); } while (0) #undef err #define err(format, arg...) printk(KERN_ERR format , ## arg) @@ -143,19 +157,6 @@ MODULE_DEVICE_TABLE(usb, ati_remote_table); static char init1[] = { 0x01, 0x00, 0x20, 0x14 }; static char init2[] = { 0x01, 0x00, 0x20, 0x14, 0x20, 0x20, 0x20 }; -/* Acceleration curve for directional control pad */ -static const char accel[] = { 1, 2, 4, 6, 9, 13, 20 }; - -/* Duplicate event filtering time. - * Sequential, identical KIND_FILTERED inputs with less than - * FILTER_TIME jiffies between them are considered as repeat - * events. The hardware generates 5 events for the first keypress - * and we have to take this into account for an accurate repeat - * behaviour. - * (HZ / 20) == 50 ms and works well for me. - */ -#define FILTER_TIME (HZ / 20) - struct ati_remote { struct input_dev *idev; struct usb_device *udev; @@ -412,6 +413,43 @@ static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2) return -1; } +/* + * ati_remote_compute_accel + * + * Implements acceleration curve for directional control pad + * If elapsed time since last event is > 1/4 second, user "stopped", + * so reset acceleration. Otherwise, user is probably holding the control + * pad down, so we increase acceleration, ramping up over two seconds to + * a maximum speed. + */ +static int ati_remote_compute_accel(struct ati_remote *ati_remote) +{ + static const char accel[] = { 1, 2, 4, 6, 9, 13, 20 }; + unsigned long now = jiffies; + int acc; + + if (time_after(now, ati_remote->old_jiffies + msecs_to_jiffies(250))) { + acc = 1; + ati_remote->acc_jiffies = now; + } + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(125))) + acc = accel[0]; + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(250))) + acc = accel[1]; + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(500))) + acc = accel[2]; + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(1000))) + acc = accel[3]; + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(1500))) + acc = accel[4]; + else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(2000))) + acc = accel[5]; + else + acc = accel[6]; + + return acc; +} + /* * ati_remote_report_input */ @@ -465,9 +503,9 @@ static void ati_remote_input_report(struct urb *urb, struct pt_regs *regs) if (ati_remote_tbl[index].kind == KIND_FILTERED) { /* Filter duplicate events which happen "too close" together. 
*/ - if ((ati_remote->old_data[0] == data[1]) && - (ati_remote->old_data[1] == data[2]) && - time_before(jiffies, ati_remote->old_jiffies + FILTER_TIME)) { + if (ati_remote->old_data[0] == data[1] && + ati_remote->old_data[1] == data[2] && + time_before(jiffies, ati_remote->old_jiffies + msecs_to_jiffies(repeat_filter))) { ati_remote->repeat_count++; } else { ati_remote->repeat_count = 0; @@ -477,75 +515,61 @@ static void ati_remote_input_report(struct urb *urb, struct pt_regs *regs) ati_remote->old_data[1] = data[2]; ati_remote->old_jiffies = jiffies; - if ((ati_remote->repeat_count > 0) - && (ati_remote->repeat_count < 5)) + if (ati_remote->repeat_count > 0 && + ati_remote->repeat_count < 5) return; input_regs(dev, regs); input_event(dev, ati_remote_tbl[index].type, ati_remote_tbl[index].code, 1); + input_sync(dev); input_event(dev, ati_remote_tbl[index].type, ati_remote_tbl[index].code, 0); input_sync(dev); - return; - } + } else { - /* - * Other event kinds are from the directional control pad, and have an - * acceleration factor applied to them. Without this acceleration, the - * control pad is mostly unusable. - * - * If elapsed time since last event is > 1/4 second, user "stopped", - * so reset acceleration. Otherwise, user is probably holding the control - * pad down, so we increase acceleration, ramping up over two seconds to - * a maximum speed. The acceleration curve is #defined above. - */ - if (time_after(jiffies, ati_remote->old_jiffies + (HZ >> 2))) { - acc = 1; - ati_remote->acc_jiffies = jiffies; - } - else if (time_before(jiffies, ati_remote->acc_jiffies + (HZ >> 3))) acc = accel[0]; - else if (time_before(jiffies, ati_remote->acc_jiffies + (HZ >> 2))) acc = accel[1]; - else if (time_before(jiffies, ati_remote->acc_jiffies + (HZ >> 1))) acc = accel[2]; - else if (time_before(jiffies, ati_remote->acc_jiffies + HZ)) acc = accel[3]; - else if (time_before(jiffies, ati_remote->acc_jiffies + HZ+(HZ>>1))) acc = accel[4]; - else if (time_before(jiffies, ati_remote->acc_jiffies + (HZ << 1))) acc = accel[5]; - else acc = accel[6]; + /* + * Other event kinds are from the directional control pad, and have an + * acceleration factor applied to them. Without this acceleration, the + * control pad is mostly unusable. 
+ */ + acc = ati_remote_compute_accel(ati_remote); - input_regs(dev, regs); - switch (ati_remote_tbl[index].kind) { - case KIND_ACCEL: - input_event(dev, ati_remote_tbl[index].type, - ati_remote_tbl[index].code, - ati_remote_tbl[index].value * acc); - break; - case KIND_LU: - input_report_rel(dev, REL_X, -acc); - input_report_rel(dev, REL_Y, -acc); - break; - case KIND_RU: - input_report_rel(dev, REL_X, acc); - input_report_rel(dev, REL_Y, -acc); - break; - case KIND_LD: - input_report_rel(dev, REL_X, -acc); - input_report_rel(dev, REL_Y, acc); - break; - case KIND_RD: - input_report_rel(dev, REL_X, acc); - input_report_rel(dev, REL_Y, acc); - break; - default: - dev_dbg(&ati_remote->interface->dev, "ati_remote kind=%d\n", - ati_remote_tbl[index].kind); - } - input_sync(dev); + input_regs(dev, regs); + switch (ati_remote_tbl[index].kind) { + case KIND_ACCEL: + input_event(dev, ati_remote_tbl[index].type, + ati_remote_tbl[index].code, + ati_remote_tbl[index].value * acc); + break; + case KIND_LU: + input_report_rel(dev, REL_X, -acc); + input_report_rel(dev, REL_Y, -acc); + break; + case KIND_RU: + input_report_rel(dev, REL_X, acc); + input_report_rel(dev, REL_Y, -acc); + break; + case KIND_LD: + input_report_rel(dev, REL_X, -acc); + input_report_rel(dev, REL_Y, acc); + break; + case KIND_RD: + input_report_rel(dev, REL_X, acc); + input_report_rel(dev, REL_Y, acc); + break; + default: + dev_dbg(&ati_remote->interface->dev, "ati_remote kind=%d\n", + ati_remote_tbl[index].kind); + } + input_sync(dev); - ati_remote->old_jiffies = jiffies; - ati_remote->old_data[0] = data[1]; - ati_remote->old_data[1] = data[2]; + ati_remote->old_jiffies = jiffies; + ati_remote->old_data[0] = data[1]; + ati_remote->old_data[1] = data[2]; + } } /* diff --git a/drivers/usb/input/hid-input.c b/drivers/usb/input/hid-input.c index 028e1ad89f5d..7208839f2dbf 100644 --- a/drivers/usb/input/hid-input.c +++ b/drivers/usb/input/hid-input.c @@ -607,7 +607,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } - if (usage->hat_min < usage->hat_max || usage->hat_dir) { + if (usage->type == EV_ABS && + (usage->hat_min < usage->hat_max || usage->hat_dir)) { int i; for (i = usage->code; i < usage->code + 2 && i <= max; i++) { input_set_abs_params(input, i, -1, 1, 0, 0); diff --git a/drivers/usb/input/hiddev.c b/drivers/usb/input/hiddev.c index 70477f02cc29..f6b839c257a7 100644 --- a/drivers/usb/input/hiddev.c +++ b/drivers/usb/input/hiddev.c @@ -49,7 +49,7 @@ struct hiddev { int open; wait_queue_head_t wait; struct hid_device *hid; - struct hiddev_list *list; + struct list_head list; }; struct hiddev_list { @@ -59,7 +59,7 @@ struct hiddev_list { unsigned flags; struct fasync_struct *fasync; struct hiddev *hiddev; - struct hiddev_list *next; + struct list_head node; }; static struct hiddev *hiddev_table[HIDDEV_MINORS]; @@ -73,12 +73,15 @@ static struct hiddev *hiddev_table[HIDDEV_MINORS]; static struct hid_report * hiddev_lookup_report(struct hid_device *hid, struct hiddev_report_info *rinfo) { - unsigned flags = rinfo->report_id & ~HID_REPORT_ID_MASK; + unsigned int flags = rinfo->report_id & ~HID_REPORT_ID_MASK; + unsigned int rid = rinfo->report_id & HID_REPORT_ID_MASK; struct hid_report_enum *report_enum; + struct hid_report *report; struct list_head *list; if (rinfo->report_type < HID_REPORT_TYPE_MIN || - rinfo->report_type > HID_REPORT_TYPE_MAX) return NULL; + rinfo->report_type > HID_REPORT_TYPE_MAX) + return NULL; report_enum = hid->report_enum + (rinfo->report_type - 
HID_REPORT_TYPE_MIN); @@ -88,21 +91,25 @@ hiddev_lookup_report(struct hid_device *hid, struct hiddev_report_info *rinfo) break; case HID_REPORT_ID_FIRST: - list = report_enum->report_list.next; - if (list == &report_enum->report_list) + if (list_empty(&report_enum->report_list)) return NULL; - rinfo->report_id = ((struct hid_report *) list)->id; + + list = report_enum->report_list.next; + report = list_entry(list, struct hid_report, list); + rinfo->report_id = report->id; break; case HID_REPORT_ID_NEXT: - list = (struct list_head *) - report_enum->report_id_hash[rinfo->report_id & HID_REPORT_ID_MASK]; - if (list == NULL) + report = report_enum->report_id_hash[rid]; + if (!report) return NULL; - list = list->next; + + list = report->list.next; if (list == &report_enum->report_list) return NULL; - rinfo->report_id = ((struct hid_report *) list)->id; + + report = list_entry(list, struct hid_report, list); + rinfo->report_id = report->id; break; default: @@ -125,12 +132,13 @@ hiddev_lookup_usage(struct hid_device *hid, struct hiddev_usage_ref *uref) struct hid_field *field; if (uref->report_type < HID_REPORT_TYPE_MIN || - uref->report_type > HID_REPORT_TYPE_MAX) return NULL; + uref->report_type > HID_REPORT_TYPE_MAX) + return NULL; report_enum = hid->report_enum + (uref->report_type - HID_REPORT_TYPE_MIN); - list_for_each_entry(report, &report_enum->report_list, list) + list_for_each_entry(report, &report_enum->report_list, list) { for (i = 0; i < report->maxfield; i++) { field = report->field[i]; for (j = 0; j < field->maxusage; j++) { @@ -142,6 +150,7 @@ hiddev_lookup_usage(struct hid_device *hid, struct hiddev_usage_ref *uref) } } } + } return NULL; } @@ -150,9 +159,9 @@ static void hiddev_send_event(struct hid_device *hid, struct hiddev_usage_ref *uref) { struct hiddev *hiddev = hid->hiddev; - struct hiddev_list *list = hiddev->list; + struct hiddev_list *list; - while (list) { + list_for_each_entry(list, &hiddev->list, node) { if (uref->field_index != HID_FIELD_INDEX_NONE || (list->flags & HIDDEV_FLAG_REPORT) != 0) { list->buffer[list->head] = *uref; @@ -160,8 +169,6 @@ static void hiddev_send_event(struct hid_device *hid, (HIDDEV_BUFFER_SIZE - 1); kill_fasync(&list->fasync, SIGIO, POLL_IN); } - - list = list->next; } wake_up_interruptible(&hiddev->wait); @@ -180,7 +187,7 @@ void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, uref.report_type = (type == HID_INPUT_REPORT) ? HID_REPORT_TYPE_INPUT : ((type == HID_OUTPUT_REPORT) ? HID_REPORT_TYPE_OUTPUT : - ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE:0)); + ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE : 0)); uref.report_id = field->report->id; uref.field_index = field->index; uref.usage_index = (usage - field->usage); @@ -200,7 +207,7 @@ void hiddev_report_event(struct hid_device *hid, struct hid_report *report) uref.report_type = (type == HID_INPUT_REPORT) ? HID_REPORT_TYPE_INPUT : ((type == HID_OUTPUT_REPORT) ? HID_REPORT_TYPE_OUTPUT : - ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE:0)); + ((type == HID_FEATURE_REPORT) ? HID_REPORT_TYPE_FEATURE : 0)); uref.report_id = report->id; uref.field_index = HID_FIELD_INDEX_NONE; @@ -213,7 +220,9 @@ static int hiddev_fasync(int fd, struct file *file, int on) { int retval; struct hiddev_list *list = file->private_data; + retval = fasync_helper(fd, file, on, &list->fasync); + return retval < 0 ? 
retval : 0; } @@ -224,14 +233,9 @@ static int hiddev_fasync(int fd, struct file *file, int on) static int hiddev_release(struct inode * inode, struct file * file) { struct hiddev_list *list = file->private_data; - struct hiddev_list **listptr; - listptr = &list->hiddev->list; hiddev_fasync(-1, file, 0); - - while (*listptr && (*listptr != list)) - listptr = &((*listptr)->next); - *listptr = (*listptr)->next; + list_del(&list->node); if (!--list->hiddev->open) { if (list->hiddev->exist) @@ -248,7 +252,8 @@ static int hiddev_release(struct inode * inode, struct file * file) /* * open file op */ -static int hiddev_open(struct inode * inode, struct file * file) { +static int hiddev_open(struct inode *inode, struct file *file) +{ struct hiddev_list *list; int i = iminor(inode) - HIDDEV_MINOR_BASE; @@ -260,9 +265,7 @@ static int hiddev_open(struct inode * inode, struct file * file) { return -ENOMEM; list->hiddev = hiddev_table[i]; - list->next = hiddev_table[i]->list; - hiddev_table[i]->list = list; - + list_add_tail(&list->node, &hiddev_table[i]->list); file->private_data = list; if (!list->hiddev->open++) @@ -362,6 +365,7 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun static unsigned int hiddev_poll(struct file *file, poll_table *wait) { struct hiddev_list *list = file->private_data; + poll_wait(file, &list->hiddev->wait, wait); if (list->head != list->tail) return POLLIN | POLLRDNORM; @@ -382,7 +386,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd struct hiddev_collection_info cinfo; struct hiddev_report_info rinfo; struct hiddev_field_info finfo; - struct hiddev_usage_ref_multi *uref_multi=NULL; + struct hiddev_usage_ref_multi *uref_multi = NULL; struct hiddev_usage_ref *uref; struct hiddev_devinfo dinfo; struct hid_report *report; @@ -764,15 +768,15 @@ int hiddev_connect(struct hid_device *hid) } init_waitqueue_head(&hiddev->wait); - - hiddev_table[hid->intf->minor - HIDDEV_MINOR_BASE] = hiddev; - + INIT_LIST_HEAD(&hiddev->list); hiddev->hid = hid; hiddev->exist = 1; hid->minor = hid->intf->minor; hid->hiddev = hiddev; + hiddev_table[hid->intf->minor - HIDDEV_MINOR_BASE] = hiddev; + return 0; } diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index e091d327bd9e..9c46746d5d00 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -12,8 +12,13 @@ * the single I/O ports of the device. * * Supported vendors: AK Modul-Bus Computer GmbH -* Supported devices: CY7C63001A-PC (to be continued...) -* Supported functions: Read/Write Ports (to be continued...) 
+* (Firmware "Port-Chip") +* +* Supported devices: CY7C63001A-PC +* CY7C63001C-PXC +* CY7C63001C-SXC +* +* Supported functions: Read/Write Ports * * * This program is free software; you can redistribute it and/or @@ -203,7 +208,7 @@ static int cypress_probe(struct usb_interface *interface, /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { - dev_err(&dev->udev->dev, "Out of memory!\n"); + dev_err(&interface->dev, "Out of memory!\n"); goto error; } diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 786e1dbe88ec..983e104dd452 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -1242,11 +1242,12 @@ done: static int ctrl_out (struct usbtest_dev *dev, unsigned count, unsigned length, unsigned vary) { - unsigned i, j, len, retval; + unsigned i, j, len; + int retval; u8 *buf; char *what = "?"; struct usb_device *udev; - + if (length < 1 || length > 0xffff || vary >= length) return -EINVAL; diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c index e5e6e4f3ef87..bd09232ce13c 100644 --- a/drivers/usb/net/rtl8150.c +++ b/drivers/usb/net/rtl8150.c @@ -175,6 +175,8 @@ static inline struct sk_buff *pull_skb(rtl8150_t *); static void rtl8150_disconnect(struct usb_interface *intf); static int rtl8150_probe(struct usb_interface *intf, const struct usb_device_id *id); +static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message); +static int rtl8150_resume(struct usb_interface *intf); static const char driver_name [] = "rtl8150"; @@ -183,6 +185,8 @@ static struct usb_driver rtl8150_driver = { .probe = rtl8150_probe, .disconnect = rtl8150_disconnect, .id_table = rtl8150_table, + .suspend = rtl8150_suspend, + .resume = rtl8150_resume }; /* @@ -238,9 +242,11 @@ static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size) usb_fill_control_urb(dev->ctrl_urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr, &dev->rx_creg, size, ctrl_callback, dev); - if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) + if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) { + if (ret == -ENODEV) + netif_device_detach(dev->netdev); err("control request submission failed: %d", ret); - else + } else set_bit(RX_REG_SET, &dev->flags); return ret; @@ -416,6 +422,7 @@ static void read_bulk_callback(struct urb *urb, struct pt_regs *regs) struct sk_buff *skb; struct net_device *netdev; u16 rx_stat; + int status; dev = urb->context; if (!dev) @@ -465,7 +472,10 @@ static void read_bulk_callback(struct urb *urb, struct pt_regs *regs) goon: usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); - if (usb_submit_urb(dev->rx_urb, GFP_ATOMIC)) { + status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); + if (status == -ENODEV) + netif_device_detach(dev->netdev); + else if (status) { set_bit(RX_URB_FAIL, &dev->flags); goto resched; } else { @@ -481,6 +491,7 @@ static void rx_fixup(unsigned long data) { rtl8150_t *dev; struct sk_buff *skb; + int status; dev = (rtl8150_t *)data; @@ -499,10 +510,13 @@ static void rx_fixup(unsigned long data) usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); try_again: - if (usb_submit_urb(dev->rx_urb, GFP_ATOMIC)) { + status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); + if (status == -ENODEV) { + netif_device_detach(dev->netdev); + } else if (status) { set_bit(RX_URB_FAIL, &dev->flags); goto 
tlsched; - } else { + } else { clear_bit(RX_URB_FAIL, &dev->flags); } @@ -574,12 +588,43 @@ static void intr_callback(struct urb *urb, struct pt_regs *regs) resubmit: status = usb_submit_urb (urb, SLAB_ATOMIC); - if (status) + if (status == -ENODEV) + netif_device_detach(dev->netdev); + else if (status) err ("can't resubmit intr, %s-%s/input0, status %d", dev->udev->bus->bus_name, dev->udev->devpath, status); } +static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message) +{ + rtl8150_t *dev = usb_get_intfdata(intf); + + netif_device_detach(dev->netdev); + + if (netif_running(dev->netdev)) { + usb_kill_urb(dev->rx_urb); + usb_kill_urb(dev->intr_urb); + } + return 0; +} + +static int rtl8150_resume(struct usb_interface *intf) +{ + rtl8150_t *dev = usb_get_intfdata(intf); + + netif_device_attach(dev->netdev); + if (netif_running(dev->netdev)) { + dev->rx_urb->status = 0; + dev->rx_urb->actual_length = 0; + read_bulk_callback(dev->rx_urb, NULL); + + dev->intr_urb->status = 0; + dev->intr_urb->actual_length = 0; + intr_callback(dev->intr_urb, NULL); + } + return 0; +} /* ** @@ -690,9 +735,14 @@ static int rtl8150_start_xmit(struct sk_buff *skb, struct net_device *netdev) usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), skb->data, count, write_bulk_callback, dev); if ((res = usb_submit_urb(dev->tx_urb, GFP_ATOMIC))) { - warn("failed tx_urb %d\n", res); - dev->stats.tx_errors++; - netif_start_queue(netdev); + /* Can we get/handle EPIPE here? */ + if (res == -ENODEV) + netif_device_detach(dev->netdev); + else { + warn("failed tx_urb %d\n", res); + dev->stats.tx_errors++; + netif_start_queue(netdev); + } } else { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; @@ -729,16 +779,25 @@ static int rtl8150_open(struct net_device *netdev) usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); - if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) + if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) { + if (res == -ENODEV) + netif_device_detach(dev->netdev); warn("%s: rx_urb submit failed: %d", __FUNCTION__, res); + return res; + } usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 3), dev->intr_buff, INTBUFSIZE, intr_callback, dev, dev->intr_interval); - if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) + if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) { + if (res == -ENODEV) + netif_device_detach(dev->netdev); warn("%s: intr_urb submit failed: %d", __FUNCTION__, res); - netif_start_queue(netdev); + usb_kill_urb(dev->rx_urb); + return res; + } enable_net_traffic(dev); set_carrier(netdev); + netif_start_queue(netdev); return res; } diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index ac33bd47cfce..f5b9438c94f0 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -62,15 +62,6 @@ config USB_SERIAL_AIRPRIME To compile this driver as a module, choose M here: the module will be called airprime. -config USB_SERIAL_ANYDATA - tristate "USB AnyData CDMA Wireless Driver" - depends on USB_SERIAL - help - Say Y here if you want to use a AnyData CDMA device. - - To compile this driver as a module, choose M here: the - module will be called anydata. - config USB_SERIAL_ARK3116 tristate "USB ARK Micro 3116 USB Serial Driver (EXPERIMENTAL)" depends on USB_SERIAL && EXPERIMENTAL @@ -502,15 +493,18 @@ config USB_SERIAL_XIRCOM module will be called keyspan_pda. 
config USB_SERIAL_OPTION - tristate "USB driver for GSM modems" + tristate "USB driver for GSM and CDMA modems" depends on USB_SERIAL help - Say Y here if you have an "Option" GSM PCMCIA card - (or an OEM version: branded Huawei, Audiovox, or Novatel). + Say Y here if you have a GSM or CDMA modem that's connected to USB. - These cards feature a built-in OHCI-USB adapter and an - internally-connected GSM modem. The USB bus is not - accessible externally. + This driver also supports several PCMCIA cards which have a + built-in OHCI-USB adapter and an internally-connected GSM modem. + The USB bus on these cards is not accessible externally. + + Supported devices include (some of?) those made by: + Option, Huawei, Audiovox, Sierra Wireless, Novatel Wireless, or + Anydata. To compile this driver as a module, choose M here: the module will be called option. diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile index 35d4acc7f1d3..8efed2ce1ba3 100644 --- a/drivers/usb/serial/Makefile +++ b/drivers/usb/serial/Makefile @@ -12,7 +12,6 @@ usbserial-obj-$(CONFIG_USB_EZUSB) += ezusb.o usbserial-objs := usb-serial.o generic.o bus.o $(usbserial-obj-y) obj-$(CONFIG_USB_SERIAL_AIRPRIME) += airprime.o -obj-$(CONFIG_USB_SERIAL_ANYDATA) += anydata.o obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o obj-$(CONFIG_USB_SERIAL_CP2101) += cp2101.o diff --git a/drivers/usb/serial/anydata.c b/drivers/usb/serial/anydata.c deleted file mode 100644 index 01843ef8c11e..000000000000 --- a/drivers/usb/serial/anydata.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * AnyData CDMA Serial USB driver - * - * Copyright (C) 2005 Greg Kroah-Hartman - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include - -static struct usb_device_id id_table [] = { - { USB_DEVICE(0x16d5, 0x6501) }, /* AirData CDMA device */ - { }, -}; -MODULE_DEVICE_TABLE(usb, id_table); - -/* if overridden by the user, then use their value for the size of the - * read and write urbs */ -static int buffer_size; -static int debug; - -static struct usb_driver anydata_driver = { - .name = "anydata", - .probe = usb_serial_probe, - .disconnect = usb_serial_disconnect, - .id_table = id_table, - .no_dynamic_id = 1, -}; - -static int anydata_open(struct usb_serial_port *port, struct file *filp) -{ - char *buffer; - int result = 0; - - dbg("%s - port %d", __FUNCTION__, port->number); - - if (buffer_size) { - /* override the default buffer sizes */ - buffer = kmalloc(buffer_size, GFP_KERNEL); - if (!buffer) { - dev_err(&port->dev, "%s - out of memory.\n", - __FUNCTION__); - return -ENOMEM; - } - kfree (port->read_urb->transfer_buffer); - port->read_urb->transfer_buffer = buffer; - port->read_urb->transfer_buffer_length = buffer_size; - - buffer = kmalloc(buffer_size, GFP_KERNEL); - if (!buffer) { - dev_err(&port->dev, "%s - out of memory.\n", - __FUNCTION__); - return -ENOMEM; - } - kfree (port->write_urb->transfer_buffer); - port->write_urb->transfer_buffer = buffer; - port->write_urb->transfer_buffer_length = buffer_size; - port->bulk_out_size = buffer_size; - } - - /* Start reading from the device */ - usb_fill_bulk_urb(port->read_urb, port->serial->dev, - usb_rcvbulkpipe(port->serial->dev, - port->bulk_in_endpointAddress), - port->read_urb->transfer_buffer, - port->read_urb->transfer_buffer_length, - usb_serial_generic_read_bulk_callback, port); - result = usb_submit_urb(port->read_urb, GFP_KERNEL); - if (result) - dev_err(&port->dev, - "%s - failed submitting read urb, error %d\n", - __FUNCTION__, result); - - return result; -} - -static struct usb_serial_driver anydata_device = { - .driver = { - .owner = THIS_MODULE, - .name = "anydata", - }, - .id_table = id_table, - .num_interrupt_in = NUM_DONT_CARE, - .num_bulk_in = NUM_DONT_CARE, - .num_bulk_out = NUM_DONT_CARE, - .num_ports = 1, - .open = anydata_open, -}; - -static int __init anydata_init(void) -{ - int retval; - - retval = usb_serial_register(&anydata_device); - if (retval) - return retval; - retval = usb_register(&anydata_driver); - if (retval) - usb_serial_deregister(&anydata_device); - return retval; -} - -static void __exit anydata_exit(void) -{ - usb_deregister(&anydata_driver); - usb_serial_deregister(&anydata_device); -} - -module_init(anydata_init); -module_exit(anydata_exit); -MODULE_LICENSE("GPL"); - -module_param(debug, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "Debug enabled or not"); -module_param(buffer_size, int, 0); -MODULE_PARM_DESC(buffer_size, "Size of the transfer buffers"); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b458aedc5fb6..15945e806f03 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -306,6 +306,8 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, @@ -337,6 +339,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, { 
USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) }, diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 04ef90fcb876..8888cd80a491 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -32,6 +32,12 @@ #define FTDI_NF_RIC_PID 0x0001 /* Product Id */ +/* www.canusb.com Lawicel CANUSB device */ +#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */ + +/* AlphaMicro Components AMC-232USB01 device */ +#define FTDI_AMC232_PID 0xFF00 /* Product Id */ + /* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */ #define FTDI_ACTZWAVE_PID 0xF2D0 @@ -182,6 +188,10 @@ /* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */ #define FTDI_USB_UIRT_PID 0xF850 /* Product Id */ +/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */ + +#define FTDI_TNC_X_PID 0xEBE0 + /* * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). * All of these devices use FTDI's vendor ID (0x0403). diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c index 59c5d999009a..9840bade79f9 100644 --- a/drivers/usb/serial/ipaq.c +++ b/drivers/usb/serial/ipaq.c @@ -250,6 +250,9 @@ static struct usb_device_id ipaq_id_table [] = { { USB_DEVICE(0x04C5, 0x1058) }, /* FUJITSU USB Sync */ { USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */ { USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */ + { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */ + { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */ + { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */ { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index f0530c1d7b7a..c856e6f40e22 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -9,40 +9,14 @@ Portions copied from the Keyspan driver by Hugh Blemings - History: - - 2005-05-19 v0.1 Initial version, based on incomplete docs - and analysis of misbehavior with the standard driver - 2005-05-20 v0.2 Extended the input buffer to avoid losing - random 64-byte chunks of data - 2005-05-21 v0.3 implemented chars_in_buffer() - turned on low_latency - simplified the code somewhat - 2005-05-24 v0.4 option_write() sometimes deadlocked under heavy load - removed some dead code - added sponsor notice - coding style clean-up - 2005-06-20 v0.4.1 add missing braces :-/ - killed end-of-line whitespace - 2005-07-15 v0.4.2 rename WLAN product to FUSION, add FUSION2 - 2005-09-10 v0.4.3 added HUAWEI E600 card and Audiovox AirCard - 2005-09-20 v0.4.4 increased recv buffer size: the card sometimes - wants to send >2000 bytes. - 2006-04-10 v0.5 fixed two array overrun errors :-/ - 2006-04-21 v0.5.1 added support for Sierra Wireless MC8755 - 2006-05-15 v0.6 re-enable multi-port support - 2006-06-01 v0.6.1 add COBRA - 2006-06-01 v0.6.2 add backwards-compatibility stuff - 2006-06-01 v0.6.3 add Novatel Wireless - 2006-06-01 v0.7 Option => GSM - 2006-06-01 v0.7.1 add COBRA2 + History: see the git log. Work sponsored by: Sigos GmbH, Germany This driver exists because the "normal" serial driver doesn't work too well with GSM modems. 
Issues: - data loss -- one single Receive URB is not nearly enough - - nonstandard flow (Option devices) and multiplex (Sierra) control + - nonstandard flow (Option devices) control - controlling the baud rate doesn't make sense This driver is named "option" because the most common device it's @@ -96,8 +70,8 @@ static int option_send_setup(struct usb_serial_port *port); #define OPTION_VENDOR_ID 0x0AF0 #define HUAWEI_VENDOR_ID 0x12D1 #define AUDIOVOX_VENDOR_ID 0x0F3D -#define SIERRAWIRELESS_VENDOR_ID 0x1199 #define NOVATELWIRELESS_VENDOR_ID 0x1410 +#define ANYDATA_VENDOR_ID 0x16d5 #define OPTION_PRODUCT_OLD 0x5000 #define OPTION_PRODUCT_FUSION 0x6000 @@ -106,8 +80,8 @@ static int option_send_setup(struct usb_serial_port *port); #define OPTION_PRODUCT_COBRA2 0x6600 #define HUAWEI_PRODUCT_E600 0x1001 #define AUDIOVOX_PRODUCT_AIRCARD 0x0112 -#define SIERRAWIRELESS_PRODUCT_MC8755 0x6802 #define NOVATELWIRELESS_PRODUCT_U740 0x1400 +#define ANYDATA_PRODUCT_ID 0x6501 static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_OLD) }, @@ -117,8 +91,8 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA2) }, { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, { USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) }, - { USB_DEVICE(SIERRAWIRELESS_VENDOR_ID, SIERRAWIRELESS_PRODUCT_MC8755) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID,NOVATELWIRELESS_PRODUCT_U740) }, + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ID) }, { } /* Terminating entry */ }; @@ -131,10 +105,7 @@ static struct usb_device_id option_ids1[] = { { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, { USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID,NOVATELWIRELESS_PRODUCT_U740) }, - { } /* Terminating entry */ -}; -static struct usb_device_id option_ids3[] = { - { USB_DEVICE(SIERRAWIRELESS_VENDOR_ID, SIERRAWIRELESS_PRODUCT_MC8755) }, + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ID) }, { } /* Terminating entry */ }; @@ -151,37 +122,11 @@ static struct usb_driver option_driver = { /* The card has three separate interfaces, which the serial driver * recognizes separately, thus num_port=1. 
*/ -static struct usb_serial_driver option_3port_device = { - .driver = { - .owner = THIS_MODULE, - .name = "option", - }, - .description = "GSM modem (3-port)", - .id_table = option_ids3, - .num_interrupt_in = NUM_DONT_CARE, - .num_bulk_in = NUM_DONT_CARE, - .num_bulk_out = NUM_DONT_CARE, - .num_ports = 3, - .open = option_open, - .close = option_close, - .write = option_write, - .write_room = option_write_room, - .chars_in_buffer = option_chars_in_buffer, - .throttle = option_rx_throttle, - .unthrottle = option_rx_unthrottle, - .set_termios = option_set_termios, - .break_ctl = option_break_ctl, - .tiocmget = option_tiocmget, - .tiocmset = option_tiocmset, - .attach = option_startup, - .shutdown = option_shutdown, - .read_int_callback = option_instat_callback, -}; static struct usb_serial_driver option_1port_device = { .driver = { .owner = THIS_MODULE, - .name = "option", + .name = "option1", }, .description = "GSM modem (1-port)", .id_table = option_ids1, @@ -245,9 +190,6 @@ static int __init option_init(void) retval = usb_serial_register(&option_1port_device); if (retval) goto failed_1port_device_register; - retval = usb_serial_register(&option_3port_device); - if (retval) - goto failed_3port_device_register; retval = usb_register(&option_driver); if (retval) goto failed_driver_register; @@ -257,8 +199,6 @@ static int __init option_init(void) return 0; failed_driver_register: - usb_serial_deregister (&option_3port_device); -failed_3port_device_register: usb_serial_deregister (&option_1port_device); failed_1port_device_register: return retval; @@ -267,7 +207,6 @@ failed_1port_device_register: static void __exit option_exit(void) { usb_deregister (&option_driver); - usb_serial_deregister (&option_3port_device); usb_serial_deregister (&option_1port_device); } @@ -656,7 +595,6 @@ static void option_setup_urbs(struct usb_serial *serial) dbg("%s", __FUNCTION__); - for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 259db31b65c1..65e4d046951a 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -79,8 +79,8 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) }, { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) }, { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) }, - { USB_DEVICE(OTI_VENDOR_ID, OTI_PRODUCT_ID) }, { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) }, + { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index d9c1e6e0b4b3..55195e76eb6f 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -82,10 +82,10 @@ #define SPEEDDRAGON_VENDOR_ID 0x0e55 #define SPEEDDRAGON_PRODUCT_ID 0x110b -/* Ours Technology Inc DKU-5 clone, chipset: Prolific Technology Inc */ -#define OTI_VENDOR_ID 0x0ea0 -#define OTI_PRODUCT_ID 0x6858 - /* DATAPILOT Universal-2 Phone Cable */ #define DATAPILOT_U2_VENDOR_ID 0x0731 #define DATAPILOT_U2_PRODUCT_ID 0x2003 + +/* Belkin "F5U257" Serial Adapter */ +#define BELKIN_VENDOR_ID 0x050d +#define BELKIN_PRODUCT_ID 0x0257 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index a5ca449f6e64..4a803d69fa36 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -145,6 +145,13 @@ UNUSUAL_DEV( 0x0420, 0x0001, 0x0100, 0x0100, US_SC_DEVICE, US_PR_DEVICE, 
NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Mario Rettig */ +UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, + "Nokia", + "Nokia 3250", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ), + /* Reported by Sumedha Swamy and * Einar Th. Einarsson */ UNUSUAL_DEV( 0x0421, 0x0444, 0x0100, 0x0100, @@ -627,18 +634,6 @@ UNUSUAL_DEV( 0x0595, 0x4343, 0x0000, 0x2210, "Digital Camera EX-20 DSC", US_SC_8070, US_PR_DEVICE, NULL, 0 ), -/* The entry was here before I took over, and had US_SC_RBC. It turns - * out that isn't needed. Additionally, Torsten Eriksson - * is able to use his device fine - * without this entry at all - but I don't suspect that will be true - * for all users (the protocol is likely needed), so is staying at - * this time. - Phil Dibowitz - */ -UNUSUAL_DEV( 0x059f, 0xa601, 0x0200, 0x0200, - "LaCie", - "USB Hard Disk", - US_SC_DEVICE, US_PR_CB, NULL, 0 ), - /* Submitted by Joel Bourquard * Some versions of this device need the SubClass and Protocol overrides * while others don't. @@ -1106,7 +1101,15 @@ UNUSUAL_DEV( 0x0a17, 0x006, 0x0000, 0xffff, "Optio S/S4", US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY ), - + +/* This is a virtual windows driver CD, which the zd1211rw driver automatically + * converts into a WLAN device. */ +UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101, + "ZyXEL", + "G-220F USB-WLAN Install", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + #ifdef CONFIG_USB_STORAGE_ISD200 UNUSUAL_DEV( 0x0bf6, 0xa001, 0x0100, 0x0110, "ATI", @@ -1237,6 +1240,16 @@ UNUSUAL_DEV( 0x0ed1, 0x7636, 0x0103, 0x0103, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64), +/* David Kuehling : + * for MP3-Player AVOX WSX-300ER (bought in Japan). Reports lots of SCSI + * errors when trying to write. + */ +UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100, + "C-MEX", + "A-VOX", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE ), + /* Reported by Michael Stattmann */ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, "Sony Ericsson", @@ -1248,7 +1261,7 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, * Tested on hardware version 1.10. * Entry is needed only for the initializer function override. 
*/ -UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x9999, +UNUSUAL_DEV( 0x1019, 0x0c55, 0x0110, 0x0110, "Desknote", "UCR-61S2B", US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 5ee19be52f65..8d7bdcb5924d 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -483,7 +483,7 @@ static struct us_unusual_dev *find_unusual(const struct usb_device_id *id) } /* Get the unusual_devs entries and the string descriptors */ -static void get_device_info(struct us_data *us, const struct usb_device_id *id) +static int get_device_info(struct us_data *us, const struct usb_device_id *id) { struct usb_device *dev = us->pusb_dev; struct usb_interface_descriptor *idesc = @@ -500,6 +500,11 @@ static void get_device_info(struct us_data *us, const struct usb_device_id *id) unusual_dev->useTransport; us->flags = USB_US_ORIG_FLAGS(id->driver_info); + if (us->flags & US_FL_IGNORE_DEVICE) { + printk(KERN_INFO USB_STORAGE "device ignored\n"); + return -ENODEV; + } + /* * This flag is only needed when we're in high-speed, so let's * disable it if we're in full-speed @@ -541,6 +546,8 @@ static void get_device_info(struct us_data *us, const struct usb_device_id *id) msgs[msg], UTS_RELEASE); } + + return 0; } /* Get the transport settings */ @@ -969,7 +976,9 @@ static int storage_probe(struct usb_interface *intf, * of the match from the usb_device_id table, so we can find the * corresponding entry in the private table. */ - get_device_info(us, id); + result = get_device_info(us, id); + if (result) + goto BadDevice; /* Get the transport, protocol, and pipe settings */ result = get_transport(us); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 6533b0f39231..702eb933cf88 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -86,9 +86,11 @@ config FB_MACMODES default n config FB_BACKLIGHT - bool - depends on FB - default n + bool + depends on FB + select BACKLIGHT_LCD_SUPPORT + select BACKLIGHT_CLASS_DEVICE + default n config FB_MODE_HELPERS bool "Enable Video Mode Handling Helpers" @@ -420,7 +422,7 @@ config FB_OF config FB_CONTROL bool "Apple \"control\" display support" - depends on (FB = y) && PPC_PMAC + depends on (FB = y) && PPC_PMAC && PPC32 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -431,7 +433,7 @@ config FB_CONTROL config FB_PLATINUM bool "Apple \"platinum\" display support" - depends on (FB = y) && PPC_PMAC + depends on (FB = y) && PPC_PMAC && PPC32 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -442,7 +444,7 @@ config FB_PLATINUM config FB_VALKYRIE bool "Apple \"valkyrie\" display support" - depends on (FB = y) && (MAC || PPC_PMAC) + depends on (FB = y) && (MAC || (PPC_PMAC && PPC32)) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -453,7 +455,7 @@ config FB_VALKYRIE config FB_CT65550 bool "Chips 65550 display support" - depends on (FB = y) && PPC + depends on (FB = y) && PPC32 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -552,7 +554,7 @@ config FB_VESA config FB_IMAC bool "Intel-based Macintosh Framebuffer Support" - depends on (FB = y) && X86 + depends on (FB = y) && X86 && EFI select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -721,10 +723,8 @@ config FB_NVIDIA_I2C config FB_NVIDIA_BACKLIGHT bool "Support for backlight control" - depends on FB_NVIDIA && PPC_PMAC + depends on FB_NVIDIA && PMAC_BACKLIGHT select FB_BACKLIGHT - select BACKLIGHT_LCD_SUPPORT - select 
BACKLIGHT_CLASS_DEVICE default y help Say Y here if you want to control the backlight of your display. @@ -769,10 +769,8 @@ config FB_RIVA_DEBUG config FB_RIVA_BACKLIGHT bool "Support for backlight control" - depends on FB_RIVA && PPC_PMAC + depends on FB_RIVA && PMAC_BACKLIGHT select FB_BACKLIGHT - select BACKLIGHT_LCD_SUPPORT - select BACKLIGHT_CLASS_DEVICE default y help Say Y here if you want to control the backlight of your display. @@ -1025,10 +1023,8 @@ config FB_RADEON_I2C config FB_RADEON_BACKLIGHT bool "Support for backlight control" - depends on FB_RADEON && PPC_PMAC + depends on FB_RADEON && PMAC_BACKLIGHT select FB_BACKLIGHT - select BACKLIGHT_LCD_SUPPORT - select BACKLIGHT_CLASS_DEVICE default y help Say Y here if you want to control the backlight of your display. @@ -1059,10 +1055,8 @@ config FB_ATY128 config FB_ATY128_BACKLIGHT bool "Support for backlight control" - depends on FB_ATY128 && PPC_PMAC + depends on FB_ATY128 && PMAC_BACKLIGHT select FB_BACKLIGHT - select BACKLIGHT_LCD_SUPPORT - select BACKLIGHT_CLASS_DEVICE default y help Say Y here if you want to control the backlight of your display. @@ -1111,10 +1105,8 @@ config FB_ATY_GX config FB_ATY_BACKLIGHT bool "Support for backlight control" - depends on FB_ATY && PPC_PMAC + depends on FB_ATY && PMAC_BACKLIGHT select FB_BACKLIGHT - select BACKLIGHT_LCD_SUPPORT - select BACKLIGHT_CLASS_DEVICE default y help Say Y here if you want to control the backlight of your display. @@ -1620,7 +1612,7 @@ if FB || SGI_NEWPORT_CONSOLE source "drivers/video/logo/Kconfig" endif -if FB && SYSFS +if SYSFS source "drivers/video/backlight/Kconfig" endif diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 95563c9c6b9c..481c6c9695f8 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -4,6 +4,7 @@ # Each configuration option enables a list of files. 
+obj-y += fb_notify.o obj-$(CONFIG_FB) += fb.o fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ modedb.o fbcvt.o diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index c64a717e2d4b..3e827e04a2aa 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -455,7 +455,10 @@ static void do_wait_for_fifo(u16 entries, struct aty128fb_par *par); static void wait_for_fifo(u16 entries, struct aty128fb_par *par); static void wait_for_idle(struct aty128fb_par *par); static u32 depth_to_dst(u32 depth); + +#ifdef CONFIG_FB_ATY128_BACKLIGHT static void aty128_bl_set_power(struct fb_info *info, int power); +#endif #define BIOS_IN8(v) (readb(bios + (v))) #define BIOS_IN16(v) (readb(bios + (v)) | \ @@ -1910,9 +1913,6 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i u8 chip_rev; u32 dac; - if (!par->vram_size) /* may have already been probed */ - par->vram_size = aty_ld_le32(CONFIG_MEMSIZE) & 0x03FFFFFF; - /* Get the chip revision */ chip_rev = (aty_ld_le32(CONFIG_CNTL) >> 16) & 0x1F; @@ -2025,9 +2025,6 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i aty128_init_engine(par); - if (register_framebuffer(info) < 0) - return 0; - par->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM); par->pdev = pdev; par->asleep = 0; @@ -2037,6 +2034,9 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i aty128_bl_init(par); #endif + if (register_framebuffer(info) < 0) + return 0; + printk(KERN_INFO "fb%d: %s frame buffer device on %s\n", info->node, info->fix.id, video_card); @@ -2086,7 +2086,6 @@ static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_ par = info->par; info->pseudo_palette = par->pseudo_palette; - info->fix = aty128fb_fix; /* Virtualize mmio region */ info->fix.mmio_start = reg_addr; diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 1507d19f481f..053ff63365b7 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -2812,7 +2812,7 @@ static int atyfb_blank(int blank, struct fb_info *info) if (par->lock_blank || par->asleep) return 0; -#ifdef CONFIG_PMAC_BACKLIGHT +#ifdef CONFIG_FB_ATY_BACKLIGHT if (machine_is(powermac) && blank > FB_BLANK_NORMAL) aty_bl_set_power(info, FB_BLANK_POWERDOWN); #elif defined(CONFIG_FB_ATY_GENERIC_LCD) @@ -2844,7 +2844,7 @@ static int atyfb_blank(int blank, struct fb_info *info) } aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par); -#ifdef CONFIG_PMAC_BACKLIGHT +#ifdef CONFIG_FB_ATY_BACKLIGHT if (machine_is(powermac) && blank <= FB_BLANK_NORMAL) aty_bl_set_power(info, FB_BLANK_UNBLANK); #elif defined(CONFIG_FB_ATY_GENERIC_LCD) diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 8d85fc58142e..8e3400d5dd21 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c @@ -266,6 +266,8 @@ static int force_measure_pll = 0; #ifdef CONFIG_MTRR static int nomtrr = 0; #endif +static int force_sleep; +static int ignore_devlist; /* * prototypes @@ -2327,9 +2329,9 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev, /* -2 is special: means ON on mobility chips and do not * change on others */ - radeonfb_pm_init(rinfo, rinfo->is_mobility ? 1 : -1); + radeonfb_pm_init(rinfo, rinfo->is_mobility ? 
1 : -1, ignore_devlist, force_sleep); } else - radeonfb_pm_init(rinfo, default_dynclk); + radeonfb_pm_init(rinfo, default_dynclk, ignore_devlist, force_sleep); pci_set_drvdata(pdev, info); @@ -2477,6 +2479,12 @@ static int __init radeonfb_setup (char *options) force_measure_pll = 1; } else if (!strncmp(this_opt, "ignore_edid", 11)) { ignore_edid = 1; +#if defined(CONFIG_PM) && defined(CONFIG_X86) + } else if (!strncmp(this_opt, "force_sleep", 11)) { + force_sleep = 1; + } else if (!strncmp(this_opt, "ignore_devlist", 14)) { + ignore_devlist = 1; +#endif } else mode_option = this_opt; } @@ -2532,3 +2540,9 @@ module_param(panel_yres, int, 0); MODULE_PARM_DESC(panel_yres, "int: set panel yres"); module_param(mode_option, charp, 0); MODULE_PARM_DESC(mode_option, "Specify resolution as \"x[-][@]\" "); +#if defined(CONFIG_PM) && defined(CONFIG_X86) +module_param(force_sleep, bool, 0); +MODULE_PARM_DESC(force_sleep, "bool: force D2 sleep mode on all hardware"); +module_param(ignore_devlist, bool, 0); +MODULE_PARM_DESC(ignore_devlist, "bool: ignore workarounds for bugs in specific laptops"); +#endif diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c index c7091761cef4..f31e606a2ded 100644 --- a/drivers/video/aty/radeon_pm.c +++ b/drivers/video/aty/radeon_pm.c @@ -27,6 +27,99 @@ #include "ati_ids.h" +static void radeon_reinitialize_M10(struct radeonfb_info *rinfo); + +/* + * Workarounds for bugs in PC laptops: + * - enable D2 sleep in some IBM Thinkpads + * - special case for Samsung P35 + * + * Whitelist by subsystem vendor/device because + * its the subsystem vendor's fault! + */ + +#if defined(CONFIG_PM) && defined(CONFIG_X86) +struct radeon_device_id { + const char *ident; /* (arbitrary) Name */ + const unsigned short subsystem_vendor; /* Subsystem Vendor ID */ + const unsigned short subsystem_device; /* Subsystem Device ID */ + const enum radeon_pm_mode pm_mode_modifier; /* modify pm_mode */ + const reinit_function_ptr new_reinit_func; /* changed reinit_func */ +}; + +#define BUGFIX(model, sv, sd, pm, fn) { \ + .ident = model, \ + .subsystem_vendor = sv, \ + .subsystem_device = sd, \ + .pm_mode_modifier = pm, \ + .new_reinit_func = fn \ +} + +static struct radeon_device_id radeon_workaround_list[] = { + BUGFIX("IBM Thinkpad R32", + PCI_VENDOR_ID_IBM, 0x1905, + radeon_pm_d2, NULL), + BUGFIX("IBM Thinkpad R40", + PCI_VENDOR_ID_IBM, 0x0526, + radeon_pm_d2, NULL), + BUGFIX("IBM Thinkpad R40", + PCI_VENDOR_ID_IBM, 0x0527, + radeon_pm_d2, NULL), + BUGFIX("IBM Thinkpad R50/R51/T40/T41", + PCI_VENDOR_ID_IBM, 0x0531, + radeon_pm_d2, NULL), + BUGFIX("IBM Thinkpad R51/T40/T41/T42", + PCI_VENDOR_ID_IBM, 0x0530, + radeon_pm_d2, NULL), + BUGFIX("IBM Thinkpad T30", + PCI_VENDOR_ID_IBM, 0x0517, + radeon_pm_d2, NULL), + BUGFIX("IBM Thinkpad T40p", + PCI_VENDOR_ID_IBM, 0x054d, + radeon_pm_d2, NULL), + BUGFIX("IBM Thinkpad T42", + PCI_VENDOR_ID_IBM, 0x0550, + radeon_pm_d2, NULL), + BUGFIX("IBM Thinkpad X31/X32", + PCI_VENDOR_ID_IBM, 0x052f, + radeon_pm_d2, NULL), + BUGFIX("Samsung P35", + PCI_VENDOR_ID_SAMSUNG, 0xc00c, + radeon_pm_off, radeon_reinitialize_M10), + { .ident = NULL } +}; + +static int radeon_apply_workarounds(struct radeonfb_info *rinfo) +{ + struct radeon_device_id *id; + + for (id = radeon_workaround_list; id->ident != NULL; id++ ) + if ((id->subsystem_vendor == rinfo->pdev->subsystem_vendor ) && + (id->subsystem_device == rinfo->pdev->subsystem_device )) { + + /* we found a device that requires workaround */ + printk(KERN_DEBUG "radeonfb: %s detected" + ", enabling 
workaround\n", id->ident); + + rinfo->pm_mode |= id->pm_mode_modifier; + + if (id->new_reinit_func != NULL) + rinfo->reinit_func = id->new_reinit_func; + + return 1; + } + return 0; /* not found */ +} + +#else /* defined(CONFIG_PM) && defined(CONFIG_X86) */ +static inline int radeon_apply_workarounds(struct radeonfb_info *rinfo) +{ + return 0; +} +#endif /* defined(CONFIG_PM) && defined(CONFIG_X86) */ + + + static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo) { u32 tmp; @@ -852,18 +945,26 @@ static void radeon_pm_setup_for_suspend(struct radeonfb_info *rinfo) /* because both INPLL and OUTPLL take the same lock, that's why. */ tmp = INPLL( pllMCLK_MISC) | MCLK_MISC__EN_MCLK_TRISTATE_IN_SUSPEND; OUTPLL( pllMCLK_MISC, tmp); - - /* AGP PLL control */ - if (rinfo->family <= CHIP_FAMILY_RV280) { - OUTREG(BUS_CNTL1, INREG(BUS_CNTL1) | BUS_CNTL1__AGPCLK_VALID); - OUTREG(BUS_CNTL1, - (INREG(BUS_CNTL1) & ~BUS_CNTL1__MOBILE_PLATFORM_SEL_MASK) - | (2<family <= CHIP_FAMILY_RV280) { + OUTREG(BUS_CNTL1, INREG(BUS_CNTL1) | BUS_CNTL1__AGPCLK_VALID); + OUTREG(BUS_CNTL1, + (INREG(BUS_CNTL1) & ~BUS_CNTL1__MOBILE_PLATFORM_SEL_MASK) + | (2<pm_reg = pci_find_capability(rinfo->pdev, PCI_CAP_ID_PM); @@ -2729,22 +2830,13 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk) } #if defined(CONFIG_PM) +#if defined(CONFIG_PPC_PMAC) /* Check if we can power manage on suspend/resume. We can do * D2 on M6, M7 and M9, and we can resume from D3 cold a few other * "Mac" cards, but that's all. We need more infos about what the * BIOS does tho. Right now, all this PM stuff is pmac-only for that * reason. --BenH */ - /* Special case for Samsung P35 laptops - */ - if ((rinfo->pdev->vendor == PCI_VENDOR_ID_ATI) && - (rinfo->pdev->device == PCI_CHIP_RV350_NP) && - (rinfo->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) && - (rinfo->pdev->subsystem_device == 0xc00c)) { - rinfo->reinit_func = radeon_reinitialize_M10; - rinfo->pm_mode |= radeon_pm_off; - } -#if defined(CONFIG_PPC_PMAC) if (machine_is(powermac) && rinfo->of_node) { if (rinfo->is_mobility && rinfo->pm_reg && rinfo->family <= CHIP_FAMILY_RV250) @@ -2790,6 +2882,18 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk) } #endif /* defined(CONFIG_PPC_PMAC) */ #endif /* defined(CONFIG_PM) */ + + if (ignore_devlist) + printk(KERN_DEBUG + "radeonfb: skipping test for device workarounds\n"); + else + radeon_apply_workarounds(rinfo); + + if (force_sleep) { + printk(KERN_DEBUG + "radeonfb: forcefully enabling D2 sleep mode\n"); + rinfo->pm_mode |= radeon_pm_d2; + } } void radeonfb_pm_exit(struct radeonfb_info *rinfo) diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h index 38657b2d10eb..d5ff224a6258 100644 --- a/drivers/video/aty/radeonfb.h +++ b/drivers/video/aty/radeonfb.h @@ -273,6 +273,8 @@ enum radeon_pm_mode { radeon_pm_off = 0x00000002, /* Can resume from D3 cold */ }; +typedef void (*reinit_function_ptr)(struct radeonfb_info *rinfo); + struct radeonfb_info { struct fb_info *info; @@ -338,7 +340,7 @@ struct radeonfb_info { int dynclk; int no_schedule; enum radeon_pm_mode pm_mode; - void (*reinit_func)(struct radeonfb_info *rinfo); + reinit_function_ptr reinit_func; /* Lock on register access */ spinlock_t reg_lock; @@ -600,7 +602,7 @@ extern int radeon_probe_i2c_connector(struct radeonfb_info *rinfo, int conn, u8 /* PM Functions */ extern int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t state); extern int radeonfb_pci_resume(struct pci_dev *pdev); -extern void radeonfb_pm_init(struct 
radeonfb_info *rinfo, int dynclk); +extern void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep); extern void radeonfb_pm_exit(struct radeonfb_info *rinfo); /* Monitor probe functions */ diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c index a92a91fef16f..f25d5d648333 100644 --- a/drivers/video/au1100fb.c +++ b/drivers/video/au1100fb.c @@ -156,7 +156,7 @@ int au1100fb_setmode(struct au1100fb_device *fbdev) info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */ - } + } } else { /* mono */ info->fix.visual = FB_VISUAL_MONO10; @@ -164,20 +164,16 @@ int au1100fb_setmode(struct au1100fb_device *fbdev) } info->screen_size = info->fix.line_length * info->var.yres_virtual; + info->var.rotate = ((fbdev->panel->control_base&LCD_CONTROL_SM_MASK) \ + >> LCD_CONTROL_SM_BIT) * 90; /* Determine BPP mode and format */ - fbdev->regs->lcd_control = fbdev->panel->control_base | - ((info->var.rotate/90) << LCD_CONTROL_SM_BIT); - + fbdev->regs->lcd_control = fbdev->panel->control_base; + fbdev->regs->lcd_horztiming = fbdev->panel->horztiming; + fbdev->regs->lcd_verttiming = fbdev->panel->verttiming; + fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base; fbdev->regs->lcd_intenable = 0; fbdev->regs->lcd_intstatus = 0; - - fbdev->regs->lcd_horztiming = fbdev->panel->horztiming; - - fbdev->regs->lcd_verttiming = fbdev->panel->verttiming; - - fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base; - fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys); if (panel_is_dual(fbdev->panel)) { @@ -206,6 +202,8 @@ int au1100fb_setmode(struct au1100fb_device *fbdev) /* Resume controller */ fbdev->regs->lcd_control |= LCD_CONTROL_GO; + mdelay(10); + au1100fb_fb_blank(VESA_NO_BLANKING, info); return 0; } diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 022f9d3473f5..02f15297a021 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -10,7 +10,7 @@ menuconfig BACKLIGHT_LCD_SUPPORT config BACKLIGHT_CLASS_DEVICE tristate "Lowlevel Backlight controls" - depends on BACKLIGHT_LCD_SUPPORT && FB + depends on BACKLIGHT_LCD_SUPPORT default m help This framework adds support for low-level control of the LCD @@ -26,7 +26,7 @@ config BACKLIGHT_DEVICE config LCD_CLASS_DEVICE tristate "Lowlevel LCD controls" - depends on BACKLIGHT_LCD_SUPPORT && FB + depends on BACKLIGHT_LCD_SUPPORT default m help This framework adds support for low-level control of LCD. 
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index 52ed12b12acc..eb4d03fa5391 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -197,7 +197,7 @@ static int __init mdacon_setup(char *str) __setup("mdacon=", mdacon_setup); #endif -static int __init mda_detect(void) +static int mda_detect(void) { int count=0; u16 *p, p_save; @@ -282,7 +282,7 @@ static int __init mda_detect(void) return 1; } -static void __init mda_initialize(void) +static void mda_initialize(void) { write_mda_b(97, 0x00); /* horizontal total */ write_mda_b(80, 0x01); /* horizontal displayed */ diff --git a/drivers/video/fb_notify.c b/drivers/video/fb_notify.c new file mode 100644 index 000000000000..8c020389e4fa --- /dev/null +++ b/drivers/video/fb_notify.c @@ -0,0 +1,46 @@ +/* + * linux/drivers/video/fb_notify.c + * + * Copyright (C) 2006 Antonino Daplas + * + * 2001 - Documented with DocBook + * - Brad Douglas + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ +#include +#include + +static BLOCKING_NOTIFIER_HEAD(fb_notifier_list); + +/** + * fb_register_client - register a client notifier + * @nb: notifier block to callback on events + */ +int fb_register_client(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&fb_notifier_list, nb); +} +EXPORT_SYMBOL(fb_register_client); + +/** + * fb_unregister_client - unregister a client notifier + * @nb: notifier block to callback on events + */ +int fb_unregister_client(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&fb_notifier_list, nb); +} +EXPORT_SYMBOL(fb_unregister_client); + +/** + * fb_notifier_call_chain - notify clients of fb_events + * + */ +int fb_notifier_call_chain(unsigned long val, void *v) +{ + return blocking_notifier_call_chain(&fb_notifier_list, val, v); +} +EXPORT_SYMBOL_GPL(fb_notifier_call_chain); diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 4fc9df426c1a..17961e3ecaa0 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -52,7 +52,6 @@ #define FBPIXMAPSIZE (1024 * 8) -static BLOCKING_NOTIFIER_HEAD(fb_notifier_list); struct fb_info *registered_fb[FB_MAX]; int num_registered_fb; @@ -791,8 +790,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) event.info = info; event.data = &mode1; - ret = blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_MODE_DELETE, &event); + ret = fb_notifier_call_chain(FB_EVENT_MODE_DELETE, &event); } if (!ret) @@ -837,8 +835,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) info->flags &= ~FBINFO_MISC_USEREVENT; event.info = info; - blocking_notifier_call_chain(&fb_notifier_list, - evnt, &event); + fb_notifier_call_chain(evnt, &event); } } } @@ -861,8 +858,7 @@ fb_blank(struct fb_info *info, int blank) event.info = info; event.data = &blank; - blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_BLANK, &event); + fb_notifier_call_chain(FB_EVENT_BLANK, &event); } return ret; @@ -933,8 +929,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, con2fb.framebuffer = -1; event.info = info; event.data = &con2fb; - blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_GET_CONSOLE_MAP, &event); + fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event); return copy_to_user(argp, &con2fb, sizeof(con2fb)) ?
-EFAULT : 0; case FBIOPUT_CON2FBMAP: @@ -952,9 +947,8 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, return -EINVAL; event.info = info; event.data = &con2fb; - return blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_SET_CONSOLE_MAP, - &event); + return fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, + &event); case FBIOBLANK: acquire_console_sem(); info->flags |= FBINFO_MISC_USEREVENT; @@ -1330,8 +1324,7 @@ register_framebuffer(struct fb_info *fb_info) registered_fb[i] = fb_info; event.info = fb_info; - blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_FB_REGISTERED, &event); + fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); return 0; } @@ -1365,29 +1358,10 @@ unregister_framebuffer(struct fb_info *fb_info) fb_cleanup_class_device(fb_info); class_device_destroy(fb_class, MKDEV(FB_MAJOR, i)); event.info = fb_info; - blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_FB_UNREGISTERED, &event); + fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); return 0; } -/** - * fb_register_client - register a client notifier - * @nb: notifier block to callback on events - */ -int fb_register_client(struct notifier_block *nb) -{ - return blocking_notifier_chain_register(&fb_notifier_list, nb); -} - -/** - * fb_unregister_client - unregister a client notifier - * @nb: notifier block to callback on events - */ -int fb_unregister_client(struct notifier_block *nb) -{ - return blocking_notifier_chain_unregister(&fb_notifier_list, nb); -} - /** * fb_set_suspend - low level driver signals suspend * @info: framebuffer affected @@ -1403,13 +1377,11 @@ void fb_set_suspend(struct fb_info *info, int state) event.info = info; if (state) { - blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_SUSPEND, &event); + fb_notifier_call_chain(FB_EVENT_SUSPEND, &event); info->state = FBINFO_STATE_SUSPENDED; } else { info->state = FBINFO_STATE_RUNNING; - blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_RESUME, &event); + fb_notifier_call_chain(FB_EVENT_RESUME, &event); } } @@ -1480,9 +1452,7 @@ int fb_new_modelist(struct fb_info *info) if (!list_empty(&info->modelist)) { event.info = info; - err = blocking_notifier_call_chain(&fb_notifier_list, - FB_EVENT_NEW_MODELIST, - &event); + err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); } return err; @@ -1594,8 +1564,6 @@ EXPORT_SYMBOL(fb_blank); EXPORT_SYMBOL(fb_pan_display); EXPORT_SYMBOL(fb_get_buffer_offset); EXPORT_SYMBOL(fb_set_suspend); -EXPORT_SYMBOL(fb_register_client); -EXPORT_SYMBOL(fb_unregister_client); EXPORT_SYMBOL(fb_get_options); MODULE_LICENSE("GPL"); diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c index ff233b84dec4..18ea4a549105 100644 --- a/drivers/video/imacfb.c +++ b/drivers/video/imacfb.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include @@ -28,7 +30,7 @@ typedef enum _MAC_TYPE { M_I20, M_MINI, M_MACBOOK, - M_NEW + M_UNKNOWN } MAC_TYPE; /* --------------------------------------------------------------------- */ @@ -52,10 +54,36 @@ static struct fb_fix_screeninfo imacfb_fix __initdata = { }; static int inverse; -static int model = M_NEW; +static int model = M_UNKNOWN; static int manual_height; static int manual_width; +static int set_system(struct dmi_system_id *id) +{ + printk(KERN_INFO "imacfb: %s detected - set system to %ld\n", + id->ident, (long)id->driver_data); + + model = (long)id->driver_data; + + return 0; +} + +static struct dmi_system_id __initdata dmi_system_table[] = { + { set_system, "iMac4,1", { + 
DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"iMac4,1") }, (void*)M_I17}, + { set_system, "MacBookPro1,1", { + DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro1,1") }, (void*)M_I17}, + { set_system, "MacBook1,1", { + DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"MacBook1,1")}, (void *)M_MACBOOK}, + { set_system, "Macmini1,1", { + DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"Macmini1,1")}, (void *)M_MINI}, + {}, +}; + #define DEFAULT_FB_MEM 1024*1024*16 /* --------------------------------------------------------------------- */ @@ -149,7 +177,6 @@ static int __init imacfb_probe(struct platform_device *dev) screen_info.lfb_linelength = 1472 * 4; screen_info.lfb_base = 0x80010000; break; - case M_NEW: case M_I20: screen_info.lfb_width = 1680; screen_info.lfb_height = 1050; @@ -207,6 +234,10 @@ static int __init imacfb_probe(struct platform_device *dev) size_remap = size_total; imacfb_fix.smem_len = size_remap; +#ifndef __i386__ + screen_info.imacpm_seg = 0; +#endif + if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) { printk(KERN_WARNING "imacfb: cannot reserve video memory at 0x%lx\n", @@ -324,8 +355,16 @@ static int __init imacfb_init(void) int ret; char *option = NULL; - /* ignore error return of fb_get_options */ - fb_get_options("imacfb", &option); + if (!efi_enabled) + return -ENODEV; + if (!dmi_check_system(dmi_system_table)) + return -ENODEV; + if (model == M_UNKNOWN) + return -ENODEV; + + if (fb_get_options("imacfb", &option)) + return -ENODEV; + imacfb_setup(option); ret = platform_driver_register(&imacfb_driver); diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c index 440272ad10e7..7c76e079ca7d 100644 --- a/drivers/video/matrox/g450_pll.c +++ b/drivers/video/matrox/g450_pll.c @@ -331,7 +331,15 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll, tmp |= M1064_XPIXCLKCTRL_PLL_UP; } matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp); +#ifdef __powerpc__ + /* This is necessary to avoid jitter on PowerPC + * (OpenFirmware) systems, but apparently + * introduces jitter, at least on a x86-64 + * using DVI. + * A simple workaround is disable for non-PPC. + */ matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0); +#endif /* __powerpc__ */ matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl); matroxfb_DAC_unlock_irqrestore(flags); diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c index 9f2066f0745a..d4f850117874 100644 --- a/drivers/video/nvidia/nvidia.c +++ b/drivers/video/nvidia/nvidia.c @@ -34,10 +34,6 @@ #include "nv_proto.h" #include "nv_dma.h" -#ifndef CONFIG_PCI /* sanity check */ -#error This driver requires PCI support. 
-#endif - #undef CONFIG_FB_NVIDIA_DEBUG #ifdef CONFIG_FB_NVIDIA_DEBUG #define NVTRACE printk @@ -1303,20 +1299,19 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd, nvidia_save_vga(par, &par->SavedReg); + pci_set_drvdata(pd, info); + nvidia_bl_init(par); if (register_framebuffer(info) < 0) { printk(KERN_ERR PFX "error registering nVidia framebuffer\n"); goto err_out_iounmap_fb; } - pci_set_drvdata(pd, info); printk(KERN_INFO PFX "PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n", info->fix.id, par->FbMapSize / (1024 * 1024), info->fix.smem_start); - nvidia_bl_init(par); - NVTRACE_LEAVE(); return 0; diff --git a/drivers/video/offb.c b/drivers/video/offb.c index ce5f3031b99b..0013311e0564 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c @@ -62,8 +62,6 @@ struct offb_par default_par; * Interface used by the world */ -int offb_init(void); - static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int offb_blank(int blank, struct fb_info *info); @@ -72,11 +70,6 @@ static int offb_blank(int blank, struct fb_info *info); extern boot_infos_t *boot_infos; #endif -static void offb_init_nodriver(struct device_node *); -static void offb_init_fb(const char *name, const char *full_name, - int width, int height, int depth, int pitch, - unsigned long address, struct device_node *dp); - static struct fb_ops offb_ops = { .owner = THIS_MODULE, .fb_setcolreg = offb_setcolreg, @@ -229,123 +222,17 @@ static int offb_blank(int blank, struct fb_info *info) return 0; } - /* - * Initialisation - */ -int __init offb_init(void) +static void __iomem *offb_map_reg(struct device_node *np, int index, + unsigned long offset, unsigned long size) { - struct device_node *dp = NULL, *boot_disp = NULL; + struct resource r; - if (fb_get_options("offb", NULL)) - return -ENODEV; - - for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { - if (get_property(dp, "linux,opened", NULL) && - get_property(dp, "linux,boot-display", NULL)) { - boot_disp = dp; - offb_init_nodriver(dp); - } - } - for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { - if (get_property(dp, "linux,opened", NULL) && - dp != boot_disp) - offb_init_nodriver(dp); - } - - return 0; -} - - -static void __init offb_init_nodriver(struct device_node *dp) -{ - unsigned int len; - int i, width = 640, height = 480, depth = 8, pitch = 640; - unsigned int flags, rsize, addr_prop = 0; - unsigned long max_size = 0; - u64 rstart, address = OF_BAD_ADDR; - u32 *pp, *addrp, *up; - u64 asize; - - pp = (u32 *)get_property(dp, "linux,bootx-depth", &len); - if (pp == NULL) - pp = (u32 *)get_property(dp, "depth", &len); - if (pp && len == sizeof(u32)) - depth = *pp; - - pp = (u32 *)get_property(dp, "linux,bootx-width", &len); - if (pp == NULL) - pp = (u32 *)get_property(dp, "width", &len); - if (pp && len == sizeof(u32)) - width = *pp; - - pp = (u32 *)get_property(dp, "linux,bootx-height", &len); - if (pp == NULL) - pp = (u32 *)get_property(dp, "height", &len); - if (pp && len == sizeof(u32)) - height = *pp; - - pp = (u32 *)get_property(dp, "linux,bootx-linebytes", &len); - if (pp == NULL) - pp = (u32 *)get_property(dp, "linebytes", &len); - if (pp && len == sizeof(u32)) - pitch = *pp; - else - pitch = width * ((depth + 7) / 8); - - rsize = (unsigned long)pitch * (unsigned long)height; - - /* Ok, now we try to figure out the address of the framebuffer. - * - * Unfortunately, Open Firmware doesn't provide a standard way to do - * so. 
All we can do is a dodgy heuristic that happens to work in - * practice. On most machines, the "address" property contains what - * we need, though not on Matrox cards found in IBM machines. What I've - * found that appears to give good results is to go through the PCI - * ranges and pick one that is both big enough and if possible encloses - * the "address" property. If none match, we pick the biggest - */ - up = (u32 *)get_property(dp, "linux,bootx-addr", &len); - if (up == NULL) - up = (u32 *)get_property(dp, "address", &len); - if (up && len == sizeof(u32)) - addr_prop = *up; - - for (i = 0; (addrp = of_get_address(dp, i, &asize, &flags)) - != NULL; i++) { - int match_addrp = 0; - - if (!(flags & IORESOURCE_MEM)) - continue; - if (asize < rsize) - continue; - rstart = of_translate_address(dp, addrp); - if (rstart == OF_BAD_ADDR) - continue; - if (addr_prop && (rstart <= addr_prop) && - ((rstart + asize) >= (addr_prop + rsize))) - match_addrp = 1; - if (match_addrp) { - address = addr_prop; - break; - } - if (rsize > max_size) { - max_size = rsize; - address = OF_BAD_ADDR; - } - - if (address == OF_BAD_ADDR) - address = rstart; - } - if (address == OF_BAD_ADDR && addr_prop) - address = (u64)addr_prop; - if (address != OF_BAD_ADDR) { - /* kludge for valkyrie */ - if (strcmp(dp->name, "valkyrie") == 0) - address += 0x1000; - offb_init_fb(dp->name, dp->full_name, width, height, depth, - pitch, address, dp); - } + if (of_address_to_resource(np, index, &r)) + return 0; + if ((r.start + offset + size) > r.end) + return 0; + return ioremap(r.start + offset, size); } static void __init offb_init_fb(const char *name, const char *full_name, @@ -402,45 +289,39 @@ static void __init offb_init_fb(const char *name, const char *full_name, par->cmap_type = cmap_unknown; if (depth == 8) { - /* Palette hacks disabled for now */ -#if 0 if (dp && !strncmp(name, "ATY,Rage128", 11)) { - unsigned long regbase = dp->addrs[2].address; - par->cmap_adr = ioremap(regbase, 0x1FFF); - par->cmap_type = cmap_r128; + par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); + if (par->cmap_adr) + par->cmap_type = cmap_r128; } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) || !strncmp(name, "ATY,RageM3p12A", 14))) { - unsigned long regbase = - dp->parent->addrs[2].address; - par->cmap_adr = ioremap(regbase, 0x1FFF); - par->cmap_type = cmap_M3A; + par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); + if (par->cmap_adr) + par->cmap_type = cmap_M3A; } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { - unsigned long regbase = - dp->parent->addrs[2].address; - par->cmap_adr = ioremap(regbase, 0x1FFF); - par->cmap_type = cmap_M3B; + par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); + if (par->cmap_adr) + par->cmap_type = cmap_M3B; } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { - unsigned long regbase = dp->addrs[1].address; - par->cmap_adr = ioremap(regbase, 0x1FFF); - par->cmap_type = cmap_radeon; + par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); + if (par->cmap_adr) + par->cmap_type = cmap_radeon; } else if (!strncmp(name, "ATY,", 4)) { unsigned long base = address & 0xff000000UL; par->cmap_adr = ioremap(base + 0x7ff000, 0x1000) + 0xcc0; par->cmap_data = par->cmap_adr + 1; par->cmap_type = cmap_m64; - } else if (device_is_compatible(dp, "pci1014,b7")) { - unsigned long regbase = dp->addrs[0].address; - par->cmap_adr = ioremap(regbase + 0x6000, 0x1000); - par->cmap_type = cmap_gxt2000; + } else if (dp && device_is_compatible(dp, "pci1014,b7")) { + par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); + if (par->cmap_adr) + 
par->cmap_type = cmap_gxt2000; } -#endif - fix->visual = par->cmap_adr ? FB_VISUAL_PSEUDOCOLOR - : FB_VISUAL_STATIC_PSEUDOCOLOR; + fix->visual = (par->cmap_type != cmap_unknown) ? + FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR; } else - fix->visual = /* par->cmap_adr ? FB_VISUAL_DIRECTCOLOR - : */ FB_VISUAL_TRUECOLOR; + fix->visual = FB_VISUAL_TRUECOLOR; var->xoffset = var->yoffset = 0; switch (depth) { @@ -520,5 +401,139 @@ static void __init offb_init_fb(const char *name, const char *full_name, info->node, full_name); } + +static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) +{ + unsigned int len; + int i, width = 640, height = 480, depth = 8, pitch = 640; + unsigned int flags, rsize, addr_prop = 0; + unsigned long max_size = 0; + u64 rstart, address = OF_BAD_ADDR; + u32 *pp, *addrp, *up; + u64 asize; + + pp = (u32 *)get_property(dp, "linux,bootx-depth", &len); + if (pp == NULL) + pp = (u32 *)get_property(dp, "depth", &len); + if (pp && len == sizeof(u32)) + depth = *pp; + + pp = (u32 *)get_property(dp, "linux,bootx-width", &len); + if (pp == NULL) + pp = (u32 *)get_property(dp, "width", &len); + if (pp && len == sizeof(u32)) + width = *pp; + + pp = (u32 *)get_property(dp, "linux,bootx-height", &len); + if (pp == NULL) + pp = (u32 *)get_property(dp, "height", &len); + if (pp && len == sizeof(u32)) + height = *pp; + + pp = (u32 *)get_property(dp, "linux,bootx-linebytes", &len); + if (pp == NULL) + pp = (u32 *)get_property(dp, "linebytes", &len); + if (pp && len == sizeof(u32)) + pitch = *pp; + else + pitch = width * ((depth + 7) / 8); + + rsize = (unsigned long)pitch * (unsigned long)height; + + /* Ok, now we try to figure out the address of the framebuffer. + * + * Unfortunately, Open Firmware doesn't provide a standard way to do + * so. All we can do is a dodgy heuristic that happens to work in + * practice. On most machines, the "address" property contains what + * we need, though not on Matrox cards found in IBM machines. What I've + * found that appears to give good results is to go through the PCI + * ranges and pick one that is both big enough and if possible encloses + * the "address" property. If none match, we pick the biggest + */ + up = (u32 *)get_property(dp, "linux,bootx-addr", &len); + if (up == NULL) + up = (u32 *)get_property(dp, "address", &len); + if (up && len == sizeof(u32)) + addr_prop = *up; + + /* Hack for when BootX is passing us */ + if (no_real_node) + goto skip_addr; + + for (i = 0; (addrp = of_get_address(dp, i, &asize, &flags)) + != NULL; i++) { + int match_addrp = 0; + + if (!(flags & IORESOURCE_MEM)) + continue; + if (asize < rsize) + continue; + rstart = of_translate_address(dp, addrp); + if (rstart == OF_BAD_ADDR) + continue; + if (addr_prop && (rstart <= addr_prop) && + ((rstart + asize) >= (addr_prop + rsize))) + match_addrp = 1; + if (match_addrp) { + address = addr_prop; + break; + } + if (rsize > max_size) { + max_size = rsize; + address = OF_BAD_ADDR; + } + + if (address == OF_BAD_ADDR) + address = rstart; + } + skip_addr: + if (address == OF_BAD_ADDR && addr_prop) + address = (u64)addr_prop; + if (address != OF_BAD_ADDR) { + /* kludge for valkyrie */ + if (strcmp(dp->name, "valkyrie") == 0) + address += 0x1000; + offb_init_fb(no_real_node ? "bootx" : dp->name, + no_real_node ? "display" : dp->full_name, + width, height, depth, pitch, address, + no_real_node ? 
dp : NULL); + } +} + +static int __init offb_init(void) +{ + struct device_node *dp = NULL, *boot_disp = NULL; + + if (fb_get_options("offb", NULL)) + return -ENODEV; + + /* Check if we have a MacOS display without a node spec */ + if (get_property(of_chosen, "linux,bootx-noscreen", NULL) != NULL) { + /* The old code tried to work out which node was the MacOS + * display based on the address. I'm dropping that since the + * lack of a node spec only happens with old BootX versions + * (users can update) and with this code, they'll still get + * a display (just not the palette hacks). + */ + offb_init_nodriver(of_chosen, 1); + } + + for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { + if (get_property(dp, "linux,opened", NULL) && + get_property(dp, "linux,boot-display", NULL)) { + boot_disp = dp; + offb_init_nodriver(dp, 0); + } + } + for (dp = NULL; (dp = of_find_node_by_type(dp, "display"));) { + if (get_property(dp, "linux,opened", NULL) && + dp != boot_disp) + offb_init_nodriver(dp, 0); + } + + return 0; +} + + module_init(offb_init); MODULE_LICENSE("GPL"); diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c index 33dddbae5420..76fc9d355eb7 100644 --- a/drivers/video/riva/fbdev.c +++ b/drivers/video/riva/fbdev.c @@ -2132,6 +2132,9 @@ static int __devinit rivafb_probe(struct pci_dev *pd, fb_destroy_modedb(info->monspecs.modedb); info->monspecs.modedb = NULL; + + pci_set_drvdata(pd, info); + riva_bl_init(info->par); ret = register_framebuffer(info); if (ret < 0) { printk(KERN_ERR PFX @@ -2139,8 +2142,6 @@ static int __devinit rivafb_probe(struct pci_dev *pd, goto err_iounmap_screen_base; } - pci_set_drvdata(pd, info); - printk(KERN_INFO PFX "PCI nVidia %s framebuffer ver %s (%dMB @ 0x%lX)\n", info->fix.id, @@ -2148,8 +2149,6 @@ static int __devinit rivafb_probe(struct pci_dev *pd, info->fix.smem_len / (1024 * 1024), info->fix.smem_start); - riva_bl_init(info->par); - NVTRACE_LEAVE(); return 0; diff --git a/fs/9p/conv.c b/fs/9p/conv.c index 1e898144eb7c..56d88c1a09c5 100644 --- a/fs/9p/conv.c +++ b/fs/9p/conv.c @@ -673,8 +673,10 @@ struct v9fs_fcall *v9fs_create_tcreate(u32 fid, char *name, u32 perm, u8 mode, struct cbuf *bufp = &buffer; size = 4 + 2 + strlen(name) + 4 + 1; /* fid[4] name[s] perm[4] mode[1] */ - if (extended && extension!=NULL) - size += 2 + strlen(extension); /* extension[s] */ + if (extended) { + size += 2 + /* extension[s] */ + (extension == NULL ? 
0 : strlen(extension)); + } fc = v9fs_create_common(bufp, size, TCREATE); if (IS_ERR(fc)) diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 2f580a197b8d..eae50c9d6dc4 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -434,11 +434,11 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir) result = v9fs_t_remove(v9ses, fid, &fcall); if (result < 0) { PRINT_FCALL_ERROR("remove fails", fcall); - } else { - v9fs_put_idpool(fid, &v9ses->fidpool); - v9fs_fid_destroy(v9fid); } + v9fs_put_idpool(fid, &v9ses->fidpool); + v9fs_fid_destroy(v9fid); + kfree(fcall); return result; } diff --git a/fs/adfs/super.c b/fs/adfs/super.c index ba1c88af49fe..82011019494c 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -308,7 +308,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di if (adfs_checkmap(sb, dm)) return dm; - adfs_error(sb, NULL, "map corrupted"); + adfs_error(sb, "map corrupted"); error_free: while (--zone >= 0) diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index fcaeead9696b..50cfca5c7efd 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -512,7 +512,11 @@ befs_utf2nls(struct super_block *sb, const char *in, wchar_t uni; int unilen, utflen; char *result; - int maxlen = in_len; /* The utf8->nls conversion can't make more chars */ + /* The utf8->nls conversion won't make the final nls string bigger + * than the utf one, but if the string is pure ascii they'll have the + * same width and an extra char is needed to save the additional \0 + */ + int maxlen = in_len + 1; befs_debug(sb, "---> utf2nls()"); @@ -588,7 +592,10 @@ befs_nls2utf(struct super_block *sb, const char *in, wchar_t uni; int unilen, utflen; char *result; - int maxlen = 3 * in_len; + /* There're nls characters that will translate to 3-chars-wide UTF-8 + * characters, a additional byte is needed to save the final \0 + * in special cases */ + int maxlen = (3 * in_len) + 1; befs_debug(sb, "---> nls2utf()\n"); diff --git a/fs/block_dev.c b/fs/block_dev.c index 37534573960b..045f98854f14 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -884,6 +884,61 @@ void bd_set_size(struct block_device *bdev, loff_t size) } EXPORT_SYMBOL(bd_set_size); +static int __blkdev_put(struct block_device *bdev, unsigned int subclass) +{ + int ret = 0; + struct inode *bd_inode = bdev->bd_inode; + struct gendisk *disk = bdev->bd_disk; + + mutex_lock_nested(&bdev->bd_mutex, subclass); + lock_kernel(); + if (!--bdev->bd_openers) { + sync_blockdev(bdev); + kill_bdev(bdev); + } + if (bdev->bd_contains == bdev) { + if (disk->fops->release) + ret = disk->fops->release(bd_inode, NULL); + } else { + mutex_lock_nested(&bdev->bd_contains->bd_mutex, + subclass + 1); + bdev->bd_contains->bd_part_count--; + mutex_unlock(&bdev->bd_contains->bd_mutex); + } + if (!bdev->bd_openers) { + struct module *owner = disk->fops->owner; + + put_disk(disk); + module_put(owner); + + if (bdev->bd_contains != bdev) { + kobject_put(&bdev->bd_part->kobj); + bdev->bd_part = NULL; + } + bdev->bd_disk = NULL; + bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; + if (bdev != bdev->bd_contains) + __blkdev_put(bdev->bd_contains, subclass + 1); + bdev->bd_contains = NULL; + } + unlock_kernel(); + mutex_unlock(&bdev->bd_mutex); + bdput(bdev); + return ret; +} + +int blkdev_put(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_NORMAL); +} +EXPORT_SYMBOL(blkdev_put); + +int blkdev_put_partition(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_PARTITION); +} 
+EXPORT_SYMBOL(blkdev_put_partition); + static int blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); @@ -980,7 +1035,7 @@ out_first: bdev->bd_disk = NULL; bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; if (bdev != bdev->bd_contains) - blkdev_put(bdev->bd_contains); + __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE); bdev->bd_contains = NULL; put_disk(disk); module_put(owner); @@ -1079,63 +1134,6 @@ static int blkdev_open(struct inode * inode, struct file * filp) return res; } -static int __blkdev_put(struct block_device *bdev, unsigned int subclass) -{ - int ret = 0; - struct inode *bd_inode = bdev->bd_inode; - struct gendisk *disk = bdev->bd_disk; - - mutex_lock_nested(&bdev->bd_mutex, subclass); - lock_kernel(); - if (!--bdev->bd_openers) { - sync_blockdev(bdev); - kill_bdev(bdev); - } - if (bdev->bd_contains == bdev) { - if (disk->fops->release) - ret = disk->fops->release(bd_inode, NULL); - } else { - mutex_lock_nested(&bdev->bd_contains->bd_mutex, - subclass + 1); - bdev->bd_contains->bd_part_count--; - mutex_unlock(&bdev->bd_contains->bd_mutex); - } - if (!bdev->bd_openers) { - struct module *owner = disk->fops->owner; - - put_disk(disk); - module_put(owner); - - if (bdev->bd_contains != bdev) { - kobject_put(&bdev->bd_part->kobj); - bdev->bd_part = NULL; - } - bdev->bd_disk = NULL; - bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; - if (bdev != bdev->bd_contains) - __blkdev_put(bdev->bd_contains, subclass + 1); - bdev->bd_contains = NULL; - } - unlock_kernel(); - mutex_unlock(&bdev->bd_mutex); - bdput(bdev); - return ret; -} - -int blkdev_put(struct block_device *bdev) -{ - return __blkdev_put(bdev, BD_MUTEX_NORMAL); -} - -EXPORT_SYMBOL(blkdev_put); - -int blkdev_put_partition(struct block_device *bdev) -{ - return __blkdev_put(bdev, BD_MUTEX_PARTITION); -} - -EXPORT_SYMBOL(blkdev_put_partition); - static int blkdev_close(struct inode * inode, struct file * filp) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); diff --git a/fs/buffer.c b/fs/buffer.c index 3660dcb97591..71649ef9b658 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -470,13 +470,18 @@ out: pass does the actual I/O. */ void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) { + struct address_space *mapping = bdev->bd_inode->i_mapping; + + if (mapping->nrpages == 0) + return; + invalidate_bh_lrus(); /* * FIXME: what about destroy_dirty_buffers? * We really want to use invalidate_inode_pages2() for * that, but not until that's cleaned up. 
*/ - invalidate_inode_pages(bdev->bd_inode->i_mapping); + invalidate_inode_pages(mapping); } /* diff --git a/fs/coda/file.c b/fs/coda/file.c index cc66c681bd11..dbfbcfa5b3c0 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -136,10 +136,8 @@ int coda_open(struct inode *coda_inode, struct file *coda_file) coda_vfs_stat.open++; cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL); - if (!cfi) { - unlock_kernel(); + if (!cfi) return -ENOMEM; - } lock_kernel(); diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c index e249cf733a6b..1d30d2ff440f 100644 --- a/fs/efs/symlink.c +++ b/fs/efs/symlink.c @@ -22,7 +22,7 @@ static int efs_symlink_readpage(struct file *file, struct page *page) err = -ENAMETOOLONG; if (size > 2 * EFS_BLOCKSIZE) - goto fail; + goto fail_notlocked; lock_kernel(); /* read first 512 bytes of link target */ @@ -47,6 +47,7 @@ static int efs_symlink_readpage(struct file *file, struct page *page) return 0; fail: unlock_kernel(); +fail_notlocked: SetPageError(page); kunmap(page); unlock_page(page); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 19ffb043abbc..3a3567433b92 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1168,7 +1168,7 @@ static int ep_unlink(struct eventpoll *ep, struct epitem *epi) eexit_1: DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", - current, ep, epi->file, error)); + current, ep, epi->ffd.file, error)); return error; } @@ -1236,7 +1236,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k struct eventpoll *ep = epi->ep; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", - current, epi->file, epi, ep)); + current, epi->ffd.file, epi, ep)); write_lock_irqsave(&ep->lock, flags); diff --git a/fs/exec.c b/fs/exec.c index 8344ba73a2a6..54135df2a966 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -486,8 +486,6 @@ struct file *open_exec(const char *name) if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && S_ISREG(inode->i_mode)) { int err = vfs_permission(&nd, MAY_EXEC); - if (!err && !(inode->i_mode & 0111)) - err = -EACCES; file = ERR_PTR(err); if (!err) { file = nameidata_to_filp(&nd, O_RDONLY); @@ -753,7 +751,7 @@ no_thread_group: write_lock_irq(&tasklist_lock); spin_lock(&oldsighand->siglock); - spin_lock(&newsighand->siglock); + spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING); rcu_assign_pointer(current->sighand, newsighand); recalc_sigpending(); @@ -922,12 +920,6 @@ int prepare_binprm(struct linux_binprm *bprm) int retval; mode = inode->i_mode; - /* - * Check execute perms again - if the caller has CAP_DAC_OVERRIDE, - * generic_permission lets a non-executable through - */ - if (!(mode & 0111)) /* with at least _one_ execute bit set */ - return -EACCES; if (bprm->file->f_op == NULL) return -EACCES; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index f2702cda9779..681dea8f9532 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -775,7 +775,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) if (EXT2_INODE_SIZE(sb) == 0) goto cantfind_ext2; sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); - if (sbi->s_inodes_per_block == 0) + if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0) goto cantfind_ext2; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index a504a40d6d29..063d994bda0b 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -1269,12 +1269,12 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, goal = 
le32_to_cpu(es->s_first_data_block); group_no = (goal - le32_to_cpu(es->s_first_data_block)) / EXT3_BLOCKS_PER_GROUP(sb); + goal_group = group_no; +retry_alloc: gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); if (!gdp) goto io_error; - goal_group = group_no; -retry: free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); /* * if there is not enough free blocks to make a new resevation @@ -1349,7 +1349,7 @@ retry: if (my_rsv) { my_rsv = NULL; group_no = goal_group; - goto retry; + goto retry_alloc; } /* No space left on the device */ *errp = -ENOSPC; diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index f804d5e9d60c..c5ee9f0691e3 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1158,7 +1158,7 @@ retry: ret = PTR_ERR(handle); goto out; } - if (test_opt(inode->i_sb, NOBH)) + if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) ret = nobh_prepare_write(page, from, to, ext3_get_block); else ret = block_prepare_write(page, from, to, ext3_get_block); @@ -1244,7 +1244,7 @@ static int ext3_writeback_commit_write(struct file *file, struct page *page, if (new_i_size > EXT3_I(inode)->i_disksize) EXT3_I(inode)->i_disksize = new_i_size; - if (test_opt(inode->i_sb, NOBH)) + if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) ret = nobh_commit_write(file, page, from, to); else ret = generic_commit_write(file, page, from, to); @@ -1494,7 +1494,7 @@ static int ext3_writeback_writepage(struct page *page, goto out_fail; } - if (test_opt(inode->i_sb, NOBH)) + if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) ret = nobh_writepage(page, ext3_get_block, wbc); else ret = block_write_full_page(page, ext3_get_block, wbc); @@ -2402,14 +2402,15 @@ static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb, struct buffer_head *bh; struct ext3_group_desc * gdp; - - if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO && - ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) || - ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) { - ext3_error(sb, "ext3_get_inode_block", - "bad inode number: %lu", ino); + if (!ext3_valid_inum(sb, ino)) { + /* + * This error is already checked for in namei.c unless we are + * looking at an NFS filehandle, in which case no error + * report is needed + */ return 0; } + block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); if (block_group >= EXT3_SB(sb)->s_groups_count) { ext3_error(sb,"ext3_get_inode_block","group >= groups count"); diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index d9176dba3698..2aa7101b27cd 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1000,7 +1000,12 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str if (bh) { unsigned long ino = le32_to_cpu(de->inode); brelse (bh); - inode = iget(dir->i_sb, ino); + if (!ext3_valid_inum(dir->i_sb, ino)) { + ext3_error(dir->i_sb, "ext3_lookup", + "bad inode number: %lu", ino); + inode = NULL; + } else + inode = iget(dir->i_sb, ino); if (!inode) return ERR_PTR(-EACCES); @@ -1028,7 +1033,13 @@ struct dentry *ext3_get_parent(struct dentry *child) return ERR_PTR(-ENOENT); ino = le32_to_cpu(de->inode); brelse(bh); - inode = iget(child->d_inode->i_sb, ino); + + if (!ext3_valid_inum(child->d_inode->i_sb, ino)) { + ext3_error(child->d_inode->i_sb, "ext3_get_parent", + "bad inode number: %lu", ino); + inode = NULL; + } else + inode = iget(child->d_inode->i_sb, ino); if (!inode) return ERR_PTR(-EACCES); diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index 29cce456c7ce..43886fa00a2a 100644 --- a/fs/freevxfs/vxfs_lookup.c 
+++ b/fs/freevxfs/vxfs_lookup.c @@ -246,6 +246,8 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler) u_long page, npages, block, pblocks, nblocks, offset; loff_t pos; + lock_kernel(); + switch ((long)fp->f_pos) { case 0: if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0) diff --git a/fs/fuse/control.c b/fs/fuse/control.c index a3bce3a77253..46fe60b2da23 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -105,7 +105,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, /* * Add a connection to the control filesystem (if it exists). Caller - * must host fuse_mutex + * must hold fuse_mutex */ int fuse_ctl_add_conn(struct fuse_conn *fc) { @@ -139,7 +139,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc) /* * Remove a connection from the control filesystem (if it exists). - * Caller must host fuse_mutex + * Caller must hold fuse_mutex */ void fuse_ctl_remove_conn(struct fuse_conn *fc) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 72a74cde6de8..409ce6a7cca4 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -14,6 +14,33 @@ #include #include +#if BITS_PER_LONG >= 64 +static inline void fuse_dentry_settime(struct dentry *entry, u64 time) +{ + entry->d_time = time; +} + +static inline u64 fuse_dentry_time(struct dentry *entry) +{ + return entry->d_time; +} +#else +/* + * On 32 bit archs store the high 32 bits of time in d_fsdata + */ +static void fuse_dentry_settime(struct dentry *entry, u64 time) +{ + entry->d_time = time; + entry->d_fsdata = (void *) (unsigned long) (time >> 32); +} + +static u64 fuse_dentry_time(struct dentry *entry) +{ + return (u64) entry->d_time + + ((u64) (unsigned long) entry->d_fsdata << 32); +} +#endif + /* * FUSE caches dentries and attributes with separate timeout. The * time in jiffies until the dentry/attributes are valid is stored in @@ -23,10 +50,13 @@ /* * Calculate the time in jiffies until a dentry/attributes are valid */ -static unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec) +static u64 time_to_jiffies(unsigned long sec, unsigned long nsec) { - struct timespec ts = {sec, nsec}; - return jiffies + timespec_to_jiffies(&ts); + if (sec || nsec) { + struct timespec ts = {sec, nsec}; + return get_jiffies_64() + timespec_to_jiffies(&ts); + } else + return 0; } /* @@ -35,7 +65,8 @@ static unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec) */ static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o) { - entry->d_time = time_to_jiffies(o->entry_valid, o->entry_valid_nsec); + fuse_dentry_settime(entry, + time_to_jiffies(o->entry_valid, o->entry_valid_nsec)); if (entry->d_inode) get_fuse_inode(entry->d_inode)->i_time = time_to_jiffies(o->attr_valid, o->attr_valid_nsec); @@ -47,7 +78,7 @@ static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o) */ void fuse_invalidate_attr(struct inode *inode) { - get_fuse_inode(inode)->i_time = jiffies - 1; + get_fuse_inode(inode)->i_time = 0; } /* @@ -60,7 +91,7 @@ void fuse_invalidate_attr(struct inode *inode) */ static void fuse_invalidate_entry_cache(struct dentry *entry) { - entry->d_time = jiffies - 1; + fuse_dentry_settime(entry, 0); } /* @@ -102,7 +133,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) if (inode && is_bad_inode(inode)) return 0; - else if (time_after(jiffies, entry->d_time)) { + else if (fuse_dentry_time(entry) < get_jiffies_64()) { int err; struct fuse_entry_out outarg; struct fuse_conn *fc; @@ -666,7 +697,7 @@ static int fuse_revalidate(struct dentry 
*entry) if (!fuse_allow_task(fc, current)) return -EACCES; if (get_node_id(inode) != FUSE_ROOT_ID && - time_before_eq(jiffies, fi->i_time)) + fi->i_time >= get_jiffies_64()) return 0; return fuse_do_getattr(inode); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 63614ed16336..5c4fcd1dbf59 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -395,14 +395,16 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, struct fuse_readpages_data data; int err; + err = -EIO; if (is_bad_inode(inode)) - return -EIO; + goto clean_pages_up; data.file = file; data.inode = inode; data.req = fuse_get_req(fc); + err = PTR_ERR(data.req); if (IS_ERR(data.req)) - return PTR_ERR(data.req); + goto clean_pages_up; err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); if (!err) { @@ -412,6 +414,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, fuse_put_request(fc, data.req); } return err; + +clean_pages_up: + put_pages_list(pages); + return err; } static size_t fuse_send_write(struct fuse_req *req, struct file *file, diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0dbf96621841..69c7750d55b8 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -59,7 +59,7 @@ struct fuse_inode { struct fuse_req *forget_req; /** Time in jiffies until the file attributes are valid */ - unsigned long i_time; + u64 i_time; }; /** FUSE specific file data */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index dcaaabd3b9c4..7d25092262ae 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -51,7 +51,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) return NULL; fi = get_fuse_inode(inode); - fi->i_time = jiffies - 1; + fi->i_time = 0; fi->nodeid = 0; fi->nlookup = 0; fi->forget_req = fuse_request_alloc(); diff --git a/fs/inotify_user.c b/fs/inotify_user.c index f2386442adee..017cb0f134d6 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c @@ -187,7 +187,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, { struct inotify_kernel_event *kevent; - kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL); + kevent = kmem_cache_alloc(event_cachep, GFP_NOFS); if (unlikely(!kevent)) return NULL; diff --git a/fs/ioprio.c b/fs/ioprio.c index 93aa5715f224..78b1deae3fa2 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -44,6 +44,9 @@ static int set_task_ioprio(struct task_struct *task, int ioprio) task->ioprio = ioprio; ioc = task->io_context; + /* see wmb() in current_io_context() */ + smp_read_barrier_depends(); + if (ioc && ioc->set_ioprio) ioc->set_ioprio(ioc, ioprio); @@ -111,9 +114,9 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio) continue; ret = set_task_ioprio(p, ioprio); if (ret) - break; + goto free_uid; } while_each_thread(g, p); - +free_uid: if (who) free_uid(user); break; @@ -137,6 +140,29 @@ out: return ret; } +int ioprio_best(unsigned short aprio, unsigned short bprio) +{ + unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); + unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); + + if (!ioprio_valid(aprio)) + return bprio; + if (!ioprio_valid(bprio)) + return aprio; + + if (aclass == IOPRIO_CLASS_NONE) + aclass = IOPRIO_CLASS_BE; + if (bclass == IOPRIO_CLASS_NONE) + bclass = IOPRIO_CLASS_BE; + + if (aclass == bclass) + return min(aprio, bprio); + if (aclass > bclass) + return bprio; + else + return aprio; +} + asmlinkage long sys_ioprio_get(int which, int who) { struct task_struct *g, *p; diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 0971814c38b8..42da60784311 100644 --- a/fs/jbd/commit.c 
+++ b/fs/jbd/commit.c @@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal) struct buffer_head *bh = jh2bh(jh); jbd_lock_bh_state(bh); - kfree(jh->b_committed_data); + jbd_slab_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; jbd_unlock_bh_state(bh); } @@ -745,14 +745,14 @@ restart_loop: * Otherwise, we can just throw away the frozen data now. */ if (jh->b_committed_data) { - kfree(jh->b_committed_data); + jbd_slab_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; if (jh->b_frozen_data) { jh->b_committed_data = jh->b_frozen_data; jh->b_frozen_data = NULL; } } else if (jh->b_frozen_data) { - kfree(jh->b_frozen_data); + jbd_slab_free(jh->b_frozen_data, bh->b_size); jh->b_frozen_data = NULL; } diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 8c9b28dff119..f66724ce443a 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit); static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); static void __journal_abort_soft (journal_t *journal, int errno); +static int journal_create_jbd_slab(size_t slab_size); /* * Helper function used to manage commit timeouts @@ -328,10 +329,10 @@ repeat: char *tmp; jbd_unlock_bh_state(bh_in); - tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS); + tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS); jbd_lock_bh_state(bh_in); if (jh_in->b_frozen_data) { - kfree(tmp); + jbd_slab_free(tmp, bh_in->b_size); goto repeat; } @@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal) int journal_load(journal_t *journal) { int err; + journal_superblock_t *sb; err = load_superblock(journal); if (err) return err; + sb = journal->j_superblock; /* If this is a V2 superblock, then we have to check the * features flags on it. */ if (journal->j_format_version >= 2) { - journal_superblock_t *sb = journal->j_superblock; - if ((sb->s_feature_ro_compat & ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || (sb->s_feature_incompat & @@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal) } } + /* + * Create a slab for this blocksize + */ + err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize)); + if (err) + return err; + /* Let the recovery code check whether it needs to recover any * data from the journal. */ if (journal_recover(journal)) @@ -1611,6 +1619,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0)); } +/* + * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed + * and allocate frozen and commit buffers from these slabs. + * + * Reason for doing this is to avoid, SLAB_DEBUG - since it could + * cause bh to cross page boundary. + */ + +#define JBD_MAX_SLABS 5 +#define JBD_SLAB_INDEX(size) (size >> 11) + +static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; +static const char *jbd_slab_names[JBD_MAX_SLABS] = { + "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" +}; + +static void journal_destroy_jbd_slabs(void) +{ + int i; + + for (i = 0; i < JBD_MAX_SLABS; i++) { + if (jbd_slab[i]) + kmem_cache_destroy(jbd_slab[i]); + jbd_slab[i] = NULL; + } +} + +static int journal_create_jbd_slab(size_t slab_size) +{ + int i = JBD_SLAB_INDEX(slab_size); + + BUG_ON(i >= JBD_MAX_SLABS); + + /* + * Check if we already have a slab created for this size + */ + if (jbd_slab[i]) + return 0; + + /* + * Create a slab and force alignment to be same as slabsize - + * this will make sure that allocations won't cross the page + * boundary. 
+ */ + jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], + slab_size, slab_size, 0, NULL, NULL); + if (!jbd_slab[i]) { + printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); + return -ENOMEM; + } + return 0; +} + +void * jbd_slab_alloc(size_t size, gfp_t flags) +{ + int idx; + + idx = JBD_SLAB_INDEX(size); + BUG_ON(jbd_slab[idx] == NULL); + return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL); +} + +void jbd_slab_free(void *ptr, size_t size) +{ + int idx; + + idx = JBD_SLAB_INDEX(size); + BUG_ON(jbd_slab[idx] == NULL); + kmem_cache_free(jbd_slab[idx], ptr); +} + /* * Journal_head storage management */ @@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh) printk(KERN_WARNING "%s: freeing " "b_frozen_data\n", __FUNCTION__); - kfree(jh->b_frozen_data); + jbd_slab_free(jh->b_frozen_data, bh->b_size); } if (jh->b_committed_data) { printk(KERN_WARNING "%s: freeing " "b_committed_data\n", __FUNCTION__); - kfree(jh->b_committed_data); + jbd_slab_free(jh->b_committed_data, bh->b_size); } bh->b_private = NULL; jh->b_bh = NULL; /* debug, really */ @@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void) journal_destroy_revoke_caches(); journal_destroy_journal_head_cache(); journal_destroy_handle_cache(); + journal_destroy_jbd_slabs(); } static int __init journal_init(void) diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 508b2ea91f43..de2e4cbbf79a 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -666,8 +666,9 @@ repeat: if (!frozen_buffer) { JBUFFER_TRACE(jh, "allocate memory for buffer"); jbd_unlock_bh_state(bh); - frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size, - GFP_NOFS); + frozen_buffer = + jbd_slab_alloc(jh2bh(jh)->b_size, + GFP_NOFS); if (!frozen_buffer) { printk(KERN_EMERG "%s: OOM for frozen_buffer\n", @@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh) repeat: if (!jh->b_committed_data) { - committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS); + committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS); if (!committed_data) { printk(KERN_EMERG "%s: No memory for committed data\n", __FUNCTION__); @@ -906,7 +907,7 @@ repeat: out: journal_put_journal_head(jh); if (unlikely(committed_data)) - kfree(committed_data); + jbd_slab_free(committed_data, bh->b_size); return err; } diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 43e3f566aad6..a223cf4faa9b 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -168,16 +168,15 @@ void jfs_dirty_inode(struct inode *inode) set_cflag(COMMIT_Dirty, inode); } -static int -jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, - struct buffer_head *bh_result, int create) +int jfs_get_block(struct inode *ip, sector_t lblock, + struct buffer_head *bh_result, int create) { s64 lblock64 = lblock; int rc = 0; xad_t xad; s64 xaddr; int xflag; - s32 xlen = max_blocks; + s32 xlen = bh_result->b_size >> ip->i_blkbits; /* * Take appropriate lock on inode @@ -188,7 +187,7 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, IREAD_LOCK(ip); if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && - (!xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)) && + (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) && xaddr) { if (xflag & XAD_NOTRECORDED) { if (!create) @@ -255,13 +254,6 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, return rc; } -static int jfs_get_block(struct inode *ip, sector_t lblock, - struct buffer_head *bh_result, int create) -{ 
- return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits, - bh_result, create); -} - static int jfs_writepage(struct page *page, struct writeback_control *wbc) { return nobh_writepage(page, jfs_get_block, wbc); diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index b5c7da6190dc..1fc48df670c8 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h @@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t); extern void jfs_free_zero_link(struct inode *); extern struct dentry *jfs_get_parent(struct dentry *dentry); extern void jfs_set_inode_flags(struct inode *); +extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); extern const struct address_space_operations jfs_aops; extern struct inode_operations jfs_dir_inode_operations; diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 4f6cfebc82db..143bcd1d5eaa 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -298,7 +299,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, break; } -#if defined(CONFIG_QUOTA) +#ifdef CONFIG_QUOTA case Opt_quota: case Opt_usrquota: *flag |= JFS_USRQUOTA; @@ -597,7 +598,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) if (sbi->flag & JFS_NOINTEGRITY) seq_puts(seq, ",nointegrity"); -#if defined(CONFIG_QUOTA) +#ifdef CONFIG_QUOTA if (sbi->flag & JFS_USRQUOTA) seq_puts(seq, ",usrquota"); @@ -608,6 +609,113 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) return 0; } +#ifdef CONFIG_QUOTA + +/* Read data from quotafile - avoid pagecache and such because we cannot afford + * acquiring the locks... As quota files are never truncated and quota code + * itself serializes the operations (and noone else should touch the files) + * we don't have to be afraid of races */ +static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, + size_t len, loff_t off) +{ + struct inode *inode = sb_dqopt(sb)->files[type]; + sector_t blk = off >> sb->s_blocksize_bits; + int err = 0; + int offset = off & (sb->s_blocksize - 1); + int tocopy; + size_t toread; + struct buffer_head tmp_bh; + struct buffer_head *bh; + loff_t i_size = i_size_read(inode); + + if (off > i_size) + return 0; + if (off+len > i_size) + len = i_size-off; + toread = len; + while (toread > 0) { + tocopy = sb->s_blocksize - offset < toread ? + sb->s_blocksize - offset : toread; + + tmp_bh.b_state = 0; + tmp_bh.b_size = 1 << inode->i_blkbits; + err = jfs_get_block(inode, blk, &tmp_bh, 0); + if (err) + return err; + if (!buffer_mapped(&tmp_bh)) /* A hole? */ + memset(data, 0, tocopy); + else { + bh = sb_bread(sb, tmp_bh.b_blocknr); + if (!bh) + return -EIO; + memcpy(data, bh->b_data+offset, tocopy); + brelse(bh); + } + offset = 0; + toread -= tocopy; + data += tocopy; + blk++; + } + return len; +} + +/* Write to quotafile */ +static ssize_t jfs_quota_write(struct super_block *sb, int type, + const char *data, size_t len, loff_t off) +{ + struct inode *inode = sb_dqopt(sb)->files[type]; + sector_t blk = off >> sb->s_blocksize_bits; + int err = 0; + int offset = off & (sb->s_blocksize - 1); + int tocopy; + size_t towrite = len; + struct buffer_head tmp_bh; + struct buffer_head *bh; + + mutex_lock(&inode->i_mutex); + while (towrite > 0) { + tocopy = sb->s_blocksize - offset < towrite ? 
+ sb->s_blocksize - offset : towrite; + + tmp_bh.b_state = 0; + tmp_bh.b_size = 1 << inode->i_blkbits; + err = jfs_get_block(inode, blk, &tmp_bh, 1); + if (err) + goto out; + if (offset || tocopy != sb->s_blocksize) + bh = sb_bread(sb, tmp_bh.b_blocknr); + else + bh = sb_getblk(sb, tmp_bh.b_blocknr); + if (!bh) { + err = -EIO; + goto out; + } + lock_buffer(bh); + memcpy(bh->b_data+offset, data, tocopy); + flush_dcache_page(bh->b_page); + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + unlock_buffer(bh); + brelse(bh); + offset = 0; + towrite -= tocopy; + data += tocopy; + blk++; + } +out: + if (len == towrite) + return err; + if (inode->i_size < off+len-towrite) + i_size_write(inode, off+len-towrite); + inode->i_version++; + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); + mutex_unlock(&inode->i_mutex); + return len - towrite; +} + +#endif + static struct super_operations jfs_super_operations = { .alloc_inode = jfs_alloc_inode, .destroy_inode = jfs_destroy_inode, @@ -621,7 +729,11 @@ static struct super_operations jfs_super_operations = { .unlockfs = jfs_unlockfs, .statfs = jfs_statfs, .remount_fs = jfs_remount, - .show_options = jfs_show_options + .show_options = jfs_show_options, +#ifdef CONFIG_QUOTA + .quota_read = jfs_quota_read, + .quota_write = jfs_quota_write, +#endif }; static struct export_operations jfs_export_operations = { diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index baf5ae513481..c9d419703cf3 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -638,9 +638,6 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) if (task->tk_status < 0) { /* RPC error: Re-insert for retransmission */ timeout = 10 * HZ; - } else if (block->b_done) { - /* Block already removed, kill it for real */ - timeout = 0; } else { /* Call was successful, now wait for client callback */ timeout = 60 * HZ; @@ -709,13 +706,10 @@ nlmsvc_retry_blocked(void) break; if (time_after(block->b_when,jiffies)) break; - dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n", - block, block->b_when, block->b_done); + dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", + block, block->b_when); kref_get(&block->b_count); - if (block->b_done) - nlmsvc_unlink_block(block); - else - nlmsvc_grant_blocked(block); + nlmsvc_grant_blocked(block); nlmsvc_release_block(block); } diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 2a4df9b3779a..01b4db9e5466 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -237,19 +237,22 @@ static int nlm_traverse_files(struct nlm_host *host, int action) { struct nlm_file *file, **fp; - int i; + int i, ret = 0; mutex_lock(&nlm_file_mutex); for (i = 0; i < FILE_NRHASH; i++) { fp = nlm_files + i; while ((file = *fp) != NULL) { + file->f_count++; + mutex_unlock(&nlm_file_mutex); + /* Traverse locks, blocks and shares of this file * and update file->f_locks count */ - if (nlm_inspect_file(host, file, action)) { - mutex_unlock(&nlm_file_mutex); - return 1; - } + if (nlm_inspect_file(host, file, action)) + ret = 1; + mutex_lock(&nlm_file_mutex); + file->f_count--; /* No more references to this file. Let go of it. 
*/ if (!file->f_blocks && !file->f_locks && !file->f_shares && !file->f_count) { @@ -262,7 +265,7 @@ nlm_traverse_files(struct nlm_host *host, int action) } } mutex_unlock(&nlm_file_mutex); - return 0; + return ret; } /* diff --git a/fs/locks.c b/fs/locks.c index b0b41a64e10b..d7c53392cac1 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1421,8 +1421,9 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp) if (!leases_enable) goto out; - error = lease_alloc(filp, arg, &fl); - if (error) + error = -ENOMEM; + fl = locks_alloc_lock(); + if (fl == NULL) goto out; locks_copy_lock(fl, lease); @@ -1430,6 +1431,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp) locks_insert_lock(before, fl); *flp = fl; + error = 0; out: return error; } diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 9ea91c5eeb7b..330ff9fc7cf0 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -204,6 +204,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) /* * Allocate the buffer map to keep the superblock small. */ + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) + goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kmalloc(i, GFP_KERNEL); if (!map) @@ -263,7 +265,7 @@ out_no_root: out_no_bitmap: printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); - out_freemap: +out_freemap: for (i = 0; i < sbi->s_imap_blocks; i++) brelse(sbi->s_imap[i]); for (i = 0; i < sbi->s_zmap_blocks; i++) @@ -276,11 +278,16 @@ out_no_map: printk("MINIX-fs: can't allocate map\n"); goto out_release; +out_illegal_sb: + if (!silent) + printk("MINIX-fs: bad superblock\n"); + goto out_release; + out_no_fs: if (!silent) printk("VFS: Can't find a Minix or Minix V2 filesystem " "on device %s\n", s->s_id); - out_release: +out_release: brelse(bh); goto out; @@ -290,7 +297,7 @@ out_bad_hblock: out_bad_sb: printk("MINIX-fs: unable to read superblock\n"); - out: +out: s->s_fs_info = NULL; kfree(sbi); return -EINVAL; diff --git a/fs/namei.c b/fs/namei.c index e01070d7bf58..432d6bc6fab0 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -159,7 +159,7 @@ char * getname(const char __user * filename) #ifdef CONFIG_AUDITSYSCALL void putname(const char *name) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) audit_putname(name); else __putname(name); @@ -227,10 +227,10 @@ int generic_permission(struct inode *inode, int mask, int permission(struct inode *inode, int mask, struct nameidata *nd) { + umode_t mode = inode->i_mode; int retval, submask; if (mask & MAY_WRITE) { - umode_t mode = inode->i_mode; /* * Nobody gets write access to a read-only fs. @@ -247,6 +247,13 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) } + /* + * MAY_EXEC on regular files requires special handling: We override + * filesystem execute permissions if the mode bits aren't set. + */ + if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO)) + return -EACCES; + /* Ordinary permission routines do not understand MAY_APPEND. 
*/ submask = mask & ~MAY_APPEND; if (inode->i_op && inode->i_op->permission) @@ -1125,7 +1132,7 @@ static int fastcall do_path_lookup(int dfd, const char *name, retval = link_path_walk(name, nd); out: if (likely(retval == 0)) { - if (unlikely(current->audit_context && nd && nd->dentry && + if (unlikely(!audit_dummy_context() && nd && nd->dentry && nd->dentry->d_inode)) audit_inode(name, nd->dentry->d_inode); } @@ -1357,7 +1364,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir) return -ENOENT; BUG_ON(victim->d_parent->d_inode != dir); - audit_inode_child(victim->d_name.name, victim->d_inode, dir->i_ino); + audit_inode_child(victim->d_name.name, victim->d_inode, dir); error = permission(dir,MAY_WRITE | MAY_EXEC, NULL); if (error) @@ -1659,6 +1666,7 @@ do_last: * It already exists. */ mutex_unlock(&dir->d_inode->i_mutex); + audit_inode_update(path.dentry->d_inode); error = -EEXIST; if (flag & O_EXCL) @@ -1669,6 +1677,7 @@ do_last: if (flag & O_NOFOLLOW) goto exit_dput; } + error = -ENOENT; if (!path.dentry->d_inode) goto exit_dput; @@ -1765,6 +1774,8 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir) if (nd->last_type != LAST_NORM) goto fail; nd->flags &= ~LOOKUP_PARENT; + nd->flags |= LOOKUP_CREATE; + nd->intent.open.flags = O_EXCL; /* * Do the final lookup. diff --git a/fs/nfs/file.c b/fs/nfs/file.c index cc2b874ad5a4..48e892880d5b 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -312,7 +312,13 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset) static int nfs_release_page(struct page *page, gfp_t gfp) { - return !nfs_wb_page(page->mapping->host, page); + if (gfp & __GFP_FS) + return !nfs_wb_page(page->mapping->host, page); + else + /* + * Avoid deadlock on nfs_wait_on_request(). + */ + return 0; } const struct address_space_operations nfs_file_aops = { diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index b81e7ed3c902..07a5dd57646e 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -130,9 +130,7 @@ nfs_idmap_delete(struct nfs4_client *clp) if (!idmap) return; - dput(idmap->idmap_dentry); - idmap->idmap_dentry = NULL; - rpc_unlink(idmap->idmap_path); + rpc_unlink(idmap->idmap_dentry); clp->cl_idmap = NULL; kfree(idmap); } diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 19b98ca468eb..86b3169c8cac 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -51,7 +51,7 @@ char *nfs_path(const char *base, const struct dentry *dentry, namelen = dentry->d_name.len; buflen -= namelen + 1; if (buflen < 0) - goto Elong; + goto Elong_unlock; end -= namelen; memcpy(end, dentry->d_name.name, namelen); *--end = '/'; @@ -68,6 +68,8 @@ char *nfs_path(const char *base, const struct dentry *dentry, end -= namelen; memcpy(end, base, namelen); return end; +Elong_unlock: + spin_unlock(&dcache_lock); Elong: return ERR_PTR(-ENAMETOOLONG); } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e6ee97f19d81..153898e1331f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2668,7 +2668,7 @@ out: nfs4_set_cached_acl(inode, acl); } -static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) +static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) { struct page *pages[NFS4ACL_MAXPAGES]; struct nfs_getaclargs args = { @@ -2721,6 +2721,19 @@ out_free: return ret; } +static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) +{ + struct nfs4_exception exception = { }; + ssize_t ret; + do { + ret = __nfs4_get_acl_uncached(inode, buf, buflen); + if 
(ret >= 0) + break; + ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); + } while (exception.retry); + return ret; +} + static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) { struct nfs_server *server = NFS_SERVER(inode); @@ -2737,7 +2750,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) return nfs4_get_acl_uncached(inode, buf, buflen); } -static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) +static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) { struct nfs_server *server = NFS_SERVER(inode); struct page *pages[NFS4ACL_MAXPAGES]; @@ -2763,6 +2776,18 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen return ret; } +static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), + __nfs4_proc_set_acl(inode, buf, buflen), + &exception); + } while (exception.retry); + return err; +} + static int nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) { diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 1750d996f49f..730ec8fb31c6 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n struct kvec *iov = rcvbuf->head; unsigned int nr, pglen = rcvbuf->page_len; uint32_t *end, *entry, *p, *kaddr; - uint32_t len, attrlen; + uint32_t len, attrlen, xlen; int hdrlen, recvd, status; status = decode_op_hdr(xdr, OP_READDIR); @@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); - end = (uint32_t *) ((char *)p + pglen + readdir->pgbase); + end = p + ((pglen + readdir->pgbase) >> 2); entry = p; for (nr = 0; *p++; nr++) { - if (p + 3 > end) + if (end - p < 3) goto short_pkt; dprintk("cookie = %Lu, ", *((unsigned long long *)p)); p += 2; /* cookie */ @@ -3389,18 +3389,19 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); goto err_unmap; } + xlen = XDR_QUADLEN(len); + if (end - p < xlen + 1) + goto short_pkt; dprintk("filename = %*s\n", len, (char *)p); - p += XDR_QUADLEN(len); - if (p + 1 > end) - goto short_pkt; + p += xlen; len = ntohl(*p++); /* bitmap length */ + if (end - p < len + 1) + goto short_pkt; p += len; - if (p + 1 > end) - goto short_pkt; attrlen = XDR_QUADLEN(ntohl(*p++)); - p += attrlen; /* attributes */ - if (p + 2 > end) + if (end - p < attrlen + 2) goto short_pkt; + p += attrlen; /* attributes */ entry = p; } if (!nr && (entry[0] != 0 || entry[1] == 0)) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 52bf634260a1..da9cf11c326f 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -63,7 +63,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) return p; } -void nfs_readdata_free(struct nfs_read_data *p) +static void nfs_readdata_free(struct nfs_read_data *p) { if (p && (p->pagevec != &p->page_array[0])) kfree(p->pagevec); @@ -116,10 +116,17 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; base &= ~PAGE_CACHE_MASK; pglen = PAGE_CACHE_SIZE - base; - if (pglen < remainder) + for (;;) { + if (remainder <= pglen) { + 
memclear_highpage_flush(*pages, base, remainder); + break; + } memclear_highpage_flush(*pages, base, pglen); - else - memclear_highpage_flush(*pages, base, remainder); + pages++; + remainder -= pglen; + pglen = PAGE_CACHE_SIZE; + base = 0; + } } /* @@ -476,6 +483,8 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) unsigned int base = data->args.pgbase; struct page **pages; + if (data->res.eof) + count = data->args.count; if (unlikely(count == 0)) return; pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; @@ -483,11 +492,7 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) count += base; for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) SetPageUptodate(*pages); - /* - * Was this an eof or a short read? If the latter, don't mark the page - * as uptodate yet. - */ - if (count > 0 && (data->res.eof || data->args.count == data->res.count)) + if (count != 0) SetPageUptodate(*pages); } @@ -502,6 +507,8 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data) count += base; for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) SetPageError(*pages); + if (count != 0) + SetPageError(*pages); } /* diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 86bac6a5008e..50774991f8d5 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -137,7 +137,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) return p; } -void nfs_writedata_free(struct nfs_write_data *p) +static void nfs_writedata_free(struct nfs_write_data *p) { if (p && (p->pagevec != &p->page_array[0])) kfree(p->pagevec); diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index ecc439d2565f..501d83884530 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -187,6 +187,11 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) goto out; } + /* Set user creds for this exportpoint */ + error = nfserrno(nfsd_setuser(rqstp, exp)); + if (error) + goto out; + /* * Look up the dentry using the NFS file handle. 
*/ @@ -241,16 +246,17 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) dprintk("nfsd: fh_verify - just checking\n"); dentry = fhp->fh_dentry; exp = fhp->fh_export; + /* Set user creds for this exportpoint; necessary even + * in the "just checking" case because this may be a + * filehandle that was created by fh_compose, and that + * is about to be used in another nfsv4 compound + * operation */ + error = nfserrno(nfsd_setuser(rqstp, exp)); + if (error) + goto out; } cache_get(&exp->h); - /* Set user creds for this exportpoint; necessary even in the "just - * checking" case because this may be a filehandle that was created by - * fh_compose, and that is about to be used in another nfsv4 compound - * operation */ - error = nfserrno(nfsd_setuser(rqstp, exp)); - if (error) - goto out; error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type); if (error) diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 1b8346dd0572..9503240ef0e5 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2375,7 +2375,6 @@ leave: mlog(0, "returning %d\n", ret); return ret; } -EXPORT_SYMBOL_GPL(dlm_migrate_lockres); int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) { diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index b0c3134f4f70..37be4b2e0d4a 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -155,7 +155,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, else status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); - if (status != DLM_NORMAL) + if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node)) goto leave; /* By now this has been masked out of cancel requests. */ @@ -183,8 +183,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, spin_lock(&lock->spinlock); /* if the master told us the lock was already granted, * let the ast handle all of these actions */ - if (status == DLM_NORMAL && - lksb->status == DLM_CANCELGRANT) { + if (status == DLM_CANCELGRANT) { actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); @@ -349,14 +348,9 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, vec, veclen, owner, &status); if (tmpret >= 0) { // successfully sent and received - if (status == DLM_CANCELGRANT) - ret = DLM_NORMAL; - else if (status == DLM_FORWARD) { + if (status == DLM_FORWARD) mlog(0, "master was in-progress. retry\n"); - ret = DLM_FORWARD; - } else - ret = status; - lksb->status = status; + ret = status; } else { mlog_errno(tmpret); if (dlm_is_host_down(tmpret)) { @@ -372,7 +366,6 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, /* something bad. 
this will BUG in ocfs2 */ ret = dlm_err_to_dlm_status(tmpret); } - lksb->status = ret; } return ret; @@ -483,6 +476,10 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data) /* lock was found on queue */ lksb = lock->lksb; + if (flags & (LKM_VALBLK|LKM_PUT_LVB) && + lock->ml.type != LKM_EXMODE) + flags &= ~(LKM_VALBLK|LKM_PUT_LVB); + /* unlockast only called on originating node */ if (flags & LKM_PUT_LVB) { lksb->flags |= DLM_LKSB_PUT_LVB; @@ -507,11 +504,8 @@ not_found: "cookie=%u:%llu\n", dlm_get_lock_cookie_node(unlock->cookie), dlm_get_lock_cookie_seq(unlock->cookie)); - else { - /* send the lksb->status back to the other node */ - status = lksb->status; + else dlm_lock_put(lock); - } leave: if (res) @@ -533,26 +527,22 @@ static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, if (dlm_lock_on_list(&res->blocked, lock)) { /* cancel this outright */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK); } else if (dlm_lock_on_list(&res->converting, lock)) { /* cancel the request, put back on granted */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK | DLM_UNLOCK_REGRANT_LOCK | DLM_UNLOCK_CLEAR_CONVERT_TYPE); } else if (dlm_lock_on_list(&res->granted, lock)) { - /* too late, already granted. DLM_CANCELGRANT */ - lksb->status = DLM_CANCELGRANT; - status = DLM_NORMAL; + /* too late, already granted. */ + status = DLM_CANCELGRANT; *actions = DLM_UNLOCK_CALL_AST; } else { mlog(ML_ERROR, "lock to cancel is not on any list!\n"); - lksb->status = DLM_IVLOCKID; status = DLM_IVLOCKID; *actions = 0; } @@ -569,13 +559,11 @@ static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, /* unlock request */ if (!dlm_lock_on_list(&res->granted, lock)) { - lksb->status = DLM_DENIED; status = DLM_DENIED; dlm_error(status); *actions = 0; } else { /* unlock granted lock */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_FREE_LOCK | DLM_UNLOCK_CALL_AST | @@ -632,6 +620,8 @@ retry: spin_lock(&res->spinlock); is_master = (res->owner == dlm->node_num); + if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE) + flags &= ~LKM_VALBLK; spin_unlock(&res->spinlock); if (is_master) { @@ -665,7 +655,7 @@ retry: } if (call_ast) { - mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status); + mlog(0, "calling unlockast(%p, %d)\n", data, status); if (is_master) { /* it is possible that there is one last bast * pending. make sure it is flushed, then @@ -677,9 +667,12 @@ retry: wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock)); } - (*unlockast)(data, lksb->status); + (*unlockast)(data, status); } + if (status == DLM_CANCELGRANT) + status = DLM_NORMAL; + if (status == DLM_NORMAL) { mlog(0, "kicking the thread\n"); dlm_kick_thread(dlm, res); diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 0d1973ea32b0..1f17a4d08287 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -840,6 +840,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, mlog(0, "Allocating %u clusters for a new window.\n", ocfs2_local_alloc_window_bits(osb)); + + /* Instruct the allocation code to try the most recently used + * cluster group. We'll re-record the group used this pass + * below. */ + ac->ac_last_group = osb->la_last_gd; + /* we used the generic suballoc reserve function, but we set * everything up nicely, so there's no reason why we can't use * the more specific cluster api to claim bits. 
*/ @@ -852,6 +858,8 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, goto bail; } + osb->la_last_gd = ac->ac_last_group; + la->la_bm_off = cpu_to_le32(cluster_off); alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count); /* just in case... In the future when we find space ourselves, diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index cd4a6f253d13..0462a7f4e21b 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -197,7 +197,6 @@ struct ocfs2_super struct ocfs2_node_map recovery_map; struct ocfs2_node_map umount_map; - u32 num_clusters; u64 root_blkno; u64 system_dir_blkno; u64 bitmap_blkno; @@ -237,6 +236,7 @@ struct ocfs2_super enum ocfs2_local_alloc_state local_alloc_state; struct buffer_head *local_alloc_bh; + u64 la_last_gd; /* Next two fields are for local node slot recovery during * mount. */ diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 195523090c87..9d91e66f51a9 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -70,12 +70,6 @@ static int ocfs2_block_group_search(struct inode *inode, struct buffer_head *group_bh, u32 bits_wanted, u32 min_bits, u16 *bit_off, u16 *bits_found); -static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, - u32 bits_wanted, - u32 min_bits, - u16 *bit_off, - unsigned int *num_bits, - u64 *bg_blkno); static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, struct ocfs2_alloc_context *ac, u32 bits_wanted, @@ -85,11 +79,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, u64 *bg_blkno); static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, int nr); -static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, - struct buffer_head *bg_bh, - unsigned int bits_wanted, - u16 *bit_off, - u16 *bits_found); static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, @@ -143,6 +132,64 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); } +/* somewhat more expensive than our other checks, so use sparingly. 
*/ +static int ocfs2_check_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct ocfs2_group_desc *gd) +{ + unsigned int max_bits; + + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd); + return -EIO; + } + + if (di->i_blkno != gd->bg_parent_dinode) { + ocfs2_error(sb, "Group descriptor # %llu has bad parent " + "pointer (%llu, expected %llu)", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), + (unsigned long long)le64_to_cpu(di->i_blkno)); + return -EIO; + } + + max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc); + if (le16_to_cpu(gd->bg_bits) > max_bits) { + ocfs2_error(sb, "Group descriptor # %llu has bit count of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_chain) >= + le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { + ocfs2_error(sb, "Group descriptor # %llu has bad chain %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_chain)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { + ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " + "claims that %u are free", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + le16_to_cpu(gd->bg_free_bits_count)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { + ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " + "max bitmap bits of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + 8 * le16_to_cpu(gd->bg_size)); + return -EIO; + } + + return 0; +} + static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, struct inode *alloc_inode, struct buffer_head *bg_bh, @@ -663,6 +710,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, struct buffer_head *bg_bh, unsigned int bits_wanted, + unsigned int total_bits, u16 *bit_off, u16 *bits_found) { @@ -679,10 +727,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, found = start = best_offset = best_size = 0; bitmap = bg->bg_bitmap; - while((offset = ocfs2_find_next_zero_bit(bitmap, - le16_to_cpu(bg->bg_bits), - start)) != -1) { - if (offset == le16_to_cpu(bg->bg_bits)) + while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) { + if (offset == total_bits) break; if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) { @@ -911,14 +957,35 @@ static int ocfs2_cluster_group_search(struct inode *inode, { int search = -ENOSPC; int ret; - struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data; + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data; u16 tmp_off, tmp_found; + unsigned int max_bits, gd_cluster_off; BUG_ON(!ocfs2_is_cluster_bitmap(inode)); - if (bg->bg_free_bits_count) { + if (gd->bg_free_bits_count) { + max_bits = le16_to_cpu(gd->bg_bits); + + /* Tail groups in cluster bitmaps which aren't cpg + * aligned are prone to partial extension by a failed + * fs resize. If the file system resize never got to + * update the dinode cluster count, then we don't want + * to trust any clusters past it, regardless of what + * the group descriptor says.
*/ + gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb, + le64_to_cpu(gd->bg_blkno)); + if ((gd_cluster_off + max_bits) > + OCFS2_I(inode)->ip_clusters) { + max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; + mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + OCFS2_I(inode)->ip_clusters, max_bits); + } + ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), group_bh, bits_wanted, + max_bits, &tmp_off, &tmp_found); if (ret) return ret; @@ -951,17 +1018,109 @@ static int ocfs2_block_group_search(struct inode *inode, if (bg->bg_free_bits_count) ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), group_bh, bits_wanted, + le16_to_cpu(bg->bg_bits), bit_off, bits_found); return ret; } +static int ocfs2_alloc_dinode_update_counts(struct inode *inode, + struct ocfs2_journal_handle *handle, + struct buffer_head *di_bh, + u32 num_bits, + u16 chain) +{ + int ret; + u32 tmp_used; + struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; + struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; + + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); + di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used); + le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits); + + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + +static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, + u32 bits_wanted, + u32 min_bits, + u16 *bit_off, + unsigned int *num_bits, + u64 gd_blkno, + u16 *bits_left) +{ + int ret; + u16 found; + struct buffer_head *group_bh = NULL; + struct ocfs2_group_desc *gd; + struct inode *alloc_inode = ac->ac_inode; + struct ocfs2_journal_handle *handle = ac->ac_handle; + + ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno, + &group_bh, OCFS2_BH_CACHED, alloc_inode); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + gd = (struct ocfs2_group_desc *) group_bh->b_data; + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd); + ret = -EIO; + goto out; + } + + ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits, + bit_off, &found); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + + *num_bits = found; + + ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, + *num_bits, + le16_to_cpu(gd->bg_chain)); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh, + *bit_off, *num_bits); + if (ret < 0) + mlog_errno(ret); + + *bits_left = le16_to_cpu(gd->bg_free_bits_count); + +out: + brelse(group_bh); + + return ret; +} + static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, u32 bits_wanted, u32 min_bits, u16 *bit_off, unsigned int *num_bits, - u64 *bg_blkno) + u64 *bg_blkno, + u16 *bits_left) { int status; u16 chain, tmp_bits; @@ -988,9 +1147,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, goto bail; } bg = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); + if (status) { + mlog_errno(status); goto bail; } @@ -1018,9 +1177,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, goto 
bail; } bg = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); + if (status) { + mlog_errno(status); goto bail; } } @@ -1099,6 +1258,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, (unsigned long long)fe->i_blkno); *bg_blkno = le64_to_cpu(bg->bg_blkno); + *bits_left = le16_to_cpu(bg->bg_free_bits_count); bail: if (group_bh) brelse(group_bh); @@ -1120,6 +1280,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, { int status; u16 victim, i; + u16 bits_left = 0; + u64 hint_blkno = ac->ac_last_group; struct ocfs2_chain_list *cl; struct ocfs2_dinode *fe; @@ -1146,6 +1308,28 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, goto bail; } + if (hint_blkno) { + /* Attempt to short-circuit the usual search mechanism + * by jumping straight to the most recently used + * allocation group. This helps us maintain some + * contiguousness across allocations. */ + status = ocfs2_search_one_group(ac, bits_wanted, min_bits, + bit_off, num_bits, + hint_blkno, &bits_left); + if (!status) { + /* Be careful to update *bg_blkno here as the + * caller is expecting it to be filled in, and + * ocfs2_search_one_group() won't do that for + * us. */ + *bg_blkno = hint_blkno; + goto set_hint; + } + if (status < 0 && status != -ENOSPC) { + mlog_errno(status); + goto bail; + } + } + cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; victim = ocfs2_find_victim_chain(cl); @@ -1153,9 +1337,9 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, ac->ac_allow_chain_relink = 1; status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off, - num_bits, bg_blkno); + num_bits, bg_blkno, &bits_left); if (!status) - goto bail; + goto set_hint; if (status < 0 && status != -ENOSPC) { mlog_errno(status); goto bail; @@ -1177,8 +1361,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, ac->ac_chain = i; status = ocfs2_search_chain(ac, bits_wanted, min_bits, - bit_off, num_bits, - bg_blkno); + bit_off, num_bits, bg_blkno, + &bits_left); if (!status) break; if (status < 0 && status != -ENOSPC) { @@ -1186,8 +1370,19 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, goto bail; } } -bail: +set_hint: + if (status != -ENOSPC) { + /* If the next search of this group is not likely to + * yield a suitable extent, then we reset the last + * group hint so as to not waste a disk read */ + if (bits_left < min_bits) + ac->ac_last_group = 0; + else + ac->ac_last_group = *bg_blkno; + } + +bail: mlog_exit(status); return status; } @@ -1341,7 +1536,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb, { int status; unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; - u64 bg_blkno; + u64 bg_blkno = 0; u16 bg_bit_off; mlog_entry_void(); @@ -1494,9 +1689,9 @@ static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle, } group = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(group)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, group); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group); + if (status) { + mlog_errno(status); goto bail; } BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index a76c82a7ceac..c787838d1052 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -49,6 +49,8 @@ struct ocfs2_alloc_context { u16 
ac_chain; int ac_allow_chain_relink; group_search_t *ac_group_search; + + u64 ac_last_group; }; void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 382706a67ffd..d17e33e66a1e 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1442,8 +1442,13 @@ static int ocfs2_initialize_super(struct super_block *sb, osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; + /* We don't have a cluster lock on the bitmap here because + * we're only interested in static information and the extra + * complexity at mount time isn't worth it. Don't pass the + * inode in to the read function though as we don't want it to + * be put in the cache. */ status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0, - inode); + NULL); iput(inode); if (status < 0) { mlog_errno(status); @@ -1452,7 +1457,6 @@ static int ocfs2_initialize_super(struct super_block *sb, di = (struct ocfs2_dinode *) bitmap_bh->b_data; osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg); - osb->num_clusters = le32_to_cpu(di->id1.bitmap1.i_total); brelse(bitmap_bh); mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n", (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg); diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig index c9a478099281..e478f1941831 100644 --- a/fs/partitions/Kconfig +++ b/fs/partitions/Kconfig @@ -99,7 +99,7 @@ config IBM_PARTITION config MAC_PARTITION bool "Macintosh partition map support" if PARTITION_ADVANCED - default y if MAC + default y if (MAC || PPC_PMAC) help Say Y here if you would like to use hard disks under Linux which were partitioned on a Macintosh. diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c index abe91ca03edf..0a5927c806ca 100644 --- a/fs/partitions/sun.c +++ b/fs/partitions/sun.c @@ -74,7 +74,7 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev) spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); for (i = 0; i < 8; i++, p++) { unsigned long st_sector; - int num_sectors; + unsigned int num_sectors; st_sector = be32_to_cpu(p->start_cylinder) * spc; num_sectors = be32_to_cpu(p->num_sectors); diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 9f2cfc30f9cf..942156225447 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -169,7 +169,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off, "Mapped: %8lu kB\n" "Slab: %8lu kB\n" "PageTables: %8lu kB\n" - "NFS Unstable: %8lu kB\n" + "NFS_Unstable: %8lu kB\n" "Bounce: %8lu kB\n" "CommitLimit: %8lu kB\n" "Committed_AS: %8lu kB\n" diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index f318b58510fd..1627edd50810 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -48,8 +48,8 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp) return 0; } - reiserfs_write_lock(inode->i_sb); mutex_lock(&inode->i_mutex); + reiserfs_write_lock(inode->i_sb); /* freeing preallocation only involves relogging blocks that * are already in the current transaction. preallocation gets * freed at the end of each transaction, so it is impossible for diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 12dfdcfbee3d..52f1e2136546 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -39,14 +39,10 @@ void reiserfs_delete_inode(struct inode *inode) /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. 
*/ if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */ - mutex_lock(&inode->i_mutex); - reiserfs_delete_xattrs(inode); - if (journal_begin(&th, inode->i_sb, jbegin_count)) { - mutex_unlock(&inode->i_mutex); + if (journal_begin(&th, inode->i_sb, jbegin_count)) goto out; - } reiserfs_update_inode_transaction(inode); err = reiserfs_delete_object(&th, inode); @@ -57,12 +53,8 @@ void reiserfs_delete_inode(struct inode *inode) if (!err) DQUOT_FREE_INODE(inode); - if (journal_end(&th, inode->i_sb, jbegin_count)) { - mutex_unlock(&inode->i_mutex); + if (journal_end(&th, inode->i_sb, jbegin_count)) goto out; - } - - mutex_unlock(&inode->i_mutex); /* check return value from reiserfs_delete_object after * ending the transaction @@ -2348,6 +2340,7 @@ static int reiserfs_write_full_page(struct page *page, unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; int error = 0; unsigned long block; + sector_t last_block; struct buffer_head *head, *bh; int partial = 0; int nr = 0; @@ -2395,10 +2388,19 @@ static int reiserfs_write_full_page(struct page *page, } bh = head; block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); + last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; /* first map all the buffers, logging any direct items we find */ do { - if ((checked || buffer_dirty(bh)) && (!buffer_mapped(bh) || - (buffer_mapped(bh) + if (block > last_block) { + /* + * This can happen when the block size is less than + * the page size. The corresponding bytes in the page + * were zero filled above + */ + clear_buffer_dirty(bh); + set_buffer_uptodate(bh); + } else if ((checked || buffer_dirty(bh)) && + (!buffer_mapped(bh) || (buffer_mapped(bh) && bh->b_blocknr == 0))) { /* not mapped yet, or it points to a direct item, search diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index 745c88100895..a986b5e1e288 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c @@ -116,12 +116,12 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp) if (REISERFS_I(inode)->i_flags & i_nopack_mask) { return 0; } - reiserfs_write_lock(inode->i_sb); /* we need to make sure nobody is changing the file size beneath ** us */ mutex_lock(&inode->i_mutex); + reiserfs_write_lock(inode->i_sb); write_from = inode->i_size & (blocksize - 1); /* if we are on a block boundary, we are already unpacked. 
*/ diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 39fedaa88a0c..d935fb9394e3 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -424,7 +424,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf) int res = -ENOTDIR; if (!file->f_op || !file->f_op->readdir) goto out; - mutex_lock(&inode->i_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR); // down(&inode->i_zombie); res = -ENOENT; if (!IS_DEADDIR(inode)) { diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 3873c672cb4c..33323473e3c4 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -75,6 +75,12 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) } *err = -ENOSPC; + UDF_I_UNIQUE(inode) = 0; + UDF_I_LENEXTENTS(inode) = 0; + UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; + UDF_I_NEXT_ALLOC_GOAL(inode) = 0; + UDF_I_STRAT4096(inode) = 0; + block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum, start, err); if (*err) @@ -84,11 +90,6 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) } mutex_lock(&sbi->s_alloc_mutex); - UDF_I_UNIQUE(inode) = 0; - UDF_I_LENEXTENTS(inode) = 0; - UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; - UDF_I_NEXT_ALLOC_GOAL(inode) = 0; - UDF_I_STRAT4096(inode) = 0; if (UDF_SB_LVIDBH(sb)) { struct logicalVolHeaderDesc *lvhd; diff --git a/fs/udf/super.c b/fs/udf/super.c index 4df822c881b6..fcce1a21a51b 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -115,6 +115,13 @@ static struct inode *udf_alloc_inode(struct super_block *sb) ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; + + ei->i_unique = 0; + ei->i_lenExtents = 0; + ei->i_next_alloc_block = 0; + ei->i_next_alloc_goal = 0; + ei->i_strat4096 = 0; + return &ei->vfs_inode; } @@ -1652,7 +1659,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) iput(inode); goto error_out; } - sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_maxbytes = 1<<30; return 0; error_out: diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c index e1b0e8cfecb4..0abd66ce36ea 100644 --- a/fs/udf/truncate.c +++ b/fs/udf/truncate.c @@ -239,38 +239,52 @@ void udf_truncate_extents(struct inode * inode) { if (offset) { - extoffset -= adsize; - etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); - if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) - { - extoffset -= adsize; - elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); - udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); - } - else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) - { - kernel_lb_addr neloc = { 0, 0 }; - extoffset -= adsize; - nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | - ((elen + offset + inode->i_sb->s_blocksize - 1) & - ~(inode->i_sb->s_blocksize - 1)); - udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); - udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); - } - else - { - if (elen & (inode->i_sb->s_blocksize - 1)) - { - extoffset -= adsize; - elen = EXT_RECORDED_ALLOCATED | - ((elen + inode->i_sb->s_blocksize - 1) & - ~(inode->i_sb->s_blocksize - 1)); - udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); - } + /* + * OK, there is not extent covering inode->i_size and + * no extent above inode->i_size => truncate is + * extending the file by 'offset'. + */ + if ((!bh && extoffset == udf_file_entry_alloc_offset(inode)) || + (bh && extoffset == sizeof(struct allocExtDesc))) { + /* File has no extents at all! 
*/ memset(&eloc, 0x00, sizeof(kernel_lb_addr)); elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); } + else { + extoffset -= adsize; + etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); + if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) + { + extoffset -= adsize; + elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); + udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); + } + else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) + { + kernel_lb_addr neloc = { 0, 0 }; + extoffset -= adsize; + nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | + ((elen + offset + inode->i_sb->s_blocksize - 1) & + ~(inode->i_sb->s_blocksize - 1)); + udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); + udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); + } + else + { + if (elen & (inode->i_sb->s_blocksize - 1)) + { + extoffset -= adsize; + elen = EXT_RECORDED_ALLOCATED | + ((elen + inode->i_sb->s_blocksize - 1) & + ~(inode->i_sb->s_blocksize - 1)); + udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); + } + memset(&eloc, 0x00, sizeof(kernel_lb_addr)); + elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; + udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); + } + } } } UDF_I_LENEXTENTS(inode) = inode->i_size; diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index b01804baa120..b82381475779 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -248,7 +248,7 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk, if (likely(cur_index != index)) { page = ufs_get_locked_page(mapping, index); - if (IS_ERR(page)) + if (!page || IS_ERR(page)) /* it was truncated or EIO */ continue; } else page = locked_page; diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index e7c8615beb65..30c6e8a9446c 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -169,18 +169,20 @@ static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh) static struct buffer_head * ufs_clear_frags(struct inode *inode, sector_t beg, - unsigned int n) + unsigned int n, sector_t want) { - struct buffer_head *res, *bh; + struct buffer_head *res = NULL, *bh; sector_t end = beg + n; - res = sb_getblk(inode->i_sb, beg); - ufs_clear_frag(inode, res); - for (++beg; beg < end; ++beg) { + for (; beg < end; ++beg) { bh = sb_getblk(inode->i_sb, beg); ufs_clear_frag(inode, bh); - brelse(bh); + if (want != beg) + brelse(bh); + else + res = bh; } + BUG_ON(!res); return res; } @@ -265,7 +267,9 @@ repeat: lastfrag = ufsi->i_lastfrag; } - goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; + tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]); + if (tmp) + goal = tmp + uspi->s_fpb; tmp = ufs_new_fragments (inode, p, fragment - blockoff, goal, required + blockoff, err, locked_page); @@ -277,13 +281,15 @@ repeat: tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err, locked_page); - } + } else /* (lastblock > block) */ { /* * We will allocate new block before last allocated block */ - else /* (lastblock > block) */ { - if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) - goal = tmp + uspi->s_fpb; + if (block) { + tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]); + if (tmp) + goal = tmp + uspi->s_fpb; + } tmp = ufs_new_fragments(inode, p, fragment - blockoff, goal, uspi->s_fpb, err, locked_page); } @@ -296,7 +302,7 @@ repeat: } if (!phys) { - result = ufs_clear_frags(inode, tmp + blockoff, required); + 
result = ufs_clear_frags(inode, tmp, required, tmp + blockoff); } else { *phys = tmp + blockoff; result = NULL; @@ -383,7 +389,7 @@ repeat: } } - if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb)) + if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]))) goal = tmp + uspi->s_fpb; else goal = bh->b_blocknr + uspi->s_fpb; @@ -397,7 +403,8 @@ repeat: if (!phys) { - result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb); + result = ufs_clear_frags(inode, tmp, uspi->s_fpb, + tmp + blockoff); } else { *phys = tmp + blockoff; *new = 1; diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index abd5f23a426d..d344b411e261 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -129,7 +129,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, struct inode * inode; if (l > sb->s_blocksize) - goto out; + goto out_notlocked; lock_kernel(); inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); @@ -155,6 +155,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, err = ufs_add_nondir(dentry, inode); out: unlock_kernel(); +out_notlocked: return err; out_fail: diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index c9b55872079b..ea11d04c41a0 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c @@ -375,17 +375,15 @@ static int ufs_alloc_lastblock(struct inode *inode) int err = 0; struct address_space *mapping = inode->i_mapping; struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; - struct ufs_inode_info *ufsi = UFS_I(inode); unsigned lastfrag, i, end; struct page *lastpage; struct buffer_head *bh; lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; - if (!lastfrag) { - ufsi->i_lastfrag = 0; + if (!lastfrag) goto out; - } + lastfrag--; lastpage = ufs_get_locked_page(mapping, lastfrag >> @@ -400,25 +398,25 @@ static int ufs_alloc_lastblock(struct inode *inode) for (i = 0; i < end; ++i) bh = bh->b_this_page; - if (!buffer_mapped(bh)) { - err = ufs_getfrag_block(inode, lastfrag, bh, 1); - if (unlikely(err)) - goto out_unlock; + err = ufs_getfrag_block(inode, lastfrag, bh, 1); - if (buffer_new(bh)) { - clear_buffer_new(bh); - unmap_underlying_metadata(bh->b_bdev, - bh->b_blocknr); - /* - * we do not zeroize fragment, because of - * if it maped to hole, it already contains zeroes - */ - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - set_page_dirty(lastpage); - } + if (unlikely(err)) + goto out_unlock; + + if (buffer_new(bh)) { + clear_buffer_new(bh); + unmap_underlying_metadata(bh->b_bdev, + bh->b_blocknr); + /* + * we do not zeroize fragment, because of + * if it maped to hole, it already contains zeroes + */ + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + set_page_dirty(lastpage); } + out_unlock: ufs_put_locked_page(lastpage); out: @@ -440,23 +438,11 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; - if (inode->i_size > old_i_size) { - /* - * if we expand file we should care about - * allocation of block for last byte first of all - */ - err = ufs_alloc_lastblock(inode); + err = ufs_alloc_lastblock(inode); - if (err) { - i_size_write(inode, old_i_size); - goto out; - } - /* - * go away, because of we expand file, and we do not - * need free blocks, and zeroizes page - */ - lock_kernel(); - goto almost_end; + if (err) { + i_size_write(inode, old_i_size); + goto out; } block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); @@ -477,21 +463,8 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) yield(); } 
- if (inode->i_size < old_i_size) { - /* - * now we should have enough space - * to allocate block for last byte - */ - err = ufs_alloc_lastblock(inode); - if (err) - /* - * looks like all the same - we have no space, - * but we truncate file already - */ - inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize; - } -almost_end: inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; + ufsi->i_lastfrag = DIRECT_FRAGMENT; unlock_kernel(); mark_inode_dirty(inode); out: diff --git a/fs/ufs/util.c b/fs/ufs/util.c index 337cf2c46d10..22f820a9b15c 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c @@ -251,12 +251,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping, { struct page *page; -try_again: page = find_lock_page(mapping, index); if (!page) { page = read_cache_page(mapping, index, (filler_t*)mapping->a_ops->readpage, NULL); + if (IS_ERR(page)) { printk(KERN_ERR "ufs_change_blocknr: " "read_cache_page error: ino %lu, index: %lu\n", @@ -266,6 +266,14 @@ try_again: lock_page(page); + if (unlikely(page->mapping == NULL)) { + /* Truncate got there first */ + unlock_page(page); + page_cache_release(page); + page = NULL; + goto out; + } + if (!PageUptodate(page) || PageError(page)) { unlock_page(page); page_cache_release(page); @@ -275,15 +283,8 @@ try_again: mapping->host->i_ino, index); page = ERR_PTR(-EIO); - goto out; } } - - if (unlikely(!page->mapping || !page_has_buffers(page))) { - unlock_page(page); - page_cache_release(page); - goto try_again;/*we really need these buffers*/ - } out: return page; } diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index ceda3a2859d2..7858703ed84c 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -246,8 +246,8 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); #define BUF_BUSY XBF_DONT_BLOCK #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) -#define XFS_BUF_ZEROFLAGS(bp) \ - ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) +#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ + ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9bdef9d51900..4754f342a5d3 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -314,6 +314,13 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp) return; } + if (xfs_readonly_buftarg(mp->m_ddev_targp)) { + xfs_fs_cmn_err(CE_NOTE, mp, + "Disabling barriers, underlying device is readonly"); + mp->m_flags &= ~XFS_MOUNT_BARRIER; + return; + } + error = xfs_barrier_test(mp); if (error) { xfs_fs_cmn_err(CE_NOTE, mp, diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index e95e99f7168f..f137856c3261 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c @@ -217,17 +217,24 @@ xfs_qm_statvfs( return 0; dp = &dqp->q_core; - limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit; + limit = dp->d_blk_softlimit ? + be64_to_cpu(dp->d_blk_softlimit) : + be64_to_cpu(dp->d_blk_hardlimit); if (limit && statp->f_blocks > limit) { statp->f_blocks = limit; - statp->f_bfree = (statp->f_blocks > dp->d_bcount) ? - (statp->f_blocks - dp->d_bcount) : 0; + statp->f_bfree = + (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? + (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; } - limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit; + + limit = dp->d_ino_softlimit ? 
+ be64_to_cpu(dp->d_ino_softlimit) : + be64_to_cpu(dp->d_ino_hardlimit); if (limit && statp->f_files > limit) { statp->f_files = limit; - statp->f_ffree = (statp->f_files > dp->d_icount) ? - (statp->f_ffree - dp->d_icount) : 0; + statp->f_ffree = + (statp->f_files > be64_to_cpu(dp->d_icount)) ? + (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0; } xfs_qm_dqput(dqp); diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index eef6763f3a67..d2bbcd882a69 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -1835,40 +1835,47 @@ xfs_alloc_fix_freelist( &agbp))) return error; if (!pag->pagf_init) { + ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK); + ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); args->agbp = NULL; return 0; } } else agbp = NULL; - /* If this is a metadata preferred pag and we are user data + /* + * If this is a metadata preferred pag and we are user data * then try somewhere else if we are not being asked to * try harder at this point */ - if (pag->pagf_metadata && args->userdata && flags) { + if (pag->pagf_metadata && args->userdata && + (flags & XFS_ALLOC_FLAG_TRYLOCK)) { + ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); args->agbp = NULL; return 0; } - need = XFS_MIN_FREELIST_PAG(pag, mp); - delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; - /* - * If it looks like there isn't a long enough extent, or enough - * total blocks, reject it. - */ - longest = (pag->pagf_longest > delta) ? - (pag->pagf_longest - delta) : - (pag->pagf_flcount > 0 || pag->pagf_longest > 0); - if (args->minlen + args->alignment + args->minalignslop - 1 > longest || - (!(flags & XFS_ALLOC_FLAG_FREEING) && - (int)(pag->pagf_freeblks + pag->pagf_flcount - - need - args->total) < - (int)args->minleft)) { - if (agbp) - xfs_trans_brelse(tp, agbp); - args->agbp = NULL; - return 0; + if (!(flags & XFS_ALLOC_FLAG_FREEING)) { + need = XFS_MIN_FREELIST_PAG(pag, mp); + delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; + /* + * If it looks like there isn't a long enough extent, or enough + * total blocks, reject it. + */ + longest = (pag->pagf_longest > delta) ? + (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || pag->pagf_longest > 0); + if ((args->minlen + args->alignment + args->minalignslop - 1) > + longest || + ((int)(pag->pagf_freeblks + pag->pagf_flcount - + need - args->total) < (int)args->minleft)) { + if (agbp) + xfs_trans_brelse(tp, agbp); + args->agbp = NULL; + return 0; + } } + /* * Get the a.g. freespace buffer. * Can fail if we're not blocking on locks, and it's held. @@ -1878,6 +1885,8 @@ xfs_alloc_fix_freelist( &agbp))) return error; if (agbp == NULL) { + ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK); + ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); args->agbp = NULL; return 0; } @@ -1887,22 +1896,24 @@ xfs_alloc_fix_freelist( */ agf = XFS_BUF_TO_AGF(agbp); need = XFS_MIN_FREELIST(agf, mp); - delta = need > be32_to_cpu(agf->agf_flcount) ? - (need - be32_to_cpu(agf->agf_flcount)) : 0; /* * If there isn't enough total or single-extent, reject it. */ - longest = be32_to_cpu(agf->agf_longest); - longest = (longest > delta) ? (longest - delta) : - (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); - if (args->minlen + args->alignment + args->minalignslop - 1 > longest || - (!(flags & XFS_ALLOC_FLAG_FREEING) && - (int)(be32_to_cpu(agf->agf_freeblks) + - be32_to_cpu(agf->agf_flcount) - need - args->total) < - (int)args->minleft)) { - xfs_trans_brelse(tp, agbp); - args->agbp = NULL; - return 0; + if (!(flags & XFS_ALLOC_FLAG_FREEING)) { + delta = need > be32_to_cpu(agf->agf_flcount) ? 
+ (need - be32_to_cpu(agf->agf_flcount)) : 0; + longest = be32_to_cpu(agf->agf_longest); + longest = (longest > delta) ? (longest - delta) : + (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); + if ((args->minlen + args->alignment + args->minalignslop - 1) > + longest || + ((int)(be32_to_cpu(agf->agf_freeblks) + + be32_to_cpu(agf->agf_flcount) - need - args->total) < + (int)args->minleft)) { + xfs_trans_brelse(tp, agbp); + args->agbp = NULL; + return 0; + } } /* * Make the freelist shorter if it's too long. @@ -1950,12 +1961,11 @@ xfs_alloc_fix_freelist( * on a completely full ag. */ if (targs.agbno == NULLAGBLOCK) { - if (!(flags & XFS_ALLOC_FLAG_FREEING)) { - xfs_trans_brelse(tp, agflbp); - args->agbp = NULL; - return 0; - } - break; + if (flags & XFS_ALLOC_FLAG_FREEING) + break; + xfs_trans_brelse(tp, agflbp); + args->agbp = NULL; + return 0; } /* * Put each allocated block on the list. @@ -2442,31 +2452,26 @@ xfs_free_extent( xfs_fsblock_t bno, /* starting block number of extent */ xfs_extlen_t len) /* length of extent */ { -#ifdef DEBUG - xfs_agf_t *agf; /* a.g. freespace header */ -#endif - xfs_alloc_arg_t args; /* allocation argument structure */ + xfs_alloc_arg_t args; int error; ASSERT(len != 0); + memset(&args, 0, sizeof(xfs_alloc_arg_t)); args.tp = tp; args.mp = tp->t_mountp; args.agno = XFS_FSB_TO_AGNO(args.mp, bno); ASSERT(args.agno < args.mp->m_sb.sb_agcount); args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); - args.alignment = 1; - args.minlen = args.minleft = args.minalignslop = 0; down_read(&args.mp->m_peraglock); args.pag = &args.mp->m_perag[args.agno]; if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) goto error0; #ifdef DEBUG ASSERT(args.agbp != NULL); - agf = XFS_BUF_TO_AGF(args.agbp); - ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length)); + ASSERT((args.agbno + len) <= + be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)); #endif - error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, - len, 0); + error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); error0: up_read(&args.mp->m_peraglock); return error; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 86c1bf0bba9e..1f8ecff8553a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -334,10 +334,9 @@ xfs_itobp( #if !defined(__KERNEL__) ni = 0; #elif defined(DEBUG) - ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : - (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog); + ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; #else /* usual case */ - ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 
0 : 1; + ni = 1; #endif for (i = 0; i < ni; i++) { @@ -348,11 +347,15 @@ xfs_itobp( (i << mp->m_sb.sb_inodelog)); di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); - if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, - XFS_RANDOM_ITOBP_INOTOBP))) { + if (unlikely(XFS_TEST_ERROR(!di_ok, mp, + XFS_ERRTAG_ITOBP_INOTOBP, + XFS_RANDOM_ITOBP_INOTOBP))) { + if (imap_flags & XFS_IMAP_BULKSTAT) { + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EINVAL); + } #ifdef DEBUG - if (!(imap_flags & XFS_IMAP_BULKSTAT)) - cmn_err(CE_ALERT, + cmn_err(CE_ALERT, "Device %s - bad inode magic/vsn " "daddr %lld #%d (magic=%x)", XFS_BUFTARG_NAME(mp->m_ddev_targp), diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index e730328636c3..21ac1a67e3e0 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1413,7 +1413,7 @@ xlog_sync(xlog_t *log, ops = iclog->ic_header.h_num_logops; INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); - bp = iclog->ic_bp; + bp = iclog->ic_bp; ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); @@ -1430,15 +1430,14 @@ xlog_sync(xlog_t *log, } XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ + XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); /* * Do an ordered write for the log block. - * - * It may not be needed to flush the first split block in the log wrap - * case, but do it anyways to be safe -AK + * Its unnecessary to flush the first split block in the log wrap case. */ - if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) + if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER)) XFS_BUF_ORDERED(bp); ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); @@ -1460,7 +1459,7 @@ xlog_sync(xlog_t *log, return error; } if (split) { - bp = iclog->ic_log->l_xbuf; + bp = iclog->ic_log->l_xbuf; ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); @@ -1468,6 +1467,7 @@ xlog_sync(xlog_t *log, XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ (__psint_t)count), split); XFS_BUF_SET_FSPRIVATE(bp, iclog); + XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 6c96391f3f1a..b427d220a169 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -515,7 +515,7 @@ xfs_mount( if (error) goto error2; - if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) + if (mp->m_flags & XFS_MOUNT_BARRIER) xfs_mountfs_check_barriers(mp); error = XFS_IOINIT(vfsp, args, flags); diff --git a/include/asm-arm/arch-iop3xx/iop331-irqs.h b/include/asm-arm/arch-iop3xx/iop331-irqs.h index 8ff73d487222..7135ad7e335e 100644 --- a/include/asm-arm/arch-iop3xx/iop331-irqs.h +++ b/include/asm-arm/arch-iop3xx/iop331-irqs.h @@ -91,7 +91,6 @@ #define NR_IRQS NR_IOP331_IRQS -#if defined(CONFIG_ARCH_IQ80331) /* * Interrupts available on the IQ80331 board */ @@ -111,7 +110,6 @@ #define IRQ_IQ80331_INTC IRQ_IOP331_XINT2 #define IRQ_IQ80331_INTD IRQ_IOP331_XINT3 -#elif defined(CONFIG_MACH_IQ80332) /* * Interrupts available on the IQ80332 board */ @@ -131,6 +129,4 @@ #define IRQ_IQ80332_INTC IRQ_IOP331_XINT2 #define IRQ_IQ80332_INTD IRQ_IOP331_XINT3 -#endif - #endif // _IOP331_IRQ_H_ diff --git 
a/include/asm-arm/arch-omap/clock.h b/include/asm-arm/arch-omap/clock.h index 3c4eb9fbe48a..f83003f5287b 100644 --- a/include/asm-arm/arch-omap/clock.h +++ b/include/asm-arm/arch-omap/clock.h @@ -48,8 +48,6 @@ struct clk_functions { }; extern unsigned int mpurate; -extern struct list_head clocks; -extern spinlock_t clockfw_lock; extern int clk_init(struct clk_functions * custom_clocks); extern int clk_register(struct clk *clk); diff --git a/include/asm-arm/arch-s3c2410/dma.h b/include/asm-arm/arch-s3c2410/dma.h index 72964f9b8414..7463fd5252ce 100644 --- a/include/asm-arm/arch-s3c2410/dma.h +++ b/include/asm-arm/arch-s3c2410/dma.h @@ -104,6 +104,7 @@ enum s3c2410_chan_op_e { S3C2410_DMAOP_RESUME, S3C2410_DMAOP_FLUSH, S3C2410_DMAOP_TIMEOUT, /* internal signal to handler */ + S3C2410_DMAOP_STARTED, /* indicate channel started */ }; typedef enum s3c2410_chan_op_e s3c2410_chan_op_t; diff --git a/include/asm-arm/arch-s3c2410/regs-rtc.h b/include/asm-arm/arch-s3c2410/regs-rtc.h index 228983f89bc8..0fbec07bb6b8 100644 --- a/include/asm-arm/arch-s3c2410/regs-rtc.h +++ b/include/asm-arm/arch-s3c2410/regs-rtc.h @@ -18,7 +18,7 @@ #ifndef __ASM_ARCH_REGS_RTC_H #define __ASM_ARCH_REGS_RTC_H __FILE__ -#define S3C2410_RTCREG(x) ((x) + S3C24XX_VA_RTC) +#define S3C2410_RTCREG(x) (x) #define S3C2410_RTCCON S3C2410_RTCREG(0x40) #define S3C2410_RTCCON_RTCEN (1<<0) diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h index edb7b6502fcf..91a31adfa8a8 100644 --- a/include/asm-arm/procinfo.h +++ b/include/asm-arm/procinfo.h @@ -55,5 +55,6 @@ extern unsigned int elf_hwcap; #define HWCAP_VFP 64 #define HWCAP_EDSP 128 #define HWCAP_JAVA 256 +#define HWCAP_IWMMXT 512 #endif diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h index 0730a20f6db8..8774d06689da 100644 --- a/include/asm-i386/kprobes.h +++ b/include/asm-i386/kprobes.h @@ -45,6 +45,7 @@ typedef u8 kprobe_opcode_t; #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES #define ARCH_INACTIVE_KPROBE_COUNT 0 +#define flush_insn_slot(p) do { } while (0) void arch_remove_kprobe(struct kprobe *p); void kretprobe_trampoline(void); diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h index e33e9f9e4c66..22cb07cc8f32 100644 --- a/include/asm-i386/mmzone.h +++ b/include/asm-i386/mmzone.h @@ -14,7 +14,7 @@ extern struct pglist_data *node_data[]; #ifdef CONFIG_X86_NUMAQ #include -#else /* summit or generic arch */ +#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */ #include #endif diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h index 2418a787c405..938904910115 100644 --- a/include/asm-ia64/kprobes.h +++ b/include/asm-ia64/kprobes.h @@ -125,5 +125,6 @@ static inline void jprobe_return(void) } extern void invalidate_stacked_regs(void); extern void flush_register_stack(void); +extern void flush_insn_slot(struct kprobe *p); #endif /* _ASM_KPROBES_H */ diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h index 894bc4d89dc0..6a33a07b3f1d 100644 --- a/include/asm-ia64/meminit.h +++ b/include/asm-ia64/meminit.h @@ -56,6 +56,11 @@ extern void efi_memmap_init(unsigned long *, unsigned long *); extern struct page *vmem_map; extern int find_largest_hole (u64 start, u64 end, void *arg); extern int create_mem_map_page_table (u64 start, u64 end, void *arg); + extern int vmemmap_find_next_valid_pfn(int, int); +#else +static inline int vmemmap_find_next_valid_pfn(int node, int i) +{ + return i + 1; +} #endif - #endif /* meminit_h */ diff --git 
a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 37e52a2836b0..20a8d618c845 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h @@ -1433,7 +1433,12 @@ typedef union pal_version_u { } pal_version_u_t; -/* Return PAL version information */ +/* + * Return PAL version information. While the documentation states that + * PAL_VERSION can be called in either physical or virtual mode, some + * implementations only allow physical calls. We don't call it very often, + * so the overhead isn't worth eliminating. + */ static inline s64 ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) { diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index 8406f1ef4caf..b72af597878d 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h @@ -1124,8 +1124,8 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) #define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) -#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f) -#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010) +#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f)) +#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010)) static inline void diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index fc9677bc87ee..384fbf7f2a0f 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -24,7 +24,7 @@ * 0xa000000000000000+2*PERCPU_PAGE_SIZE * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) */ -#define KERNEL_START (GATE_ADDR+0x100000000) +#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000)) #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) #ifndef __ASSEMBLY__ diff --git a/include/asm-powerpc/backlight.h b/include/asm-powerpc/backlight.h index 58d4b6f8d827..8cf5c37c3817 100644 --- a/include/asm-powerpc/backlight.h +++ b/include/asm-powerpc/backlight.h @@ -30,8 +30,12 @@ static inline void pmac_backlight_key_down(void) pmac_backlight_key(1); } +extern void pmac_backlight_set_legacy_brightness_pmu(int brightness); extern int pmac_backlight_set_legacy_brightness(int brightness); extern int pmac_backlight_get_legacy_brightness(void); +extern void pmac_backlight_enable(void); +extern void pmac_backlight_disable(void); + #endif /* __KERNEL__ */ #endif diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h index 8f7fd5cfec34..11cbdf81fd2e 100644 --- a/include/asm-powerpc/kexec.h +++ b/include/asm-powerpc/kexec.h @@ -32,6 +32,7 @@ #endif #ifndef __ASSEMBLY__ +#include #ifdef CONFIG_KEXEC @@ -109,7 +110,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs, #define MAX_NOTE_BYTES 1024 -#ifdef __powerpc64__ extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ extern int crashing_cpu; @@ -119,7 +119,6 @@ static inline int kexec_sr_activated(int cpu) { return cpu_isset(cpu,cpus_in_sr); } -#endif /* __powerpc64 __ */ struct kimage; struct pt_regs; diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h index 2d0af52c823d..34e1f89a5fa0 100644 --- a/include/asm-powerpc/kprobes.h +++ b/include/asm-powerpc/kprobes.h @@ -51,6 +51,7 @@ typedef unsigned int kprobe_opcode_t; #define ARCH_SUPPORTS_KRETPROBES #define ARCH_INACTIVE_KPROBE_COUNT 1 +#define flush_insn_slot(p) do { } while (0) void kretprobe_trampoline(void); extern void 
arch_remove_kprobe(struct kprobe *p); diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h index 9f0917c68659..ae63db7b3e7d 100644 --- a/include/asm-powerpc/pgalloc.h +++ b/include/asm-powerpc/pgalloc.h @@ -117,7 +117,7 @@ static inline void pte_free(struct page *ptepage) pte_free_kernel(page_address(ptepage)); } -#define PGF_CACHENUM_MASK 0xf +#define PGF_CACHENUM_MASK 0x3 typedef struct pgtable_free { unsigned long val; diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h index a33c6acffa61..82a27e9a041f 100644 --- a/include/asm-powerpc/rtas.h +++ b/include/asm-powerpc/rtas.h @@ -170,6 +170,7 @@ extern int rtas_get_sensor(int sensor, int index, int *state); extern int rtas_get_power_level(int powerdomain, int *level); extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); extern int rtas_set_indicator(int indicator, int index, int new_value); +extern int rtas_set_indicator_fast(int indicator, int index, int new_value); extern void rtas_progress(char *s, unsigned short hex); extern void rtas_initialize(void); diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index 7307aa775671..4c9f5229e833 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -53,6 +53,15 @@ #define smp_read_barrier_depends() do { } while(0) #endif /* CONFIG_SMP */ +/* + * This is a barrier which prevents following instructions from being + * started until the value of the argument x is known. For example, if + * x is a variable loaded from memory, this prevents following + * instructions from being executed until the load has been performed. + */ +#define data_barrier(x) \ + asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); + struct task_struct; struct pt_regs; diff --git a/include/asm-powerpc/tsi108.h b/include/asm-powerpc/tsi108.h index c4c278d72f71..2c702d35a7cf 100644 --- a/include/asm-powerpc/tsi108.h +++ b/include/asm-powerpc/tsi108.h @@ -1,16 +1,18 @@ /* - * include/asm-ppc/tsi108.h - * * common routine and memory layout for Tundra TSI108(Grendel) host bridge * memory controller. * * Author: Jacob Pan (jacob.pan@freescale.com) * Alex Bounine (alexandreb@tundra.com) - * 2004 (c) Freescale Semiconductor Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. + * + * Copyright 2004-2006 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. */ + #ifndef __PPC_KERNEL_TSI108_H #define __PPC_KERNEL_TSI108_H diff --git a/include/asm-powerpc/tsi108_irq.h b/include/asm-powerpc/tsi108_irq.h new file mode 100644 index 000000000000..3e4d04effa57 --- /dev/null +++ b/include/asm-powerpc/tsi108_irq.h @@ -0,0 +1,124 @@ +/* + * (C) Copyright 2005 Tundra Semiconductor Corp. 
+ * Alex Bounine, audit_context; + return !p || *(int *)p; +} static inline void audit_getname(const char *name) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) __audit_getname(name); } static inline void audit_inode(const char *name, const struct inode *inode) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) __audit_inode(name, inode); } static inline void audit_inode_child(const char *dname, - const struct inode *inode, - unsigned long pino) { - if (unlikely(current->audit_context)) - __audit_inode_child(dname, inode, pino); + const struct inode *inode, + const struct inode *parent) { + if (unlikely(!audit_dummy_context())) + __audit_inode_child(dname, inode, parent); +} +static inline void audit_inode_update(const struct inode *inode) { + if (unlikely(!audit_dummy_context())) + __audit_inode_update(inode); } /* Private API (for audit.c only) */ @@ -365,57 +375,61 @@ extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) return __audit_ipc_obj(ipcp); return 0; } static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) return __audit_ipc_set_perm(qbytes, uid, gid, mode); return 0; } static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) return __audit_mq_open(oflag, mode, u_attr); return 0; } static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); return 0; } static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); return 0; } static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) return __audit_mq_notify(mqdes, u_notification); return 0; } static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { - if (unlikely(current->audit_context)) + if (unlikely(!audit_dummy_context())) return __audit_mq_getsetattr(mqdes, mqstat); return 0; } +extern int audit_n_rules; #else #define audit_alloc(t) ({ 0; }) #define audit_free(t) do { ; } while (0) #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0) #define audit_syscall_exit(f,r) do { ; } while (0) +#define audit_dummy_context() 1 #define audit_getname(n) do { ; } while (0) #define audit_putname(n) do { ; } while (0) #define __audit_inode(n,i) do { ; } while (0) #define __audit_inode_child(d,i,p) do { ; } while (0) +#define __audit_inode_update(i) do { ; } while (0) #define audit_inode(n,i) do { ; } while (0) #define audit_inode_child(d,i,p) do { ; } while (0) +#define audit_inode_update(i) do { ; } while (0) #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) #define audit_get_loginuid(c) ({ -1; }) #define audit_ipc_obj(i) ({ 0; }) @@ -430,6 +444,7 @@ static inline int 
audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) #define audit_mq_timedreceive(d,l,p,t) ({ 0; }) #define audit_mq_notify(d,n) ({ 0; }) #define audit_mq_getsetattr(d,s) ({ 0; }) +#define audit_n_rules 0 #endif #ifdef CONFIG_AUDIT diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h index dbb7769009be..1c86d65bc4b9 100644 --- a/include/linux/cn_proc.h +++ b/include/linux/cn_proc.h @@ -57,7 +57,8 @@ struct proc_event { PROC_EVENT_EXIT = 0x80000000 } what; __u32 cpu; - struct timespec timestamp; + __u64 __attribute__((aligned(8))) timestamp_ns; + /* Number of nano seconds since system boot */ union { /* must be last field of proc_event struct */ struct { __u32 err; diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h index 269d000bb2a3..bea0255196c4 100644 --- a/include/linux/compat_ioctl.h +++ b/include/linux/compat_ioctl.h @@ -216,6 +216,7 @@ COMPATIBLE_IOCTL(VT_RESIZE) COMPATIBLE_IOCTL(VT_RESIZEX) COMPATIBLE_IOCTL(VT_LOCKSWITCH) COMPATIBLE_IOCTL(VT_UNLOCKSWITCH) +COMPATIBLE_IOCTL(VT_GETHIFONTMASK) /* Little p (/dev/rtc, /dev/envctrl, etc.) */ COMPATIBLE_IOCTL(RTC_AIE_ON) COMPATIBLE_IOCTL(RTC_AIE_OFF) diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 6a7047851e48..88dafa246d87 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -1,6 +1,8 @@ #ifndef __LINUX_DEBUG_LOCKING_H #define __LINUX_DEBUG_LOCKING_H +struct task_struct; + extern int debug_locks; extern int debug_locks_silent; diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 7e8b6011b8f3..11487b6e7127 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -55,7 +55,7 @@ static inline void delayacct_tsk_init(struct task_struct *tsk) { /* reinitialize in case parent's non-null pointer was dup'ed*/ tsk->delays = NULL; - if (unlikely(delayacct_on)) + if (delayacct_on) __delayacct_tsk_init(tsk); } @@ -80,9 +80,7 @@ static inline void delayacct_blkio_end(void) static inline int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) { - if (likely(!delayacct_on)) - return -EINVAL; - if (!tsk->delays) + if (!delayacct_on || !tsk->delays) return 0; return __delayacct_add_tsk(d, tsk); } diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 5607e6457a65..9f9cce7bd86d 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -492,6 +492,15 @@ static inline struct ext3_inode_info *EXT3_I(struct inode *inode) { return container_of(inode, struct ext3_inode_info, vfs_inode); } + +static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino) +{ + return ino == EXT3_ROOT_INO || + ino == EXT3_JOURNAL_INO || + ino == EXT3_RESIZE_INO || + (ino >= EXT3_FIRST_INO(sb) && + ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)); +} #else /* Assume that user mode programs are passing in an ext3fs superblock, not * a kernel struct super_block. 
This will allow us to call the feature-test diff --git a/include/linux/fb.h b/include/linux/fb.h index 405f44e44e5d..2f335e966011 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -1,7 +1,6 @@ #ifndef _LINUX_FB_H #define _LINUX_FB_H -#include #include /* Definitions of frame buffers */ @@ -381,6 +380,7 @@ struct fb_cursor { #include #include #include +#include #include struct vm_area_struct; @@ -524,7 +524,7 @@ struct fb_event { extern int fb_register_client(struct notifier_block *nb); extern int fb_unregister_client(struct notifier_block *nb); - +extern int fb_notifier_call_chain(unsigned long val, void *v); /* * Pixmap structure definition * diff --git a/include/linux/fs.h b/include/linux/fs.h index 25610205c90d..555bc195c420 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -570,13 +570,14 @@ struct inode { * 3: quota file * * The locking order between these classes is - * parent -> child -> normal -> quota + * parent -> child -> normal -> xattr -> quota */ enum inode_i_mutex_lock_class { I_MUTEX_NORMAL, I_MUTEX_PARENT, I_MUTEX_CHILD, + I_MUTEX_XATTR, I_MUTEX_QUOTA }; diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index 783c476b8674..74ed35a00a94 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -69,34 +69,21 @@ enum fs_ioport { fsiop_porte, }; -struct fs_mii_bus_info { - int method; /* mii method */ - int id; /* the id of the mii_bus */ - int disable_aneg; /* if the controller needs to negothiate speed & duplex */ - int lpa; /* the default board-specific vallues will be applied otherwise */ - - union { - struct { - int duplex; - int speed; - } fixed; - - struct { - /* nothing */ - } fec; - - struct { - /* nothing */ - } scc; - - struct { - int mdio_port; /* port & bit for MDIO */ - int mdio_bit; - int mdc_port; /* port & bit for MDC */ - int mdc_bit; - int delay; /* delay in us */ - } bitbang; - } i; +struct fs_mii_bit { + u32 offset; + u8 bit; + u8 polarity; +}; +struct fs_mii_bb_platform_info { + struct fs_mii_bit mdio_dir; + struct fs_mii_bit mdio_dat; + struct fs_mii_bit mdc_dat; + int mdio_port; /* port & bit for MDIO */ + int mdio_bit; + int mdc_port; /* port & bit for MDC */ + int mdc_bit; + int delay; /* delay in us */ + int irq[32]; /* irqs per phy's */ }; struct fs_platform_info { @@ -119,6 +106,7 @@ struct fs_platform_info { u32 device_flags; int phy_addr; /* the phy address (-1 no phy) */ + const char* bus_id; int phy_irq; /* the phy irq (if it exists) */ const struct fs_mii_bus_info *bus_info; @@ -130,6 +118,10 @@ struct fs_platform_info { int napi_weight; /* NAPI weight */ int use_rmii; /* use RMII mode */ + int has_phy; /* if the network is phy container as well...*/ +}; +struct fs_mii_fec_platform_info { + u32 irq[32]; + u32 mii_speed; }; - #endif diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index cc5dec70c32c..d4f219ffaa5d 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -67,7 +67,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, if (source) { inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); } - audit_inode_child(new_name, source, new_dir->i_ino); + audit_inode_child(new_name, source, new_dir); } /* @@ -98,7 +98,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) inode_dir_notify(inode, DN_CREATE); inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, dentry->d_inode); - audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); + 
audit_inode_child(dentry->d_name.name, dentry->d_inode, inode); } /* @@ -109,7 +109,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) inode_dir_notify(inode, DN_CREATE); inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, dentry->d_name.name, dentry->d_inode); - audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); + audit_inode_child(dentry->d_name.name, dentry->d_inode, inode); } /* diff --git a/include/linux/futex.h b/include/linux/futex.h index 34c3a215f2cd..d097b5b72bc6 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -96,7 +96,8 @@ struct robust_list_head { long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, u32 __user *uaddr2, u32 val2, u32 val3); -extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); +extern int +handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); diff --git a/include/linux/ide.h b/include/linux/ide.h index dc7abef10965..99620451d958 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -571,6 +571,7 @@ typedef struct ide_drive_s { u8 waiting_for_dma; /* dma currently in progress */ u8 unmask; /* okay to unmask other irqs */ u8 bswap; /* byte swap data */ + u8 noflush; /* don't attempt flushes */ u8 dsc_overlap; /* DSC overlap */ u8 nice1; /* give potential excess bandwidth */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 383627ad328f..ab2740832742 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -155,6 +155,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, { struct net_device_stats *stats; + if (skb_bond_should_drop(skb)) { + dev_kfree_skb_any(skb); + return NET_RX_DROP; + } + skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK]; if (skb->dev == NULL) { dev_kfree_skb_any(skb); diff --git a/include/linux/input.h b/include/linux/input.h index 56f1e0e1e598..b3253ab72ff7 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -893,7 +893,6 @@ struct input_dev { int (*open)(struct input_dev *dev); void (*close)(struct input_dev *dev); - int (*accept)(struct input_dev *dev, struct file *file); int (*flush)(struct input_dev *dev, struct file *file); int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); int (*upload_effect)(struct input_dev *dev, struct ff_effect *effect); @@ -961,6 +960,26 @@ struct input_dev { struct input_handle; +/** + * struct input_handler - implements one of interfaces for input devices + * @private: driver-specific data + * @event: event handler + * @connect: called when attaching a handler to an input device + * @disconnect: disconnects a handler from input device + * @start: starts handler for given handle. 
This function is called by + * input core right after connect() method and also when a process + * that "grabbed" a device releases it + * @fops: file operations this driver implements + * @minor: beginning of range of 32 minors for devices this driver + * can provide + * @name: name of the handler, to be shown in /proc/bus/input/handlers + * @id_table: pointer to a table of input_device_ids this driver can + * handle + * @blacklist: pointer to a table of input_device_ids this driver should + * ignore even if they match @id_table + * @h_list: list of input handles associated with the handler + * @node: for placing the driver onto input_handler_list + */ struct input_handler { void *private; @@ -968,6 +987,7 @@ struct input_handler { void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); struct input_handle* (*connect)(struct input_handler *handler, struct input_dev *dev, struct input_device_id *id); void (*disconnect)(struct input_handle *handle); + void (*start)(struct input_handle *handle); const struct file_operations *fops; int minor; @@ -1030,10 +1050,10 @@ void input_release_device(struct input_handle *); int input_open_device(struct input_handle *); void input_close_device(struct input_handle *); -int input_accept_process(struct input_handle *handle, struct file *file); int input_flush_device(struct input_handle* handle, struct file* file); void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); +void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); static inline void input_report_key(struct input_dev *dev, unsigned int code, int value) { diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 88d5961f7a3f..8e2042b9d471 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -59,27 +59,6 @@ static inline int task_nice_ioprio(struct task_struct *task) /* * For inheritance, return the highest of the two given priorities */ -static inline int ioprio_best(unsigned short aprio, unsigned short bprio) -{ - unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); - unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); - - if (!ioprio_valid(aprio)) - return bprio; - if (!ioprio_valid(bprio)) - return aprio; - - if (aclass == IOPRIO_CLASS_NONE) - aclass = IOPRIO_CLASS_BE; - if (bclass == IOPRIO_CLASS_NONE) - bclass = IOPRIO_CLASS_BE; - - if (aclass == bclass) - return min(aprio, bprio); - if (aclass > bclass) - return bprio; - else - return aprio; -} +extern int ioprio_best(unsigned short aprio, unsigned short bprio); #endif diff --git a/include/linux/irq.h b/include/linux/irq.h index b48eae32dc61..fbf6d901e9c2 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -47,8 +47,8 @@ #define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */ #define IRQ_LEVEL 0x00400000 /* IRQ level triggered */ #define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */ +#define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */ #ifdef CONFIG_IRQ_PER_CPU -# define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */ # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) #else # define CHECK_IRQ_PER_CPU(var) 0 @@ -58,6 +58,7 @@ #define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */ #define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */ #define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. 
*/ +#define IRQ_WAKEUP 0x20000000 /* IRQ triggers system wakeup */ struct proc_dir_entry; @@ -124,6 +125,7 @@ struct irq_chip { * @action: the irq action chain * @status: status information * @depth: disable-depth, for nested irq_disable() calls + * @wake_depth: enable depth, for multiple set_irq_wake() callers * @irq_count: stats field to detect stalled irqs * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP @@ -147,6 +149,7 @@ struct irq_desc { unsigned int status; /* IRQ status */ unsigned int depth; /* nested irq disables */ + unsigned int wake_depth; /* nested wake enables */ unsigned int irq_count; /* For detecting broken IRQs */ unsigned int irqs_unhandled; spinlock_t lock; diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 20eb34403d0c..a04c154c5207 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -72,6 +72,9 @@ extern int journal_enable_debug; #endif extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry); +extern void * jbd_slab_alloc(size_t size, gfp_t flags); +extern void jbd_slab_free(void *ptr, size_t size); + #define jbd_kmalloc(size, flags) \ __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) #define jbd_rep_kmalloc(size, flags) \ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 043376920f51..329ebcffa106 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -47,8 +47,8 @@ * - (NOM / DEN) fits in (32 - LSH) bits. * - (NOM % DEN) fits in (32 - LSH) bits. */ -#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \ - + (((NOM % DEN) << LSH) + DEN / 2) / DEN) +#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \ + + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN)) /* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */ #define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8)) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 181c69cad4e3..851aa1bcfc1a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -210,6 +210,7 @@ extern enum system_states { extern void dump_stack(void); #ifdef DEBUG +/* If you are writing a driver, please use dev_dbg instead */ #define pr_debug(fmt,arg...) 
\ printk(KERN_DEBUG fmt,##arg) #else diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 0503b2ed8bae..2d229327959e 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -46,8 +46,6 @@ enum kobject_action { KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ - KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */ - KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */ }; struct kobject { diff --git a/include/linux/libata.h b/include/linux/libata.h index 6cc497a2b6da..66c3100c2b94 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -265,12 +265,14 @@ enum { /* ata_eh_info->flags */ ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ - ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */ + ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ ATA_EHI_QUIET = (1 << 3), /* be quiet */ ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ + ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK, + /* max repeat if error condition is still set after ->error_handler */ ATA_EH_MAX_REPEAT = 5, diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index aa4fe905bb4d..0d92c468d55a 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -123,7 +123,6 @@ struct nlm_block { unsigned int b_id; /* block id */ unsigned char b_queued; /* re-queued */ unsigned char b_granted; /* VFS granted lock */ - unsigned char b_done; /* callback complete */ struct nlm_file * b_file; /* file in question */ }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 990957e0929f..f0b135cd86da 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -336,6 +336,7 @@ static inline void init_page_count(struct page *page) } void put_page(struct page *page); +void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 75f02d8c6ed3..50a4719512ed 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -320,6 +320,9 @@ struct net_device #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) + /* List of features with software fallbacks. */ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) + #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) #define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) @@ -1012,6 +1015,30 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) unlikely(skb->ip_summed != CHECKSUM_HW)); } +/* On bonding slaves other than the currently active slave, suppress + * duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast. 
+ */ +static inline int skb_bond_should_drop(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + struct net_device *master = dev->master; + + if (master && + (dev->priv_flags & IFF_SLAVE_INACTIVE)) { + if (master->priv_flags & IFF_MASTER_ALB) { + if (skb->pkt_type != PACKET_BROADCAST && + skb->pkt_type != PACKET_MULTICAST) + return 0; + } + if (master->priv_flags & IFF_MASTER_8023AD && + skb->protocol == __constant_htons(ETH_P_SLOW)) + return 0; + + return 1; + } + return 0; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_DEV_H */ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 31f02ba036ce..427c67ff89e9 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -6,7 +6,6 @@ #include #if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER) -#include #include #endif @@ -49,15 +48,25 @@ enum nf_br_hook_priorities { /* Only used in br_forward.c */ static inline -void nf_bridge_maybe_copy_header(struct sk_buff *skb) +int nf_bridge_maybe_copy_header(struct sk_buff *skb) { + int err; + if (skb->nf_bridge) { if (skb->protocol == __constant_htons(ETH_P_8021Q)) { + err = skb_cow(skb, 18); + if (err) + return err; memcpy(skb->data - 18, skb->nf_bridge->data, 18); skb_push(skb, 4); - } else + } else { + err = skb_cow(skb, 16); + if (err) + return err; memcpy(skb->data - 16, skb->nf_bridge->data, 16); + } } + return 0; } /* This is called by the IP fragmenting code and it ensures there is diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 55ea853d57bc..247434553ae8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -476,10 +476,9 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page) } /* - * Allocate and free nfs_write_data structures + * Allocate nfs_write_data structures */ extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount); -extern void nfs_writedata_free(struct nfs_write_data *p); /* * linux/fs/nfs/read.c @@ -491,10 +490,9 @@ extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); extern void nfs_readdata_release(void *data); /* - * Allocate and free nfs_read_data structures + * Allocate nfs_read_data structures */ extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount); -extern void nfs_readdata_free(struct nfs_read_data *p); /* * linux/fs/nfs3proc.c diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 2d3fb6416d91..db9cbf68e12b 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -659,7 +659,7 @@ struct nfs4_rename_res { struct nfs4_setclientid { const nfs4_verifier * sc_verifier; /* request */ unsigned int sc_name_len; - char sc_name[32]; /* request */ + char sc_name[48]; /* request */ u32 sc_prog; /* request */ unsigned int sc_netid_len; char sc_netid[4]; /* request */ diff --git a/include/linux/node.h b/include/linux/node.h index 81dcec84cd8f..bc001bc225c3 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -30,12 +30,20 @@ extern struct node node_devices[]; extern int register_node(struct node *, int, struct node *); extern void unregister_node(struct node *node); +#ifdef CONFIG_NUMA extern int register_one_node(int nid); extern void unregister_one_node(int nid); -#ifdef CONFIG_NUMA extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); #else +static inline int register_one_node(int nid) +{ + return 0; +} +static inline int unregister_one_node(int nid) +{ + return 0; +} 
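skb_bond_should_drop() above is meant to run early in receive paths, as the __vlan_hwaccel_rx() change earlier in this patch does. A minimal sketch of the same pattern in a hypothetical non-VLAN receive wrapper (example_rx() is illustrative only):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Rough sketch only: frames arriving on an inactive bonding slave are
 * freed before reaching the stack, so only the active slave's copy
 * (plus the 802.3ad/ALB exceptions handled inside skb_bond_should_drop())
 * is delivered.
 */
static int example_rx(struct sk_buff *skb)
{
        if (skb_bond_should_drop(skb)) {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }
        return netif_rx(skb);
}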
static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index df7b62676d87..47faf43fde1f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2159,6 +2159,7 @@ #define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 #define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 #define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 +#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 #define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 #define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 diff --git a/include/linux/phy.h b/include/linux/phy.h index 331521a10a2d..9447a57ee8a9 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -378,6 +378,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct mii_ioctl_data *mii_data, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); +struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id); extern struct bus_type mdio_bus_type; #endif /* __PHY_H */ diff --git a/include/linux/pmu.h b/include/linux/pmu.h index 2ed807ddc08c..783177387ac6 100644 --- a/include/linux/pmu.h +++ b/include/linux/pmu.h @@ -231,7 +231,6 @@ extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES]; extern unsigned int pmu_power_flags; /* Backlight */ -extern int disable_kernel_backlight; -extern void pmu_backlight_init(struct device_node*); +extern void pmu_backlight_init(void); #endif /* __KERNEL__ */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 6afa72e080cb..6674fc1e51bf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1557,6 +1557,14 @@ static inline void freeze(struct task_struct *p) p->flags |= PF_FREEZE; } +/* + * Sometimes we may need to cancel the previous 'freeze' request + */ +static inline void do_not_freeze(struct task_struct *p) +{ + p->flags &= ~PF_FREEZE; +} + /* * Wake up a frozen process */ diff --git a/include/linux/security.h b/include/linux/security.h index f75303831d09..6bc2aad494ff 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1109,6 +1109,16 @@ struct swap_info_struct; * @name contains the name of the security module being unstacked. * @ops contains a pointer to the struct security_operations of the module to unstack. * + * @secid_to_secctx: + * Convert secid to security context. + * @secid contains the security ID. + * @secdata contains the pointer that stores the converted security context. + * + * @release_secctx: + * Release the security context. + * @secdata contains the security context. + * @seclen contains the length of the security context. + * * This is the main security structure. 
*/ struct security_operations { @@ -1289,6 +1299,8 @@ struct security_operations { int (*getprocattr)(struct task_struct *p, char *name, void *value, size_t size); int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size); + int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); + void (*release_secctx)(char *secdata, u32 seclen); #ifdef CONFIG_SECURITY_NETWORK int (*unix_stream_connect) (struct socket * sock, @@ -1317,7 +1329,7 @@ struct security_operations { int (*socket_shutdown) (struct socket * sock, int how); int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb); int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); - int (*socket_getpeersec_dgram) (struct sk_buff *skb, char **secdata, u32 *seclen); + int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid); int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); void (*sk_free_security) (struct sock *sk); unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir); @@ -2059,6 +2071,16 @@ static inline int security_netlink_recv(struct sk_buff * skb, int cap) return security_ops->netlink_recv(skb, cap); } +static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +{ + return security_ops->secid_to_secctx(secid, secdata, seclen); +} + +static inline void security_release_secctx(char *secdata, u32 seclen) +{ + return security_ops->release_secctx(secdata, seclen); +} + /* prototypes */ extern int security_init (void); extern int register_security (struct security_operations *ops); @@ -2725,6 +2747,14 @@ static inline void securityfs_remove(struct dentry *dentry) { } +static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +{ + return -EOPNOTSUPP; +} + +static inline void security_release_secctx(char *secdata, u32 seclen) +{ +} #endif /* CONFIG_SECURITY */ #ifdef CONFIG_SECURITY_NETWORK @@ -2840,10 +2870,9 @@ static inline int security_socket_getpeersec_stream(struct socket *sock, char __ return security_ops->socket_getpeersec_stream(sock, optval, optlen, len); } -static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, - u32 *seclen) +static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { - return security_ops->socket_getpeersec_dgram(skb, secdata, seclen); + return security_ops->socket_getpeersec_dgram(sock, skb, secid); } static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) @@ -2968,8 +2997,7 @@ static inline int security_socket_getpeersec_stream(struct socket *sock, char __ return -ENOPROTOOPT; } -static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, - u32 *seclen) +static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { return -ENOPROTOOPT; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4307e764ef0a..755e9cddac47 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -604,12 +604,17 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } -extern struct lock_class_key skb_queue_lock_key; - +/* + * This function creates a split out lock class for each invocation; + * this is needed for now since a whole lot of users of the skb-queue + * infrastructure in drivers have different locking usage (in hardirq) + * than the networking core (in softirq only). 
In the long run either the + * network layer or drivers should need annotation to consolidate the + * main types of usage into 3 classes. + */ static inline void skb_queue_head_init(struct sk_buff_head *list) { spin_lock_init(&list->lock); - lockdep_set_class(&list->lock, &skb_queue_lock_key); list->prev = list->next = (struct sk_buff *)list; list->qlen = 0; } @@ -1034,6 +1039,21 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len) return (len < skb->len) ? __pskb_trim(skb, len) : 0; } +/** + * pskb_trim_unique - remove end from a paged unique (not cloned) buffer + * @skb: buffer to alter + * @len: new length + * + * This is identical to pskb_trim except that the caller knows that + * the skb is not cloned so we should never get an error due to out- + * of-memory. + */ +static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) +{ + int err = pskb_trim(skb, len); + BUG_ON(err); +} + /** * skb_orphan - orphan a buffer * @skb: buffer to orphan @@ -1076,7 +1096,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) * the headroom they think they need without accounting for the * built in space. The built in space is used for optimisations. * - * %NULL is returned in there is no free memory. + * %NULL is returned if there is no free memory. */ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask) @@ -1096,7 +1116,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, * the headroom they think they need without accounting for the * built in space. The built in space is used for optimisations. * - * %NULL is returned in there is no free memory. Although this function + * %NULL is returned if there is no free memory. Although this function * allocates memory it can be called from an interrupt. */ static inline struct sk_buff *dev_alloc_skb(unsigned int length) @@ -1104,6 +1124,28 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length) return __dev_alloc_skb(length, GFP_ATOMIC); } +extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, + unsigned int length, gfp_t gfp_mask); + +/** + * netdev_alloc_skb - allocate an skbuff for rx on a specific device + * @dev: network device to receive on + * @length: length to allocate + * + * Allocate a new &sk_buff and assign it a usage count of one. The + * buffer has unspecified headroom built in. Users should allocate + * the headroom they think they need without accounting for the + * built in space. The built in space is used for optimisations. + * + * %NULL is returned if there is no free memory. Although this function + * allocates memory it can be called from an interrupt. 
+ */ +static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, + unsigned int length) +{ + return __netdev_alloc_skb(dev, length, GFP_ATOMIC); +} + /** * skb_cow - copy header of skb when it is required * @skb: buffer to cow diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 2c2189cb30aa..a481472c9484 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -42,9 +42,9 @@ RPC_I(struct inode *inode) extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *); -extern int rpc_rmdir(char *); +extern int rpc_rmdir(struct dentry *); extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags); -extern int rpc_unlink(char *); +extern int rpc_unlink(struct dentry *); extern struct vfsmount *rpc_get_mount(void); extern void rpc_put_mount(void); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index e8bbe8118de8..3a0cca255b76 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -37,7 +37,7 @@ extern unsigned int xprt_max_resvport; #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) -#define RPC_DEF_MIN_RESVPORT (650U) +#define RPC_DEF_MIN_RESVPORT (665U) #define RPC_DEF_MAX_RESVPORT (1023U) /* @@ -229,7 +229,7 @@ int xprt_reserve_xprt(struct rpc_task *task); int xprt_reserve_xprt_cong(struct rpc_task *task); int xprt_prepare_transmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); -void xprt_abort_transmit(struct rpc_task *task); +void xprt_end_transmit(struct rpc_task *task); int xprt_adjust_timeout(struct rpc_rqst *req); void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); diff --git a/include/linux/tty.h b/include/linux/tty.h index e421d5e34818..04827ca65781 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -59,6 +59,7 @@ struct tty_bufhead { struct tty_buffer *head; /* Queue head */ struct tty_buffer *tail; /* Active buffer */ struct tty_buffer *free; /* Free queue head */ + int memory_used; /* Buffer space used excluding free queue */ }; /* * The pty uses char_buf and flag_buf as a contiguous buffer diff --git a/include/linux/usb.h b/include/linux/usb.h index c944e8f06a4a..d2bd0c8e0154 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -103,8 +103,7 @@ enum usb_interface_condition { * @condition: binding state of the interface: not bound, binding * (in probe()), bound to a driver, or unbinding (in disconnect()) * @dev: driver model's view of this device - * @usb_dev: if an interface is bound to the USB major, this will point - * to the sysfs representation for that device. + * @class_dev: driver model's class view of this device. * * USB device drivers attach to interfaces on a physical device. 
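A rough sketch of how a driver's rx-refill path might use the netdev_alloc_skb() helper added above (example_rx_refill() and bufsz are illustrative, not from this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Rough sketch only: netdev_alloc_skb() allocates with GFP_ATOMIC and
 * associates the skb with @dev, so it is usable from the rx interrupt
 * path without a separate skb->dev assignment.
 */
static struct sk_buff *example_rx_refill(struct net_device *dev,
                                         unsigned int bufsz)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, bufsz + NET_IP_ALIGN);

        if (!skb)
                return NULL;            /* caller accounts the failure */

        skb_reserve(skb, NET_IP_ALIGN); /* align the IP header */
        return skb;
}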
Each * interface encapsulates a single high level function, such as feeding @@ -144,7 +143,7 @@ struct usb_interface { * bound to */ enum usb_interface_condition condition; /* state of binding */ struct device dev; /* interface specific device info */ - struct device *usb_dev; /* pointer to the usb class's device, if any */ + struct class_device *class_dev; }; #define to_usb_interface(d) container_of(d, struct usb_interface, dev) #define interface_to_usbdev(intf) \ @@ -361,7 +360,7 @@ struct usb_device { char *serial; /* iSerialNumber string, if present */ struct list_head filelist; - struct device *usbfs_dev; + struct class_device *class_dev; struct dentry *usbfs_dentry; /* usbfs dentry entry for the device */ /* diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index f38f43f20fae..e7fc5fed5b98 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -44,7 +44,9 @@ US_FLAG(NO_WP_DETECT, 0x00000200) \ /* Don't check for write-protect */ \ US_FLAG(MAX_SECTORS_64, 0x00000400) \ - /* Sets max_sectors to 64 */ + /* Sets max_sectors to 64 */ \ + US_FLAG(IGNORE_DEVICE, 0x00000800) \ + /* Don't claim device */ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; diff --git a/include/linux/videodev.h b/include/linux/videodev.h index 41bc7e9603cd..518c7a32175e 100644 --- a/include/linux/videodev.h +++ b/include/linux/videodev.h @@ -12,10 +12,11 @@ #ifndef __LINUX_VIDEODEV_H #define __LINUX_VIDEODEV_H -#define HAVE_V4L1 1 - #include +#ifdef CONFIG_VIDEO_V4L1_COMPAT +#define HAVE_V4L1 1 + struct video_capability { char name[32]; @@ -336,6 +337,8 @@ struct video_code #define VID_HARDWARE_SN9C102 38 #define VID_HARDWARE_ARV 39 +#endif /* CONFIG_VIDEO_V4L1_COMPAT */ + #endif /* __LINUX_VIDEODEV_H */ /* diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index a62673dad76e..b7146956a929 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -716,7 +716,7 @@ struct v4l2_ext_control __s64 value64; void *reserved; }; -}; +} __attribute__ ((packed)); struct v4l2_ext_controls { diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 1ab806c47514..2d9b1b60798a 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -41,23 +41,23 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); static inline void __count_vm_event(enum vm_event_item item) { - __get_cpu_var(vm_event_states.event[item])++; + __get_cpu_var(vm_event_states).event[item]++; } static inline void count_vm_event(enum vm_event_item item) { - get_cpu_var(vm_event_states.event[item])++; + get_cpu_var(vm_event_states).event[item]++; put_cpu(); } static inline void __count_vm_events(enum vm_event_item item, long delta) { - __get_cpu_var(vm_event_states.event[item]) += delta; + __get_cpu_var(vm_event_states).event[item] += delta; } static inline void count_vm_events(enum vm_event_item item, long delta) { - get_cpu_var(vm_event_states.event[item]) += delta; + get_cpu_var(vm_event_states).event[item] += delta; put_cpu(); } diff --git a/include/linux/vt.h b/include/linux/vt.h index 8ab334a48222..ba806e8711be 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -60,5 +60,6 @@ struct vt_consize { #define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */ #define VT_LOCKSWITCH 0x560B /* disallow vt switching */ #define VT_UNLOCKSWITCH 0x560C /* allow vt switching */ +#define VT_GETHIFONTMASK 0x560D /* return hi font mask */ #endif /* _LINUX_VT_H */ diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 
62dae1a8c441..600d61d7d2ab 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -16,7 +16,7 @@ #include #include #include /* need __user */ -#ifdef CONFIG_VIDEO_V4L1 +#ifdef CONFIG_VIDEO_V4L1_COMPAT #include #else #include @@ -341,11 +341,14 @@ extern int video_usercopy(struct inode *inode, struct file *file, extern struct video_device* video_devdata(struct file*); #define to_video_device(cd) container_of(cd, struct video_device, class_dev) -static inline void +static inline int video_device_create_file(struct video_device *vfd, struct class_device_attribute *attr) { - class_device_create_file(&vfd->class_dev, attr); + int ret = class_device_create_file(&vfd->class_dev, attr); + if (ret < 0) + printk(KERN_WARNING "%s error: %d\n", __FUNCTION__, ret); + return ret; } static inline void video_device_remove_file(struct video_device *vfd, diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 2fec827c8801..c0398f5a8cb9 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -54,15 +54,13 @@ struct unix_skb_parms { struct ucred creds; /* Skb credentials */ struct scm_fp_list *fp; /* Passed files */ #ifdef CONFIG_SECURITY_NETWORK - char *secdata; /* Security context */ - u32 seclen; /* Security length */ + u32 secid; /* Security ID */ #endif }; #define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) -#define UNIXSECDATA(skb) (&UNIXCB((skb)).secdata) -#define UNIXSECLEN(skb) (&UNIXCB((skb)).seclen) +#define UNIXSID(skb) (&UNIXCB((skb)).secid) #define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) #define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock) diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index ab29dafb1a6a..96b0e66406ec 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -139,16 +139,22 @@ extern rwlock_t rt6_lock; /* * Store a destination cache entry in a socket */ -static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, - struct in6_addr *daddr) +static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, + struct in6_addr *daddr) { struct ipv6_pinfo *np = inet6_sk(sk); struct rt6_info *rt = (struct rt6_info *) dst; - write_lock(&sk->sk_dst_lock); sk_setup_caps(sk, dst); np->daddr_cache = daddr; np->dst_cookie = rt->rt6i_node ? 
rt->rt6i_node->fn_sernum : 0; +} + +static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, + struct in6_addr *daddr) +{ + write_lock(&sk->sk_dst_lock); + __ip6_dst_store(sk, dst, daddr); write_unlock(&sk->sk_dst_lock); } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index a8fdf7970b37..ece7e8a84ffd 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -468,6 +468,9 @@ extern void ip6_flush_pending_frames(struct sock *sk); extern int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl); +extern int ip6_sk_dst_lookup(struct sock *sk, + struct dst_entry **dst, + struct flowi *fl); /* * skb processing functions diff --git a/include/net/netdma.h b/include/net/netdma.h index ceae5ee85c04..7f53cd1d8b1e 100644 --- a/include/net/netdma.h +++ b/include/net/netdma.h @@ -29,7 +29,7 @@ static inline struct dma_chan *get_softnet_dma(void) { struct dma_chan *chan; rcu_read_lock(); - chan = rcu_dereference(__get_cpu_var(softnet_data.net_dma)); + chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma); if (chan) dma_chan_get(chan); rcu_read_unlock(); diff --git a/include/net/netevent.h b/include/net/netevent.h new file mode 100644 index 000000000000..e5d216241423 --- /dev/null +++ b/include/net/netevent.h @@ -0,0 +1,33 @@ +#ifndef _NET_EVENT_H +#define _NET_EVENT_H + +/* + * Generic netevent notifiers + * + * Authors: + * Tom Tucker + * Steve Wise + * + * Changes: + */ +#ifdef __KERNEL__ + +#include + +struct netevent_redirect { + struct dst_entry *old; + struct dst_entry *new; +}; + +enum netevent_notif_type { + NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */ + NETEVENT_PMTU_UPDATE, /* arg is struct dst_entry ptr */ + NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */ +}; + +extern int register_netevent_notifier(struct notifier_block *nb); +extern int unregister_netevent_notifier(struct notifier_block *nb); +extern int call_netevent_notifiers(unsigned long val, void *v); + +#endif +#endif diff --git a/include/net/red.h b/include/net/red.h index 5ccdbb3d4722..a4eb37946f2c 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -212,7 +212,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) * Seems, it is the best solution to * problem of too coarse exponent tabulation. 
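The new netevent notifier chain above lets interested code subscribe to neighbour, PMTU and redirect updates through an ordinary notifier_block. A rough sketch of a consumer (the example_* names are illustrative only):

#include <linux/notifier.h>
#include <net/netevent.h>

/*
 * Rough sketch only: the callback receives the event type and the
 * argument documented in enum netevent_notif_type.
 */
static int example_netevent_cb(struct notifier_block *nb,
                               unsigned long event, void *arg)
{
        switch (event) {
        case NETEVENT_NEIGH_UPDATE:     /* arg is a struct neighbour * */
        case NETEVENT_PMTU_UPDATE:      /* arg is a struct dst_entry * */
        case NETEVENT_REDIRECT:         /* arg is a struct netevent_redirect * */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_netevent_nb = {
        .notifier_call  = example_netevent_cb,
};

/*
 * register_netevent_notifier(&example_netevent_nb) at init time,
 * unregister_netevent_notifier(&example_netevent_nb) on teardown.
 */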
*/ - us_idle = (p->qavg * us_idle) >> p->Scell_log; + us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log; if (us_idle < (p->qavg >> 1)) return p->qavg - us_idle; diff --git a/include/net/scm.h b/include/net/scm.h index 02daa097cdcd..5637d5e22d5f 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -3,6 +3,7 @@ #include #include +#include /* Well, we should have at least one descriptor open * to accept passed FDs 8) @@ -20,8 +21,7 @@ struct scm_cookie struct ucred creds; /* Skb credentials */ struct scm_fp_list *fp; /* Passed files */ #ifdef CONFIG_SECURITY_NETWORK - char *secdata; /* Security context */ - u32 seclen; /* Security length */ + u32 secid; /* Passed security ID */ #endif unsigned long seq; /* Connection seqno */ }; @@ -32,6 +32,16 @@ extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie extern void __scm_destroy(struct scm_cookie *scm); extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl); +#ifdef CONFIG_SECURITY_NETWORK +static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm) +{ + security_socket_getpeersec_dgram(sock, NULL, &scm->secid); +} +#else +static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm) +{ } +#endif /* CONFIG_SECURITY_NETWORK */ + static __inline__ void scm_destroy(struct scm_cookie *scm) { if (scm && scm->fp) @@ -47,6 +57,7 @@ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, scm->creds.pid = p->tgid; scm->fp = NULL; scm->seq = 0; + unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) return 0; return __scm_send(sock, msg, scm); @@ -55,8 +66,18 @@ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, #ifdef CONFIG_SECURITY_NETWORK static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) { - if (test_bit(SOCK_PASSSEC, &sock->flags) && scm->secdata != NULL) - put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, scm->seclen, scm->secdata); + char *secdata; + u32 seclen; + int err; + + if (test_bit(SOCK_PASSSEC, &sock->flags)) { + err = security_secid_to_secctx(scm->secid, &secdata, &seclen); + + if (!err) { + put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata); + security_release_secctx(secdata, seclen); + } + } } #else static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index a9663b49ea54..92eae0e0f3f1 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -404,19 +404,6 @@ static inline int sctp_list_single_entry(struct list_head *head) return ((head->next != head) && (head->next == head->prev)); } -/* Calculate the size (in bytes) occupied by the data of an iovec. */ -static inline size_t get_user_iov_size(struct iovec *iov, int iovlen) -{ - size_t retval = 0; - - for (; iovlen > 0; --iovlen) { - retval += iov->iov_len; - iov++; - } - - return retval; -} - /* Generate a random jitter in the range of -50% ~ +50% of input RTO. 
*/ static inline __s32 sctp_jitter(__u32 rto) { diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 1eac3d0eb7a9..de313de4fefe 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -221,8 +221,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, const struct sctp_chunk *, __u32 tsn); struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, - const struct sctp_chunk *, - const struct msghdr *); + const struct msghdr *, size_t msg_len); struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, diff --git a/include/net/tcp.h b/include/net/tcp.h index 0720bddff1e9..7a093d0aa0fe 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -914,6 +914,9 @@ static inline void tcp_set_state(struct sock *sk, int state) static inline void tcp_done(struct sock *sk) { + if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) + TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); + tcp_set_state(sk, TCP_CLOSE); tcp_clear_xmit_timers(sk); diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index ba2760802ded..41904f611d12 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -60,6 +60,7 @@ struct iscsi_nopin; #define TMABORT_SUCCESS 0x1 #define TMABORT_FAILED 0x2 #define TMABORT_TIMEDOUT 0x3 +#define TMABORT_NOT_FOUND 0x4 /* Connection suspend "bit" */ #define ISCSI_SUSPEND_BIT 1 @@ -83,6 +84,12 @@ struct iscsi_mgmt_task { struct list_head running; }; +enum { + ISCSI_TASK_COMPLETED, + ISCSI_TASK_PENDING, + ISCSI_TASK_RUNNING, +}; + struct iscsi_cmd_task { /* * Becuae LLDs allocate their hdr differently, this is a pointer to @@ -101,6 +108,8 @@ struct iscsi_cmd_task { struct iscsi_conn *conn; /* used connection */ struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */ + /* state set/tested under session->lock */ + int state; struct list_head running; /* running cmd list */ void *dd_data; /* driver/transport data */ }; @@ -126,6 +135,14 @@ struct iscsi_conn { int id; /* CID */ struct list_head item; /* maintains list of conns */ int c_stage; /* connection state */ + /* + * Preallocated buffer for pdus that have data but do not + * originate from scsi-ml. We never have two pdus using the + * buffer at the same time. It is only allocated to + * the default max recv size because the pdus we support + * should always fit in this buffer + */ + char *data; struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ @@ -134,7 +151,7 @@ struct iscsi_conn { struct kfifo *immqueue; /* immediate xmit queue */ struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */ struct list_head mgmt_run_list; /* list of control tasks */ - struct kfifo *xmitqueue; /* data-path cmd queue */ + struct list_head xmitqueue; /* data-path cmd queue */ struct list_head run_list; /* list of cmds in progress */ struct work_struct xmitwork; /* per-conn. xmit workqueue */ /* diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 5a3df1d7085f..39e833260bd0 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -57,8 +57,6 @@ struct sockaddr; * @stop_conn: suspend/recover/terminate connection * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text. 
* @session_recovery_timedout: notify LLD a block during recovery timed out - * @suspend_conn_recv: susepend the recv side of the connection - * @termincate_conn: destroy socket connection. Called with mutex lock. * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs. * Called from queuecommand with session lock held. * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs. @@ -112,8 +110,6 @@ struct iscsi_transport { char *data, uint32_t data_size); void (*get_stats) (struct iscsi_cls_conn *conn, struct iscsi_stats *stats); - void (*suspend_conn_recv) (struct iscsi_conn *conn); - void (*terminate_conn) (struct iscsi_conn *conn); void (*init_cmd_task) (struct iscsi_cmd_task *ctask); void (*init_mgmt_task) (struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, diff --git a/ipc/msg.c b/ipc/msg.c index cd92d342953e..2b4fccf8ea55 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -1,6 +1,6 @@ /* * linux/ipc/msg.c - * Copyright (C) 1992 Krishna Balasubramanian + * Copyright (C) 1992 Krishna Balasubramanian * * Removed all the remaining kerneld mess * Catch the -EFAULT stuff properly @@ -41,22 +41,24 @@ int msg_ctlmax = MSGMAX; int msg_ctlmnb = MSGMNB; int msg_ctlmni = MSGMNI; -/* one msg_receiver structure for each sleeping receiver */ +/* + * one msg_receiver structure for each sleeping receiver: + */ struct msg_receiver { - struct list_head r_list; - struct task_struct* r_tsk; + struct list_head r_list; + struct task_struct *r_tsk; - int r_mode; - long r_msgtype; - long r_maxsize; + int r_mode; + long r_msgtype; + long r_maxsize; - struct msg_msg* volatile r_msg; + volatile struct msg_msg *r_msg; }; /* one msg_sender for each sleeping sender */ struct msg_sender { - struct list_head list; - struct task_struct* tsk; + struct list_head list; + struct task_struct *tsk; }; #define SEARCH_ANY 1 @@ -64,45 +66,42 @@ struct msg_sender { #define SEARCH_NOTEQUAL 3 #define SEARCH_LESSEQUAL 4 -static atomic_t msg_bytes = ATOMIC_INIT(0); -static atomic_t msg_hdrs = ATOMIC_INIT(0); +static atomic_t msg_bytes = ATOMIC_INIT(0); +static atomic_t msg_hdrs = ATOMIC_INIT(0); static struct ipc_ids msg_ids; -#define msg_lock(id) ((struct msg_queue*)ipc_lock(&msg_ids,id)) -#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) -#define msg_rmid(id) ((struct msg_queue*)ipc_rmid(&msg_ids,id)) -#define msg_checkid(msq, msgid) \ - ipc_checkid(&msg_ids,&msq->q_perm,msgid) -#define msg_buildid(id, seq) \ - ipc_buildid(&msg_ids, id, seq) +#define msg_lock(id) ((struct msg_queue *)ipc_lock(&msg_ids, id)) +#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) +#define msg_rmid(id) ((struct msg_queue *)ipc_rmid(&msg_ids, id)) +#define msg_checkid(msq, msgid) ipc_checkid(&msg_ids, &msq->q_perm, msgid) +#define msg_buildid(id, seq) ipc_buildid(&msg_ids, id, seq) -static void freeque (struct msg_queue *msq, int id); -static int newque (key_t key, int msgflg); +static void freeque(struct msg_queue *msq, int id); +static int newque(key_t key, int msgflg); #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it); #endif -void __init msg_init (void) +void __init msg_init(void) { - ipc_init_ids(&msg_ids,msg_ctlmni); + ipc_init_ids(&msg_ids, msg_ctlmni); ipc_init_proc_interface("sysvipc/msg", " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", &msg_ids, sysvipc_msg_proc_show); } -static int newque (key_t key, int msgflg) +static int newque(key_t key, int msgflg) { - int id; - int retval; struct msg_queue *msq; + int id, retval; - msq = 
ipc_rcu_alloc(sizeof(*msq)); - if (!msq) + msq = ipc_rcu_alloc(sizeof(*msq)); + if (!msq) return -ENOMEM; - msq->q_perm.mode = (msgflg & S_IRWXUGO); + msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; msq->q_perm.security = NULL; @@ -113,13 +112,13 @@ static int newque (key_t key, int msgflg) } id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni); - if(id == -1) { + if (id == -1) { security_msg_queue_free(msq); ipc_rcu_putref(msq); return -ENOSPC; } - msq->q_id = msg_buildid(id,msq->q_perm.seq); + msq->q_id = msg_buildid(id, msq->q_perm.seq); msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; @@ -133,44 +132,44 @@ static int newque (key_t key, int msgflg) return msq->q_id; } -static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss) +static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss) { - mss->tsk=current; - current->state=TASK_INTERRUPTIBLE; - list_add_tail(&mss->list,&msq->q_senders); + mss->tsk = current; + current->state = TASK_INTERRUPTIBLE; + list_add_tail(&mss->list, &msq->q_senders); } -static inline void ss_del(struct msg_sender* mss) +static inline void ss_del(struct msg_sender *mss) { - if(mss->list.next != NULL) + if (mss->list.next != NULL) list_del(&mss->list); } -static void ss_wakeup(struct list_head* h, int kill) +static void ss_wakeup(struct list_head *h, int kill) { struct list_head *tmp; tmp = h->next; while (tmp != h) { - struct msg_sender* mss; - - mss = list_entry(tmp,struct msg_sender,list); + struct msg_sender *mss; + + mss = list_entry(tmp, struct msg_sender, list); tmp = tmp->next; - if(kill) - mss->list.next=NULL; + if (kill) + mss->list.next = NULL; wake_up_process(mss->tsk); } } -static void expunge_all(struct msg_queue* msq, int res) +static void expunge_all(struct msg_queue *msq, int res) { struct list_head *tmp; tmp = msq->q_receivers.next; while (tmp != &msq->q_receivers) { - struct msg_receiver* msr; - - msr = list_entry(tmp,struct msg_receiver,r_list); + struct msg_receiver *msr; + + msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; msr->r_msg = NULL; wake_up_process(msr->r_tsk); @@ -178,26 +177,28 @@ static void expunge_all(struct msg_queue* msq, int res) msr->r_msg = ERR_PTR(res); } } -/* - * freeque() wakes up waiters on the sender and receiver waiting queue, - * removes the message queue from message queue ID + +/* + * freeque() wakes up waiters on the sender and receiver waiting queue, + * removes the message queue from message queue ID * array, and cleans up all the messages associated with this queue. * * msg_ids.mutex and the spinlock for this message queue is hold * before freeque() is called. msg_ids.mutex remains locked on exit. 
*/ -static void freeque (struct msg_queue *msq, int id) +static void freeque(struct msg_queue *msq, int id) { struct list_head *tmp; - expunge_all(msq,-EIDRM); - ss_wakeup(&msq->q_senders,1); + expunge_all(msq, -EIDRM); + ss_wakeup(&msq->q_senders, 1); msq = msg_rmid(id); msg_unlock(msq); - + tmp = msq->q_messages.next; - while(tmp != &msq->q_messages) { - struct msg_msg* msg = list_entry(tmp,struct msg_msg,m_list); + while (tmp != &msq->q_messages) { + struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list); + tmp = tmp->next; atomic_dec(&msg_hdrs); free_msg(msg); @@ -207,10 +208,10 @@ static void freeque (struct msg_queue *msq, int id) ipc_rcu_putref(msq); } -asmlinkage long sys_msgget (key_t key, int msgflg) +asmlinkage long sys_msgget(key_t key, int msgflg) { - int id, ret = -EPERM; struct msg_queue *msq; + int id, ret = -EPERM; mutex_lock(&msg_ids.mutex); if (key == IPC_PRIVATE) @@ -224,31 +225,34 @@ asmlinkage long sys_msgget (key_t key, int msgflg) ret = -EEXIST; } else { msq = msg_lock(id); - BUG_ON(msq==NULL); + BUG_ON(msq == NULL); if (ipcperms(&msq->q_perm, msgflg)) ret = -EACCES; else { int qid = msg_buildid(id, msq->q_perm.seq); - ret = security_msg_queue_associate(msq, msgflg); + + ret = security_msg_queue_associate(msq, msgflg); if (!ret) ret = qid; } msg_unlock(msq); } mutex_unlock(&msg_ids.mutex); + return ret; } -static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) +static inline unsigned long +copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { switch(version) { case IPC_64: - return copy_to_user (buf, in, sizeof(*in)); + return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: - { + { struct msqid_ds out; - memset(&out,0,sizeof(out)); + memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm); @@ -256,18 +260,18 @@ static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ out.msg_rtime = in->msg_rtime; out.msg_ctime = in->msg_ctime; - if(in->msg_cbytes > USHRT_MAX) + if (in->msg_cbytes > USHRT_MAX) out.msg_cbytes = USHRT_MAX; else out.msg_cbytes = in->msg_cbytes; out.msg_lcbytes = in->msg_cbytes; - if(in->msg_qnum > USHRT_MAX) + if (in->msg_qnum > USHRT_MAX) out.msg_qnum = USHRT_MAX; else out.msg_qnum = in->msg_qnum; - if(in->msg_qbytes > USHRT_MAX) + if (in->msg_qbytes > USHRT_MAX) out.msg_qbytes = USHRT_MAX; else out.msg_qbytes = in->msg_qbytes; @@ -276,8 +280,8 @@ static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ out.msg_lspid = in->msg_lspid; out.msg_lrpid = in->msg_lrpid; - return copy_to_user (buf, &out, sizeof(out)); - } + return copy_to_user(buf, &out, sizeof(out)); + } default: return -EINVAL; } @@ -290,14 +294,15 @@ struct msq_setbuf { mode_t mode; }; -static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) +static inline unsigned long +copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) { switch(version) { case IPC_64: - { + { struct msqid64_ds tbuf; - if (copy_from_user (&tbuf, buf, sizeof (tbuf))) + if (copy_from_user(&tbuf, buf, sizeof(tbuf))) return -EFAULT; out->qbytes = tbuf.msg_qbytes; @@ -306,60 +311,61 @@ static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void __ out->mode = tbuf.msg_perm.mode; return 0; - } + } case IPC_OLD: - { + { struct msqid_ds tbuf_old; - if (copy_from_user (&tbuf_old, buf, sizeof (tbuf_old))) + if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->uid = 
tbuf_old.msg_perm.uid; out->gid = tbuf_old.msg_perm.gid; out->mode = tbuf_old.msg_perm.mode; - if(tbuf_old.msg_qbytes == 0) + if (tbuf_old.msg_qbytes == 0) out->qbytes = tbuf_old.msg_lqbytes; else out->qbytes = tbuf_old.msg_qbytes; return 0; - } + } default: return -EINVAL; } } -asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) +asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) { - int err, version; - struct msg_queue *msq; - struct msq_setbuf setbuf; struct kern_ipc_perm *ipcp; - + struct msq_setbuf setbuf; + struct msg_queue *msq; + int err, version; + if (msqid < 0 || cmd < 0) return -EINVAL; version = ipc_parse_version(&cmd); switch (cmd) { - case IPC_INFO: - case MSG_INFO: - { + case IPC_INFO: + case MSG_INFO: + { struct msginfo msginfo; int max_id; + if (!buf) return -EFAULT; - /* We must not return kernel stack data. + /* + * We must not return kernel stack data. * due to padding, it's not enough * to set all member fields. */ - err = security_msg_queue_msgctl(NULL, cmd); if (err) return err; - memset(&msginfo,0,sizeof(msginfo)); + memset(&msginfo, 0, sizeof(msginfo)); msginfo.msgmni = msg_ctlmni; msginfo.msgmax = msg_ctlmax; msginfo.msgmnb = msg_ctlmnb; @@ -377,36 +383,37 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) } max_id = msg_ids.max_id; mutex_unlock(&msg_ids.mutex); - if (copy_to_user (buf, &msginfo, sizeof(struct msginfo))) + if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; - return (max_id < 0) ? 0: max_id; + return (max_id < 0) ? 0 : max_id; } case MSG_STAT: case IPC_STAT: { struct msqid64_ds tbuf; int success_return; + if (!buf) return -EFAULT; - if(cmd == MSG_STAT && msqid >= msg_ids.entries->size) + if (cmd == MSG_STAT && msqid >= msg_ids.entries->size) return -EINVAL; - memset(&tbuf,0,sizeof(tbuf)); + memset(&tbuf, 0, sizeof(tbuf)); msq = msg_lock(msqid); if (msq == NULL) return -EINVAL; - if(cmd == MSG_STAT) { + if (cmd == MSG_STAT) { success_return = msg_buildid(msqid, msq->q_perm.seq); } else { err = -EIDRM; - if (msg_checkid(msq,msqid)) + if (msg_checkid(msq, msqid)) goto out_unlock; success_return = 0; } err = -EACCES; - if (ipcperms (&msq->q_perm, S_IRUGO)) + if (ipcperms(&msq->q_perm, S_IRUGO)) goto out_unlock; err = security_msg_queue_msgctl(msq, cmd); @@ -430,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) case IPC_SET: if (!buf) return -EFAULT; - if (copy_msqid_from_user (&setbuf, buf, version)) + if (copy_msqid_from_user(&setbuf, buf, version)) return -EFAULT; break; case IPC_RMID: @@ -441,12 +448,12 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) mutex_lock(&msg_ids.mutex); msq = msg_lock(msqid); - err=-EINVAL; + err = -EINVAL; if (msq == NULL) goto out_up; err = -EIDRM; - if (msg_checkid(msq,msqid)) + if (msg_checkid(msq, msqid)) goto out_unlock_up; ipcp = &msq->q_perm; @@ -454,15 +461,16 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) if (err) goto out_unlock_up; if (cmd==IPC_SET) { - err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, setbuf.mode); + err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, + setbuf.mode); if (err) goto out_unlock_up; } err = -EPERM; - if (current->euid != ipcp->cuid && + if (current->euid != ipcp->cuid && current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) - /* We _could_ check for CAP_CHOWN above, but we don't */ + /* We _could_ check for CAP_CHOWN above, but we don't */ goto 
out_unlock_up; err = security_msg_queue_msgctl(msq, cmd); @@ -480,22 +488,22 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) ipcp->uid = setbuf.uid; ipcp->gid = setbuf.gid; - ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | - (S_IRWXUGO & setbuf.mode); + ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | + (S_IRWXUGO & setbuf.mode); msq->q_ctime = get_seconds(); /* sleeping receivers might be excluded by * stricter permissions. */ - expunge_all(msq,-EAGAIN); + expunge_all(msq, -EAGAIN); /* sleeping senders might be able to send * due to a larger queue size. */ - ss_wakeup(&msq->q_senders,0); + ss_wakeup(&msq->q_senders, 0); msg_unlock(msq); break; } case IPC_RMID: - freeque (msq, msqid); + freeque(msq, msqid); break; } err = 0; @@ -510,41 +518,44 @@ out_unlock: return err; } -static int testmsg(struct msg_msg* msg,long type,int mode) +static int testmsg(struct msg_msg *msg, long type, int mode) { switch(mode) { case SEARCH_ANY: return 1; case SEARCH_LESSEQUAL: - if(msg->m_type <=type) + if (msg->m_type <=type) return 1; break; case SEARCH_EQUAL: - if(msg->m_type == type) + if (msg->m_type == type) return 1; break; case SEARCH_NOTEQUAL: - if(msg->m_type != type) + if (msg->m_type != type) return 1; break; } return 0; } -static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg) +static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) { - struct list_head* tmp; + struct list_head *tmp; tmp = msq->q_receivers.next; while (tmp != &msq->q_receivers) { - struct msg_receiver* msr; - msr = list_entry(tmp,struct msg_receiver,r_list); + struct msg_receiver *msr; + + msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; - if(testmsg(msg,msr->r_msgtype,msr->r_mode) && - !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { + if (testmsg(msg, msr->r_msgtype, msr->r_mode) && + !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, + msr->r_msgtype, msr->r_mode)) { + list_del(&msr->r_list); - if(msr->r_maxsize < msg->m_ts) { + if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; wake_up_process(msr->r_tsk); smp_mb(); @@ -556,6 +567,7 @@ static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg) wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = msg; + return 1; } } @@ -563,40 +575,41 @@ static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg) return 0; } -asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) +asmlinkage long +sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; long mtype; int err; - + if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; if (get_user(mtype, &msgp->mtype)) - return -EFAULT; + return -EFAULT; if (mtype < 1) return -EINVAL; msg = load_msg(msgp->mtext, msgsz); - if(IS_ERR(msg)) + if (IS_ERR(msg)) return PTR_ERR(msg); msg->m_type = mtype; msg->m_ts = msgsz; msq = msg_lock(msqid); - err=-EINVAL; - if(msq==NULL) + err = -EINVAL; + if (msq == NULL) goto out_free; err= -EIDRM; - if (msg_checkid(msq,msqid)) + if (msg_checkid(msq, msqid)) goto out_unlock_free; for (;;) { struct msg_sender s; - err=-EACCES; + err = -EACCES; if (ipcperms(&msq->q_perm, S_IWUGO)) goto out_unlock_free; @@ -604,14 +617,14 @@ asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz, if (err) goto out_unlock_free; - if(msgsz + msq->q_cbytes <= msq->q_qbytes && + if (msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= 
msq->q_qbytes) { break; } /* queue full, wait: */ - if(msgflg&IPC_NOWAIT) { - err=-EAGAIN; + if (msgflg & IPC_NOWAIT) { + err = -EAGAIN; goto out_unlock_free; } ss_add(msq, &s); @@ -626,9 +639,9 @@ asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz, goto out_unlock_free; } ss_del(&s); - + if (signal_pending(current)) { - err=-ERESTARTNOHAND; + err = -ERESTARTNOHAND; goto out_unlock_free; } } @@ -636,47 +649,47 @@ asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz, msq->q_lspid = current->tgid; msq->q_stime = get_seconds(); - if(!pipelined_send(msq,msg)) { + if (!pipelined_send(msq, msg)) { /* noone is waiting for this message, enqueue it */ - list_add_tail(&msg->m_list,&msq->q_messages); + list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; - atomic_add(msgsz,&msg_bytes); + atomic_add(msgsz, &msg_bytes); atomic_inc(&msg_hdrs); } - + err = 0; msg = NULL; out_unlock_free: msg_unlock(msq); out_free: - if(msg!=NULL) + if (msg != NULL) free_msg(msg); return err; } -static inline int convert_mode(long* msgtyp, int msgflg) +static inline int convert_mode(long *msgtyp, int msgflg) { - /* + /* * find message of correct type. * msgtyp = 0 => get first. * msgtyp > 0 => get first message of matching type. - * msgtyp < 0 => get message with least type must be < abs(msgtype). + * msgtyp < 0 => get message with least type must be < abs(msgtype). */ - if(*msgtyp==0) + if (*msgtyp == 0) return SEARCH_ANY; - if(*msgtyp<0) { - *msgtyp=-(*msgtyp); + if (*msgtyp < 0) { + *msgtyp = -*msgtyp; return SEARCH_LESSEQUAL; } - if(msgflg & MSG_EXCEPT) + if (msgflg & MSG_EXCEPT) return SEARCH_NOTEQUAL; return SEARCH_EQUAL; } -asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, - long msgtyp, int msgflg) +asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, + long msgtyp, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; @@ -684,44 +697,51 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, if (msqid < 0 || (long) msgsz < 0) return -EINVAL; - mode = convert_mode(&msgtyp,msgflg); + mode = convert_mode(&msgtyp, msgflg); msq = msg_lock(msqid); - if(msq==NULL) + if (msq == NULL) return -EINVAL; msg = ERR_PTR(-EIDRM); - if (msg_checkid(msq,msqid)) + if (msg_checkid(msq, msqid)) goto out_unlock; for (;;) { struct msg_receiver msr_d; - struct list_head* tmp; + struct list_head *tmp; msg = ERR_PTR(-EACCES); - if (ipcperms (&msq->q_perm, S_IRUGO)) + if (ipcperms(&msq->q_perm, S_IRUGO)) goto out_unlock; msg = ERR_PTR(-EAGAIN); tmp = msq->q_messages.next; while (tmp != &msq->q_messages) { struct msg_msg *walk_msg; - walk_msg = list_entry(tmp,struct msg_msg,m_list); - if(testmsg(walk_msg,msgtyp,mode) && - !security_msg_queue_msgrcv(msq, walk_msg, current, msgtyp, mode)) { + + walk_msg = list_entry(tmp, struct msg_msg, m_list); + if (testmsg(walk_msg, msgtyp, mode) && + !security_msg_queue_msgrcv(msq, walk_msg, current, + msgtyp, mode)) { + msg = walk_msg; - if(mode == SEARCH_LESSEQUAL && walk_msg->m_type != 1) { - msg=walk_msg; - msgtyp=walk_msg->m_type-1; + if (mode == SEARCH_LESSEQUAL && + walk_msg->m_type != 1) { + msg = walk_msg; + msgtyp = walk_msg->m_type - 1; } else { - msg=walk_msg; + msg = walk_msg; break; } } tmp = tmp->next; } - if(!IS_ERR(msg)) { - /* Found a suitable message. Unlink it from the queue. */ + if (!IS_ERR(msg)) { + /* + * Found a suitable message. + * Unlink it from the queue. 
+ */ if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { msg = ERR_PTR(-E2BIG); goto out_unlock; @@ -731,9 +751,9 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, msq->q_rtime = get_seconds(); msq->q_lrpid = current->tgid; msq->q_cbytes -= msg->m_ts; - atomic_sub(msg->m_ts,&msg_bytes); + atomic_sub(msg->m_ts, &msg_bytes); atomic_dec(&msg_hdrs); - ss_wakeup(&msq->q_senders,0); + ss_wakeup(&msq->q_senders, 0); msg_unlock(msq); break; } @@ -742,13 +762,13 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, msg = ERR_PTR(-ENOMSG); goto out_unlock; } - list_add_tail(&msr_d.r_list,&msq->q_receivers); + list_add_tail(&msr_d.r_list, &msq->q_receivers); msr_d.r_tsk = current; msr_d.r_msgtype = msgtyp; msr_d.r_mode = mode; - if(msgflg & MSG_NOERROR) + if (msgflg & MSG_NOERROR) msr_d.r_maxsize = INT_MAX; - else + else msr_d.r_maxsize = msgsz; msr_d.r_msg = ERR_PTR(-EAGAIN); current->state = TASK_INTERRUPTIBLE; @@ -773,17 +793,17 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, * wake_up_process(). There is a race with exit(), see * ipc/mqueue.c for the details. */ - msg = (struct msg_msg*) msr_d.r_msg; + msg = (struct msg_msg*)msr_d.r_msg; while (msg == NULL) { cpu_relax(); - msg = (struct msg_msg*) msr_d.r_msg; + msg = (struct msg_msg *)msr_d.r_msg; } /* Lockless receive, part 3: * If there is a message or an error then accept it without * locking. */ - if(msg != ERR_PTR(-EAGAIN)) { + if (msg != ERR_PTR(-EAGAIN)) { rcu_read_unlock(); break; } @@ -798,7 +818,7 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, * Repeat test after acquiring the spinlock. */ msg = (struct msg_msg*)msr_d.r_msg; - if(msg != ERR_PTR(-EAGAIN)) + if (msg != ERR_PTR(-EAGAIN)) goto out_unlock; list_del(&msr_d.r_list); @@ -810,14 +830,15 @@ out_unlock: } } if (IS_ERR(msg)) - return PTR_ERR(msg); + return PTR_ERR(msg); msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz; if (put_user (msg->m_type, &msgp->mtype) || store_msg(msgp->mtext, msg, msgsz)) { - msgsz = -EFAULT; + msgsz = -EFAULT; } free_msg(msg); + return msgsz; } @@ -827,20 +848,20 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it) struct msg_queue *msq = it; return seq_printf(s, - "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", - msq->q_perm.key, - msq->q_id, - msq->q_perm.mode, - msq->q_cbytes, - msq->q_qnum, - msq->q_lspid, - msq->q_lrpid, - msq->q_perm.uid, - msq->q_perm.gid, - msq->q_perm.cuid, - msq->q_perm.cgid, - msq->q_stime, - msq->q_rtime, - msq->q_ctime); + "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", + msq->q_perm.key, + msq->q_id, + msq->q_perm.mode, + msq->q_cbytes, + msq->q_qnum, + msq->q_lspid, + msq->q_lrpid, + msq->q_perm.uid, + msq->q_perm.gid, + msq->q_perm.cuid, + msq->q_perm.cgid, + msq->q_stime, + msq->q_rtime, + msq->q_ctime); } #endif diff --git a/kernel/audit.c b/kernel/audit.c index d417ca1db79b..0a36091ed712 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -690,9 +690,7 @@ static const struct inotify_operations audit_inotify_ops = { /* Initialize audit support at boot time. */ static int __init audit_init(void) { -#ifdef CONFIG_AUDITSYSCALL int i; -#endif printk(KERN_INFO "audit: initializing netlink socket (%s)\n", audit_default ? 
"enabled" : "disabled"); @@ -717,10 +715,10 @@ static int __init audit_init(void) audit_ih = inotify_init(&audit_inotify_ops); if (IS_ERR(audit_ih)) audit_panic("cannot initialize inotify handle"); +#endif for (i = 0; i < AUDIT_INODE_BUCKETS; i++) INIT_LIST_HEAD(&audit_inode_hash[i]); -#endif return 0; } diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 5b4e16276ca0..6a9a5c5a4e7d 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -442,6 +442,7 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule) case AUDIT_EQUAL: break; default: + err = -EINVAL; goto exit_free; } } @@ -579,6 +580,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, case AUDIT_EQUAL: break; default: + err = -EINVAL; goto exit_free; } } @@ -1134,6 +1136,14 @@ static inline int audit_add_rule(struct audit_entry *entry, struct audit_watch *watch = entry->rule.watch; struct nameidata *ndp, *ndw; int h, err, putnd_needed = 0; +#ifdef CONFIG_AUDITSYSCALL + int dont_count = 0; + + /* If either of these, don't count towards total */ + if (entry->rule.listnr == AUDIT_FILTER_USER || + entry->rule.listnr == AUDIT_FILTER_TYPE) + dont_count = 1; +#endif if (inode_f) { h = audit_hash_ino(inode_f->val); @@ -1174,6 +1184,10 @@ static inline int audit_add_rule(struct audit_entry *entry, } else { list_add_tail_rcu(&entry->list, list); } +#ifdef CONFIG_AUDITSYSCALL + if (!dont_count) + audit_n_rules++; +#endif mutex_unlock(&audit_filter_mutex); if (putnd_needed) @@ -1198,6 +1212,14 @@ static inline int audit_del_rule(struct audit_entry *entry, struct audit_watch *watch, *tmp_watch = entry->rule.watch; LIST_HEAD(inotify_list); int h, ret = 0; +#ifdef CONFIG_AUDITSYSCALL + int dont_count = 0; + + /* If either of these, don't count towards total */ + if (entry->rule.listnr == AUDIT_FILTER_USER || + entry->rule.listnr == AUDIT_FILTER_TYPE) + dont_count = 1; +#endif if (inode_f) { h = audit_hash_ino(inode_f->val); @@ -1235,6 +1257,10 @@ static inline int audit_del_rule(struct audit_entry *entry, list_del_rcu(&e->list); call_rcu(&e->rcu, audit_free_rule_rcu); +#ifdef CONFIG_AUDITSYSCALL + if (!dont_count) + audit_n_rules--; +#endif mutex_unlock(&audit_filter_mutex); if (!list_empty(&inotify_list)) diff --git a/kernel/auditsc.c b/kernel/auditsc.c index ae40ac8c39e7..efc1b74bebf3 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -85,6 +85,9 @@ extern int audit_enabled; /* Indicates that audit should log the full pathname. */ #define AUDIT_NAME_FULL -1 +/* number of audit rules */ +int audit_n_rules; + /* When fs/namei.c:getname() is called, we store the pointer in name and * we don't let putname() free it (instead we free all of the saved * pointers at syscall exit time). @@ -174,6 +177,7 @@ struct audit_aux_data_path { /* The per-task audit context. 
*/ struct audit_context { + int dummy; /* must be the first element */ int in_syscall; /* 1 if task is in a syscall */ enum audit_state state; unsigned int serial; /* serial number for record */ @@ -514,7 +518,7 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk, context->return_valid = return_valid; context->return_code = return_code; - if (context->in_syscall && !context->auditable) { + if (context->in_syscall && !context->dummy && !context->auditable) { enum audit_state state; state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); @@ -530,17 +534,7 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk, } get_context: - context->pid = tsk->pid; - context->ppid = sys_getppid(); /* sic. tsk == current in all cases */ - context->uid = tsk->uid; - context->gid = tsk->gid; - context->euid = tsk->euid; - context->suid = tsk->suid; - context->fsuid = tsk->fsuid; - context->egid = tsk->egid; - context->sgid = tsk->sgid; - context->fsgid = tsk->fsgid; - context->personality = tsk->personality; + tsk->audit_context = NULL; return context; } @@ -749,6 +743,17 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts const char *tty; /* tsk == current */ + context->pid = tsk->pid; + context->ppid = sys_getppid(); /* sic. tsk == current in all cases */ + context->uid = tsk->uid; + context->gid = tsk->gid; + context->euid = tsk->euid; + context->suid = tsk->suid; + context->fsuid = tsk->fsuid; + context->egid = tsk->egid; + context->sgid = tsk->sgid; + context->fsgid = tsk->fsgid; + context->personality = tsk->personality; ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); if (!ab) @@ -1066,7 +1071,8 @@ void audit_syscall_entry(int arch, int major, context->argv[3] = a4; state = context->state; - if (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT) + context->dummy = !audit_n_rules; + if (!context->dummy && (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT)) state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); if (likely(state == AUDIT_DISABLED)) return; @@ -1199,14 +1205,18 @@ void audit_putname(const char *name) #endif } -static void audit_inode_context(int idx, const struct inode *inode) +/* Copy inode data into an audit_names. 
*/ +static void audit_copy_inode(struct audit_names *name, const struct inode *inode) { - struct audit_context *context = current->audit_context; - - selinux_get_inode_sid(inode, &context->names[idx].osid); + name->ino = inode->i_ino; + name->dev = inode->i_sb->s_dev; + name->mode = inode->i_mode; + name->uid = inode->i_uid; + name->gid = inode->i_gid; + name->rdev = inode->i_rdev; + selinux_get_inode_sid(inode, &name->osid); } - /** * audit_inode - store the inode and device from a lookup * @name: name being audited @@ -1240,20 +1250,14 @@ void __audit_inode(const char *name, const struct inode *inode) ++context->ino_count; #endif } - context->names[idx].ino = inode->i_ino; - context->names[idx].dev = inode->i_sb->s_dev; - context->names[idx].mode = inode->i_mode; - context->names[idx].uid = inode->i_uid; - context->names[idx].gid = inode->i_gid; - context->names[idx].rdev = inode->i_rdev; - audit_inode_context(idx, inode); + audit_copy_inode(&context->names[idx], inode); } /** * audit_inode_child - collect inode info for created/removed objects * @dname: inode's dentry name * @inode: inode being audited - * @pino: inode number of dentry parent + * @parent: inode of dentry parent * * For syscalls that create or remove filesystem objects, audit_inode * can only collect information for the filesystem object's parent. @@ -1264,7 +1268,7 @@ void __audit_inode(const char *name, const struct inode *inode) * unsuccessful attempts. */ void __audit_inode_child(const char *dname, const struct inode *inode, - unsigned long pino) + const struct inode *parent) { int idx; struct audit_context *context = current->audit_context; @@ -1278,7 +1282,7 @@ void __audit_inode_child(const char *dname, const struct inode *inode, if (!dname) goto update_context; for (idx = 0; idx < context->name_count; idx++) - if (context->names[idx].ino == pino) { + if (context->names[idx].ino == parent->i_ino) { const char *name = context->names[idx].name; if (!name) @@ -1302,16 +1306,47 @@ update_context: context->names[idx].name_len = AUDIT_NAME_FULL; context->names[idx].name_put = 0; /* don't call __putname() */ - if (inode) { - context->names[idx].ino = inode->i_ino; - context->names[idx].dev = inode->i_sb->s_dev; - context->names[idx].mode = inode->i_mode; - context->names[idx].uid = inode->i_uid; - context->names[idx].gid = inode->i_gid; - context->names[idx].rdev = inode->i_rdev; - audit_inode_context(idx, inode); - } else - context->names[idx].ino = (unsigned long)-1; + if (!inode) + context->names[idx].ino = (unsigned long)-1; + else + audit_copy_inode(&context->names[idx], inode); + + /* A parent was not found in audit_names, so copy the inode data for the + * provided parent. */ + if (!found_name) { + idx = context->name_count++; +#if AUDIT_DEBUG + context->ino_count++; +#endif + audit_copy_inode(&context->names[idx], parent); + } +} + +/** + * audit_inode_update - update inode info for last collected name + * @inode: inode being audited + * + * When open() is called on an existing object with the O_CREAT flag, the inode + * data audit initially collects is incorrect. This additional hook ensures + * audit has the inode data for the actual object to be opened. 
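Callers would normally reach __audit_inode_update() through an inline wrapper (in include/linux/audit.h in this era) that, like the existing audit_inode() helpers, skips the call entirely when the task has no audit context. A sketch of the assumed wrapper:

/* sketch: assumed to mirror the audit_inode() wrapper convention */
static inline void audit_inode_update(const struct inode *inode)
{
	if (unlikely(current->audit_context))
		__audit_inode_update(inode);
}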
+ */ +void __audit_inode_update(const struct inode *inode) +{ + struct audit_context *context = current->audit_context; + int idx; + + if (!context->in_syscall || !inode) + return; + + if (context->name_count == 0) { + context->name_count++; +#if AUDIT_DEBUG + context->ino_count++; +#endif + } + idx = context->name_count - 1; + + audit_copy_inode(&context->names[idx], inode); } /** @@ -1642,7 +1677,7 @@ int audit_bprm(struct linux_binprm *bprm) unsigned long p, next; void *to; - if (likely(!audit_enabled || !context)) + if (likely(!audit_enabled || !context || context->dummy)) return 0; ax = kmalloc(sizeof(*ax) + PAGE_SIZE * MAX_ARG_PAGES - bprm->p, @@ -1680,7 +1715,7 @@ int audit_socketcall(int nargs, unsigned long *args) struct audit_aux_data_socketcall *ax; struct audit_context *context = current->audit_context; - if (likely(!context)) + if (likely(!context || context->dummy)) return 0; ax = kmalloc(sizeof(*ax) + nargs * sizeof(unsigned long), GFP_KERNEL); @@ -1708,7 +1743,7 @@ int audit_sockaddr(int len, void *a) struct audit_aux_data_sockaddr *ax; struct audit_context *context = current->audit_context; - if (likely(!context)) + if (likely(!context || context->dummy)) return 0; ax = kmalloc(sizeof(*ax) + len, GFP_KERNEL); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 1a649f2bb9bb..4ea6f0dc2fc5 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -816,6 +816,10 @@ static int update_cpumask(struct cpuset *cs, char *buf) struct cpuset trialcs; int retval, cpus_unchanged; + /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ + if (cs == &top_cpuset) + return -EACCES; + trialcs = *cs; retval = cpulist_parse(buf, trialcs.cpus_allowed); if (retval < 0) @@ -2033,6 +2037,33 @@ out: return err; } +/* + * The top_cpuset tracks what CPUs and Memory Nodes are online, + * period. This is necessary in order to make cpusets transparent + * (of no effect) on systems that are actively using CPU hotplug + * but making no active use of cpusets. + * + * This handles CPU hotplug (cpuhp) events. If someday Memory + * Nodes can be hotplugged (dynamically changing node_online_map) + * then we should handle that too, perhaps in a similar way. + */ + +#ifdef CONFIG_HOTPLUG_CPU +static int cpuset_handle_cpuhp(struct notifier_block *nb, + unsigned long phase, void *cpu) +{ + mutex_lock(&manage_mutex); + mutex_lock(&callback_mutex); + + top_cpuset.cpus_allowed = cpu_online_map; + + mutex_unlock(&callback_mutex); + mutex_unlock(&manage_mutex); + + return 0; +} +#endif + /** * cpuset_init_smp - initialize cpus_allowed * @@ -2043,6 +2074,8 @@ void __init cpuset_init_smp(void) { top_cpuset.cpus_allowed = cpu_online_map; top_cpuset.mems_allowed = node_online_map; + + hotcpu_notifier(cpuset_handle_cpuhp, 0); } /** @@ -2387,7 +2420,7 @@ EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); int cpuset_excl_nodes_overlap(const struct task_struct *p) { const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ - int overlap = 0; /* do cpusets overlap? */ + int overlap = 1; /* do cpusets overlap?
*/ task_lock(current); if (current->flags & PF_EXITING) { diff --git a/kernel/delayacct.c b/kernel/delayacct.c index f05392d64267..57ca3730205d 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -19,15 +19,15 @@ #include #include -int delayacct_on __read_mostly; /* Delay accounting turned on/off */ +int delayacct_on __read_mostly = 1; /* Delay accounting turned on/off */ kmem_cache_t *delayacct_cache; -static int __init delayacct_setup_enable(char *str) +static int __init delayacct_setup_disable(char *str) { - delayacct_on = 1; + delayacct_on = 0; return 1; } -__setup("delayacct", delayacct_setup_enable); +__setup("nodelayacct", delayacct_setup_disable); void delayacct_init(void) { diff --git a/kernel/fork.c b/kernel/fork.c index 1b0f7b1e0881..aa36c43783cc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1387,8 +1387,10 @@ long do_fork(unsigned long clone_flags, if (clone_flags & CLONE_VFORK) { wait_for_completion(&vfork); - if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) + if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) { + current->ptrace_message = nr; ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); + } } } else { free_pid(pid); diff --git a/kernel/futex.c b/kernel/futex.c index cf0c8e21d1ab..b9b8aea5389e 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -297,7 +297,7 @@ static int futex_handle_fault(unsigned long address, int attempt) struct vm_area_struct * vma; struct mm_struct *mm = current->mm; - if (attempt >= 2 || !(vma = find_vma(mm, address)) || + if (attempt > 2 || !(vma = find_vma(mm, address)) || vma->vm_start > address || !(vma->vm_flags & VM_WRITE)) return -EFAULT; @@ -397,7 +397,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) p = NULL; goto out_unlock; } - if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) { + if (p->exit_state != 0) { p = NULL; goto out_unlock; } @@ -415,15 +415,15 @@ out_unlock: */ void exit_pi_state_list(struct task_struct *curr) { - struct futex_hash_bucket *hb; struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; + struct futex_hash_bucket *hb; union futex_key key; /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful - * versus waiters unqueueing themselfs + * versus waiters unqueueing themselves: */ spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { @@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr) next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; + hb = hash_futex(&key); spin_unlock_irq(&curr->pi_lock); - hb = hash_futex(&key); spin_lock(&hb->lock); spin_lock_irq(&curr->pi_lock); + /* + * We dropped the pi-lock, so re-check whether this + * task still owns the PI-state: + */ if (head->next != next) { spin_unlock(&hb->lock); continue; } - list_del_init(&pi_state->list); - WARN_ON(pi_state->owner != curr); - + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); pi_state->owner = NULL; spin_unlock_irq(&curr->pi_lock); @@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) head = &hb->chain; list_for_each_entry_safe(this, next, head, list) { - if (match_futex (&this->key, &me->key)) { + if (match_futex(&this->key, &me->key)) { /* * Another waiter already exists - bump up * the refcount and return its pi_state: @@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) if (unlikely(!pi_state)) return -EINVAL; + 
WARN_ON(!atomic_read(&pi_state->refcount)); + atomic_inc(&pi_state->refcount); me->pi_state = pi_state; @@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) } /* - * We are the first waiter - try to look up the real owner and - * attach the new pi_state to it: + * We are the first waiter - try to look up the real owner and attach + * the new pi_state to it, but bail out when the owner died bit is set + * and TID = 0: */ pid = uval & FUTEX_TID_MASK; + if (!pid && (uval & FUTEX_OWNER_DIED)) + return -ESRCH; p = futex_find_get_task(pid); if (!p) return -ESRCH; @@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) pi_state->key = me->key; spin_lock_irq(&p->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &p->pi_state_list); pi_state->owner = p; spin_unlock_irq(&p->pi_lock); @@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) * kept enabled while there is PI state around. We must also * preserve the owner died bit.) */ - newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid; + if (!(uval & FUTEX_OWNER_DIED)) { + newval = FUTEX_WAITERS | new_owner->pid; - inc_preempt_count(); - curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); - dec_preempt_count(); + inc_preempt_count(); + curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); + dec_preempt_count(); + if (curval == -EFAULT) + return -EFAULT; + if (curval != uval) + return -EINVAL; + } - if (curval == -EFAULT) - return -EFAULT; - if (curval != uval) - return -EINVAL; + spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + spin_unlock_irq(&pi_state->owner->pi_lock); - list_del_init(&pi_state->owner->pi_state_list); + spin_lock_irq(&new_owner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &new_owner->pi_state_list); pi_state->owner = new_owner; + spin_unlock_irq(&new_owner->pi_lock); + rt_mutex_unlock(&pi_state->pi_mutex); return 0; @@ -729,8 +747,10 @@ retry: */ if (attempt++) { if (futex_handle_fault((unsigned long)uaddr2, - attempt)) + attempt)) { + ret = -EFAULT; goto out; + } goto retry; } @@ -930,6 +950,7 @@ static int unqueue_me(struct futex_q *q) /* In the common case we don't take the spinlock, which is nice. */ retry: lock_ptr = q->lock_ptr; + barrier(); if (lock_ptr != 0) { spin_lock(lock_ptr); /* @@ -1236,6 +1257,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, /* Owner died? */ if (q.pi_state->owner != NULL) { spin_lock_irq(&q.pi_state->owner->pi_lock); + WARN_ON(list_empty(&q.pi_state->list)); list_del_init(&q.pi_state->list); spin_unlock_irq(&q.pi_state->owner->pi_lock); } else @@ -1244,6 +1266,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, q.pi_state->owner = current; spin_lock_irq(&current->pi_lock); + WARN_ON(!list_empty(&q.pi_state->list)); list_add(&q.pi_state->list, &current->pi_state_list); spin_unlock_irq(&current->pi_lock); @@ -1301,9 +1324,10 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, * still holding the mmap_sem. */ if (attempt++) { - if (futex_handle_fault((unsigned long)uaddr, attempt)) + if (futex_handle_fault((unsigned long)uaddr, attempt)) { + ret = -EFAULT; goto out_unlock_release_sem; - + } goto retry_locked; } @@ -1427,9 +1451,11 @@ retry_locked: * again.
If it succeeds then we can return without waking * anyone else up: */ - inc_preempt_count(); - uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); - dec_preempt_count(); + if (!(uval & FUTEX_OWNER_DIED)) { + inc_preempt_count(); + uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); + dec_preempt_count(); + } if (unlikely(uval == -EFAULT)) goto pi_faulted; @@ -1462,9 +1488,11 @@ retry_locked: /* * No waiters - kernel unlocks the futex: */ - ret = unlock_futex_pi(uaddr, uval); - if (ret == -EFAULT) - goto pi_faulted; + if (!(uval & FUTEX_OWNER_DIED)) { + ret = unlock_futex_pi(uaddr, uval); + if (ret == -EFAULT) + goto pi_faulted; + } out_unlock: spin_unlock(&hb->lock); @@ -1481,9 +1509,10 @@ pi_faulted: * still holding the mmap_sem. */ if (attempt++) { - if (futex_handle_fault((unsigned long)uaddr, attempt)) + if (futex_handle_fault((unsigned long)uaddr, attempt)) { + ret = -EFAULT; goto out_unlock; - + } goto retry_locked; } @@ -1683,9 +1712,9 @@ err_unlock: * Process a futex-list entry, check whether it's owned by the * dying task, and do notification if so: */ -int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) +int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) { - u32 uval, nval; + u32 uval, nval, mval; retry: if (get_user(uval, uaddr)) @@ -1702,20 +1731,44 @@ retry: * thread-death.) The rest of the cleanup is done in * userspace. */ - nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, - uval | FUTEX_OWNER_DIED); + mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; + nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); + if (nval == -EFAULT) return -1; if (nval != uval) goto retry; - if (uval & FUTEX_WAITERS) - futex_wake(uaddr, 1); + /* + * Wake robust non-PI futexes here. The wakeup of + * PI futexes happens in exit_pi_state(): + */ + if (!pi) { + if (uval & FUTEX_WAITERS) + futex_wake(uaddr, 1); + } } return 0; } +/* + * Fetch a robust-list pointer. Bit 0 signals PI futexes: + */ +static inline int fetch_robust_entry(struct robust_list __user **entry, + struct robust_list __user **head, int *pi) +{ + unsigned long uentry; + + if (get_user(uentry, (unsigned long *)head)) + return -EFAULT; + + *entry = (void *)(uentry & ~1UL); + *pi = uentry & 1; + + return 0; +} + /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. 
@@ -1726,14 +1779,14 @@ void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *pending; - unsigned int limit = ROBUST_LIST_LIMIT; + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned long futex_offset; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ - if (get_user(entry, &head->list.next)) + if (fetch_robust_entry(&entry, &head->list.next, &pi)) return; /* * Fetch the relative futex offset: @@ -1744,10 +1797,11 @@ void exit_robust_list(struct task_struct *curr) * Fetch any possibly pending lock-add first, and handle it * if it exists: */ - if (get_user(pending, &head->list_op_pending)) + if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) return; + if (pending) - handle_futex_death((void *)pending + futex_offset, curr); + handle_futex_death((void *)pending + futex_offset, curr, pip); while (entry != &head->list) { /* @@ -1756,12 +1810,12 @@ void exit_robust_list(struct task_struct *curr) */ if (entry != pending) if (handle_futex_death((void *)entry + futex_offset, - curr)) + curr, pi)) return; /* * Fetch the next entry in the list: */ - if (get_user(entry, &entry->next)) + if (fetch_robust_entry(&entry, &entry->next, &pi)) return; /* * Avoid excessively long or circular lists: diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index d1d92b441fb7..c5cca3f65cb7 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -12,6 +12,23 @@ #include + +/* + * Fetch a robust-list pointer. Bit 0 signals PI futexes: + */ +static inline int +fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, + compat_uptr_t *head, int *pi) +{ + if (get_user(*uentry, head)) + return -EFAULT; + + *entry = compat_ptr((*uentry) & ~1); + *pi = (unsigned int)(*uentry) & 1; + + return 0; +} + /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. 
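The low bit of every robust-list pointer is set by user space (the threading library) when the registered lock is a PI futex, and both fetch_robust_entry() variants mask it off before dereferencing. A minimal sketch of that tagging convention as user space might implement it (illustrative helper names, not glibc's; struct robust_list is the one declared in <linux/futex.h>):

struct robust_list;

/* encode: user space stores the entry pointer with bit 0 set for PI locks */
static inline unsigned long robust_entry_encode(struct robust_list *entry, int is_pi)
{
	return (unsigned long)entry | (is_pi ? 1UL : 0UL);
}

/* decode: what the kernel's fetch_robust_entry() effectively does */
static inline struct robust_list *robust_entry_decode(unsigned long uentry, int *pi)
{
	*pi = uentry & 1;
	return (struct robust_list *)(uentry & ~1UL);
}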
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr) { struct compat_robust_list_head __user *head = curr->compat_robust_list; struct robust_list __user *entry, *pending; + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; compat_uptr_t uentry, upending; - unsigned int limit = ROBUST_LIST_LIMIT; compat_long_t futex_offset; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ - if (get_user(uentry, &head->list.next)) + if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) return; - entry = compat_ptr(uentry); /* * Fetch the relative futex offset: */ @@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr) * Fetch any possibly pending lock-add first, and handle it * if it exists: */ - if (get_user(upending, &head->list_op_pending)) + if (fetch_robust_entry(&upending, &pending, + &head->list_op_pending, &pip)) return; - pending = compat_ptr(upending); if (upending) - handle_futex_death((void *)pending + futex_offset, curr); + handle_futex_death((void *)pending + futex_offset, curr, pip); while (compat_ptr(uentry) != &head->list) { /* @@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr) */ if (entry != pending) if (handle_futex_death((void *)entry + futex_offset, - curr)) + curr, pi)) return; /* * Fetch the next entry in the list: */ - if (get_user(uentry, (compat_uptr_t *)&entry->next)) + if (fetch_robust_entry(&uentry, &entry, + (compat_uptr_t *)&entry->next, &pi)) return; - entry = compat_ptr(uentry); /* * Avoid excessively long or circular lists: */ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index d17766d40dab..21c38a7e666b 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -187,7 +187,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base) { struct hrtimer_base *new_base; - new_base = &__get_cpu_var(hrtimer_bases[base->index]); + new_base = &__get_cpu_var(hrtimer_bases)[base->index]; if (base != new_base) { /* @@ -835,7 +835,7 @@ static void migrate_hrtimers(int cpu) } #endif /* CONFIG_HOTPLUG_CPU */ -static int __devinit hrtimer_cpu_notify(struct notifier_block *self, +static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { long cpu = (long)hcpu; @@ -859,7 +859,7 @@ static int __devinit hrtimer_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __devinitdata hrtimers_nb = { +static struct notifier_block __cpuinitdata hrtimers_nb = { .notifier_call = hrtimer_cpu_notify, }; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 4e461438e48b..92be519eff26 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -137,16 +137,40 @@ EXPORT_SYMBOL(enable_irq); * @irq: interrupt to control * @on: enable/disable power management wakeup * - * Enable/disable power management wakeup mode + * Enable/disable power management wakeup mode, which is + * disabled by default. Enables and disables must match, + * just as they match for non-wakeup mode support. + * + * Wakeup mode lets this IRQ wake the system from sleep + * states like "suspend to RAM". */ int set_irq_wake(unsigned int irq, unsigned int on) { struct irq_desc *desc = irq_desc + irq; unsigned long flags; int ret = -ENXIO; + int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake; + /* wakeup-capable irqs can be shared between drivers that + * don't need to have the same sleep mode behaviors. 
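The kernel-doc above stresses that wake enables and disables must match, and the implementation in the hunk that follows keeps a wake_depth reference count to enforce it. A minimal sketch of a balanced caller, typically bracketing a suspend/resume cycle (the driver structure and IRQ number are placeholders, not from the patch; set_irq_wake() is the function declared in linux/irq.h):

struct mydrv {
	unsigned int irq;
	int may_wakeup;
};

static int mydrv_suspend(struct mydrv *priv)
{
	if (priv->may_wakeup)
		set_irq_wake(priv->irq, 1);	/* wake_depth 0 -> 1, IRQ armed for wakeup */
	return 0;
}

static int mydrv_resume(struct mydrv *priv)
{
	if (priv->may_wakeup)
		set_irq_wake(priv->irq, 0);	/* wake_depth 1 -> 0, back to normal */
	return 0;
}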
+ */ spin_lock_irqsave(&desc->lock, flags); - if (desc->chip->set_wake) + if (on) { + if (desc->wake_depth++ == 0) + desc->status |= IRQ_WAKEUP; + else + set_wake = NULL; + } else { + if (desc->wake_depth == 0) { + printk(KERN_WARNING "Unbalanced IRQ %d " + "wake disable\n", irq); + WARN_ON(1); + } else if (--desc->wake_depth == 0) + desc->status &= ~IRQ_WAKEUP; + else + set_wake = NULL; + } + if (set_wake) ret = desc->chip->set_wake(irq, on); spin_unlock_irqrestore(&desc->lock, flags); return ret; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 64aab081153b..3f57dfdc8f92 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -393,6 +393,7 @@ static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p) static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) { copy_kprobe(p, ap); + flush_insn_slot(ap); ap->addr = p->addr; ap->pre_handler = aggr_pre_handler; ap->fault_handler = aggr_fault_handler; diff --git a/kernel/panic.c b/kernel/panic.c index d8a0bca21233..9b8dcfd1ca93 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -18,6 +18,7 @@ #include #include #include +#include int panic_on_oops; int tainted; diff --git a/kernel/power/process.c b/kernel/power/process.c index b2a5f671d6cd..72e72d2c61e6 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -66,13 +66,25 @@ static inline void freeze_process(struct task_struct *p) } } +static void cancel_freezing(struct task_struct *p) +{ + unsigned long flags; + + if (freezing(p)) { + pr_debug(" clean up: %s\n", p->comm); + do_not_freeze(p); + spin_lock_irqsave(&p->sighand->siglock, flags); + recalc_sigpending_tsk(p); + spin_unlock_irqrestore(&p->sighand->siglock, flags); + } +} + /* 0 = success, else # of processes that we failed to stop */ int freeze_processes(void) { int todo, nr_user, user_frozen; unsigned long start_time; struct task_struct *g, *p; - unsigned long flags; printk( "Stopping tasks: " ); start_time = jiffies; @@ -85,6 +97,10 @@ int freeze_processes(void) continue; if (frozen(p)) continue; + if (p->state == TASK_TRACED && frozen(p->parent)) { + cancel_freezing(p); + continue; + } if (p->mm && !(p->flags & PF_BORROWED_MM)) { /* The task is a user-space one. 
* Freeze it unless there's a vfork completion @@ -126,13 +142,7 @@ int freeze_processes(void) do_each_thread(g, p) { if (freezeable(p) && !frozen(p)) printk(KERN_ERR " %s\n", p->comm); - if (freezing(p)) { - pr_debug(" clean up: %s\n", p->comm); - p->flags &= ~PF_FREEZE; - spin_lock_irqsave(&p->sighand->siglock, flags); - recalc_sigpending_tsk(p); - spin_unlock_irqrestore(&p->sighand->siglock, flags); - } + cancel_freezing(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); return todo; diff --git a/kernel/printk.c b/kernel/printk.c index 65ca0688f86f..1149365e989e 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -799,6 +799,9 @@ void release_console_sem(void) up(&secondary_console_sem); return; } + + console_may_schedule = 0; + for ( ; ; ) { spin_lock_irqsave(&logbuf_lock, flags); wake_klogd |= log_start - log_end; @@ -812,7 +815,6 @@ void release_console_sem(void) local_irq_restore(flags); } console_locked = 0; - console_may_schedule = 0; up(&console_sem); spin_unlock_irqrestore(&logbuf_lock, flags); if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) { diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 759805c9859a..436ab35f6fa7 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -548,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu) tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); } -static int __devinit rcu_cpu_notify(struct notifier_block *self, +static int __cpuinit rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { long cpu = (long)hcpu; @@ -565,7 +565,7 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __devinitdata rcu_nb = { +static struct notifier_block __cpuinitdata rcu_nb = { .notifier_call = rcu_cpu_notify, }; diff --git a/kernel/resource.c b/kernel/resource.c index 0dd3a857579e..46286434af80 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -244,6 +244,7 @@ int find_next_system_ram(struct resource *res) start = res->start; end = res->end; + BUG_ON(start >= end); read_lock(&resource_lock); for (p = iomem_resource.child; p ; p = p->sibling) { @@ -254,15 +255,17 @@ int find_next_system_ram(struct resource *res) p = NULL; break; } - if (p->start >= start) + if ((p->end >= start) && (p->start < end)) break; } read_unlock(&resource_lock); if (!p) return -1; /* copy data */ - res->start = p->start; - res->end = p->end; + if (res->start < p->start) + res->start = p->start; + if (res->end > p->end) + res->end = p->end; return 0; } #endif diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index d2ef13b485e7..3e13a1e5856f 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -7,6 +7,8 @@ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen + * + * See Documentation/rt-mutex-design.txt for details. 
*/ #include #include diff --git a/kernel/sched.c b/kernel/sched.c index b44b9a43b0fc..a234fbee1238 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4162,10 +4162,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) read_unlock_irq(&tasklist_lock); return -ESRCH; } - get_task_struct(p); - read_unlock_irq(&tasklist_lock); retval = sched_setscheduler(p, policy, &lparam); - put_task_struct(p); + read_unlock_irq(&tasklist_lock); return retval; } @@ -4456,9 +4454,9 @@ asmlinkage long sys_sched_yield(void) return 0; } -static inline int __resched_legal(void) +static inline int __resched_legal(int expected_preempt_count) { - if (unlikely(preempt_count())) + if (unlikely(preempt_count() != expected_preempt_count)) return 0; if (unlikely(system_state != SYSTEM_RUNNING)) return 0; @@ -4484,7 +4482,7 @@ static void __cond_resched(void) int __sched cond_resched(void) { - if (need_resched() && __resched_legal()) { + if (need_resched() && __resched_legal(0)) { __cond_resched(); return 1; } @@ -4510,7 +4508,7 @@ int cond_resched_lock(spinlock_t *lock) ret = 1; spin_lock(lock); } - if (need_resched() && __resched_legal()) { + if (need_resched() && __resched_legal(1)) { spin_release(&lock->dep_map, 1, _THIS_IP_); _raw_spin_unlock(lock); preempt_enable_no_resched(); @@ -4526,7 +4524,7 @@ int __sched cond_resched_softirq(void) { BUG_ON(!in_softirq()); - if (need_resched() && __resched_legal()) { + if (need_resched() && __resched_legal(0)) { raw_local_irq_disable(); _local_bh_enable(); raw_local_irq_enable(); @@ -6494,7 +6492,12 @@ static int build_sched_domains(const cpumask_t *cpu_map) for (i = 0; i < MAX_NUMNODES; i++) init_numa_sched_groups_power(sched_group_nodes[i]); - init_numa_sched_groups_power(sched_group_allnodes); + if (sched_group_allnodes) { + int group = cpu_to_allnodes_group(first_cpu(*cpu_map)); + struct sched_group *sg = &sched_group_allnodes[group]; + + init_numa_sched_groups_power(sg); + } #endif /* Attach the domains */ @@ -6761,6 +6764,11 @@ void __init sched_init(void) } set_load_weight(&init_task); + +#ifdef CONFIG_RT_MUTEXES + plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); +#endif + /* * The boot idle thread does lazy MMU switching as well: */ diff --git a/kernel/signal.c b/kernel/signal.c index 7fe874d12fae..bfdb5686fa3e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -791,22 +791,31 @@ out: /* * Force a signal that the process can't ignore: if necessary * we unblock the signal and change any SIG_IGN to SIG_DFL. + * + * Note: If we unblock the signal, we always reset it to SIG_DFL, + * since we do not want to have a signal handler that was blocked + * be invoked when user space had explicitly blocked it. + * + * We don't want to have recursive SIGSEGV's etc, for example. 
*/ - int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) { unsigned long int flags; - int ret; + int ret, blocked, ignored; + struct k_sigaction *action; spin_lock_irqsave(&t->sighand->siglock, flags); - if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) { - t->sighand->action[sig-1].sa.sa_handler = SIG_DFL; + action = &t->sighand->action[sig-1]; + ignored = action->sa.sa_handler == SIG_IGN; + blocked = sigismember(&t->blocked, sig); + if (blocked || ignored) { + action->sa.sa_handler = SIG_DFL; + if (blocked) { + sigdelset(&t->blocked, sig); + recalc_sigpending_tsk(t); + } } - if (sigismember(&t->blocked, sig)) { - sigdelset(&t->blocked, sig); - } - recalc_sigpending_tsk(t); ret = specific_send_sig_info(sig, info, t); spin_unlock_irqrestore(&t->sighand->siglock, flags); diff --git a/kernel/softirq.c b/kernel/softirq.c index 0f08a84ae307..3789ca98197c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -65,6 +65,7 @@ static inline void wakeup_softirqd(void) * This one is for softirq.c-internal use, * where hardirqs are disabled legitimately: */ +#ifdef CONFIG_TRACE_IRQFLAGS static void __local_bh_disable(unsigned long ip) { unsigned long flags; @@ -80,6 +81,13 @@ static void __local_bh_disable(unsigned long ip) trace_softirqs_off(ip); raw_local_irq_restore(flags); } +#else /* !CONFIG_TRACE_IRQFLAGS */ +static inline void __local_bh_disable(unsigned long ip) +{ + add_preempt_count(SOFTIRQ_OFFSET); + barrier(); +} +#endif /* CONFIG_TRACE_IRQFLAGS */ void local_bh_disable(void) { @@ -121,12 +129,16 @@ EXPORT_SYMBOL(_local_bh_enable); void local_bh_enable(void) { +#ifdef CONFIG_TRACE_IRQFLAGS unsigned long flags; WARN_ON_ONCE(in_irq()); +#endif WARN_ON_ONCE(irqs_disabled()); +#ifdef CONFIG_TRACE_IRQFLAGS local_irq_save(flags); +#endif /* * Are softirqs going to be turned on now: */ @@ -142,18 +154,22 @@ void local_bh_enable(void) do_softirq(); dec_preempt_count(); +#ifdef CONFIG_TRACE_IRQFLAGS local_irq_restore(flags); +#endif preempt_check_resched(); } EXPORT_SYMBOL(local_bh_enable); void local_bh_enable_ip(unsigned long ip) { +#ifdef CONFIG_TRACE_IRQFLAGS unsigned long flags; WARN_ON_ONCE(in_irq()); local_irq_save(flags); +#endif /* * Are softirqs going to be turned on now: */ @@ -169,7 +185,9 @@ void local_bh_enable_ip(unsigned long ip) do_softirq(); dec_preempt_count(); +#ifdef CONFIG_TRACE_IRQFLAGS local_irq_restore(flags); +#endif preempt_check_resched(); } EXPORT_SYMBOL(local_bh_enable_ip); @@ -547,7 +565,7 @@ static void takeover_tasklets(unsigned int cpu) } #endif /* CONFIG_HOTPLUG_CPU */ -static int __devinit cpu_callback(struct notifier_block *nfb, +static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -587,7 +605,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block __devinitdata cpu_nfb = { +static struct notifier_block __cpuinitdata cpu_nfb = { .notifier_call = cpu_callback }; diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 6b76caa22981..03e6a2b0b787 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu) /* * Create/destroy watchdog threads as CPUs come and go: */ -static int __devinit +static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; @@ -142,7 +142,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) return NOTIFY_OK; } -static struct notifier_block 
__devinitdata cpu_nfb = { +static struct notifier_block __cpuinitdata cpu_nfb = { .notifier_call = cpu_callback }; diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index dcfb5d731466..51cacd111dbd 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -111,7 +111,6 @@ static int stop_machine(void) /* If some failed, kill them all. */ if (ret < 0) { stopmachine_set_state(STOPMACHINE_EXIT); - up(&stopmachine_mutex); return ret; } diff --git a/kernel/taskstats.c b/kernel/taskstats.c index f45179ce028e..e78187657330 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -121,46 +121,45 @@ static int send_reply(struct sk_buff *skb, pid_t pid) /* * Send taskstats data in @skb to listeners registered for @cpu's exit data */ -static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) +static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) { struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); struct listener_list *listeners; struct listener *s, *tmp; struct sk_buff *skb_next, *skb_cur = skb; void *reply = genlmsg_data(genlhdr); - int rc, ret, delcount = 0; + int rc, delcount = 0; rc = genlmsg_end(skb, reply); if (rc < 0) { nlmsg_free(skb); - return rc; + return; } rc = 0; listeners = &per_cpu(listener_array, cpu); down_read(&listeners->sem); - list_for_each_entry_safe(s, tmp, &listeners->list, list) { + list_for_each_entry(s, &listeners->list, list) { skb_next = NULL; if (!list_is_last(&s->list, &listeners->list)) { skb_next = skb_clone(skb_cur, GFP_KERNEL); - if (!skb_next) { - nlmsg_free(skb_cur); - rc = -ENOMEM; + if (!skb_next) break; - } } - ret = genlmsg_unicast(skb_cur, s->pid); - if (ret == -ECONNREFUSED) { + rc = genlmsg_unicast(skb_cur, s->pid); + if (rc == -ECONNREFUSED) { s->valid = 0; delcount++; - rc = ret; } skb_cur = skb_next; } up_read(&listeners->sem); + if (skb_cur) + nlmsg_free(skb_cur); + if (!delcount) - return rc; + return; /* Delete invalidated entries */ down_write(&listeners->sem); @@ -171,13 +170,12 @@ static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) } } up_write(&listeners->sem); - return rc; } static int fill_pid(pid_t pid, struct task_struct *pidtsk, struct taskstats *stats) { - int rc; + int rc = 0; struct task_struct *tsk = pidtsk; if (!pidtsk) { @@ -196,12 +194,10 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk, * Each accounting subsystem adds calls to its functions to * fill in relevant parts of struct taskstsats as follows * - * rc = per-task-foo(stats, tsk); - * if (rc) - * goto err; + * per-task-foo(stats, tsk); */ - rc = delayacct_add_tsk(stats, tsk); + delayacct_add_tsk(stats, tsk); stats->version = TASKSTATS_VERSION; /* Define err: label here if needed */ diff --git a/kernel/timer.c b/kernel/timer.c index 05809c2e2fd6..1d7dd6267c2d 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -84,7 +84,7 @@ typedef struct tvec_t_base_s tvec_base_t; tvec_base_t boot_tvec_bases; EXPORT_SYMBOL(boot_tvec_bases); -static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases }; +static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases; static inline void set_running_timer(tvec_base_t *base, struct timer_list *timer) @@ -408,7 +408,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index) * This function cascades all vectors and executes all expired timer * vectors. 
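The INDEX() rewrite just below is pure macro hygiene: parenthesising both the argument and the whole body keeps the expansion correct if the macro is ever used with a compound argument or inside a larger expression (the existing callers pass bare constants, so behaviour is unchanged). A small stand-alone illustration of the failure mode the parentheses prevent, using the timer code's TVR_BITS/TVN_BITS values:

#define TVR_BITS 8
#define TVN_BITS 6
#define TVN_MASK ((1 << TVN_BITS) - 1)

#define INDEX_OLD(N) (tj >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK
#define INDEX_NEW(N) ((tj >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/* With n == 1, INDEX_OLD(n + 1) shifts by TVR_BITS + n + 1 * TVN_BITS = 15
 * bits, not the intended TVR_BITS + (n + 1) * TVN_BITS = 20 bits;
 * INDEX_NEW(n + 1) computes the intended value. */
static int index_macro_demo(unsigned long tj, int n)
{
	return INDEX_OLD(n + 1) == INDEX_NEW(n + 1);	/* usually 0 */
}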
*/ -#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK +#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) static inline void __run_timers(tvec_base_t *base) { @@ -1324,46 +1324,19 @@ asmlinkage long sys_getpid(void) } /* - * Accessing ->group_leader->real_parent is not SMP-safe, it could - * change from under us. However, rather than getting any lock - * we can use an optimistic algorithm: get the parent - * pid, and go back and check that the parent is still - * the same. If it has changed (which is extremely unlikely - * indeed), we just try again.. - * - * NOTE! This depends on the fact that even if we _do_ - * get an old value of "parent", we can happily dereference - * the pointer (it was and remains a dereferencable kernel pointer - * no matter what): we just can't necessarily trust the result - * until we know that the parent pointer is valid. - * - * NOTE2: ->group_leader never changes from under us. + * Accessing ->real_parent is not SMP-safe, it could + * change from under us. However, we can use a stale + * value of ->real_parent under rcu_read_lock(), see + * release_task()->call_rcu(delayed_put_task_struct). */ asmlinkage long sys_getppid(void) { int pid; - struct task_struct *me = current; - struct task_struct *parent; - parent = me->group_leader->real_parent; - for (;;) { - pid = parent->tgid; -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) -{ - struct task_struct *old = parent; + rcu_read_lock(); + pid = rcu_dereference(current->real_parent)->tgid; + rcu_read_unlock(); - /* - * Make sure we read the pid before re-reading the - * parent pointer: - */ - smp_rmb(); - parent = me->group_leader->real_parent; - if (old != parent) - continue; -} -#endif - break; - } return pid; } @@ -1688,7 +1661,7 @@ static void __devinit migrate_timers(int cpu) } #endif /* CONFIG_HOTPLUG_CPU */ -static int __devinit timer_cpu_notify(struct notifier_block *self, +static int __cpuinit timer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { long cpu = (long)hcpu; @@ -1708,7 +1681,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __devinitdata timers_nb = { +static struct notifier_block __cpuinitdata timers_nb = { .notifier_call = timer_cpu_notify, }; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index eebb1d839235..835fe28b87a8 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -68,7 +68,7 @@ struct workqueue_struct { /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove threads to each one as cpus come/go. */ -static DEFINE_SPINLOCK(workqueue_lock); +static DEFINE_MUTEX(workqueue_mutex); static LIST_HEAD(workqueues); static int singlethread_cpu; @@ -93,9 +93,12 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, spin_unlock_irqrestore(&cwq->lock, flags); } -/* - * Queue work on a workqueue. Return non-zero if it was successfully - * added. +/** + * queue_work - queue work on a workqueue + * @wq: workqueue to use + * @work: work to queue + * + * Returns non-zero if it was successfully added. * * We queue the work to the CPU it was submitted, but there is no * guarantee that it will be processed by that CPU. 
@@ -128,6 +131,14 @@ static void delayed_work_timer_fn(unsigned long __data) __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); } +/** + * queue_delayed_work - queue work on a workqueue after delay + * @wq: workqueue to use + * @work: work to queue + * @delay: number of jiffies to wait before queueing + * + * Returns non-zero if it was successfully added. + */ int fastcall queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay) { @@ -150,6 +161,15 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work); +/** + * queue_delayed_work_on - queue work on specific CPU after delay + * @cpu: CPU number to execute work on + * @wq: workqueue to use + * @work: work to queue + * @delay: number of jiffies to wait before queueing + * + * Returns non-zero if it was successfully added. + */ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work, unsigned long delay) { @@ -275,8 +295,9 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) } } -/* +/** * flush_workqueue - ensure that any scheduled work has run to completion. + * @wq: workqueue to flush * * Forces execution of the workqueue and blocks until its completion. * This is typically used in driver shutdown handlers. @@ -299,10 +320,10 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) } else { int cpu; - lock_cpu_hotplug(); + mutex_lock(&workqueue_mutex); for_each_online_cpu(cpu) flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); - unlock_cpu_hotplug(); + mutex_unlock(&workqueue_mutex); } } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -350,8 +371,7 @@ struct workqueue_struct *__create_workqueue(const char *name, } wq->name = name; - /* We don't need the distraction of CPUs appearing and vanishing. */ - lock_cpu_hotplug(); + mutex_lock(&workqueue_mutex); if (singlethread) { INIT_LIST_HEAD(&wq->list); p = create_workqueue_thread(wq, singlethread_cpu); @@ -360,9 +380,7 @@ struct workqueue_struct *__create_workqueue(const char *name, else wake_up_process(p); } else { - spin_lock(&workqueue_lock); list_add(&wq->list, &workqueues); - spin_unlock(&workqueue_lock); for_each_online_cpu(cpu) { p = create_workqueue_thread(wq, cpu); if (p) { @@ -372,7 +390,7 @@ struct workqueue_struct *__create_workqueue(const char *name, destroy = 1; } } - unlock_cpu_hotplug(); + mutex_unlock(&workqueue_mutex); /* * Was there any error during startup? If yes then clean up: @@ -400,6 +418,12 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu) kthread_stop(p); } +/** + * destroy_workqueue - safely terminate a workqueue + * @wq: target workqueue + * + * Safely destroy a workqueue. All work currently pending will be done first. + */ void destroy_workqueue(struct workqueue_struct *wq) { int cpu; @@ -407,17 +431,15 @@ void destroy_workqueue(struct workqueue_struct *wq) flush_workqueue(wq); /* We don't need the distraction of CPUs appearing and vanishing. 
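The kernel-doc added in this file covers the main queueing and flushing entry points. A compact driver-style sketch of the intended call sequence with this era's API (three-argument INIT_WORK(), work functions taking a void *); the queue name and functions are illustrative only:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_wq;
static struct work_struct mydrv_work;

static void mydrv_work_fn(void *data)
{
	/* deferred processing, runs in process context */
}

static int mydrv_start(void)
{
	mydrv_wq = create_singlethread_workqueue("mydrv");
	if (!mydrv_wq)
		return -ENOMEM;

	INIT_WORK(&mydrv_work, mydrv_work_fn, NULL);
	queue_delayed_work(mydrv_wq, &mydrv_work, HZ);	/* run about one second from now */
	return 0;
}

static void mydrv_stop(void)
{
	cancel_delayed_work(&mydrv_work);	/* stop a still-pending timer */
	flush_workqueue(mydrv_wq);		/* let anything already queued finish */
	destroy_workqueue(mydrv_wq);
}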
*/ - lock_cpu_hotplug(); + mutex_lock(&workqueue_mutex); if (is_single_threaded(wq)) cleanup_workqueue_thread(wq, singlethread_cpu); else { for_each_online_cpu(cpu) cleanup_workqueue_thread(wq, cpu); - spin_lock(&workqueue_lock); list_del(&wq->list); - spin_unlock(&workqueue_lock); } - unlock_cpu_hotplug(); + mutex_unlock(&workqueue_mutex); free_percpu(wq->cpu_wq); kfree(wq); } @@ -425,18 +447,41 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); static struct workqueue_struct *keventd_wq; +/** + * schedule_work - put work task in global workqueue + * @work: job to be done + * + * This puts a job in the kernel-global workqueue. + */ int fastcall schedule_work(struct work_struct *work) { return queue_work(keventd_wq, work); } EXPORT_SYMBOL(schedule_work); +/** + * schedule_delayed_work - put work task in global workqueue after delay + * @work: job to be done + * @delay: number of jiffies to wait + * + * After waiting for a given time this puts a job in the kernel-global + * workqueue. + */ int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) { return queue_delayed_work(keventd_wq, work, delay); } EXPORT_SYMBOL(schedule_delayed_work); +/** + * schedule_delayed_work_on - queue work in global workqueue on CPU after delay + * @cpu: cpu to use + * @work: job to be done + * @delay: number of jiffies to wait + * + * After waiting for a given time this puts a job in the kernel-global + * workqueue on the specified CPU. + */ int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay) { @@ -465,11 +510,13 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info) if (!works) return -ENOMEM; + mutex_lock(&workqueue_mutex); for_each_online_cpu(cpu) { INIT_WORK(per_cpu_ptr(works, cpu), func, info); __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), per_cpu_ptr(works, cpu)); } + mutex_unlock(&workqueue_mutex); flush_workqueue(keventd_wq); free_percpu(works); return 0; @@ -585,6 +632,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: + mutex_lock(&workqueue_mutex); /* Create a new workqueue thread for it. 
*/ list_for_each_entry(wq, &workqueues, list) { if (!create_workqueue_thread(wq, hotcpu)) { @@ -603,6 +651,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, kthread_bind(cwq->thread, hotcpu); wake_up_process(cwq->thread); } + mutex_unlock(&workqueue_mutex); break; case CPU_UP_CANCELED: @@ -614,6 +663,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, any_online_cpu(cpu_online_map)); cleanup_workqueue_thread(wq, hotcpu); } + mutex_unlock(&workqueue_mutex); + break; + + case CPU_DOWN_PREPARE: + mutex_lock(&workqueue_mutex); + break; + + case CPU_DOWN_FAILED: + mutex_unlock(&workqueue_mutex); break; case CPU_DEAD: @@ -621,6 +679,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, cleanup_workqueue_thread(wq, hotcpu); list_for_each_entry(wq, &workqueues, list) take_over_work(wq, hotcpu); + mutex_unlock(&workqueue_mutex); break; } diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 2b1530fc573b..7f20e7b857cb 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -50,10 +50,6 @@ static char *action_to_string(enum kobject_action action) return "offline"; case KOBJ_ONLINE: return "online"; - case KOBJ_DOCK: - return "dock"; - case KOBJ_UNDOCK: - return "undock"; default: return NULL; } diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 3d9c4dc965ed..58c577dd82e5 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -162,6 +162,7 @@ static void rwlock_bug(rwlock_t *lock, const char *msg) #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) +#if 0 /* __write_lock_debug() can lock up - maybe this can too? */ static void __read_lock_debug(rwlock_t *lock) { int print_once = 1; @@ -184,12 +185,12 @@ static void __read_lock_debug(rwlock_t *lock) } } } +#endif void _raw_read_lock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - if (unlikely(!__raw_read_trylock(&lock->raw_lock))) - __read_lock_debug(lock); + __raw_read_lock(&lock->raw_lock); } int _raw_read_trylock(rwlock_t *lock) @@ -235,6 +236,7 @@ static inline void debug_write_unlock(rwlock_t *lock) lock->owner_cpu = -1; } +#if 0 /* This can cause lockups */ static void __write_lock_debug(rwlock_t *lock) { int print_once = 1; @@ -257,12 +259,12 @@ static void __write_lock_debug(rwlock_t *lock) } } } +#endif void _raw_write_lock(rwlock_t *lock) { debug_write_lock_before(lock); - if (unlikely(!__raw_write_trylock(&lock->raw_lock))) - __write_lock_debug(lock); + __raw_write_lock(&lock->raw_lock); debug_write_lock_after(lock); } diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 0110e4414805..d90822c378a4 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c @@ -111,15 +111,14 @@ static int subpattern(u8 *pattern, int i, int j, int g) return ret; } -static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, - unsigned int len) +static void compute_prefix_tbl(struct ts_bm *bm) { int i, j, g; for (i = 0; i < ASIZE; i++) - bm->bad_shift[i] = len; - for (i = 0; i < len - 1; i++) - bm->bad_shift[pattern[i]] = len - 1 - i; + bm->bad_shift[i] = bm->patlen; + for (i = 0; i < bm->patlen - 1; i++) + bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i; /* Compute the good shift array, used to match reocurrences * of a subpattern */ @@ -150,8 +149,8 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len, bm = ts_config_priv(conf); bm->patlen = len; bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; - compute_prefix_tbl(bm, pattern, len); memcpy(bm->pattern, pattern, len); + 
compute_prefix_tbl(bm); return conf; } diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c index 7f922dccf1a5..fceb97c3aff7 100644 --- a/lib/zlib_inflate/inflate.c +++ b/lib/zlib_inflate/inflate.c @@ -347,7 +347,10 @@ int zlib_inflate(z_streamp strm, int flush) static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; - if (strm == NULL || strm->state == NULL || strm->next_out == NULL || + /* Do not check for strm->next_out == NULL here as ppc zImage + inflates to strm->next_out = 0 */ + + if (strm == NULL || strm->state == NULL || (strm->next_in == NULL && strm->avail_in != 0)) return Z_STREAM_ERROR; diff --git a/mm/fadvise.c b/mm/fadvise.c index 60a5d55e51d9..168c78a121bb 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -73,7 +73,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) file->f_ra.ra_pages = bdi->ra_pages * 2; break; case POSIX_FADV_WILLNEED: - case POSIX_FADV_NOREUSE: if (!mapping->a_ops->readpage) { ret = -EINVAL; break; @@ -94,6 +93,8 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) if (ret > 0) ret = 0; break; + case POSIX_FADV_NOREUSE: + break; case POSIX_FADV_DONTNEED: if (!bdi_write_congested(mapping->backing_dev_info)) filemap_flush(mapping); diff --git a/mm/filemap.c b/mm/filemap.c index d087fc3d3281..b9a60c43b61a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -849,8 +849,6 @@ static void shrink_readahead_size_eio(struct file *filp, return; ra->ra_pages /= 4; - printk(KERN_WARNING "Reducing readahead size to %luK\n", - ra->ra_pages << (PAGE_CACHE_SHIFT - 10)); } /** diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 01c9fb97c619..c37319542b70 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -52,6 +52,9 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn) int nr_pages = PAGES_PER_SECTION; int ret; + if (pfn_valid(phys_start_pfn)) + return -EEXIST; + ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); if (ret < 0) @@ -76,15 +79,22 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn, { unsigned long i; int err = 0; + int start_sec, end_sec; + /* during initialize mem_map, align hot-added range to section */ + start_sec = pfn_to_section_nr(phys_start_pfn); + end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); - for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) { - err = __add_section(zone, phys_start_pfn + i); + for (i = start_sec; i <= end_sec; i++) { + err = __add_section(zone, i << PFN_SECTION_SHIFT); - /* We want to keep adding the rest of the - * sections if the first ones already exist + /* + * EEXIST is finally dealed with by ioresource collision + * check. see add_memory() => register_memory_resource() + * Warning will be printed if there is collision. 
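The rewritten loop in __add_pages() walks whole sections rather than raw pfn offsets, so a hot-added range that does not start or end on a section boundary still gets every overlapping section registered. A rough worked example of the arithmetic, assuming the x86-64 values (128 MB sections, 4 KB pages, hence PFN_SECTION_SHIFT = 15):

#define PFN_SECTION_SHIFT	15	/* assumption: 1 << 15 pages per section */

static unsigned long section_walk_demo(unsigned long phys_start_pfn, unsigned long nr_pages)
{
	unsigned long start_sec = phys_start_pfn >> PFN_SECTION_SHIFT;
	unsigned long end_sec = (phys_start_pfn + nr_pages - 1) >> PFN_SECTION_SHIFT;
	unsigned long i, sections = 0;

	/* e.g. pfns [0x8100, 0x18100) span sections 1, 2 and 3 even though
	 * neither end is section aligned; each one would be handed to
	 * __add_section(zone, i << PFN_SECTION_SHIFT) in turn */
	for (i = start_sec; i <= end_sec; i++)
		sections++;

	return sections;
}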
*/ if (err && (err != -EEXIST)) break; + err = 0; } return err; @@ -156,7 +166,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) res.flags = IORESOURCE_MEM; /* we just need system ram */ section_end = res.end; - while (find_next_system_ram(&res) >= 0) { + while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) { start_pfn = (unsigned long)(res.start >> PAGE_SHIFT); nr_pages = (unsigned long) ((res.end + 1 - res.start) >> PAGE_SHIFT); @@ -213,10 +223,9 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat) } /* add this memory to iomem resource */ -static void register_memory_resource(u64 start, u64 size) +static struct resource *register_memory_resource(u64 start, u64 size) { struct resource *res; - res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(!res); @@ -228,7 +237,18 @@ static void register_memory_resource(u64 start, u64 size) printk("System RAM resource %llx - %llx cannot be added\n", (unsigned long long)res->start, (unsigned long long)res->end); kfree(res); + res = NULL; } + return res; +} + +static void release_memory_resource(struct resource *res) +{ + if (!res) + return; + release_resource(res); + kfree(res); + return; } @@ -237,8 +257,13 @@ int add_memory(int nid, u64 start, u64 size) { pg_data_t *pgdat = NULL; int new_pgdat = 0; + struct resource *res; int ret; + res = register_memory_resource(start, size); + if (!res) + return -EEXIST; + if (!node_online(nid)) { pgdat = hotadd_new_pgdat(nid, start); if (!pgdat) @@ -268,14 +293,13 @@ int add_memory(int nid, u64 start, u64 size) BUG_ON(ret); } - /* register this memory as resource */ - register_memory_resource(start, size); - return ret; error: /* rollback pgdat allocation and others */ if (new_pgdat) rollback_node_hotadd(nid, pgdat); + if (res) + release_memory_resource(res); return ret; } diff --git a/mm/slab.c b/mm/slab.c index 0f20843beffd..21ba06035700 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1106,7 +1106,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) #endif -static int __devinit cpuup_callback(struct notifier_block *nfb, +static int __cpuinit cpuup_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { long cpu = (long)hcpu; @@ -3224,7 +3224,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) EXPORT_SYMBOL(kmem_cache_alloc); /** - * kmem_cache_alloc - Allocate an object. The memory is set to zero. + * kmem_cache_zalloc - Allocate an object. The memory is set to zero. * @cache: The cache to allocate from. * @flags: See kmalloc(). * diff --git a/mm/swap.c b/mm/swap.c index 8fd095c4ae51..687686a61f7c 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -54,6 +54,26 @@ void put_page(struct page *page) } EXPORT_SYMBOL(put_page); +/** + * put_pages_list(): release a list of pages + * + * Release a list of pages which are strung together on page.lru. Currently + * used by read_cache_pages() and related error recovery code. + * + * @pages: list of pages threaded on page->lru + */ +void put_pages_list(struct list_head *pages) +{ + while (!list_empty(pages)) { + struct page *victim; + + victim = list_entry(pages->prev, struct page, lru); + list_del(&victim->lru); + page_cache_release(victim); + } +} +EXPORT_SYMBOL(put_pages_list); + /* * Writeback is about to end against a page which has been marked for immediate * reclaim. 
If it still appears to be reclaimable, move it to the tail of the diff --git a/mm/swapfile.c b/mm/swapfile.c index e70d6c6d6fee..f1f5ec783781 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -442,11 +442,12 @@ int swap_type_of(dev_t device) if (!(swap_info[i].flags & SWP_WRITEOK)) continue; + if (!device) { spin_unlock(&swap_lock); return i; } - inode = swap_info->swap_file->f_dentry->d_inode; + inode = swap_info[i].swap_file->f_dentry->d_inode; if (S_ISBLK(inode->i_mode) && device == MKDEV(imajor(inode), iminor(inode))) { spin_unlock(&swap_lock); diff --git a/net/atm/proc.c b/net/atm/proc.c index 3f95b0886a6a..91fe5f53ff11 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -507,7 +507,7 @@ err_out: goto out; } -void __exit atm_proc_exit(void) +void atm_proc_exit(void) { atm_proc_dirs_remove(); } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 6ccd32b30809..864fbbc7b24d 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -40,11 +40,15 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) else { #ifdef CONFIG_BRIDGE_NETFILTER /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */ - nf_bridge_maybe_copy_header(skb); + if (nf_bridge_maybe_copy_header(skb)) + kfree_skb(skb); + else #endif - skb_push(skb, ETH_HLEN); + { + skb_push(skb, ETH_HLEN); - dev_queue_xmit(skb); + dev_queue_xmit(skb); + } } return 0; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f55ef682ef84..b1211d5342f6 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -386,12 +386,17 @@ void br_features_recompute(struct net_bridge *br) checksum = 0; if (feature & NETIF_F_GSO) - feature |= NETIF_F_TSO; + feature |= NETIF_F_GSO_SOFTWARE; feature |= NETIF_F_GSO; features &= feature; } + if (!(checksum & NETIF_F_ALL_CSUM)) + features &= ~NETIF_F_SG; + if (!(features & NETIF_F_SG)) + features &= ~NETIF_F_GSO_MASK; + br->dev->features = features | checksum | NETIF_F_LLTX | NETIF_F_GSO_ROBUST; } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 06abb6634f5b..53086fb75089 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -85,7 +85,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) goto err_out; err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0); - if (err) + if (err < 0) goto err_kfree; NETLINK_CB(skb).dst_group = RTNLGRP_LINK; diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index 02693a230dc1..9f950db3b76f 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -74,6 +74,9 @@ static void ulog_send(unsigned int nlgroup) if (timer_pending(&ub->timer)) del_timer(&ub->timer); + if (!ub->skb) + return; + /* last nlmsg needs NLMSG_DONE */ if (ub->qlen > 1) ub->lastnlh->nlmsg_type = NLMSG_DONE; diff --git a/net/core/Makefile b/net/core/Makefile index e9bd2467d5a9..2645ba428d48 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o -obj-y += dev.o ethtool.o dev_mcast.o dst.o \ +obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o obj-$(CONFIG_XFRM) += flow.o diff --git a/net/core/dev.c b/net/core/dev.c index 4d2b5167d7f5..d4a1ec3bded5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -116,6 +116,7 @@ #include #include #include +#include /* * The list of packet types we will receive (as opposed to discard) @@ -632,14 +633,22 @@ struct net_device * 
dev_get_by_flags(unsigned short if_flags, unsigned short mas * @name: name string * * Network device names need to be valid file names to - * to allow sysfs to work + * to allow sysfs to work. We also disallow any kind of + * whitespace. */ int dev_valid_name(const char *name) { - return !(*name == '\0' - || !strcmp(name, ".") - || !strcmp(name, "..") - || strchr(name, '/')); + if (*name == '\0') + return 0; + if (!strcmp(name, ".") || !strcmp(name, "..")) + return 0; + + while (*name) { + if (*name == '/' || isspace(*name)) + return 0; + name++; + } + return 1; } /** @@ -1166,11 +1175,6 @@ int skb_checksum_help(struct sk_buff *skb, int inward) goto out_set_summed; if (unlikely(skb_shinfo(skb)->gso_size)) { - static int warned; - - WARN_ON(!warned); - warned = 1; - /* Let GSO fix up the checksum. */ goto out_set_summed; } @@ -1220,11 +1224,6 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) __skb_pull(skb, skb->mac_len); if (unlikely(skb->ip_summed != CHECKSUM_HW)) { - static int warned; - - WARN_ON(!warned); - warned = 1; - if (skb_header_cloned(skb) && (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) return ERR_PTR(err); @@ -1629,26 +1628,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb) struct net_device *dev = skb->dev; if (dev->master) { - /* - * On bonding slaves other than the currently active - * slave, suppress duplicates except for 802.3ad - * ETH_P_SLOW and alb non-mcast/bcast. - */ - if (dev->priv_flags & IFF_SLAVE_INACTIVE) { - if (dev->master->priv_flags & IFF_MASTER_ALB) { - if (skb->pkt_type != PACKET_BROADCAST && - skb->pkt_type != PACKET_MULTICAST) - goto keep; - } - - if (dev->master->priv_flags & IFF_MASTER_8023AD && - skb->protocol == __constant_htons(ETH_P_SLOW)) - goto keep; - + if (skb_bond_should_drop(skb)) { kfree_skb(skb); return NULL; } -keep: skb->dev = dev->master; } @@ -3429,12 +3412,9 @@ static void net_dma_rebalance(void) unsigned int cpu, i, n; struct dma_chan *chan; - lock_cpu_hotplug(); - if (net_dma_count == 0) { for_each_online_cpu(cpu) - rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL); - unlock_cpu_hotplug(); + rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL); return; } @@ -3447,15 +3427,13 @@ static void net_dma_rebalance(void) + (i < (num_online_cpus() % net_dma_count) ? 
1 : 0)); while(n) { - per_cpu(softnet_data.net_dma, cpu) = chan; + per_cpu(softnet_data, cpu).net_dma = chan; cpu = next_cpu(cpu, cpu_online_map); n--; } i++; } rcu_read_unlock(); - - unlock_cpu_hotplug(); } /** diff --git a/net/core/dst.c b/net/core/dst.c index 470c05bc4cb2..1a5e49da0e77 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy) dst_gc_timer_inc = DST_GC_INC; dst_gc_timer_expires = DST_GC_MIN; } - dst_gc_timer.expires = jiffies + dst_gc_timer_expires; #if RT_CACHE_DEBUG >= 2 printk("dst_total: %d/%d %ld\n", atomic_read(&dst_total), delayed, dst_gc_timer_expires); #endif - add_timer(&dst_gc_timer); + mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires); out: spin_unlock(&dst_lock); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 7ad681f5e712..5130d2efdbbe 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -754,6 +755,7 @@ static void neigh_timer_handler(unsigned long arg) neigh->nud_state = NUD_STALE; neigh->updated = jiffies; neigh_suspect(neigh); + notify = 1; } } else if (state & NUD_DELAY) { if (time_before_eq(now, @@ -762,6 +764,7 @@ static void neigh_timer_handler(unsigned long arg) neigh->nud_state = NUD_REACHABLE; neigh->updated = jiffies; neigh_connect(neigh); + notify = 1; next = neigh->confirmed + neigh->parms->reachable_time; } else { NEIGH_PRINTK2("neigh %p is probed.\n", neigh); @@ -819,6 +822,8 @@ static void neigh_timer_handler(unsigned long arg) out: write_unlock(&neigh->lock); } + if (notify) + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); #ifdef CONFIG_ARPD if (notify && neigh->parms->app_probes) @@ -926,9 +931,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, { u8 old; int err; -#ifdef CONFIG_ARPD int notify = 0; -#endif struct net_device *dev; int update_isrouter = 0; @@ -948,9 +951,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, neigh_suspect(neigh); neigh->nud_state = new; err = 0; -#ifdef CONFIG_ARPD notify = old & NUD_VALID; -#endif goto out; } @@ -1022,9 +1023,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, if (!(new & NUD_CONNECTED)) neigh->confirmed = jiffies - (neigh->parms->base_reachable_time << 1); -#ifdef CONFIG_ARPD notify = 1; -#endif } if (new == old) goto out; @@ -1056,6 +1055,9 @@ out: (neigh->flags & ~NTF_ROUTER); } write_unlock_bh(&neigh->lock); + + if (notify) + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); #ifdef CONFIG_ARPD if (notify && neigh->parms->app_probes) neigh_app_notify(neigh); diff --git a/net/core/netevent.c b/net/core/netevent.c new file mode 100644 index 000000000000..35d02c38554e --- /dev/null +++ b/net/core/netevent.c @@ -0,0 +1,69 @@ +/* + * Network event notifiers + * + * Authors: + * Tom Tucker + * Steve Wise + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Fixes: + */ + +#include +#include + +static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); + +/** + * register_netevent_notifier - register a netevent notifier block + * @nb: notifier + * + * Register a notifier to be called when a netevent occurs. + * The notifier passed is linked into the kernel structures and must + * not be reused until it has been unregistered. 
A negative errno code + * is returned on a failure. + */ +int register_netevent_notifier(struct notifier_block *nb) +{ + int err; + + err = atomic_notifier_chain_register(&netevent_notif_chain, nb); + return err; +} + +/** + * netevent_unregister_notifier - unregister a netevent notifier block + * @nb: notifier + * + * Unregister a notifier previously registered by + * register_neigh_notifier(). The notifier is unlinked into the + * kernel structures and may then be reused. A negative errno code + * is returned on a failure. + */ + +int unregister_netevent_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&netevent_notif_chain, nb); +} + +/** + * call_netevent_notifiers - call all netevent notifier blocks + * @val: value passed unmodified to notifier function + * @v: pointer passed unmodified to notifier function + * + * Call all neighbour notifier blocks. Parameters and return value + * are as for notifier_call_chain(). + */ + +int call_netevent_notifiers(unsigned long val, void *v) +{ + return atomic_notifier_call_chain(&netevent_notif_chain, val, v); +} + +EXPORT_SYMBOL_GPL(register_netevent_notifier); +EXPORT_SYMBOL_GPL(unregister_netevent_notifier); +EXPORT_SYMBOL_GPL(call_netevent_notifiers); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 67ed14ddabd2..6a7320b39ed0 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); skb->dev = odev; skb->pkt_type = PACKET_HOST; + skb->nh.iph = iph; + skb->h.uh = udph; if (pkt_dev->nfrags <= 0) pgh = (struct pktgen_hdr *)skb_put(skb, datalen); @@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, skb->protocol = protocol; skb->dev = odev; skb->pkt_type = PACKET_HOST; + skb->nh.ipv6h = iph; + skb->h.uh = udph; if (pkt_dev->nfrags <= 0) pgh = (struct pktgen_hdr *)skb_put(skb, datalen); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 20e5bb73f147..30cc1ba6ed5c 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) } if (ida[IFLA_ADDRESS - 1]) { + struct sockaddr *sa; + int len; + if (!dev->set_mac_address) { err = -EOPNOTSUPP; goto out; @@ -405,7 +408,17 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len)) goto out; - err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1])); + len = sizeof(sa_family_t) + dev->addr_len; + sa = kmalloc(len, GFP_KERNEL); + if (!sa) { + err = -ENOMEM; + goto out; + } + sa->sa_family = dev->type; + memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]), + dev->addr_len); + err = dev->set_mac_address(dev, sa); + kfree(sa); if (err) goto out; send_addr_notify = 1; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 476aa3978504..c54f3664bce5 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -70,13 +70,6 @@ static kmem_cache_t *skbuff_head_cache __read_mostly; static kmem_cache_t *skbuff_fclone_cache __read_mostly; -/* - * lockdep: lock class key used by skb_queue_head_init(): - */ -struct lock_class_key skb_queue_lock_key; - -EXPORT_SYMBOL(skb_queue_lock_key); - /* * Keep out-of-line to prevent kernel bloat. 
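/*
 * A minimal usage sketch of the netevent notifier chain added in
 * net/core/netevent.c above.  The callback and notifier_block names are
 * hypothetical; only register_netevent_notifier(),
 * unregister_netevent_notifier() and the NETEVENT_* event codes come from
 * the hunks in this series.
 */
static int example_netevent_cb(struct notifier_block *nb,
                               unsigned long event, void *ctx)
{
        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                /* ctx is the struct neighbour whose state changed */
                break;
        case NETEVENT_REDIRECT:
                /* ctx is a struct netevent_redirect carrying old/new dst */
                break;
        case NETEVENT_PMTU_UPDATE:
                /* ctx is the dst_entry whose path MTU was updated */
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_netevent_nb = {
        .notifier_call = example_netevent_cb,
};

/* A consumer would call register_netevent_notifier(&example_netevent_nb) at
 * init time and unregister_netevent_notifier(&example_netevent_nb) on exit. */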
* __builtin_return_address is not used because it is not always @@ -256,6 +249,31 @@ nodata: goto out; } +/** + * __netdev_alloc_skb - allocate an skbuff for rx on a specific device + * @dev: network device to receive on + * @length: length to allocate + * @gfp_mask: get_free_pages mask, passed to alloc_skb + * + * Allocate a new &sk_buff and assign it a usage count of one. The + * buffer has unspecified headroom built in. Users should allocate + * the headroom they think they need without accounting for the + * built in space. The built in space is used for optimisations. + * + * %NULL is returned if there is no free memory. + */ +struct sk_buff *__netdev_alloc_skb(struct net_device *dev, + unsigned int length, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); + if (likely(skb)) { + skb_reserve(skb, NET_SKB_PAD); + skb->dev = dev; + } + return skb; +} static void skb_drop_list(struct sk_buff **listp) { @@ -846,7 +864,11 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len) unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) return err; - for (i = 0; i < nfrags; i++) { + i = 0; + if (offset >= len) + goto drop_pages; + + for (; i < nfrags; i++) { int end = offset + skb_shinfo(skb)->frags[i].size; if (end < len) { @@ -854,9 +876,9 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len) continue; } - if (len > offset) - skb_shinfo(skb)->frags[i++].size = len - offset; + skb_shinfo(skb)->frags[i++].size = len - offset; +drop_pages: skb_shinfo(skb)->nr_frags = i; for (; i < nfrags; i++) @@ -864,7 +886,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len) if (skb_shinfo(skb)->frag_list) skb_drop_fraglist(skb); - break; + goto done; } for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); @@ -879,6 +901,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len) return -ENOMEM; nfrag->next = frag->next; + kfree_skb(frag); frag = nfrag; *fragp = frag; } @@ -897,6 +920,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len) break; } +done: if (len > skb_headlen(skb)) { skb->data_len -= skb->len - len; skb->len = len; @@ -2042,6 +2066,7 @@ EXPORT_SYMBOL(__kfree_skb); EXPORT_SYMBOL(kfree_skb); EXPORT_SYMBOL(__pskb_pull_tail); EXPORT_SYMBOL(__alloc_skb); +EXPORT_SYMBOL(__netdev_alloc_skb); EXPORT_SYMBOL(pskb_copy); EXPORT_SYMBOL(pskb_expand_head); EXPORT_SYMBOL(skb_checksum); diff --git a/net/core/utils.c b/net/core/utils.c index 4f96f389243d..e31c90e05594 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -130,12 +130,13 @@ void __init net_random_init(void) static int net_random_reseed(void) { int i; - unsigned long seed[NR_CPUS]; + unsigned long seed; - get_random_bytes(seed, sizeof(seed)); for_each_possible_cpu(i) { struct nrnd_state *state = &per_cpu(net_rand_state,i); - __net_srandom(state, seed[i]); + + get_random_bytes(&seed, sizeof(seed)); + __net_srandom(state, seed); } return 0; } diff --git a/net/core/wireless.c b/net/core/wireless.c index d2bc72d318f7..de0bde4b51dd 100644 --- a/net/core/wireless.c +++ b/net/core/wireless.c @@ -82,6 +82,7 @@ #include /* for __init */ #include /* ARPHRD_ETHER */ #include /* compare_ether_addr */ +#include #include /* Pretty obvious */ #include /* New driver API */ @@ -1842,6 +1843,18 @@ int wireless_rtnetlink_set(struct net_device * dev, */ #ifdef WE_EVENT_RTNETLINK +static struct sk_buff_head wireless_nlevent_queue; + +static void wireless_nlevent_process(unsigned long data) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&wireless_nlevent_queue))) + 
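/*
 * A hedged sketch of how a driver receive path might use the
 * __netdev_alloc_skb() helper documented above.  The function name, bufsz and
 * the DMA step are illustrative only; NET_IP_ALIGN, skb_reserve(), skb_put(),
 * eth_type_trans() and netif_rx() are existing kernel interfaces.
 */
static void example_rx_alloc(struct net_device *dev, unsigned int bufsz,
                             unsigned int frame_len)
{
        struct sk_buff *skb;

        skb = __netdev_alloc_skb(dev, bufsz + NET_IP_ALIGN, GFP_ATOMIC);
        if (!skb)
                return;                         /* allocation failed, retry later */
        skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
        /* ...hardware DMAs the received frame into skb->data... */
        skb_put(skb, frame_len);                /* frame_len bytes were received */
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
}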
netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC); +} + +static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0); + /* ---------------------------------------------------------------- */ /* * Fill a rtnetlink message with our event data. @@ -1904,8 +1917,17 @@ static inline void rtmsg_iwinfo(struct net_device * dev, return; } NETLINK_CB(skb).dst_group = RTNLGRP_LINK; - netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC); + skb_queue_tail(&wireless_nlevent_queue, skb); + tasklet_schedule(&wireless_nlevent_tasklet); } + +static int __init wireless_nlevent_init(void) +{ + skb_queue_head_init(&wireless_nlevent_queue); + return 0; +} + +subsys_initcall(wireless_nlevent_init); #endif /* WE_EVENT_RTNETLINK */ /* ---------------------------------------------------------------- */ diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index c39bff706cfc..090bc39e8199 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -2,7 +2,7 @@ * net/dccp/ccids/ccid3.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005-6 Ian McDonald + * Copyright (c) 2005-6 Ian McDonald * * An implementation of the DCCP protocol * @@ -342,6 +342,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count; + timeval_add_usecs(&hctx->ccid3hctx_t_nom, + hctx->ccid3hctx_t_ipi); } out: return rc; @@ -413,7 +415,8 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len) case TFRC_SSTATE_NO_FBACK: case TFRC_SSTATE_FBACK: if (len > 0) { - hctx->ccid3hctx_t_nom = now; + timeval_sub_usecs(&hctx->ccid3hctx_t_nom, + hctx->ccid3hctx_t_ipi); ccid3_calc_new_t_ipi(hctx); ccid3_calc_new_delta(hctx); timeval_add_usecs(&hctx->ccid3hctx_t_nom, @@ -757,8 +760,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk) } hcrx->ccid3hcrx_tstamp_last_feedback = now; - hcrx->ccid3hcrx_last_counter = packet->dccphrx_ccval; - hcrx->ccid3hcrx_seqno_last_counter = packet->dccphrx_seqno; + hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval; hcrx->ccid3hcrx_bytes_recv = 0; /* Convert to multiples of 10us */ @@ -782,7 +784,7 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) return 0; - DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter; + DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter; if (dccp_packet_without_ack(skb)) return 0; @@ -854,6 +856,11 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk) interval = 1; } found: + if (!tail) { + LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n", + __FUNCTION__); + return ~0; + } rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", dccp_role(sk), sk, rtt); @@ -864,9 +871,20 @@ found: delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta); + if (x_recv == 0) + x_recv = hcrx->ccid3hcrx_x_recv; + tmp1 = (u64)x_recv * (u64)rtt; do_div(tmp1,10000000); tmp2 = (u32)tmp1; + + if (!tmp2) { + LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 " + "%s: x_recv = %u, rtt =%u\n", + __FUNCTION__, x_recv, rtt); + return ~0; + } + fval = (hcrx->ccid3hcrx_s * 100000) / tmp2; /* do not alter order above or you will get overflow on 32 bit */ p = tfrc_calc_x_reverse_lookup(fval); @@ -882,31 +900,101 @@ found: static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, 
u8 win_loss) { struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); + struct dccp_li_hist_entry *next, *head; + u64 seq_temp; - if (seq_loss != DCCP_MAX_SEQNO + 1 && - list_empty(&hcrx->ccid3hcrx_li_hist)) { - struct dccp_li_hist_entry *li_tail; - - li_tail = dccp_li_hist_interval_new(ccid3_li_hist, - &hcrx->ccid3hcrx_li_hist, - seq_loss, win_loss); - if (li_tail == NULL) + if (list_empty(&hcrx->ccid3hcrx_li_hist)) { + if (!dccp_li_hist_interval_new(ccid3_li_hist, + &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss)) return; - li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk); - } else - LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of " - "interval\n", __FUNCTION__); + + next = (struct dccp_li_hist_entry *) + hcrx->ccid3hcrx_li_hist.next; + next->dccplih_interval = ccid3_hc_rx_calc_first_li(sk); + } else { + struct dccp_li_hist_entry *entry; + struct list_head *tail; + + head = (struct dccp_li_hist_entry *) + hcrx->ccid3hcrx_li_hist.next; + /* FIXME win count check removed as was wrong */ + /* should make this check with receive history */ + /* and compare there as per section 10.2 of RFC4342 */ + + /* new loss event detected */ + /* calculate last interval length */ + seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss); + entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); + + if (entry == NULL) { + printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__); + dump_stack(); + return; + } + + list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist); + + tail = hcrx->ccid3hcrx_li_hist.prev; + list_del(tail); + kmem_cache_free(ccid3_li_hist->dccplih_slab, tail); + + /* Create the newest interval */ + entry->dccplih_seqno = seq_loss; + entry->dccplih_interval = seq_temp; + entry->dccplih_win_count = win_loss; + } } -static void ccid3_hc_rx_detect_loss(struct sock *sk) +static int ccid3_hc_rx_detect_loss(struct sock *sk, + struct dccp_rx_hist_entry *packet) { struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); - u8 win_loss; - const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist, - &hcrx->ccid3hcrx_li_hist, - &win_loss); + struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist); + u64 seqno = packet->dccphrx_seqno; + u64 tmp_seqno; + int loss = 0; + u8 ccval; - ccid3_hc_rx_update_li(sk, seq_loss, win_loss); + + tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss; + + if (!rx_hist || + follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) { + hcrx->ccid3hcrx_seqno_nonloss = seqno; + hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval; + goto detect_out; + } + + + while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno) + > TFRC_RECV_NUM_LATE_LOSS) { + loss = 1; + ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss, + hcrx->ccid3hcrx_ccval_nonloss); + tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss; + dccp_inc_seqno(&tmp_seqno); + hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; + dccp_inc_seqno(&tmp_seqno); + while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist, + tmp_seqno, &ccval)) { + hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; + hcrx->ccid3hcrx_ccval_nonloss = ccval; + dccp_inc_seqno(&tmp_seqno); + } + } + + /* FIXME - this code could be simplified with above while */ + /* but works at moment */ + if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) { + hcrx->ccid3hcrx_seqno_nonloss = seqno; + hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval; + } + +detect_out: + dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist, + &hcrx->ccid3hcrx_li_hist, packet, + hcrx->ccid3hcrx_seqno_nonloss); + return loss; } static 
void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) @@ -916,8 +1004,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) struct dccp_rx_hist_entry *packet; struct timeval now; u8 win_count; - u32 p_prev, r_sample, t_elapsed; - int ins; + u32 p_prev, rtt_prev, r_sample, t_elapsed; + int loss; BUG_ON(hcrx == NULL || !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA || @@ -932,7 +1020,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) case DCCP_PKT_DATAACK: if (opt_recv->dccpor_timestamp_echo == 0) break; - p_prev = hcrx->ccid3hcrx_rtt; + rtt_prev = hcrx->ccid3hcrx_rtt; dccp_timestamp(sk, &now); timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10); r_sample = timeval_usecs(&now); @@ -951,8 +1039,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 + r_sample / 10; - if (p_prev != hcrx->ccid3hcrx_rtt) - ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n", + if (rtt_prev != hcrx->ccid3hcrx_rtt) + ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n", dccp_role(sk), hcrx->ccid3hcrx_rtt, opt_recv->dccpor_elapsed_time); break; @@ -973,8 +1061,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) win_count = packet->dccphrx_ccval; - ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist, - &hcrx->ccid3hcrx_li_hist, packet); + loss = ccid3_hc_rx_detect_loss(sk, packet); if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) return; @@ -991,7 +1078,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) case TFRC_RSTATE_DATA: hcrx->ccid3hcrx_bytes_recv += skb->len - dccp_hdr(skb)->dccph_doff * 4; - if (ins != 0) + if (loss) break; dccp_timestamp(sk, &now); @@ -1012,7 +1099,6 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n", dccp_role(sk), sk, dccp_state_name(sk->sk_state)); - ccid3_hc_rx_detect_loss(sk); p_prev = hcrx->ccid3hcrx_p; /* Calculate loss event rate */ @@ -1022,6 +1108,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) /* Scaling up by 1000000 as fixed decimal */ if (i_mean != 0) hcrx->ccid3hcrx_p = 1000000 / i_mean; + } else { + printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__); + dump_stack(); } if (hcrx->ccid3hcrx_p > p_prev) { @@ -1230,7 +1319,7 @@ static __exit void ccid3_module_exit(void) } module_exit(ccid3_module_exit); -MODULE_AUTHOR("Ian McDonald , " +MODULE_AUTHOR("Ian McDonald , " "Arnaldo Carvalho de Melo "); MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID"); MODULE_LICENSE("GPL"); diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index 5ade4f668b22..0a2cb7536d26 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h @@ -1,13 +1,13 @@ /* * net/dccp/ccids/ccid3.h * - * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. 
For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz + * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -120,9 +120,10 @@ struct ccid3_hc_rx_sock { #define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv #define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt #define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p - u64 ccid3hcrx_seqno_last_counter:48, + u64 ccid3hcrx_seqno_nonloss:48, + ccid3hcrx_ccval_nonloss:4, ccid3hcrx_state:8, - ccid3hcrx_last_counter:4; + ccid3hcrx_ccval_last_counter:4; u32 ccid3hcrx_bytes_recv; struct timeval ccid3hcrx_tstamp_last_feedback; struct timeval ccid3hcrx_tstamp_last_ack; diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 5d7b7d864385..906c81ab9d4f 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c @@ -2,7 +2,7 @@ * net/dccp/ccids/lib/loss_interval.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005-6 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * * This program is free software; you can redistribute it and/or modify @@ -12,6 +12,7 @@ */ #include +#include #include "loss_interval.h" @@ -90,13 +91,13 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list) u32 w_tot = 0; list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) { - if (i < DCCP_LI_HIST_IVAL_F_LENGTH) { + if (li_entry->dccplih_interval != ~0) { i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i]; w_tot += dccp_li_hist_w[i]; + if (i != 0) + i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1]; } - if (i != 0) - i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1]; if (++i > DCCP_LI_HIST_IVAL_F_LENGTH) break; @@ -107,37 +108,36 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list) i_tot = max(i_tot0, i_tot1); - /* FIXME: Why do we do this? -Ian McDonald */ - if (i_tot * 4 < w_tot) - i_tot = w_tot * 4; + if (!w_tot) { + LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__); + return 1; + } - return i_tot * 4 / w_tot; + return i_tot / w_tot; } EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean); -struct dccp_li_hist_entry *dccp_li_hist_interval_new(struct dccp_li_hist *hist, - struct list_head *list, - const u64 seq_loss, - const u8 win_loss) +int dccp_li_hist_interval_new(struct dccp_li_hist *hist, + struct list_head *list, const u64 seq_loss, const u8 win_loss) { - struct dccp_li_hist_entry *tail = NULL, *entry; + struct dccp_li_hist_entry *entry; int i; - for (i = 0; i <= DCCP_LI_HIST_IVAL_F_LENGTH; ++i) { + for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) { entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); if (entry == NULL) { dccp_li_hist_purge(hist, list); - return NULL; + dump_stack(); + return 0; } - if (tail == NULL) - tail = entry; + entry->dccplih_interval = ~0; list_add(&entry->dccplih_node, list); } entry->dccplih_seqno = seq_loss; entry->dccplih_win_count = win_loss; - return tail; + return 1; } EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new); diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h index 43bf78269d1d..0ae85f0340b2 100644 --- a/net/dccp/ccids/lib/loss_interval.h +++ b/net/dccp/ccids/lib/loss_interval.h @@ -4,7 +4,7 @@ * net/dccp/ccids/lib/loss_interval.h * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 
- * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * * This program is free software; you can redistribute it and/or modify it @@ -52,9 +52,6 @@ extern void dccp_li_hist_purge(struct dccp_li_hist *hist, extern u32 dccp_li_hist_calc_i_mean(struct list_head *list); -extern struct dccp_li_hist_entry * - dccp_li_hist_interval_new(struct dccp_li_hist *hist, - struct list_head *list, - const u64 seq_loss, - const u8 win_loss); +extern int dccp_li_hist_interval_new(struct dccp_li_hist *hist, + struct list_head *list, const u64 seq_loss, const u8 win_loss); #endif /* _DCCP_LI_HIST_ */ diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c index ad98d6a322eb..b876c9c81c65 100644 --- a/net/dccp/ccids/lib/packet_history.c +++ b/net/dccp/ccids/lib/packet_history.c @@ -1,13 +1,13 @@ /* - * net/dccp/packet_history.h + * net/dccp/packet_history.c * - * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz + * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -112,64 +112,27 @@ struct dccp_rx_hist_entry * EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet); -int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, +void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, struct list_head *rx_list, struct list_head *li_list, - struct dccp_rx_hist_entry *packet) + struct dccp_rx_hist_entry *packet, + u64 nonloss_seqno) { - struct dccp_rx_hist_entry *entry, *next, *iter; + struct dccp_rx_hist_entry *entry, *next; u8 num_later = 0; - iter = dccp_rx_hist_head(rx_list); - if (iter == NULL) - dccp_rx_hist_add_entry(rx_list, packet); - else { - const u64 seqno = packet->dccphrx_seqno; + list_add(&packet->dccphrx_node, rx_list); - if (after48(seqno, iter->dccphrx_seqno)) - dccp_rx_hist_add_entry(rx_list, packet); - else { - if (dccp_rx_hist_entry_data_packet(iter)) - num_later = 1; - - list_for_each_entry_continue(iter, rx_list, - dccphrx_node) { - if (after48(seqno, iter->dccphrx_seqno)) { - dccp_rx_hist_add_entry(&iter->dccphrx_node, - packet); - goto trim_history; - } - - if (dccp_rx_hist_entry_data_packet(iter)) - num_later++; - - if (num_later == TFRC_RECV_NUM_LATE_LOSS) { - dccp_rx_hist_entry_delete(hist, packet); - return 1; - } - } - - if (num_later < TFRC_RECV_NUM_LATE_LOSS) - dccp_rx_hist_add_entry(rx_list, packet); - /* - * FIXME: else what? should we destroy the packet - * like above? 
- */ - } - } - -trim_history: - /* - * Trim history (remove all packets after the NUM_LATE_LOSS + 1 - * data packets) - */ num_later = TFRC_RECV_NUM_LATE_LOSS + 1; if (!list_empty(li_list)) { list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) { if (num_later == 0) { - list_del_init(&entry->dccphrx_node); - dccp_rx_hist_entry_delete(hist, entry); + if (after48(nonloss_seqno, + entry->dccphrx_seqno)) { + list_del_init(&entry->dccphrx_node); + dccp_rx_hist_entry_delete(hist, entry); + } } else if (dccp_rx_hist_entry_data_packet(entry)) --num_later; } @@ -217,94 +180,10 @@ trim_history: --num_later; } } - - return 0; } EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet); -u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, - struct list_head *li_list, u8 *win_loss) -{ - struct dccp_rx_hist_entry *entry, *next, *packet; - struct dccp_rx_hist_entry *a_loss = NULL; - struct dccp_rx_hist_entry *b_loss = NULL; - u64 seq_loss = DCCP_MAX_SEQNO + 1; - u8 num_later = TFRC_RECV_NUM_LATE_LOSS; - - list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) { - if (num_later == 0) { - b_loss = entry; - break; - } else if (dccp_rx_hist_entry_data_packet(entry)) - --num_later; - } - - if (b_loss == NULL) - goto out; - - num_later = 1; - list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) { - if (num_later == 0) { - a_loss = entry; - break; - } else if (dccp_rx_hist_entry_data_packet(entry)) - --num_later; - } - - if (a_loss == NULL) { - if (list_empty(li_list)) { - /* no loss event have occured yet */ - LIMIT_NETDEBUG("%s: TODO: find a lost data packet by " - "comparing to initial seqno\n", - __FUNCTION__); - goto out; - } else { - LIMIT_NETDEBUG("%s: Less than 4 data pkts in history!", - __FUNCTION__); - goto out; - } - } - - /* Locate a lost data packet */ - entry = packet = b_loss; - list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) { - u64 delta = dccp_delta_seqno(entry->dccphrx_seqno, - packet->dccphrx_seqno); - - if (delta != 0) { - if (dccp_rx_hist_entry_data_packet(packet)) - --delta; - /* - * FIXME: check this, probably this % usage is because - * in earlier drafts the ndp count was just 8 bits - * long, but now it cam be up to 24 bits long. 
- */ -#if 0 - if (delta % DCCP_NDP_LIMIT != - (packet->dccphrx_ndp - - entry->dccphrx_ndp) % DCCP_NDP_LIMIT) -#endif - if (delta != packet->dccphrx_ndp - entry->dccphrx_ndp) { - seq_loss = entry->dccphrx_seqno; - dccp_inc_seqno(&seq_loss); - } - } - packet = entry; - if (packet == a_loss) - break; - } -out: - if (seq_loss != DCCP_MAX_SEQNO + 1) - *win_loss = a_loss->dccphrx_ccval; - else - *win_loss = 0; /* Paranoia */ - - return seq_loss; -} - -EXPORT_SYMBOL_GPL(dccp_rx_hist_detect_loss); - struct dccp_tx_hist *dccp_tx_hist_new(const char *name) { struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC); @@ -365,6 +244,25 @@ struct dccp_tx_hist_entry * EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry); +int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq, + u8 *ccval) +{ + struct dccp_rx_hist_entry *packet = NULL, *entry; + + list_for_each_entry(entry, list, dccphrx_node) + if (entry->dccphrx_seqno == seq) { + packet = entry; + break; + } + + if (packet) + *ccval = packet->dccphrx_ccval; + + return packet != NULL; +} + +EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry); + void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist, struct list_head *list, struct dccp_tx_hist_entry *packet) @@ -391,7 +289,7 @@ void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list) EXPORT_SYMBOL_GPL(dccp_tx_hist_purge); -MODULE_AUTHOR("Ian McDonald , " +MODULE_AUTHOR("Ian McDonald , " "Arnaldo Carvalho de Melo "); MODULE_DESCRIPTION("DCCP TFRC library"); MODULE_LICENSE("GPL"); diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h index 673c209e4e85..067cf1c85a37 100644 --- a/net/dccp/ccids/lib/packet_history.h +++ b/net/dccp/ccids/lib/packet_history.h @@ -1,13 +1,13 @@ /* * net/dccp/packet_history.h * - * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. 
For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz + * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -106,6 +106,8 @@ static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist, extern struct dccp_tx_hist_entry * dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq); +extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq, + u8 *ccval); static inline void dccp_tx_hist_add_entry(struct list_head *list, struct dccp_tx_hist_entry *entry) @@ -164,12 +166,6 @@ static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist, extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list); -static inline void dccp_rx_hist_add_entry(struct list_head *list, - struct dccp_rx_hist_entry *entry) -{ - list_add(&entry->dccphrx_node, list); -} - static inline struct dccp_rx_hist_entry * dccp_rx_hist_head(struct list_head *list) { @@ -188,10 +184,11 @@ static inline int entry->dccphrx_type == DCCP_PKT_DATAACK; } -extern int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, +extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, struct list_head *rx_list, struct list_head *li_list, - struct dccp_rx_hist_entry *packet); + struct dccp_rx_hist_entry *packet, + u64 nonloss_seqno); extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, struct list_head *li_list, u8 *win_loss); diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h index 130c4c40cfe3..45f30f59ea2a 100644 --- a/net/dccp/ccids/lib/tfrc.h +++ b/net/dccp/ccids/lib/tfrc.h @@ -4,7 +4,7 @@ * net/dccp/ccids/lib/tfrc.h * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon * diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c index 4fd2ebebf5a0..44076e0c6591 100644 --- a/net/dccp/ccids/lib/tfrc_equation.c +++ b/net/dccp/ccids/lib/tfrc_equation.c @@ -2,7 +2,7 @@ * net/dccp/ccids/lib/tfrc_equation.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Ian McDonald * Copyright (c) 2005 Arnaldo Carvalho de Melo * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon * diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index d00a2f4ee5dd..a5c5475724c0 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -5,7 +5,7 @@ * * An implementation of the DCCP protocol * Copyright (c) 2005 Arnaldo Carvalho de Melo - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005-6 Ian McDonald * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as @@ -81,6 +81,14 @@ static inline u64 max48(const u64 seq1, const u64 seq2) return after48(seq1, seq2) ? 
seq1 : seq2; } +/* is seq1 next seqno after seq2 */ +static inline int follows48(const u64 seq1, const u64 seq2) +{ + int diff = (seq1 & 0xFFFF) - (seq2 & 0xFFFF); + + return diff==1; +} + enum { DCCP_MIB_NUM = 0, DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 9f3d4d7cd0bf..610c722ac27f 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -230,7 +230,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, ipv6_addr_copy(&np->saddr, saddr); inet->rcv_saddr = LOOPBACK4_IPV6; - ip6_dst_store(sk, dst, NULL); + __ip6_dst_store(sk, dst, NULL); icsk->icsk_ext_hdr_len = 0; if (np->opt != NULL) @@ -863,7 +863,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, * comment in that function for the gory details. -acme */ - ip6_dst_store(newsk, dst, NULL); + __ip6_dst_store(newsk, dst, NULL); newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | NETIF_F_TSO); newdp6 = (struct dccp6_sock *)newsk; diff --git a/net/dccp/options.c b/net/dccp/options.c index daf72bb671f0..07a34696ac97 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -4,7 +4,7 @@ * An implementation of the DCCP protocol * Copyright (c) 2005 Aristeu Sergio Rozanski Filho * Copyright (c) 2005 Arnaldo Carvalho de Melo - * Copyright (c) 2005 Ian McDonald + * Copyright (c) 2005 Ian McDonald * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 1355614ec11b..743e9fcf7c5a 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -925,8 +925,13 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old for(dev_out = dev_base; dev_out; dev_out = dev_out->next) { if (!dev_out->dn_ptr) continue; - if (dn_dev_islocal(dev_out, oldflp->fld_src)) - break; + if (!dn_dev_islocal(dev_out, oldflp->fld_src)) + continue; + if ((dev_out->flags & IFF_LOOPBACK) && + oldflp->fld_dst && + !dn_dev_islocal(dev_out, oldflp->fld_dst)) + continue; + break; } read_unlock(&dev_base_lock); if (dev_out == NULL) diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig index dbb08528ddf5..f7e84e9d13ad 100644 --- a/net/ieee80211/Kconfig +++ b/net/ieee80211/Kconfig @@ -58,6 +58,7 @@ config IEEE80211_CRYPT_TKIP depends on IEEE80211 && NET_RADIO select CRYPTO select CRYPTO_MICHAEL_MIC + select CRC32 ---help--- Include software based cipher suites in support of IEEE 802.11i (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index ebc33ca6e692..4cef39e171d0 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c @@ -116,6 +116,16 @@ ieee80211softmac_auth_queue(void *data) kfree(auth); } +/* Sends a response to an auth challenge (for shared key auth). 
*/ +static void +ieee80211softmac_auth_challenge_response(void *_aq) +{ + struct ieee80211softmac_auth_queue_item *aq = _aq; + + /* Send our response */ + ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); +} + /* Handle the auth response from the AP * This should be registered with ieee80211 as handle_auth */ @@ -197,24 +207,30 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE: /* Check to make sure we have a challenge IE */ data = (u8 *)auth->info_element; - if(*data++ != MFIE_TYPE_CHALLENGE){ + if (*data++ != MFIE_TYPE_CHALLENGE) { printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n"); break; } /* Save the challenge */ spin_lock_irqsave(&mac->lock, flags); net->challenge_len = *data++; - if(net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) + if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; - if(net->challenge != NULL) + if (net->challenge != NULL) kfree(net->challenge); net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC); memcpy(net->challenge, data, net->challenge_len); aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; - spin_unlock_irqrestore(&mac->lock, flags); - /* Send our response */ - ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); + /* We reuse the work struct from the auth request here. + * It is safe to do so as each one is per-request, and + * at this point (dealing with authentication response) + * we have obviously already sent the initial auth + * request. */ + cancel_delayed_work(&aq->work); + INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); + schedule_work(&aq->work); + spin_unlock_irqrestore(&mac->lock, flags); return 0; case IEEE80211SOFTMAC_AUTH_SHARED_PASS: kfree(net->challenge); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 9be53a8e72c3..51738000f3dc 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -159,7 +159,7 @@ void free_fib_info(struct fib_info *fi) void fib_release_info(struct fib_info *fi) { - write_lock(&fib_info_lock); + write_lock_bh(&fib_info_lock); if (fi && --fi->fib_treeref == 0) { hlist_del(&fi->fib_hash); if (fi->fib_prefsrc) @@ -172,7 +172,7 @@ void fib_release_info(struct fib_info *fi) fi->fib_dead = 1; fib_info_put(fi); } - write_unlock(&fib_info_lock); + write_unlock_bh(&fib_info_lock); } static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) @@ -598,7 +598,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash, unsigned int old_size = fib_hash_size; unsigned int i, bytes; - write_lock(&fib_info_lock); + write_lock_bh(&fib_info_lock); old_info_hash = fib_info_hash; old_laddrhash = fib_info_laddrhash; fib_hash_size = new_size; @@ -639,7 +639,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash, } fib_info_laddrhash = new_laddrhash; - write_unlock(&fib_info_lock); + write_unlock_bh(&fib_info_lock); bytes = old_size * sizeof(struct hlist_head *); fib_hash_free(old_info_hash, bytes); @@ -820,7 +820,7 @@ link_it: fi->fib_treeref++; atomic_inc(&fi->fib_clntref); - write_lock(&fib_info_lock); + write_lock_bh(&fib_info_lock); hlist_add_head(&fi->fib_hash, &fib_info_hash[fib_info_hashfn(fi)]); if (fi->fib_prefsrc) { @@ -839,7 +839,7 @@ link_it: head = &fib_info_devhash[hash]; hlist_add_head(&nh->nh_hash, head); } endfor_nexthops(fi) - write_unlock(&fib_info_lock); + write_unlock_bh(&fib_info_lock); return fi; 
err_inval: diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 9f4b752f5a33..8e8117c19e4d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1793,29 +1793,35 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) struct in_device *in_dev; u32 group = imr->imr_multiaddr.s_addr; u32 ifindex; + int ret = -EADDRNOTAVAIL; rtnl_lock(); in_dev = ip_mc_find_dev(imr); - if (!in_dev) { - rtnl_unlock(); - return -ENODEV; - } ifindex = imr->imr_ifindex; for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { - if (iml->multi.imr_multiaddr.s_addr == group && - iml->multi.imr_ifindex == ifindex) { - (void) ip_mc_leave_src(sk, iml, in_dev); + if (iml->multi.imr_multiaddr.s_addr != group) + continue; + if (ifindex) { + if (iml->multi.imr_ifindex != ifindex) + continue; + } else if (imr->imr_address.s_addr && imr->imr_address.s_addr != + iml->multi.imr_address.s_addr) + continue; - *imlp = iml->next; + (void) ip_mc_leave_src(sk, iml, in_dev); + *imlp = iml->next; + + if (in_dev) ip_mc_dec_group(in_dev, group); - rtnl_unlock(); - sock_kfree_s(sk, iml, sizeof(*iml)); - return 0; - } + rtnl_unlock(); + sock_kfree_s(sk, iml, sizeof(*iml)); + return 0; } + if (!in_dev) + ret = -ENODEV; rtnl_unlock(); - return -EADDRNOTAVAIL; + return ret; } int ip_mc_source(int add, int omode, struct sock *sk, struct @@ -2199,13 +2205,13 @@ void ip_mc_drop_socket(struct sock *sk) struct in_device *in_dev; inet->mc_list = iml->next; - if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) { - (void) ip_mc_leave_src(sk, iml, in_dev); + in_dev = inetdev_by_index(iml->multi.imr_ifindex); + (void) ip_mc_leave_src(sk, iml, in_dev); + if (in_dev != NULL) { ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); in_dev_put(in_dev); } sock_kfree_s(sk, iml, sizeof(*iml)); - } rtnl_unlock(); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 7c9f9a6421b8..4c20f5546893 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -526,6 +526,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) err = output(skb); + if (!err) + IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); if (err || !frag) break; @@ -649,9 +651,6 @@ slow_path: /* * Put this fragment into the sending queue. 
*/ - - IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); - iph->tot_len = htons(len + hlen); ip_send_check(iph); @@ -659,6 +658,8 @@ slow_path: err = output(skb2); if (err) goto fail; + + IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); } kfree_skb(skb); IP_INC_STATS(IPSTATS_MIB_FRAGOKS); @@ -946,7 +947,7 @@ alloc_new_skb: skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); data += fraggap; - skb_trim(skb_prev, maxfraglen); + pskb_trim_unique(skb_prev, maxfraglen); } copy = datalen - transhdrlen - fraggap; @@ -1141,7 +1142,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, data, fraggap, 0); skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); - skb_trim(skb_prev, maxfraglen); + pskb_trim_unique(skb_prev, maxfraglen); } /* diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 84f43a3c9098..2d05c4133d3e 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -112,14 +112,19 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) { char *secdata; - u32 seclen; + u32 seclen, secid; int err; - err = security_socket_getpeersec_dgram(skb, &secdata, &seclen); + err = security_socket_getpeersec_dgram(NULL, skb, &secid); + if (err) + return; + + err = security_secid_to_secctx(secid, &secdata, &seclen); if (err) return; put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata); + security_release_secctx(secdata, seclen); } diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 80c73ca90116..8d1d7a6e72a5 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -236,7 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb, struct arpt_entry *e, *back; const char *indev, *outdev; void *table_base; - struct xt_table_info *private = table->private; + struct xt_table_info *private; /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) + @@ -248,6 +248,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb, outdev = out ? out->name : nulldevname; read_lock_bh(&table->lock); + private = table->private; table_base = (void *)private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); back = get_entry(table_base, private->underflow[hook]); @@ -1170,21 +1171,34 @@ static int __init arp_tables_init(void) { int ret; - xt_proto_init(NF_ARP); + ret = xt_proto_init(NF_ARP); + if (ret < 0) + goto err1; /* Noone else will be downing sem now, so we won't sleep */ - xt_register_target(&arpt_standard_target); - xt_register_target(&arpt_error_target); + ret = xt_register_target(&arpt_standard_target); + if (ret < 0) + goto err2; + ret = xt_register_target(&arpt_error_target); + if (ret < 0) + goto err3; /* Register setsockopt */ ret = nf_register_sockopt(&arpt_sockopts); - if (ret < 0) { - duprintf("Unable to register sockopts.\n"); - return ret; - } + if (ret < 0) + goto err4; printk("arp_tables: (C) 2002 David S. 
Miller\n"); return 0; + +err4: + xt_unregister_target(&arpt_error_target); +err3: + xt_unregister_target(&arpt_standard_target); +err2: + xt_proto_fini(NF_ARP); +err1: + return ret; } static void __exit arp_tables_fini(void) diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c index 33891bb1fde4..0d4cc92391fa 100644 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ b/net/ipv4/netfilter/ip_conntrack_netlink.c @@ -415,21 +415,18 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) cb->args[0], *id); read_lock_bh(&ip_conntrack_lock); + last = (struct ip_conntrack *)cb->args[1]; for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) { restart: - last = (struct ip_conntrack *)cb->args[1]; list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) { h = (struct ip_conntrack_tuple_hash *) i; if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = tuplehash_to_ctrack(h); - if (last != NULL) { - if (ct == last) { - ip_conntrack_put(last); - cb->args[1] = 0; - last = NULL; - } else + if (cb->args[1]) { + if (ct != last) continue; + cb->args[1] = 0; } if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, @@ -440,17 +437,17 @@ restart: goto out; } } - if (last != NULL) { - ip_conntrack_put(last); + if (cb->args[1]) { cb->args[1] = 0; goto restart; } } out: read_unlock_bh(&ip_conntrack_lock); + if (last) + ip_conntrack_put(last); DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); - return skb->len; } diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c index fc87ce0da40d..4f222d6be009 100644 --- a/net/ipv4/netfilter/ip_conntrack_sip.c +++ b/net/ipv4/netfilter/ip_conntrack_sip.c @@ -442,7 +442,7 @@ static int __init init(void) sip[i].tuple.src.u.udp.port = htons(ports[i]); sip[i].mask.src.u.udp.port = 0xFFFF; sip[i].mask.dst.protonum = 0xFF; - sip[i].max_expected = 1; + sip[i].max_expected = 2; sip[i].timeout = 3 * 60; /* 3 minutes */ sip[i].me = THIS_MODULE; sip[i].help = sip_help; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index fc5bdd5eb7d3..048514f15f2f 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -230,7 +230,7 @@ ipt_do_table(struct sk_buff **pskb, const char *indev, *outdev; void *table_base; struct ipt_entry *e, *back; - struct xt_table_info *private = table->private; + struct xt_table_info *private; /* Initialization */ ip = (*pskb)->nh.iph; @@ -247,6 +247,7 @@ ipt_do_table(struct sk_buff **pskb, read_lock_bh(&table->lock); IP_NF_ASSERT(table->valid_hooks & (1 << hook)); + private = table->private; table_base = (void *)private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); @@ -2239,22 +2240,39 @@ static int __init ip_tables_init(void) { int ret; - xt_proto_init(AF_INET); + ret = xt_proto_init(AF_INET); + if (ret < 0) + goto err1; /* Noone else will be downing sem now, so we won't sleep */ - xt_register_target(&ipt_standard_target); - xt_register_target(&ipt_error_target); - xt_register_match(&icmp_matchstruct); + ret = xt_register_target(&ipt_standard_target); + if (ret < 0) + goto err2; + ret = xt_register_target(&ipt_error_target); + if (ret < 0) + goto err3; + ret = xt_register_match(&icmp_matchstruct); + if (ret < 0) + goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ipt_sockopts); - if (ret < 0) { - duprintf("Unable to register sockopts.\n"); - return ret; - } + if (ret < 0) + goto err5; printk("ip_tables: (C) 2000-2006 Netfilter 
Core Team\n"); return 0; + +err5: + xt_unregister_match(&icmp_matchstruct); +err4: + xt_unregister_target(&ipt_error_target); +err3: + xt_unregister_target(&ipt_standard_target); +err2: + xt_proto_fini(AF_INET); +err1: + return ret; } static void __exit ip_tables_fini(void) diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index d7dd7fe7051c..d46fd677fa11 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -115,6 +115,11 @@ static void ulog_send(unsigned int nlgroupnum) del_timer(&ub->timer); } + if (!ub->skb) { + DEBUGP("ipt_ULOG: ulog_send: nothing to send\n"); + return; + } + /* last nlmsg needs NLMSG_DONE */ if (ub->qlen > 1) ub->lastnlh->nlmsg_type = NLMSG_DONE; diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c index 92980ab8ce48..3bd2368e1fc9 100644 --- a/net/ipv4/netfilter/ipt_hashlimit.c +++ b/net/ipv4/netfilter/ipt_hashlimit.c @@ -454,15 +454,12 @@ hashlimit_match(const struct sk_buff *skb, dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); dh->rateinfo.cost = user2credits(hinfo->cfg.avg); - - spin_unlock_bh(&hinfo->lock); - return 1; + } else { + /* update expiration timeout */ + dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); + rateinfo_recalc(dh, now); } - /* update expiration timeout */ - dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); - - rateinfo_recalc(dh, now); if (dh->rateinfo.credit >= dh->rateinfo.cost) { /* We're underlimit. */ dh->rateinfo.credit -= dh->rateinfo.cost; @@ -508,6 +505,9 @@ hashlimit_checkentry(const char *tablename, if (!r->cfg.expire) return 0; + if (r->name[sizeof(r->name) - 1] != '\0') + return 0; + /* This is the best we've got: We cannot release and re-grab lock, * since checkentry() is called before ip_tables.c grabs ipt_mutex. * We also cannot grab the hashtable spinlock, since htable_create will diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 2dc6dbb28467..b873cbcdd0b8 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -104,6 +104,7 @@ #include #include #include +#include #ifdef CONFIG_SYSCTL #include #endif @@ -1125,6 +1126,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw, struct rtable *rth, **rthp; u32 skeys[2] = { saddr, 0 }; int ikeys[2] = { dev->ifindex, 0 }; + struct netevent_redirect netevent; if (!in_dev) return; @@ -1216,6 +1218,11 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw, rt_drop(rt); goto do_next; } + + netevent.old = &rth->u.dst; + netevent.new = &rt->u.dst; + call_netevent_notifiers(NETEVENT_REDIRECT, + &netevent); rt_del(hash, rth); if (!rt_intern_hash(hash, rt, &rt)) @@ -1452,6 +1459,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu) } dst->metrics[RTAX_MTU-1] = mtu; dst_set_expires(dst, ip_rt_mtu_expires); + call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst); } } @@ -3149,7 +3157,7 @@ int __init ip_rt_init(void) rhash_entries, (num_physpages >= 128 * 1024) ? 
15 : 17, - HASH_HIGHMEM, + 0, &rt_hash_log, &rt_hash_mask, 0); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f6a2d9223d07..934396bb1376 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1132,7 +1132,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, tp->ucopy.dma_chan = NULL; preempt_disable(); if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && - !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) { + !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) { preempt_enable_no_resched(); tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); } else @@ -1659,7 +1659,8 @@ adjudge_to_death: const int tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { - inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); + inet_csk_reset_keepalive_timer(sk, + tmo - TCP_TIMEWAIT_LEN); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto out; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 738dad9f7d49..104af5d5bcbc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3541,7 +3541,8 @@ void tcp_cwnd_application_limited(struct sock *sk) if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { /* Limited by application or receiver window. */ - u32 win_used = max(tp->snd_cwnd_used, 2U); + u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); + u32 win_used = max(tp->snd_cwnd_used, init_win); if (win_used < tp->snd_cwnd) { tp->snd_ssthresh = tcp_current_ssthresh(sk); tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f6f39e814291..4b04c3edd4a9 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -438,7 +438,6 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) It can f.e. if SYNs crossed. */ if (!sock_owned_by_user(sk)) { - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); sk->sk_err = err; sk->sk_error_report(sk); @@ -874,7 +873,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) drop_and_free: reqsk_free(req); drop: - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); return 0; } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 0ccb7cb22b15..624e2b2c7f53 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -589,8 +589,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, /* RFC793: "second check the RST bit" and * "fourth, check the SYN bit" */ - if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) + if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { + TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); goto embryonic_reset; + } /* ACK sequence verified above, just make sure ACK is * set. If ACK not set, just silently drop the packet. 
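The netfilter init-path conversions in this series (arp_tables_init and ip_tables_init above, ip6_tables_init further below) all apply the same fix: every xt_proto_init()/xt_register_target()/xt_register_match() call has its return value checked, and on failure everything registered so far is torn down in reverse order through a goto ladder before the error is returned. A minimal userspace sketch of that idiom follows; register_a()/register_b()/register_c(), the matching unregister helpers, and example_init() are hypothetical stand-ins for the kernel calls, not real APIs.

#include <stdio.h>

/* Hypothetical subsystems standing in for xt_proto_init(),
 * xt_register_target(), xt_register_match(), etc. */
static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; }   /* simulate a failure */
static void unregister_a(void) { }
static void unregister_b(void) { }

static int example_init(void)
{
	int ret;

	ret = register_a();
	if (ret < 0)
		goto err1;
	ret = register_b();
	if (ret < 0)
		goto err2;
	ret = register_c();
	if (ret < 0)
		goto err3;
	return 0;

err3:
	unregister_b();		/* tear down in reverse order of registration */
err2:
	unregister_a();
err1:
	return ret;
}

int main(void)
{
	/* With register_c() failing, this prints the propagated error
	 * after register_a() and register_b() have been undone. */
	printf("example_init() = %d\n", example_init());
	return 0;
}

Unwinding in strict reverse order guarantees that only the steps that actually succeeded are cleaned up, which is why each new failure label in the patched init functions sits immediately above the cleanup call for the previous step.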
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5c08ea20a18d..b4f3ffe1b3b4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -201,6 +201,7 @@ void tcp_select_initial_window(int __space, __u32 mss, * See RFC1323 for an explanation of the limit to 14 */ space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); + space = min_t(u32, space, *window_clamp); while (space > 65535 && (*rcv_wscale) < 14) { space >>= 1; (*rcv_wscale)++; @@ -466,7 +467,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, if (skb->len != tcp_header_size) tcp_event_data_sent(tp, skb, sk); - TCP_INC_STATS(TCP_MIB_OUTSEGS); + if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) + TCP_INC_STATS(TCP_MIB_OUTSEGS); err = icsk->icsk_af_ops->queue_xmit(skb, 0); if (likely(err <= 0)) @@ -2157,10 +2159,9 @@ int tcp_connect(struct sock *sk) skb_shinfo(buff)->gso_size = 0; skb_shinfo(buff)->gso_type = 0; buff->csum = 0; + tp->snd_nxt = tp->write_seq; TCP_SKB_CB(buff)->seq = tp->write_seq++; TCP_SKB_CB(buff)->end_seq = tp->write_seq; - tp->snd_nxt = tp->write_seq; - tp->pushed_seq = tp->write_seq; /* Send it off. */ TCP_SKB_CB(buff)->when = tcp_time_stamp; @@ -2170,6 +2171,12 @@ int tcp_connect(struct sock *sk) sk_charge_skb(sk, buff); tp->packets_out += tcp_skb_pcount(buff); tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); + + /* We change tp->snd_nxt after the tcp_transmit_skb() call + * in order to make this packet get counted in tcpOutSegs. + */ + tp->snd_nxt = tp->write_seq; + tp->pushed_seq = tp->write_seq; TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); /* Timer for repeating the SYN until an answer. */ diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index d7d517a3a238..dab37d2f65fc 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -114,7 +114,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file) static ssize_t tcpprobe_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { - int error = 0, cnt; + int error = 0, cnt = 0; unsigned char *tbuf; if (!buf || len < 0) @@ -130,11 +130,12 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, error = wait_event_interruptible(tcpw.wait, __kfifo_len(tcpw.fifo) != 0); if (error) - return error; + goto out_free; cnt = kfifo_get(tcpw.fifo, tbuf, len); error = copy_to_user(buf, tbuf, cnt); +out_free: vfree(tbuf); return error ? 
error : cnt; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2316a4315a18..0c5042e7380d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1869,15 +1869,21 @@ err_exit: /* * Manual configuration of address on an interface */ -static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen) +static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, + __u32 prefered_lft, __u32 valid_lft) { struct inet6_ifaddr *ifp; struct inet6_dev *idev; struct net_device *dev; + __u8 ifa_flags = 0; int scope; ASSERT_RTNL(); + /* check the lifetime */ + if (!valid_lft || prefered_lft > valid_lft) + return -EINVAL; + if ((dev = __dev_get_by_index(ifindex)) == NULL) return -ENODEV; @@ -1889,10 +1895,29 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen) scope = ipv6_addr_scope(pfx); - ifp = ipv6_add_addr(idev, pfx, plen, scope, IFA_F_PERMANENT); + if (valid_lft == INFINITY_LIFE_TIME) + ifa_flags |= IFA_F_PERMANENT; + else if (valid_lft >= 0x7FFFFFFF/HZ) + valid_lft = 0x7FFFFFFF/HZ; + + if (prefered_lft == 0) + ifa_flags |= IFA_F_DEPRECATED; + else if ((prefered_lft >= 0x7FFFFFFF/HZ) && + (prefered_lft != INFINITY_LIFE_TIME)) + prefered_lft = 0x7FFFFFFF/HZ; + + ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags); + if (!IS_ERR(ifp)) { + spin_lock_bh(&ifp->lock); + ifp->valid_lft = valid_lft; + ifp->prefered_lft = prefered_lft; + ifp->tstamp = jiffies; + spin_unlock_bh(&ifp->lock); + addrconf_dad_start(ifp, 0); in6_ifa_put(ifp); + addrconf_verify(0); return 0; } @@ -1945,7 +1970,8 @@ int addrconf_add_ifaddr(void __user *arg) return -EFAULT; rtnl_lock(); - err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen); + err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen, + INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); rtnl_unlock(); return err; } @@ -2771,12 +2797,16 @@ restart: ifp->idev->nd_parms->retrans_time / HZ; #endif - if (age >= ifp->valid_lft) { + if (ifp->valid_lft != INFINITY_LIFE_TIME && + age >= ifp->valid_lft) { spin_unlock(&ifp->lock); in6_ifa_hold(ifp); read_unlock(&addrconf_hash_lock); ipv6_del_addr(ifp); goto restart; + } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { + spin_unlock(&ifp->lock); + continue; } else if (age >= ifp->prefered_lft) { /* jiffies - ifp->tsamp > age >= ifp->prefered_lft */ int deprecate = 0; @@ -2853,7 +2883,8 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) pfx = RTA_DATA(rta[IFA_ADDRESS-1]); } if (rta[IFA_LOCAL-1]) { - if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) || + (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))) return -EINVAL; pfx = RTA_DATA(rta[IFA_LOCAL-1]); } @@ -2863,12 +2894,62 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) return inet6_addr_del(ifm->ifa_index, pfx, ifm->ifa_prefixlen); } +static int +inet6_addr_modify(int ifindex, struct in6_addr *pfx, + __u32 prefered_lft, __u32 valid_lft) +{ + struct inet6_ifaddr *ifp = NULL; + struct net_device *dev; + int ifa_flags = 0; + + if ((dev = __dev_get_by_index(ifindex)) == NULL) + return -ENODEV; + + if (!(dev->flags&IFF_UP)) + return -ENETDOWN; + + if (!valid_lft || (prefered_lft > valid_lft)) + return -EINVAL; + + ifp = ipv6_get_ifaddr(pfx, dev, 1); + if (ifp == NULL) + return -ENOENT; + + if (valid_lft == INFINITY_LIFE_TIME) + ifa_flags = IFA_F_PERMANENT; + else if (valid_lft >= 0x7FFFFFFF/HZ) + valid_lft = 0x7FFFFFFF/HZ; + + if (prefered_lft == 0) + 
ifa_flags = IFA_F_DEPRECATED; + else if ((prefered_lft >= 0x7FFFFFFF/HZ) && + (prefered_lft != INFINITY_LIFE_TIME)) + prefered_lft = 0x7FFFFFFF/HZ; + + spin_lock_bh(&ifp->lock); + ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED|IFA_F_PERMANENT)) | ifa_flags; + + ifp->tstamp = jiffies; + ifp->valid_lft = valid_lft; + ifp->prefered_lft = prefered_lft; + + spin_unlock_bh(&ifp->lock); + if (!(ifp->flags&IFA_F_TENTATIVE)) + ipv6_ifa_notify(0, ifp); + in6_ifa_put(ifp); + + addrconf_verify(0); + + return 0; +} + static int inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct rtattr **rta = arg; struct ifaddrmsg *ifm = NLMSG_DATA(nlh); struct in6_addr *pfx; + __u32 valid_lft = INFINITY_LIFE_TIME, prefered_lft = INFINITY_LIFE_TIME; pfx = NULL; if (rta[IFA_ADDRESS-1]) { @@ -2877,14 +2958,34 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) pfx = RTA_DATA(rta[IFA_ADDRESS-1]); } if (rta[IFA_LOCAL-1]) { - if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) || + (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))) return -EINVAL; pfx = RTA_DATA(rta[IFA_LOCAL-1]); } if (pfx == NULL) return -EINVAL; - return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen); + if (rta[IFA_CACHEINFO-1]) { + struct ifa_cacheinfo *ci; + if (RTA_PAYLOAD(rta[IFA_CACHEINFO-1]) < sizeof(*ci)) + return -EINVAL; + ci = RTA_DATA(rta[IFA_CACHEINFO-1]); + valid_lft = ci->ifa_valid; + prefered_lft = ci->ifa_prefered; + } + + if (nlh->nlmsg_flags & NLM_F_REPLACE) { + int ret; + ret = inet6_addr_modify(ifm->ifa_index, pfx, + prefered_lft, valid_lft); + if (ret == 0 || !(nlh->nlmsg_flags & NLM_F_CREATE)) + return ret; + } + + return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen, + prefered_lft, valid_lft); + } /* Maximum length of ifa_cacheinfo attributes */ @@ -3121,6 +3222,62 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) return inet6_dump_addr(skb, cb, type); } +static int inet6_rtm_getaddr(struct sk_buff *in_skb, + struct nlmsghdr* nlh, void *arg) +{ + struct rtattr **rta = arg; + struct ifaddrmsg *ifm = NLMSG_DATA(nlh); + struct in6_addr *addr = NULL; + struct net_device *dev = NULL; + struct inet6_ifaddr *ifa; + struct sk_buff *skb; + int size = NLMSG_SPACE(sizeof(struct ifaddrmsg) + INET6_IFADDR_RTA_SPACE); + int err; + + if (rta[IFA_ADDRESS-1]) { + if (RTA_PAYLOAD(rta[IFA_ADDRESS-1]) < sizeof(*addr)) + return -EINVAL; + addr = RTA_DATA(rta[IFA_ADDRESS-1]); + } + if (rta[IFA_LOCAL-1]) { + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*addr) || + (addr && memcmp(addr, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*addr)))) + return -EINVAL; + addr = RTA_DATA(rta[IFA_LOCAL-1]); + } + if (addr == NULL) + return -EINVAL; + + if (ifm->ifa_index) + dev = __dev_get_by_index(ifm->ifa_index); + + if ((ifa = ipv6_get_ifaddr(addr, dev, 1)) == NULL) + return -EADDRNOTAVAIL; + + if ((skb = alloc_skb(size, GFP_KERNEL)) == NULL) { + err = -ENOBUFS; + goto out; + } + + NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid; + err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid, + nlh->nlmsg_seq, RTM_NEWADDR, 0); + if (err < 0) { + err = -EMSGSIZE; + goto out_free; + } + + err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); + if (err > 0) + err = 0; +out: + in6_ifa_put(ifa); + return err; +out_free: + kfree_skb(skb); + goto out; +} + static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) { struct sk_buff *skb; @@ -3363,7 +3520,8 @@ static struct 
rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = { [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, }, [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, }, [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, }, - [RTM_GETADDR - RTM_BASE] = { .dumpit = inet6_dump_ifaddr, }, + [RTM_GETADDR - RTM_BASE] = { .doit = inet6_rtm_getaddr, + .dumpit = inet6_dump_ifaddr, }, [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, }, [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, }, [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, }, diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 5a0ba58b86cc..ac85e9c532c2 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -658,7 +658,7 @@ int inet6_sk_rebuild_header(struct sock *sk) return err; } - ip6_dst_store(sk, dst, NULL); + __ip6_dst_store(sk, dst, NULL); } return 0; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 1044b6fce0d5..3d6e9a351150 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -712,6 +712,11 @@ discard_it: return 0; } +/* + * Special lock-class for __icmpv6_socket: + */ +static struct lock_class_key icmpv6_socket_sk_dst_lock_key; + int __init icmpv6_init(struct net_proto_family *ops) { struct sock *sk; @@ -730,6 +735,14 @@ int __init icmpv6_init(struct net_proto_family *ops) sk = per_cpu(__icmpv6_socket, i)->sk; sk->sk_allocation = GFP_ATOMIC; + /* + * Split off their lock-class, because sk->sk_dst_lock + * gets used from softirqs, which is safe for + * __icmpv6_socket (because those never get directly used + * via userspace syscalls), but unsafe for normal sockets. + */ + lockdep_set_class(&sk->sk_dst_lock, + &icmpv6_socket_sk_dst_lock_key); /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 5c950cc79d80..bf491077b822 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -185,7 +185,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) return err; } - ip6_dst_store(sk, dst, NULL); + __ip6_dst_store(sk, dst, NULL); } skb->dst = dst_clone(dst); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 3bc74ce78800..4fb47a252913 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -356,6 +356,7 @@ int ip6_forward(struct sk_buff *skb) skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0, skb->dev); + IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; @@ -595,6 +596,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) } err = output(skb); + if(!err) + IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); + if (err || !frag) break; @@ -706,12 +710,11 @@ slow_path: /* * Put this fragment into the sending queue. 
*/ - - IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); - err = output(frag); if (err) goto fail; + + IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); } kfree_skb(skb); IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); @@ -723,48 +726,51 @@ fail: return err; } -int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) +static struct dst_entry *ip6_sk_dst_check(struct sock *sk, + struct dst_entry *dst, + struct flowi *fl) { - int err = 0; + struct ipv6_pinfo *np = inet6_sk(sk); + struct rt6_info *rt = (struct rt6_info *)dst; - *dst = NULL; - if (sk) { - struct ipv6_pinfo *np = inet6_sk(sk); - - *dst = sk_dst_check(sk, np->dst_cookie); - if (*dst) { - struct rt6_info *rt = (struct rt6_info*)*dst; - - /* Yes, checking route validity in not connected - * case is not very simple. Take into account, - * that we do not support routing by source, TOS, - * and MSG_DONTROUTE --ANK (980726) - * - * 1. If route was host route, check that - * cached destination is current. - * If it is network route, we still may - * check its validity using saved pointer - * to the last used address: daddr_cache. - * We do not want to save whole address now, - * (because main consumer of this service - * is tcp, which has not this problem), - * so that the last trick works only on connected - * sockets. - * 2. oif also should be the same. - */ - if (((rt->rt6i_dst.plen != 128 || - !ipv6_addr_equal(&fl->fl6_dst, - &rt->rt6i_dst.addr)) - && (np->daddr_cache == NULL || - !ipv6_addr_equal(&fl->fl6_dst, - np->daddr_cache))) - || (fl->oif && fl->oif != (*dst)->dev->ifindex)) { - dst_release(*dst); - *dst = NULL; - } - } + if (!dst) + goto out; + + /* Yes, checking route validity in not connected + * case is not very simple. Take into account, + * that we do not support routing by source, TOS, + * and MSG_DONTROUTE --ANK (980726) + * + * 1. If route was host route, check that + * cached destination is current. + * If it is network route, we still may + * check its validity using saved pointer + * to the last used address: daddr_cache. + * We do not want to save whole address now, + * (because main consumer of this service + * is tcp, which has not this problem), + * so that the last trick works only on connected + * sockets. + * 2. oif also should be the same. + */ + if (((rt->rt6i_dst.plen != 128 || + !ipv6_addr_equal(&fl->fl6_dst, &rt->rt6i_dst.addr)) + && (np->daddr_cache == NULL || + !ipv6_addr_equal(&fl->fl6_dst, np->daddr_cache))) + || (fl->oif && fl->oif != dst->dev->ifindex)) { + dst_release(dst); + dst = NULL; } +out: + return dst; +} + +static int ip6_dst_lookup_tail(struct sock *sk, + struct dst_entry **dst, struct flowi *fl) +{ + int err; + if (*dst == NULL) *dst = ip6_route_output(sk, fl); @@ -773,7 +779,6 @@ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) if (ipv6_addr_any(&fl->fl6_src)) { err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); - if (err) goto out_err_release; } @@ -786,8 +791,48 @@ out_err_release: return err; } +/** + * ip6_dst_lookup - perform route lookup on flow + * @sk: socket which provides route info + * @dst: pointer to dst_entry * for result + * @fl: flow to lookup + * + * This function performs a route lookup on the given flow. + * + * It returns zero on success, or a standard errno code on error. 
+ */ +int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) +{ + *dst = NULL; + return ip6_dst_lookup_tail(sk, dst, fl); +} EXPORT_SYMBOL_GPL(ip6_dst_lookup); +/** + * ip6_sk_dst_lookup - perform socket cached route lookup on flow + * @sk: socket which provides the dst cache and route info + * @dst: pointer to dst_entry * for result + * @fl: flow to lookup + * + * This function performs a route lookup on the given flow with the + * possibility of using the cached route in the socket if it is valid. + * It will take the socket dst lock when operating on the dst cache. + * As a result, this function can only be used in process context. + * + * It returns zero on success, or a standard errno code on error. + */ +int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) +{ + *dst = NULL; + if (sk) { + *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); + *dst = ip6_sk_dst_check(sk, *dst, fl); + } + + return ip6_dst_lookup_tail(sk, dst, fl); +} +EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup); + static inline int ip6_ufo_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), @@ -1050,7 +1095,7 @@ alloc_new_skb: skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); data += fraggap; - skb_trim(skb_prev, maxfraglen); + pskb_trim_unique(skb_prev, maxfraglen); } copy = datalen - transhdrlen - fraggap; if (copy < 0) { diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 9d697d4dcffc..639eb20c9f1f 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -268,13 +268,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr) if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) { struct inet6_dev *idev = in6_dev_get(dev); + (void) ip6_mc_leave_src(sk, mc_lst, idev); if (idev) { - (void) ip6_mc_leave_src(sk,mc_lst,idev); __ipv6_dev_mc_dec(idev, &mc_lst->addr); in6_dev_put(idev); } dev_put(dev); - } + } else + (void) ip6_mc_leave_src(sk, mc_lst, NULL); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return 0; } @@ -334,13 +335,14 @@ void ipv6_sock_mc_close(struct sock *sk) if (dev) { struct inet6_dev *idev = in6_dev_get(dev); + (void) ip6_mc_leave_src(sk, mc_lst, idev); if (idev) { - (void) ip6_mc_leave_src(sk, mc_lst, idev); __ipv6_dev_mc_dec(idev, &mc_lst->addr); in6_dev_put(idev); } dev_put(dev); - } + } else + (void) ip6_mc_leave_src(sk, mc_lst, NULL); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f26898b00347..c9d6b23cd3f7 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1398,23 +1398,39 @@ static int __init ip6_tables_init(void) { int ret; - xt_proto_init(AF_INET6); + ret = xt_proto_init(AF_INET6); + if (ret < 0) + goto err1; /* Noone else will be downing sem now, so we won't sleep */ - xt_register_target(&ip6t_standard_target); - xt_register_target(&ip6t_error_target); - xt_register_match(&icmp6_matchstruct); + ret = xt_register_target(&ip6t_standard_target); + if (ret < 0) + goto err2; + ret = xt_register_target(&ip6t_error_target); + if (ret < 0) + goto err3; + ret = xt_register_match(&icmp6_matchstruct); + if (ret < 0) + goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ip6t_sockopts); - if (ret < 0) { - duprintf("Unable to register sockopts.\n"); - xt_proto_fini(AF_INET6); - return ret; - } + if (ret < 0) + goto err5; printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n"); return 0; + +err5: + xt_unregister_match(&icmp6_matchstruct); +err4: + 
xt_unregister_target(&ip6t_error_target); +err3: + xt_unregister_target(&ip6t_standard_target); +err2: + xt_proto_fini(AF_INET6); +err1: + return ret; } static void __exit ip6_tables_fini(void) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 87c39c978cd0..4b163711f3a8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -53,6 +53,7 @@ #include #include #include +#include #include @@ -742,6 +743,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu) dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; } dst->metrics[RTAX_MTU-1] = mtu; + call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst); } } @@ -1155,6 +1157,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr, struct rt6_info *rt, *nrt = NULL; int strict; struct fib6_node *fn; + struct netevent_redirect netevent; /* * Get the "current" route for this destination and @@ -1252,6 +1255,10 @@ restart: if (ip6_ins_rt(nrt, NULL, NULL, NULL)) goto out; + netevent.old = &rt->u.dst; + netevent.new = &nrt->u.dst; + call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); + if (rt->rt6i_flags&RTF_CACHE) { ip6_del_rt(rt, NULL, NULL, NULL); return; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 923989d0520d..802a1a6b1037 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -270,7 +270,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, inet->rcv_saddr = LOOPBACK4_IPV6; sk->sk_gso_type = SKB_GSO_TCPV6; - ip6_dst_store(sk, dst, NULL); + __ip6_dst_store(sk, dst, NULL); icsk->icsk_ext_hdr_len = 0; if (np->opt) @@ -427,7 +427,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, case TCP_SYN_RECV: /* Cannot happen. It can, it SYNs are crossed. --ANK */ if (!sock_owned_by_user(sk)) { - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); sk->sk_err = err; sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ @@ -831,7 +830,6 @@ drop: if (req) reqsk_free(req); - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); return 0; /* don't send reset */ } @@ -946,8 +944,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, * comment in that function for the gory details. 
-acme */ - sk->sk_gso_type = SKB_GSO_TCPV6; - ip6_dst_store(newsk, dst, NULL); + newsk->sk_gso_type = SKB_GSO_TCPV6; + __ip6_dst_store(newsk, dst, NULL); newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index ccc57f434cd3..3d54f246411e 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -782,7 +782,7 @@ do_udp_sendmsg: connected = 0; } - err = ip6_dst_lookup(sk, &dst, fl); + err = ip6_sk_dst_lookup(sk, &dst, fl); if (err) goto out; if (final_p) diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 0eea60ea9ebc..c8c8b44a0f58 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -125,7 +125,7 @@ static int xfrm6_output_finish(struct sk_buff *skb) if (!skb_is_gso(skb)) return xfrm6_output_finish2(skb); - skb->protocol = htons(ETH_P_IP); + skb->protocol = htons(ETH_P_IPV6); segs = skb_gso_segment(skb, 0); kfree_skb(skb); if (unlikely(IS_ERR(segs))) diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index aa34ff4b707c..bef3f61569f7 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) goto out; - ipx = ipx_hdr(skb); - ipx_pktsize = ntohs(ipx->ipx_pktsize); + if (!pskb_may_pull(skb, sizeof(struct ipxhdr))) + goto drop; + + ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize); /* Too small or invalid header? */ - if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len) + if (ipx_pktsize < sizeof(struct ipxhdr) || + !pskb_may_pull(skb, ipx_pktsize)) goto drop; + ipx = ipx_hdr(skb); if (ipx->ipx_checksum != IPX_NO_CHECKSUM && ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize)) goto drop; diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index d504eed416f6..7e6bc41eeb21 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -238,11 +238,13 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) goto out_put; if (lapb->state == LAPB_STATE_0) { - if (((parms->mode & LAPB_EXTENDED) && - (parms->window < 1 || parms->window > 127)) || - (parms->window < 1 || parms->window > 7)) - goto out_put; - + if (parms->mode & LAPB_EXTENDED) { + if (parms->window < 1 || parms->window > 127) + goto out_put; + } else { + if (parms->window < 1 || parms->window > 7) + goto out_put; + } lapb->mode = parms->mode; lapb->window = parms->window; } diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index d6cfe84d521b..2652ead96c64 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -784,24 +784,20 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, copied += used; len -= used; - if (used + offset < skb->len) - continue; - if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, 0); *seq = 0; } + + /* For non stream protcols we get one packet per recvmsg call */ + if (sk->sk_type != SOCK_STREAM) + goto copy_uaddr; + + /* Partial read */ + if (used + offset < skb->len) + continue; } while (len > 0); - /* - * According to UNIX98, msg_name/msg_namelen are ignored - * on connected socket. -ANK - * But... 
af_llc still doesn't have separate sets of methods for - * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will - * eventually fix this tho :-) -acme - */ - if (sk->sk_type == SOCK_DGRAM) - goto copy_uaddr; out: release_sock(sk); return copied; diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 20c4eb5c1ac6..61cb8cf7d153 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -51,10 +51,10 @@ void llc_save_primitive(struct sock *sk, struct sk_buff* skb, u8 prim) { struct sockaddr_llc *addr; - if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */ - return; /* save primitive for use by the user. */ addr = llc_ui_skb_cb(skb); + + memset(addr, 0, sizeof(*addr)); addr->sllc_family = sk->sk_family; addr->sllc_arphrd = skb->dev->type; addr->sllc_test = prim == LLC_TEST_PRIM; @@ -330,6 +330,9 @@ static void llc_sap_mcast(struct llc_sap *sap, if (llc->laddr.lsap != laddr->lsap) continue; + if (llc->dev != skb->dev) + continue; + skb1 = skb_clone(skb, GFP_ATOMIC); if (!skb1) break; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index af4845971f70..6527d4e048d8 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -429,9 +429,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) cb->args[0], *id); read_lock_bh(&nf_conntrack_lock); + last = (struct nf_conn *)cb->args[1]; for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { restart: - last = (struct nf_conn *)cb->args[1]; list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) { h = (struct nf_conntrack_tuple_hash *) i; if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) @@ -442,13 +442,10 @@ restart: * then dump everything. */ if (l3proto && L3PROTO(ct) != l3proto) continue; - if (last != NULL) { - if (ct == last) { - nf_ct_put(last); - cb->args[1] = 0; - last = NULL; - } else + if (cb->args[1]) { + if (ct != last) continue; + cb->args[1] = 0; } if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, @@ -459,17 +456,17 @@ restart: goto out; } } - if (last != NULL) { - nf_ct_put(last); + if (cb->args[1]) { cb->args[1] = 0; goto restart; } } out: read_unlock_bh(&nf_conntrack_lock); + if (last) + nf_ct_put(last); DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); - return skb->len; } diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 61cdda4e5d3b..b59d3b2bde21 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -366,6 +366,9 @@ __nfulnl_send(struct nfulnl_instance *inst) if (timer_pending(&inst->timer)) del_timer(&inst->timer); + if (!inst->skb) + return 0; + if (inst->qlen > 1) inst->lastnlh->nlmsg_type = NLMSG_DONE; diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index c2ce9c4011cc..de9537ad9a7c 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c @@ -57,6 +57,8 @@ static int checkentry_selinux(struct xt_secmark_target_info *info) { int err; struct xt_secmark_target_selinux_info *sel = &info->u.sel; + + sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0'; err = selinux_string_to_sid(sel->selctx, &sel->selsid); if (err) { diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index a9f4f6f3c628..63a965467465 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index 0ebb6ac2c8c7..275330fcdaaa 100644 --- a/net/netfilter/xt_string.c +++ 
b/net/netfilter/xt_string.c @@ -37,7 +37,7 @@ static int match(const struct sk_buff *skb, return (skb_find_text((struct sk_buff *)skb, conf->from_offset, conf->to_offset, conf->config, &state) - != UINT_MAX) && !conf->invert; + != UINT_MAX) ^ conf->invert; } #define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m) @@ -55,7 +55,10 @@ static int checkentry(const char *tablename, /* Damn, can't handle this case properly with iptables... */ if (conf->from_offset > conf->to_offset) return 0; - + if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') + return 0; + if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) + return 0; ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, GFP_KERNEL, TS_AUTOLOAD); if (IS_ERR(ts_conf)) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index eea366966740..0a6cfa0005be 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -796,7 +796,7 @@ static int __init init_u32(void) { printk("u32 classifier\n"); #ifdef CONFIG_CLS_U32_PERF - printk(" Perfomance counters on\n"); + printk(" Performance counters on\n"); #endif #ifdef CONFIG_NET_CLS_POLICE printk(" OLD policer on \n"); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index c7844bacbbcb..a19eff12cf78 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -430,7 +430,7 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp) } #endif - err = -EINVAL; + err = -ENOENT; if (ops == NULL) goto err_out; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 4f11f5858209..17b509282cf2 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -806,38 +806,26 @@ no_mem: /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - const struct msghdr *msg) + const struct msghdr *msg, + size_t paylen) { struct sctp_chunk *retval; - void *payload = NULL, *payoff; - size_t paylen = 0; - struct iovec *iov = NULL; - int iovlen = 0; + void *payload = NULL; + int err; - if (msg) { - iov = msg->msg_iov; - iovlen = msg->msg_iovlen; - paylen = get_user_iov_size(iov, iovlen); - } - - retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen); + retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen); if (!retval) goto err_chunk; if (paylen) { /* Put the msg_iov together into payload. */ - payload = kmalloc(paylen, GFP_ATOMIC); + payload = kmalloc(paylen, GFP_KERNEL); if (!payload) goto err_payload; - payoff = payload; - for (; iovlen > 0; --iovlen) { - if (copy_from_user(payoff, iov->iov_base,iov->iov_len)) - goto err_copy; - payoff += iov->iov_len; - iov++; - } + err = memcpy_fromiovec(payload, msg->msg_iov, paylen); + if (err < 0) + goto err_copy; } sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index ead3f1b0ea3d..5b5ae7958322 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -4031,18 +4031,12 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( * from its upper layer, but retransmits data to the far end * if necessary to fill gaps. */ - struct msghdr *msg = arg; - struct sctp_chunk *abort; + struct sctp_chunk *abort = arg; sctp_disposition_t retval; retval = SCTP_DISPOSITION_CONSUME; - /* Generate ABORT chunk to send the peer. 
*/ - abort = sctp_make_abort_user(asoc, NULL, msg); - if (!abort) - retval = SCTP_DISPOSITION_NOMEM; - else - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); /* Even if we can't send the ABORT due to low memory delete the * TCB. This is a departure from our typical NOMEM handling. @@ -4166,8 +4160,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( void *arg, sctp_cmd_seq_t *commands) { - struct msghdr *msg = arg; - struct sctp_chunk *abort; + struct sctp_chunk *abort = arg; sctp_disposition_t retval; /* Stop T1-init timer */ @@ -4175,12 +4168,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); retval = SCTP_DISPOSITION_CONSUME; - /* Generate ABORT chunk to send the peer */ - abort = sctp_make_abort_user(asoc, NULL, msg); - if (!abort) - retval = SCTP_DISPOSITION_NOMEM; - else - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 54722e622e6d..fde3f55bfd4b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1520,8 +1520,16 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_unlock; } if (sinfo_flags & SCTP_ABORT) { + struct sctp_chunk *chunk; + + chunk = sctp_make_abort_user(asoc, msg, msg_len); + if (!chunk) { + err = -ENOMEM; + goto out_unlock; + } + SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); - sctp_primitive_ABORT(asoc, msg); + sctp_primitive_ABORT(asoc, chunk); err = 0; goto out_unlock; } diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 4a9aa9393b97..ef1cf5b476c8 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -718,8 +718,7 @@ gss_destroy(struct rpc_auth *auth) auth, auth->au_flavor); gss_auth = container_of(auth, struct gss_auth, rpc_auth); - rpc_unlink(gss_auth->path); - dput(gss_auth->dentry); + rpc_unlink(gss_auth->dentry); gss_auth->dentry = NULL; gss_mech_put(gss_auth->mech); diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 7026b0866b7b..00cb388ece03 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -71,7 +71,12 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, new = detail->alloc(); if (!new) return NULL; + /* must fully initialise 'new', else + * we might get lose if we need to + * cache_put it soon. 
+ */ cache_init(new); + detail->init(new, key); write_lock(&detail->hash_lock); @@ -85,7 +90,6 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, return tmp; } } - detail->init(new, key); new->next = *head; *head = new; detail->entries++; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 4ba271f892c8..3e19d321067a 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -183,8 +183,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, out_no_auth: if (!IS_ERR(clnt->cl_dentry)) { - rpc_rmdir(clnt->cl_pathname); - dput(clnt->cl_dentry); + rpc_rmdir(clnt->cl_dentry); rpc_put_mount(); } out_no_path: @@ -251,10 +250,8 @@ rpc_clone_client(struct rpc_clnt *clnt) new->cl_autobind = 0; new->cl_oneshot = 0; new->cl_dead = 0; - if (!IS_ERR(new->cl_dentry)) { + if (!IS_ERR(new->cl_dentry)) dget(new->cl_dentry); - rpc_get_mount(); - } rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); @@ -317,11 +314,15 @@ rpc_destroy_client(struct rpc_clnt *clnt) clnt->cl_auth = NULL; } if (clnt->cl_parent != clnt) { + if (!IS_ERR(clnt->cl_dentry)) + dput(clnt->cl_dentry); rpc_destroy_client(clnt->cl_parent); goto out_free; } - if (clnt->cl_pathname[0]) - rpc_rmdir(clnt->cl_pathname); + if (!IS_ERR(clnt->cl_dentry)) { + rpc_rmdir(clnt->cl_dentry); + rpc_put_mount(); + } if (clnt->cl_xprt) { xprt_destroy(clnt->cl_xprt); clnt->cl_xprt = NULL; @@ -331,10 +332,6 @@ rpc_destroy_client(struct rpc_clnt *clnt) out_free: rpc_free_iostats(clnt->cl_metrics); clnt->cl_metrics = NULL; - if (!IS_ERR(clnt->cl_dentry)) { - dput(clnt->cl_dentry); - rpc_put_mount(); - } kfree(clnt); return 0; } @@ -921,26 +918,43 @@ call_transmit(struct rpc_task *task) task->tk_status = xprt_prepare_transmit(task); if (task->tk_status != 0) return; + task->tk_action = call_transmit_status; /* Encode here so that rpcsec_gss can use correct sequence number. */ if (rpc_task_need_encode(task)) { - task->tk_rqstp->rq_bytes_sent = 0; + BUG_ON(task->tk_rqstp->rq_bytes_sent != 0); call_encode(task); /* Did the encode result in an error condition? */ if (task->tk_status != 0) - goto out_nosend; + return; } - task->tk_action = call_transmit_status; xprt_transmit(task); if (task->tk_status < 0) return; - if (!task->tk_msg.rpc_proc->p_decode) { - task->tk_action = rpc_exit_task; - rpc_wake_up_task(task); - } - return; -out_nosend: - /* release socket write lock before attempting to handle error */ - xprt_abort_transmit(task); + /* + * On success, ensure that we call xprt_end_transmit() before sleeping + * in order to allow access to the socket to other RPC requests. + */ + call_transmit_status(task); + if (task->tk_msg.rpc_proc->p_decode != NULL) + return; + task->tk_action = rpc_exit_task; + rpc_wake_up_task(task); +} + +/* + * 5a. Handle cleanup after a transmission + */ +static void +call_transmit_status(struct rpc_task *task) +{ + task->tk_action = call_status; + /* + * Special case: if we've been waiting on the socket's write_space() + * callback, then don't call xprt_end_transmit(). + */ + if (task->tk_status == -EAGAIN) + return; + xprt_end_transmit(task); rpc_task_force_reencode(task); } @@ -992,18 +1006,7 @@ call_status(struct rpc_task *task) } /* - * 6a. Handle transmission errors. - */ -static void -call_transmit_status(struct rpc_task *task) -{ - if (task->tk_status != -EAGAIN) - rpc_task_force_reencode(task); - call_status(task); -} - -/* - * 6b. Handle RPC timeout + * 6a. 
Handle RPC timeout * We do not release the request slot, so we keep using the * same XID for all retransmits. */ @@ -1178,6 +1181,17 @@ call_verify(struct rpc_task *task) u32 *p = iov->iov_base, n; int error = -EACCES; + if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) { + /* RFC-1014 says that the representation of XDR data must be a + * multiple of four bytes + * - if it isn't pointer subtraction in the NFS client may give + * undefined results + */ + printk(KERN_WARNING + "call_verify: XDR representation not a multiple of" + " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len); + goto out_eio; + } if ((len -= 3) < 0) goto out_overflow; p += 1; /* skip XID */ diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index dc6cb93c8830..0b1a1ac8a4bc 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -539,6 +539,7 @@ repeat: rpc_close_pipes(dentry->d_inode); simple_unlink(dir, dentry); } + inode_dir_notify(dir, DN_DELETE); dput(dentry); } while (n); goto repeat; @@ -610,8 +611,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry) int error; shrink_dcache_parent(dentry); - if (dentry->d_inode) - rpc_close_pipes(dentry->d_inode); + if (d_unhashed(dentry)) + return 0; if ((error = simple_rmdir(dir, dentry)) != 0) return error; if (!error) { @@ -667,10 +668,11 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client) RPCAUTH_info, RPCAUTH_EOF); if (error) goto err_depopulate; + dget(dentry); out: mutex_unlock(&dir->i_mutex); rpc_release_path(&nd); - return dget(dentry); + return dentry; err_depopulate: rpc_depopulate(dentry); __rpc_rmdir(dir, dentry); @@ -683,28 +685,20 @@ err_dput: } int -rpc_rmdir(char *path) +rpc_rmdir(struct dentry *dentry) { - struct nameidata nd; - struct dentry *dentry; + struct dentry *parent; struct inode *dir; int error; - if ((error = rpc_lookup_parent(path, &nd)) != 0) - return error; - dir = nd.dentry->d_inode; + parent = dget_parent(dentry); + dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_release; - } rpc_depopulate(dentry); error = __rpc_rmdir(dir, dentry); dput(dentry); -out_release: mutex_unlock(&dir->i_mutex); - rpc_release_path(&nd); + dput(parent); return error; } @@ -731,10 +725,11 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags) rpci->flags = flags; rpci->ops = ops; inode_dir_notify(dir, DN_CREATE); + dget(dentry); out: mutex_unlock(&dir->i_mutex); rpc_release_path(&nd); - return dget(dentry); + return dentry; err_dput: dput(dentry); dentry = ERR_PTR(-ENOMEM); @@ -744,32 +739,26 @@ err_dput: } int -rpc_unlink(char *path) +rpc_unlink(struct dentry *dentry) { - struct nameidata nd; - struct dentry *dentry; + struct dentry *parent; struct inode *dir; - int error; + int error = 0; - if ((error = rpc_lookup_parent(path, &nd)) != 0) - return error; - dir = nd.dentry->d_inode; + parent = dget_parent(dentry); + dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_release; - } - d_drop(dentry); - if (dentry->d_inode) { - rpc_close_pipes(dentry->d_inode); - error = simple_unlink(dir, dentry); + if (!d_unhashed(dentry)) { + d_drop(dentry); + if (dentry->d_inode) { + rpc_close_pipes(dentry->d_inode); + error = simple_unlink(dir, dentry); + } + inode_dir_notify(dir, DN_DELETE); } dput(dentry); - inode_dir_notify(dir, DN_DELETE); 
-out_release: mutex_unlock(&dir->i_mutex); - rpc_release_path(&nd); + dput(parent); return error; } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 313b68d892c6..e8c2bc4977f3 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -707,12 +707,9 @@ out_unlock: return err; } -void -xprt_abort_transmit(struct rpc_task *task) +void xprt_end_transmit(struct rpc_task *task) { - struct rpc_xprt *xprt = task->tk_xprt; - - xprt_release_write(xprt, task); + xprt_release_write(task->tk_xprt, task); } /** @@ -761,8 +758,6 @@ void xprt_transmit(struct rpc_task *task) task->tk_status = -ENOTCONN; else if (!req->rq_received) rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); - - xprt->ops->release_xprt(xprt, task); spin_unlock_bh(&xprt->transport_lock); return; } @@ -772,18 +767,8 @@ void xprt_transmit(struct rpc_task *task) * schedq, and being picked up by a parallel run of rpciod(). */ task->tk_status = status; - - switch (status) { - case -ECONNREFUSED: + if (status == -ECONNREFUSED) rpc_sleep_on(&xprt->sending, task, NULL, NULL); - case -EAGAIN: - case -ENOTCONN: - return; - default: - break; - } - xprt_release_write(xprt, task); - return; } static inline void do_xprt_reserve(struct rpc_task *task) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index ee678ed13b6f..441bd53f5eca 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -413,6 +413,33 @@ static int xs_tcp_send_request(struct rpc_task *task) return status; } +/** + * xs_tcp_release_xprt - clean up after a tcp transmission + * @xprt: transport + * @task: rpc task + * + * This cleans up if an error causes us to abort the transmission of a request. + * In this case, the socket may need to be reset in order to avoid confusing + * the server. + */ +static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) +{ + struct rpc_rqst *req; + + if (task != xprt->snd_task) + return; + if (task == NULL) + goto out_release; + req = task->tk_rqstp; + if (req->rq_bytes_sent == 0) + goto out_release; + if (req->rq_bytes_sent == req->rq_snd_buf.len) + goto out_release; + set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state); +out_release: + xprt_release_xprt(xprt, task); +} + /** * xs_close - close a socket * @xprt: transport @@ -1250,7 +1277,7 @@ static struct rpc_xprt_ops xs_udp_ops = { static struct rpc_xprt_ops xs_tcp_ops = { .reserve_xprt = xprt_reserve_xprt, - .release_xprt = xprt_release_xprt, + .release_xprt = xs_tcp_release_xprt, .set_port = xs_set_port, .connect = xs_connect, .buf_alloc = rpc_malloc, diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 6f2909279268..de6ec519272e 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -128,23 +128,17 @@ static atomic_t unix_nr_socks = ATOMIC_INIT(0); #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) #ifdef CONFIG_SECURITY_NETWORK -static void unix_get_peersec_dgram(struct sk_buff *skb) +static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) { - int err; - - err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb), - UNIXSECLEN(skb)); - if (err) - *(UNIXSECDATA(skb)) = NULL; + memcpy(UNIXSID(skb), &scm->secid, sizeof(u32)); } static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) { - scm->secdata = *UNIXSECDATA(skb); - scm->seclen = *UNIXSECLEN(skb); + scm->secid = *UNIXSID(skb); } #else -static inline void unix_get_peersec_dgram(struct sk_buff *skb) +static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) { } static inline void 
unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) @@ -1322,8 +1316,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); if (siocb->scm->fp) unix_attach_fds(siocb->scm, skb); - - unix_get_peersec_dgram(skb); + unix_get_secdata(siocb->scm, skb); skb->h.raw = skb->data; err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f35bc676128c..3da67ca2c3ce 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1134,12 +1134,33 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) } EXPORT_SYMBOL(__xfrm_route_forward); +/* Optimize later using cookies and generation ids. */ + static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) { - /* If it is marked obsolete, which is how we even get here, - * then we have purged it from the policy bundle list and we - * did that for a good reason. + /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete + * to "-1" to force all XFRM destinations to get validated by + * dst_ops->check on every use. We do this because when a + * normal route referenced by an XFRM dst is obsoleted we do + * not go looking around for all parent referencing XFRM dsts + * so that we can invalidate them. It is just too much work. + * Instead we make the checks here on every use. For example: + * + * XFRM dst A --> IPv4 dst X + * + * X is the "xdst->route" of A (X is also the "dst->path" of A + * in this example). If X is marked obsolete, "A" will not + * notice. That's what we are validating here via the + * stale_bundle() check. + * + * When a policy's bundle is pruned, we dst_free() the XFRM + * dst which causes it's ->obsolete field to be set to a + * positive non-zero integer. If an XFRM dst has been pruned + * like this, we want to force a new route lookup. 
*/ + if (dst->obsolete < 0 && !stale_bundle(dst)) + return dst; + return NULL; } diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 2180c88cfe89..bb19c1561f1e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -77,14 +77,20 @@ cc-option-align = $(subst -functions=0,,\ # cc-version # Usage gcc-ver := $(call cc-version, $(CC)) -cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh \ - $(if $(1), $(1), $(CC))) +cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) # cc-ifversion # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) cc-ifversion = $(shell if [ $(call cc-version, $(CC)) $(1) $(2) ]; then \ echo $(3); fi;) +# ld-option +# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both) +ld-option = $(shell if $(CC) $(1) \ + -nostdlib -o ldtest$$$$.out -xc /dev/null \ + > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi; \ + rm -f ldtest$$$$.out) + ### # Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj= # Usage: diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index a49550205dcc..0a64688c2b5d 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -40,7 +40,7 @@ include scripts/Kbuild.include include scripts/Makefile.lib kernelsymfile := $(objtree)/Module.symvers -modulesymfile := $(KBUILD_EXTMOD)/Modules.symvers +modulesymfile := $(KBUILD_EXTMOD)/Module.symvers # Step 1), find all modules listed in $(MODVERDIR)/ __modules := $(sort $(shell grep -h '\.ko' /dev/null $(wildcard $(MODVERDIR)/*.mod))) diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 2ee48c377b66..a69d8acbf274 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -357,7 +357,7 @@ int conf_read(const char *name) for (e = prop->expr; e; e = e->left.expr) if (e->right.sym->visible != no) flags &= e->right.sym->flags; - sym->flags |= flags & SYMBOL_DEF_USER; + sym->flags &= flags | ~SYMBOL_DEF_USER; } sym_change_count += conf_warnings || conf_unsaved; diff --git a/scripts/kernel-doc b/scripts/kernel-doc index f9460a6218de..c9ca0c23bd91 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1518,6 +1518,7 @@ sub dump_function($$) { $prototype =~ s/^asmlinkage +//; $prototype =~ s/^inline +//; $prototype =~ s/^__inline__ +//; + $prototype =~ s/__devinit +//; $prototype =~ s/^#define +//; #ak added $prototype =~ s/__attribute__ \(\([a-z,]*\)\)//; diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 37f67c23e11b..e2de650d3dbf 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -52,6 +52,23 @@ do { \ sprintf(str + strlen(str), "*"); \ } while(0) +/** + * Check that sizeof(device_id type) are consistent with size of section + * in .o file. If in-consistent then userspace and kernel does not agree + * on actual size which is a bug. 
+ **/ +static void device_id_size_check(const char *modname, const char *device_id, + unsigned long size, unsigned long id_size) +{ + if (size % id_size || size < id_size) { + fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo " + "of the size of section __mod_%s_device_table=%lu.\n" + "Fix definition of struct %s_device_id " + "in mod_devicetable.h\n", + modname, device_id, id_size, device_id, size, device_id); + } +} + /* USB is special because the bcdDevice can be matched against a numeric range */ /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */ static void do_usb_entry(struct usb_device_id *id, @@ -152,10 +169,8 @@ static void do_usb_table(void *symval, unsigned long size, unsigned int i; const unsigned long id_size = sizeof(struct usb_device_id); - if (size % id_size || size < id_size) { - warn("%s ids %lu bad size " - "(each on %lu)\n", mod->name, size, id_size); - } + device_id_size_check(mod->name, "usb", size, id_size); + /* Leave last one: it's the terminator. */ size -= id_size; @@ -376,7 +391,7 @@ static void do_input(char *alias, unsigned int i; for (i = min; i < max; i++) - if (arr[i / BITS_PER_LONG] & (1 << (i%BITS_PER_LONG))) + if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG))) sprintf(alias + strlen(alias), "%X,*", i); } @@ -434,6 +449,7 @@ static inline int sym_is(const char *symbol, const char *name) static void do_table(void *symval, unsigned long size, unsigned long id_size, + const char *device_id, void *function, struct module *mod) { @@ -441,10 +457,7 @@ static void do_table(void *symval, unsigned long size, char alias[500]; int (*do_entry)(const char *, void *entry, char *alias) = function; - if (size % id_size || size < id_size) { - warn("%s ids %lu bad size " - "(each on %lu)\n", mod->name, size, id_size); - } + device_id_size_check(mod->name, device_id, size, id_size); /* Leave last one: it's the terminator. 
*/ size -= id_size; @@ -476,40 +489,51 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, + sym->st_value; if (sym_is(symname, "__mod_pci_device_table")) - do_table(symval, sym->st_size, sizeof(struct pci_device_id), + do_table(symval, sym->st_size, + sizeof(struct pci_device_id), "pci", do_pci_entry, mod); else if (sym_is(symname, "__mod_usb_device_table")) /* special case to handle bcdDevice ranges */ do_usb_table(symval, sym->st_size, mod); else if (sym_is(symname, "__mod_ieee1394_device_table")) - do_table(symval, sym->st_size, sizeof(struct ieee1394_device_id), + do_table(symval, sym->st_size, + sizeof(struct ieee1394_device_id), "ieee1394", do_ieee1394_entry, mod); else if (sym_is(symname, "__mod_ccw_device_table")) - do_table(symval, sym->st_size, sizeof(struct ccw_device_id), + do_table(symval, sym->st_size, + sizeof(struct ccw_device_id), "ccw", do_ccw_entry, mod); else if (sym_is(symname, "__mod_serio_device_table")) - do_table(symval, sym->st_size, sizeof(struct serio_device_id), + do_table(symval, sym->st_size, + sizeof(struct serio_device_id), "serio", do_serio_entry, mod); else if (sym_is(symname, "__mod_pnp_device_table")) - do_table(symval, sym->st_size, sizeof(struct pnp_device_id), + do_table(symval, sym->st_size, + sizeof(struct pnp_device_id), "pnp", do_pnp_entry, mod); else if (sym_is(symname, "__mod_pnp_card_device_table")) - do_table(symval, sym->st_size, sizeof(struct pnp_card_device_id), + do_table(symval, sym->st_size, + sizeof(struct pnp_card_device_id), "pnp_card", do_pnp_card_entry, mod); else if (sym_is(symname, "__mod_pcmcia_device_table")) - do_table(symval, sym->st_size, sizeof(struct pcmcia_device_id), + do_table(symval, sym->st_size, + sizeof(struct pcmcia_device_id), "pcmcia", do_pcmcia_entry, mod); else if (sym_is(symname, "__mod_of_device_table")) - do_table(symval, sym->st_size, sizeof(struct of_device_id), + do_table(symval, sym->st_size, + sizeof(struct of_device_id), "of", do_of_entry, mod); else if (sym_is(symname, "__mod_vio_device_table")) - do_table(symval, sym->st_size, sizeof(struct vio_device_id), + do_table(symval, sym->st_size, + sizeof(struct vio_device_id), "vio", do_vio_entry, mod); else if (sym_is(symname, "__mod_i2c_device_table")) - do_table(symval, sym->st_size, sizeof(struct i2c_device_id), + do_table(symval, sym->st_size, + sizeof(struct i2c_device_id), "i2c", do_i2c_entry, mod); else if (sym_is(symname, "__mod_input_device_table")) - do_table(symval, sym->st_size, sizeof(struct input_device_id), + do_table(symval, sym->st_size, + sizeof(struct input_device_id), "input", do_input_entry, mod); } diff --git a/security/dummy.c b/security/dummy.c index bbbfda70e131..58c6d399c844 100644 --- a/security/dummy.c +++ b/security/dummy.c @@ -791,8 +791,7 @@ static int dummy_socket_getpeersec_stream(struct socket *sock, char __user *optv return -ENOPROTOOPT; } -static int dummy_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, - u32 *seclen) +static int dummy_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { return -ENOPROTOOPT; } @@ -876,6 +875,15 @@ static int dummy_setprocattr(struct task_struct *p, char *name, void *value, siz return -EINVAL; } +static int dummy_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +{ + return -EOPNOTSUPP; +} + +static void dummy_release_secctx(char *secdata, u32 seclen) +{ +} + #ifdef CONFIG_KEYS static inline int dummy_key_alloc(struct key *key, struct task_struct *ctx, unsigned long flags) @@ -1028,6 +1036,8 @@ void security_fixup_ops 
(struct security_operations *ops) set_to_dummy_if_null(ops, d_instantiate); set_to_dummy_if_null(ops, getprocattr); set_to_dummy_if_null(ops, setprocattr); + set_to_dummy_if_null(ops, secid_to_secctx); + set_to_dummy_if_null(ops, release_secctx); #ifdef CONFIG_SECURITY_NETWORK set_to_dummy_if_null(ops, unix_stream_connect); set_to_dummy_if_null(ops, unix_may_send); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index a91c961ba38b..5d1b8c733199 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3524,25 +3524,21 @@ out: return err; } -static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, u32 *seclen) +static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { + u32 peer_secid = SECSID_NULL; int err = 0; - u32 peer_sid; - if (skb->sk->sk_family == PF_UNIX) - selinux_get_inode_sid(SOCK_INODE(skb->sk->sk_socket), - &peer_sid); - else - peer_sid = selinux_socket_getpeer_dgram(skb); + if (sock && (sock->sk->sk_family == PF_UNIX)) + selinux_get_inode_sid(SOCK_INODE(sock), &peer_secid); + else if (skb) + peer_secid = selinux_socket_getpeer_dgram(skb); - if (peer_sid == SECSID_NULL) - return -EINVAL; + if (peer_secid == SECSID_NULL) + err = -EINVAL; + *secid = peer_secid; - err = security_sid_to_context(peer_sid, secdata, seclen); - if (err) - return err; - - return 0; + return err; } static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) @@ -4407,6 +4403,17 @@ static int selinux_setprocattr(struct task_struct *p, return size; } +static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +{ + return security_sid_to_context(secid, secdata, seclen); +} + +static void selinux_release_secctx(char *secdata, u32 seclen) +{ + if (secdata) + kfree(secdata); +} + #ifdef CONFIG_KEYS static int selinux_key_alloc(struct key *k, struct task_struct *tsk, @@ -4587,6 +4594,9 @@ static struct security_operations selinux_ops = { .getprocattr = selinux_getprocattr, .setprocattr = selinux_setprocattr, + .secid_to_secctx = selinux_secid_to_secctx, + .release_secctx = selinux_release_secctx, + .unix_stream_connect = selinux_socket_unix_stream_connect, .unix_may_send = selinux_socket_unix_may_send, diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 0111990ba837..f03960e697ce 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -644,10 +644,18 @@ void policydb_destroy(struct policydb *p) kfree(lra); for (rt = p->range_tr; rt; rt = rt -> next) { - kfree(lrt); + if (lrt) { + ebitmap_destroy(&lrt->range.level[0].cat); + ebitmap_destroy(&lrt->range.level[1].cat); + kfree(lrt); + } lrt = rt; } - kfree(lrt); + if (lrt) { + ebitmap_destroy(&lrt->range.level[0].cat); + ebitmap_destroy(&lrt->range.level[1].cat); + kfree(lrt); + } if (p->type_attr_map) { for (i = 0; i < p->p_types.nprim; i++) diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index d2e80e62ff0c..85e429884393 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -833,6 +833,8 @@ static int security_compute_sid(u32 ssid, goto out; } + context_init(&newcontext); + POLICY_RDLOCK; scontext = sidtab_search(&sidtab, ssid); @@ -850,8 +852,6 @@ static int security_compute_sid(u32 ssid, goto out_unlock; } - context_init(&newcontext); - /* Set the user identity. 
*/ switch (specified) { case AVTAB_TRANSITION: diff --git a/sound/aoa/codecs/snd-aoa-codec-toonie.c b/sound/aoa/codecs/snd-aoa-codec-toonie.c index bcc555647e79..3c7d1d8a9a6f 100644 --- a/sound/aoa/codecs/snd-aoa-codec-toonie.c +++ b/sound/aoa/codecs/snd-aoa-codec-toonie.c @@ -51,6 +51,13 @@ static struct transfer_info toonie_transfers[] = { {} }; +static int toonie_usable(struct codec_info_item *cii, + struct transfer_info *ti, + struct transfer_info *out) +{ + return 1; +} + #ifdef CONFIG_PM static int toonie_suspend(struct codec_info_item *cii, pm_message_t state) { @@ -69,6 +76,7 @@ static struct codec_info toonie_codec_info = { .sysclock_factor = 256, .bus_factor = 64, .owner = THIS_MODULE, + .usable = toonie_usable, #ifdef CONFIG_PM .suspend = toonie_suspend, .resume = toonie_resume, @@ -79,19 +87,20 @@ static int toonie_init_codec(struct aoa_codec *codec) { struct toonie *toonie = codec_to_toonie(codec); + /* nothing connected? what a joke! */ + if (toonie->codec.connected != 1) + return -ENOTCONN; + if (aoa_snd_device_new(SNDRV_DEV_LOWLEVEL, toonie, &ops)) { printk(KERN_ERR PFX "failed to create toonie snd device!\n"); return -ENODEV; } - /* nothing connected? what a joke! */ - if (toonie->codec.connected != 1) - return -ENOTCONN; - if (toonie->codec.soundbus_dev->attach_codec(toonie->codec.soundbus_dev, aoa_get_card(), &toonie_codec_info, toonie)) { printk(KERN_ERR PFX "error creating toonie pcm\n"); + snd_device_free(aoa_get_card(), toonie); return -ENODEV; } diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c index 7ae0c0bdfad8..f69d33357a28 100644 --- a/sound/aoa/core/snd-aoa-gpio-feature.c +++ b/sound/aoa/core/snd-aoa-gpio-feature.c @@ -112,7 +112,10 @@ static struct device_node *get_gpio(char *name, static void get_irq(struct device_node * np, int *irqptr) { - *irqptr = irq_of_parse_and_map(np, 0); + if (np) + *irqptr = irq_of_parse_and_map(np, 0); + else + *irqptr = NO_IRQ; } /* 0x4 is outenable, 0x1 is out, thus 4 or 5 */ @@ -322,7 +325,7 @@ static int ftr_set_notify(struct gpio_runtime *rt, return -EINVAL; } - if (irq == -1) + if (irq == NO_IRQ) return -ENODEV; mutex_lock(¬if->mutex); diff --git a/sound/aoa/core/snd-aoa-gpio-pmf.c b/sound/aoa/core/snd-aoa-gpio-pmf.c index 3d57fd1aec4b..2836c3218391 100644 --- a/sound/aoa/core/snd-aoa-gpio-pmf.c +++ b/sound/aoa/core/snd-aoa-gpio-pmf.c @@ -18,7 +18,7 @@ static void pmf_gpio_set_##name(struct gpio_runtime *rt, int on)\ \ if (unlikely(!rt)) return; \ rc = pmf_call_function(rt->node, #name "-mute", &args); \ - if (rc) \ + if (rc && rc != -ENODEV) \ printk(KERN_WARNING "pmf_gpio_set_" #name \ " failed, rc: %d\n", rc); \ rt->implementation_private &= ~(1<index == 0 && (kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0)) != NULL) { struct snd_ctl_elem_info *uinfo; - uinfo = kmalloc(sizeof(*uinfo), GFP_KERNEL); + uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL); if (! uinfo) { up_read(&mixer->card->controls_rwsem); return -ENOMEM; } - memset(uinfo, 0, sizeof(*uinfo)); if (kctl->info(kctl, uinfo)) { up_read(&mixer->card->controls_rwsem); return 0; diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index f5ff4f4a16ee..472fce0ee0e8 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -2228,6 +2228,8 @@ static int snd_pcm_oss_open_file(struct file *file, for (idx = 0; idx < 2; idx++) { if (setup[idx].disable) continue; + if (! pcm->streams[idx].substream_count) + continue; /* no matching substream */ if (idx == SNDRV_PCM_STREAM_PLAYBACK) { if (! 
(f_mode & FMODE_WRITE)) continue; diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c index 4260de90f36f..102ff548ce69 100644 --- a/sound/core/seq/seq_device.c +++ b/sound/core/seq/seq_device.c @@ -372,10 +372,9 @@ static struct ops_list * create_driver(char *id) { struct ops_list *ops; - ops = kmalloc(sizeof(*ops), GFP_KERNEL); + ops = kzalloc(sizeof(*ops), GFP_KERNEL); if (ops == NULL) return ops; - memset(ops, 0, sizeof(*ops)); /* set up driver entry */ strlcpy(ops->id, id, sizeof(ops->id)); diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c index 6e4d4ab34632..c30669f14ac0 100644 --- a/sound/core/sgbuf.c +++ b/sound/core/sgbuf.c @@ -68,21 +68,18 @@ void *snd_malloc_sgbuf_pages(struct device *device, dmab->area = NULL; dmab->addr = 0; - dmab->private_data = sgbuf = kmalloc(sizeof(*sgbuf), GFP_KERNEL); + dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL); if (! sgbuf) return NULL; - memset(sgbuf, 0, sizeof(*sgbuf)); sgbuf->dev = device; pages = snd_sgbuf_aligned_pages(size); sgbuf->tblsize = sgbuf_align_table(pages); - sgbuf->table = kmalloc(sizeof(*sgbuf->table) * sgbuf->tblsize, GFP_KERNEL); + sgbuf->table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->table), GFP_KERNEL); if (! sgbuf->table) goto _failed; - memset(sgbuf->table, 0, sizeof(*sgbuf->table) * sgbuf->tblsize); - sgbuf->page_table = kmalloc(sizeof(*sgbuf->page_table) * sgbuf->tblsize, GFP_KERNEL); + sgbuf->page_table = kcalloc(sgbuf->tblsize, sizeof(*sgbuf->page_table), GFP_KERNEL); if (! sgbuf->page_table) goto _failed; - memset(sgbuf->page_table, 0, sizeof(*sgbuf->page_table) * sgbuf->tblsize); /* allocate each page */ for (i = 0; i < pages; i++) { diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c index c4af84995d05..7e65a103fbb2 100644 --- a/sound/drivers/vx/vx_pcm.c +++ b/sound/drivers/vx/vx_pcm.c @@ -1252,18 +1252,15 @@ static int vx_init_audio_io(struct vx_core *chip) chip->audio_info = rmh.Stat[1]; /* allocate pipes */ - chip->playback_pipes = kmalloc(sizeof(struct vx_pipe *) * chip->audio_outs, GFP_KERNEL); + chip->playback_pipes = kcalloc(chip->audio_outs, sizeof(struct vx_pipe *), GFP_KERNEL); if (!chip->playback_pipes) return -ENOMEM; - chip->capture_pipes = kmalloc(sizeof(struct vx_pipe *) * chip->audio_ins, GFP_KERNEL); + chip->capture_pipes = kcalloc(chip->audio_ins, sizeof(struct vx_pipe *), GFP_KERNEL); if (!chip->capture_pipes) { kfree(chip->playback_pipes); return -ENOMEM; } - memset(chip->playback_pipes, 0, sizeof(struct vx_pipe *) * chip->audio_outs); - memset(chip->capture_pipes, 0, sizeof(struct vx_pipe *) * chip->audio_ins); - preferred = chip->ibl.size; chip->ibl.size = 0; vx_set_ibl(chip, &chip->ibl); /* query the info */ diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig index f4980ca5c05c..1b7c3dfc2b41 100644 --- a/sound/oss/Kconfig +++ b/sound/oss/Kconfig @@ -31,7 +31,7 @@ config SOUND_EMU10K1 For more information on this driver and the degree of support for the different card models please check: - + It is now possible to load dsp microcode patches into the EMU10K1 chip. These patches are used to implement real time sound @@ -140,7 +140,7 @@ config SOUND_TRIDENT system support" and "Sysctl support", and after the /proc file system has been mounted, executing the command - command what is enabled + command what is enabled echo 0>/proc/ALi5451 pcm out is also set to S/PDIF out. (Default). 
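
Several of the sound hunks above (seq_device.c, sgbuf.c, vx_pcm.c) and more further down apply one mechanical conversion: a kmalloc() followed by memset(..., 0, ...) becomes kzalloc(), and an open-coded n * size allocation plus memset() becomes kcalloc(). The following is a minimal sketch of that pattern, not taken from the patch; the struct, field, and function names are made up, only the allocator calls reflect the real API.

/*
 * Hypothetical example of the kmalloc+memset -> kzalloc/kcalloc conversion.
 */
#include <linux/slab.h>

struct example_state {
	int	nstreams;
	void	**stream_ptrs;		/* one pointer per stream */
};

static struct example_state *example_alloc(int nstreams)
{
	struct example_state *st;

	/* was: kmalloc(sizeof(*st), GFP_KERNEL) + memset(st, 0, sizeof(*st)) */
	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return NULL;

	/* was: kmalloc(nstreams * sizeof(*st->stream_ptrs), ...) + memset() */
	st->stream_ptrs = kcalloc(nstreams, sizeof(*st->stream_ptrs), GFP_KERNEL);
	if (!st->stream_ptrs) {
		kfree(st);
		return NULL;
	}
	st->nstreams = nstreams;
	return st;
}

Besides being shorter, kcalloc() also checks the n * size multiplication for overflow, which the open-coded form did not.
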
@@ -838,7 +838,7 @@ config SOUND_WAVEARTIST config SOUND_TVMIXER tristate "TV card (bt848) mixer support" - depends on SOUND_PRIME && I2C + depends on SOUND_PRIME && I2C && VIDEO_V4L1 help Support for audio mixer facilities on the BT848 TV frame-grabber card. diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig index d7ad32f514da..e49c0fe21b0d 100644 --- a/sound/pci/Kconfig +++ b/sound/pci/Kconfig @@ -16,16 +16,16 @@ config SND_AD1889 will be called snd-ad1889. config SND_ALS300 - tristate "Avance Logic ALS300/ALS300+" - depends on SND - select SND_PCM - select SND_AC97_CODEC - select SND_OPL3_LIB - help - Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+ + tristate "Avance Logic ALS300/ALS300+" + depends on SND + select SND_PCM + select SND_AC97_CODEC + select SND_OPL3_LIB + help + Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+ - To compile this driver as a module, choose M here: the module - will be called snd-als300 + To compile this driver as a module, choose M here: the module + will be called snd-als300 config SND_ALS4000 tristate "Avance Logic ALS4000" @@ -78,49 +78,49 @@ config SND_ATIIXP_MODEM will be called snd-atiixp-modem. config SND_AU8810 - tristate "Aureal Advantage" - depends on SND + tristate "Aureal Advantage" + depends on SND select SND_MPU401_UART select SND_AC97_CODEC - help + help Say Y here to include support for Aureal Advantage soundcards. Supported features: Hardware Mixer, SRC, EQ and SPDIF output. - 3D support code is in place, but not yet useable. For more info, - email the ALSA developer list, or . + 3D support code is in place, but not yet useable. For more info, + email the ALSA developer list, or . To compile this driver as a module, choose M here: the module will be called snd-au8810. - + config SND_AU8820 - tristate "Aureal Vortex" - depends on SND + tristate "Aureal Vortex" + depends on SND select SND_MPU401_UART select SND_AC97_CODEC - help + help Say Y here to include support for Aureal Vortex soundcards. - Supported features: Hardware Mixer and SRC. For more info, email - the ALSA developer list, or . + Supported features: Hardware Mixer and SRC. For more info, email + the ALSA developer list, or . To compile this driver as a module, choose M here: the module will be called snd-au8820. - + config SND_AU8830 - tristate "Aureal Vortex 2" - depends on SND + tristate "Aureal Vortex 2" + depends on SND select SND_MPU401_UART select SND_AC97_CODEC - help + help Say Y here to include support for Aureal Vortex 2 soundcards. - Supported features: Hardware Mixer, SRC, EQ and SPDIF output. - 3D support code is in place, but not yet useable. For more info, - email the ALSA developer list, or . + Supported features: Hardware Mixer, SRC, EQ and SPDIF output. + 3D support code is in place, but not yet useable. For more info, + email the ALSA developer list, or . To compile this driver as a module, choose M here: the module will be called snd-au8830. - + config SND_AZT3328 tristate "Aztech AZF3328 / PCI168 (EXPERIMENTAL)" depends on SND && EXPERIMENTAL @@ -135,10 +135,10 @@ config SND_AZT3328 will be called snd-azt3328. config SND_BT87X - tristate "Bt87x Audio Capture" - depends on SND + tristate "Bt87x Audio Capture" + depends on SND select SND_PCM - help + help If you want to record audio from TV cards based on Brooktree Bt878/Bt879 chips, say Y here and read . 
@@ -209,7 +209,7 @@ config SND_CS46XX config SND_CS46XX_NEW_DSP bool "Cirrus Logic (Sound Fusion) New DSP support" depends on SND_CS46XX - default y + default y help Say Y here to use a new DSP image for SPDIF and dual codecs. @@ -225,7 +225,7 @@ config SND_CS5535AUDIO referred to as NS CS5535 IO or AMD CS5535 IO companion in various literature. This driver also supports the CS5536 audio device. However, for both chips, on certain boards, you may - need to use ac97_quirk=hp_only if your board has physically + need to use ac97_quirk=hp_only if your board has physically mapped headphone out to master output. If that works for you, send lspci -vvv output to the mailing list so that your board can be identified in the quirks list. @@ -468,11 +468,13 @@ config SND_FM801_TEA575X_BOOL FM801 chip with a TEA5757 tuner connected to GPIO1-3 pins (Media Forte SF256-PCS-02) into the snd-fm801 driver. + This will enable support for the old V4L1 API. + config SND_FM801_TEA575X tristate depends on SND_FM801_TEA575X_BOOL default SND_FM801 - select VIDEO_DEV + select VIDEO_V4L1 config SND_HDA_INTEL tristate "Intel HD Audio" diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 27a8dbe6f6a8..c3dafa29054f 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c @@ -236,9 +236,9 @@ static int pcm_open(struct snd_pcm_substream *substream, chip = snd_pcm_substream_chip(substream); runtime = substream->runtime; - if (!(pipe = kmalloc(sizeof(struct audiopipe), GFP_KERNEL))) + pipe = kzalloc(sizeof(struct audiopipe), GFP_KERNEL); + if (!pipe) return -ENOMEM; - memset(pipe, 0, sizeof(struct audiopipe)); pipe->index = -1; /* Not configured yet */ /* Set up hw capabilities and contraints */ diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index f9b5c3dc3b34..79f24cdf5fbf 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c @@ -936,6 +936,17 @@ static struct snd_emu_chip_details emu_chip_details[] = { .ca0151_chip = 1, .spk71 = 1, .spdif_bug = 1} , + /* Dell OEM/Creative Labs Audigy 2 ZS */ + /* See ALSA bug#1365 */ + {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10031102, + .driver = "Audigy2", .name = "Audigy 2 ZS [SB0353]", + .id = "Audigy2", + .emu10k2_chip = 1, + .ca0102_chip = 1, + .ca0151_chip = 1, + .spk71 = 1, + .spdif_bug = 1, + .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10021102, .driver = "Audigy2", .name = "Audigy 2 Platinum [SB0240P]", .id = "Audigy2", diff --git a/sound/pci/emu10k1/irq.c b/sound/pci/emu10k1/irq.c index a8b31286b6db..1076af4c3669 100644 --- a/sound/pci/emu10k1/irq.c +++ b/sound/pci/emu10k1/irq.c @@ -37,9 +37,13 @@ irqreturn_t snd_emu10k1_interrupt(int irq, void *dev_id, struct pt_regs *regs) int handled = 0; while ((status = inl(emu->port + IPR)) != 0) { - //printk("emu10k1 irq - status = 0x%x\n", status); + //snd_printk(KERN_INFO "emu10k1 irq - status = 0x%x\n", status); orig_status = status; handled = 1; + if ((status & 0xffffffff) == 0xffffffff) { + snd_printk(KERN_INFO "snd-emu10k1: Suspected sound card removal\n"); + break; + } if (status & IPR_PCIERROR) { snd_printk(KERN_ERR "interrupt: PCI error\n"); snd_emu10k1_intr_disable(emu, INTE_PCIERRORENABLE); diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c index 82d791be7499..05dabe454658 100644 --- a/sound/ppc/awacs.c +++ b/sound/ppc/awacs.c @@ -801,11 +801,10 @@ snd_pmac_awacs_init(struct snd_pmac *chip) chip->revision = (in_le32(&chip->awacs->codec_stat) >> 12) & 0xf; #ifdef 
PMAC_AMP_AVAIL if (chip->revision == 3 && chip->has_iic && CHECK_CUDA_AMP()) { - struct awacs_amp *amp = kmalloc(sizeof(*amp), GFP_KERNEL); + struct awacs_amp *amp = kzalloc(sizeof(*amp), GFP_KERNEL); if (! amp) return -ENOMEM; chip->mixer_data = amp; - memset(amp, 0, sizeof(*amp)); chip->mixer_free = awacs_amp_free; awacs_amp_set_vol(amp, 0, 63, 63, 0); /* mute and zero vol */ awacs_amp_set_vol(amp, 1, 63, 63, 0); diff --git a/sound/ppc/daca.c b/sound/ppc/daca.c index 46eebf5610e3..57202b0f033e 100644 --- a/sound/ppc/daca.c +++ b/sound/ppc/daca.c @@ -258,10 +258,9 @@ int __init snd_pmac_daca_init(struct snd_pmac *chip) request_module("i2c-powermac"); #endif /* CONFIG_KMOD */ - mix = kmalloc(sizeof(*mix), GFP_KERNEL); + mix = kzalloc(sizeof(*mix), GFP_KERNEL); if (! mix) return -ENOMEM; - memset(mix, 0, sizeof(*mix)); chip->mixer_data = mix; chip->mixer_free = daca_cleanup; mix->amp_on = 1; /* default on */ diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c index fb05938dcbd9..59482a4cd446 100644 --- a/sound/ppc/keywest.c +++ b/sound/ppc/keywest.c @@ -64,11 +64,10 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter) if (strncmp(i2c_device_name(adapter), "mac-io", 6)) return 0; /* ignored */ - new_client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL); + new_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); if (! new_client) return -ENOMEM; - memset(new_client, 0, sizeof(*new_client)); new_client->addr = keywest_ctx->addr; i2c_set_clientdata(new_client, keywest_ctx); new_client->adapter = adapter; diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c index fa9a44ab487e..2264574fa06b 100644 --- a/sound/ppc/powermac.c +++ b/sound/ppc/powermac.c @@ -181,21 +181,14 @@ static int __init alsa_card_pmac_init(void) if ((err = platform_driver_register(&snd_pmac_driver)) < 0) return err; device = platform_device_register_simple(SND_PMAC_DRIVER, -1, NULL, 0); - if (!IS_ERR(device)) { - if (platform_get_drvdata(device)) - return 0; - platform_device_unregister(device); - err = -ENODEV; - } else - err = PTR_ERR(device); - platform_driver_unregister(&snd_pmac_driver); - return err; + return 0; } static void __exit alsa_card_pmac_exit(void) { - platform_device_unregister(device); + if (!IS_ERR(device)) + platform_device_unregister(device); platform_driver_unregister(&snd_pmac_driver); } diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c index 692c61177678..84f6b19c07ca 100644 --- a/sound/ppc/tumbler.c +++ b/sound/ppc/tumbler.c @@ -1316,10 +1316,9 @@ int __init snd_pmac_tumbler_init(struct snd_pmac *chip) request_module("i2c-powermac"); #endif /* CONFIG_KMOD */ - mix = kmalloc(sizeof(*mix), GFP_KERNEL); + mix = kzalloc(sizeof(*mix), GFP_KERNEL); if (! mix) return -ENOMEM; - memset(mix, 0, sizeof(*mix)); mix->headphone_irq = -1; chip->mixer_data = mix; diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c index d32d83d970cc..1b7f499c549d 100644 --- a/sound/usb/usbaudio.c +++ b/sound/usb/usbaudio.c @@ -2260,10 +2260,9 @@ static int add_audio_endpoint(struct snd_usb_audio *chip, int stream, struct aud } /* create a new pcm */ - as = kmalloc(sizeof(*as), GFP_KERNEL); + as = kzalloc(sizeof(*as), GFP_KERNEL); if (! as) return -ENOMEM; - memset(as, 0, sizeof(*as)); as->pcm_index = chip->pcm_devs; as->chip = chip; as->fmt_type = fp->fmt_type; @@ -2633,13 +2632,12 @@ static int parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) csep = NULL; } - fp = kmalloc(sizeof(*fp), GFP_KERNEL); + fp = kzalloc(sizeof(*fp), GFP_KERNEL); if (! 
fp) { snd_printk(KERN_ERR "cannot malloc\n"); return -ENOMEM; } - memset(fp, 0, sizeof(*fp)); fp->iface = iface_no; fp->altsetting = altno; fp->altset_idx = i; diff --git a/usr/Makefile b/usr/Makefile index e93824269da2..5b31c0b61c76 100644 --- a/usr/Makefile +++ b/usr/Makefile @@ -35,6 +35,9 @@ quiet_cmd_initfs = GEN $@ cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input) targets := initramfs_data.cpio.gz +# do not try to update files included in initramfs +$(deps_initramfs): ; + $(deps_initramfs): klibcdirs # We rebuild initramfs_data.cpio.gz if: # 1) Any included file is newer then initramfs_data.cpio.gz
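
One more pattern worth pulling out of the sound/ppc/powermac.c hunk above: module init no longer unwinds and fails when the platform device cannot be created, and module exit only calls platform_device_unregister() when the device actually exists (the IS_ERR() check). Below is a condensed, self-contained sketch of that shape under an assumed "example" driver; the names are illustrative and the probe/remove bodies are omitted.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",	/* hypothetical driver name */
	},
	/* .probe / .remove omitted in this sketch */
};

static struct platform_device *example_device;

static int __init example_init(void)
{
	int err;

	err = platform_driver_register(&example_driver);
	if (err < 0)
		return err;

	/* May come back as an ERR_PTR; keep the driver registered anyway. */
	example_device = platform_device_register_simple("example", -1, NULL, 0);
	return 0;
}

static void __exit example_exit(void)
{
	/* Only tear down the device if it was really created. */
	if (!IS_ERR(example_device))
		platform_device_unregister(example_device);
	platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
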