Mirror of https://github.com/torvalds/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Bug fixes galore, mostly in drivers as is often the case:

   1) USB gadget and cdc_eem drivers need adjustments to their frame
      size lengths in order to handle VLANs correctly.  From Ian
      Coolidge.

   2) TIPC and several network drivers erroneously call tasklet_disable
      before tasklet_kill, fix from Xiaotian Feng.

   3) r8169 driver needs to apply the WOL suspend quirk to more
      chipsets, fix from Cyril Brulebois.

   4) Fix multicast filters on RTL_GIGA_MAC_VER_35 r8169 chips, from
      Nathan Walp.

   5) FDB netlink dumps should use RTM_NEWNEIGH as the message type,
      not zero.  From John Fastabend.

   6) Fix smsc95xx tx checksum offload on big-endian, from Steve
      Glendinning.

   7) __inet_diag_dump() needs to respect and report the error value
      returned from inet_diag_lock_handler() rather than ignore it.
      Otherwise, if an inet diag handler is not available for a
      particular protocol, we essentially report success instead of
      giving an error indication.  Fix from Cyrill Gorcunov.

   8) When the QFQ packet scheduler sees TSO/GSO packets it does not
      handle them properly, and in fact ends up corrupting its data
      structures as well as mis-scheduling packets.  Fix from Paolo
      Valente.

   9) Fix oops in skb_loop_sk(), from Eric Leblond.

  10) CXGB4 passes partially uninitialized data structures in to FW
      commands, fix from Vipul Pandya.

  11) When we send unsolicited ipv6 neighbour advertisements, we should
      send them to the link-local all-nodes multicast address, as per
      RFC 4861.  Fix from Hannes Frederic Sowa.

  12) There is some kind of bug in usbnet's kevent deferral mechanism;
      more immediately, when it triggers, an uncontrolled stream of
      kernel messages spams the log.  Rate limit the error message
      triggered when this problem occurs, as sending thousands of error
      messages into the kernel log doesn't help matters at all, and in
      fact makes further diagnosis more difficult.  From Steve
      Glendinning.

  13) Fix gianfar restore from hibernation, from Wang Dongsheng.

  14) The netlink message attribute sizes are wrong in the ipv6 GRE
      driver; it was using the size of ipv4 addresses instead of ipv6
      ones :-)  Fix from Nicolas Dichtel."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  gre6: fix rtnl dump messages
  gianfar: ethernet vanishes after restoring from hibernation
  usbnet: ratelimit kevent may have been dropped warnings
  ipv6: send unsolicited neighbour advertisements to all-nodes
  net: usb: cdc_eem: Fix rx skb allocation for 802.1Q VLANs
  usb: gadget: g_ether: fix frame size check for 802.1Q
  cxgb4: Fix initialization of SGE_CONTROL register
  isdn: Make CONFIG_ISDN depend on CONFIG_NETDEVICES
  cxgb4: Initialize data structures before using.
  af-packet: fix oops when socket is not present
  pkt_sched: enable QFQ to support TSO/GSO
  net: inet_diag -- Return error code if protocol handler is missed
  net: bnx2x: Fix typo in bnx2x driver
  smsc95xx: fix tx checksum offload for big endian
  rtnetlink: Use nlmsg type RTM_NEWNEIGH from dflt fdb dump
  ptp: update adjfreq callback description
  r8169: allow multicast packets on sub-8168f chipset.
  r8169: Fix WoL on RTL8168d/8111d.
  drivers/net: use tasklet_kill in device remove/close process
  tipc: do not use tasklet_disable before tasklet_kill
This commit is contained in: commit b251f0f399
@@ -4,7 +4,7 @@

 menuconfig ISDN
 	bool "ISDN support"
-	depends on NET
+	depends on NET && NETDEVICES
 	depends on !S390 && !UML
 	---help---
 	  ISDN ("Integrated Services Digital Network", called RNIS in France)
@@ -6,7 +6,7 @@ if ISDN_I4L

 config ISDN_PPP
 	bool "Support synchronous PPP"
-	depends on INET && NETDEVICES
+	depends on INET
 	select SLHC
 	help
 	  Over digital connections such as ISDN, there is no need to
@@ -1312,7 +1312,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
 		} else
 			return -EINVAL;
 		break;
-#ifdef CONFIG_NETDEVICES
 	case IIOCNETGPN:
 		/* Get peer phone number of a connected
 		 * isdn network interface */
@@ -1322,7 +1321,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
 			return isdn_net_getpeer(&phone, argp);
 		} else
 			return -EINVAL;
-#endif
 	default:
 		return -EINVAL;
 	}
@@ -1352,7 +1350,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
 	case IIOCNETLCR:
 		printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n");
 		return -ENODEV;
-#ifdef CONFIG_NETDEVICES
 	case IIOCNETAIF:
 		/* Add a network-interface */
 		if (arg) {
@@ -1491,7 +1488,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
 				return -EFAULT;
 			return isdn_net_force_hangup(name);
 			break;
-#endif /* CONFIG_NETDEVICES */
 	case IIOCSETVER:
 		dev->net_verbose = arg;
 		printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose);
@@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
 				    SHMEM_EEE_ADV_STATUS_SHIFT);
 		if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
 			DP(BNX2X_MSG_ETHTOOL,
-			   "Direct manipulation of EEE advertisment is not supported\n");
+			   "Direct manipulation of EEE advertisement is not supported\n");
 			return -EINVAL;
 		}
@@ -9941,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 		else
 			rc = bnx2x_8483x_disable_eee(phy, params, vars);
 		if (rc) {
-			DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+			DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
 			return rc;
 		}
 	} else {
@@ -12987,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
 		DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
 		break;
 	default:
-		DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+		DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
 	}
 	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
 	   old_status, status);
@@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox)
 {
 	struct fw_bye_cmd c;

+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, BYE, WRITE);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox)
 {
 	struct fw_initialize_cmd c;

+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, INITIALIZE, WRITE);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
 {
 	struct fw_reset_cmd c;

+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, RESET, WRITE);
 	c.val = htonl(reset);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 		     HOSTPAGESIZEPF7(sge_hps));

 	t4_set_reg_field(adap, SGE_CONTROL,
-			 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+			 INGPADBOUNDARY_MASK |
 			 EGRSTATUSPAGESIZE_MASK,
 			 INGPADBOUNDARY(fl_align_log - 5) |
 			 EGRSTATUSPAGESIZE(stat_len != 64));
@@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 {
 	struct fw_vi_enable_cmd c;

+	memset(&c, 0, sizeof(c));
 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
 			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
@@ -1353,8 +1353,11 @@ static int gfar_restore(struct device *dev)
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;

-	if (!netif_running(ndev))
+	if (!netif_running(ndev)) {
+		netif_device_attach(ndev);
+
 		return 0;
+	}

 	gfar_init_bds(ndev);
 	init_registers(ndev);
@@ -1948,10 +1948,10 @@ jme_close(struct net_device *netdev)

 	JME_NAPI_DISABLE(jme);

-	tasklet_disable(&jme->linkch_task);
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	tasklet_kill(&jme->linkch_task);
+	tasklet_kill(&jme->txclean_task);
+	tasklet_kill(&jme->rxclean_task);
+	tasklet_kill(&jme->rxempty_task);

 	jme_disable_rx_engine(jme);
 	jme_disable_tx_engine(jme);
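The jme change above, like the skge, ksz884x, axienet, b43legacy and tipc hunks below, is one instance of the same pattern: tasklet_disable() only bumps the tasklet's disable count, so an already-scheduled tasklet stays queued with its SCHED bit set and never runs, and a subsequent tasklet_kill() then waits forever for that bit to clear. A minimal sketch of the corrected teardown, with a hypothetical my_priv structure standing in for the various driver privates:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct tasklet_struct rx_task;	/* hypothetical, for illustration */
};

static int my_close(struct net_device *ndev)
{
	struct my_priv *p = netdev_priv(ndev);

	/*
	 * Do not tasklet_disable() first: a disabled-but-scheduled
	 * tasklet keeps TASKLET_STATE_SCHED set, so tasklet_kill()
	 * would spin forever.  tasklet_kill() alone waits for any
	 * running instance to finish and removes a pending one.
	 */
	tasklet_kill(&p->rx_task);
	return 0;
}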
@@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);

-	tasklet_disable(&hw->phy_task);
+	tasklet_kill(&hw->phy_task);

 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
@@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev)
 	/* Delay for receive task to stop scheduling itself. */
 	msleep(2000 / HZ);

-	tasklet_disable(&hw_priv->rx_tasklet);
-	tasklet_disable(&hw_priv->tx_tasklet);
+	tasklet_kill(&hw_priv->rx_tasklet);
+	tasklet_kill(&hw_priv->tx_tasklet);
 	free_irq(dev->irq, hw_priv->dev);

 	transmit_cleanup(hw_priv, 0);
@@ -3827,6 +3827,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
 	void __iomem *ioaddr = tp->mmio_addr;

 	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_25:
+	case RTL_GIGA_MAC_VER_26:
 	case RTL_GIGA_MAC_VER_29:
 	case RTL_GIGA_MAC_VER_30:
 	case RTL_GIGA_MAC_VER_32:
@@ -4519,6 +4521,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
 		mc_filter[1] = swab32(data);
 	}

+	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+
 	RTL_W32(MAR0 + 4, mc_filter[1]);
 	RTL_W32(MAR0 + 0, mc_filter[0]);
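Context for the all-ones write added above: mc_filter[0] and mc_filter[1] back a 64-bit multicast hash filter in the MAR registers, where each multicast address normally sets one hash-selected bit out of 64; writing 0xffffffff to both halves makes every bucket match, i.e. the chip accepts all multicast frames, the fallback chosen here because RTL_GIGA_MAC_VER_35 mis-filters with the computed hash. A generic sketch of this style of filter (ether_crc-based bit selection is the common idiom on Ethernet MACs, not necessarily the exact r8169 code):

#include <linux/crc32.h>
#include <linux/etherdevice.h>

/* Set one bit out of 64, chosen by the top 6 CRC bits of the address. */
static void hash_mc_addr(u32 mc_filter[2], const u8 *addr)
{
	int bit = ether_crc(ETH_ALEN, addr) >> 26;

	mc_filter[bit >> 5] |= 1U << (bit & 31);
}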
@@ -990,7 +990,7 @@ static int axienet_stop(struct net_device *ndev)
 	axienet_setoptions(ndev, lp->options &
 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

-	tasklet_disable(&lp->dma_err_tasklet);
+	tasklet_kill(&lp->dma_err_tasklet);

 	free_irq(lp->tx_irq, ndev);
 	free_irq(lp->rx_irq, ndev);
@@ -31,6 +31,7 @@
 #include <linux/usb/cdc.h>
 #include <linux/usb/usbnet.h>
 #include <linux/gfp.h>
+#include <linux/if_vlan.h>


 /*
@@ -92,7 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf)

 	/* no jumbogram (16K) support for now */

-	dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
+	dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN;
 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

 	return 0;
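The size arithmetic behind this fix (and the u_ether one further down): an 802.1Q tag adds VLAN_HLEN (4) bytes between the Ethernet header and payload, so a maximal tagged frame is VLAN_ETH_FRAME_LEN (1518) rather than ETH_FRAME_LEN (1514), and a driver that sizes its rx buffers or length checks from the untagged maximum silently mangles or drops tagged traffic. A small self-check using the standard kernel constants:

#include <linux/bug.h>
#include <linux/if_ether.h>	/* ETH_HLEN, ETH_DATA_LEN, ETH_FRAME_LEN */
#include <linux/if_vlan.h>	/* VLAN_HLEN, VLAN_ETH_FRAME_LEN */

static bool rx_len_ok(unsigned int len)
{
	/* 14-byte header + 4-byte 802.1Q tag + 1500-byte MTU = 1518 */
	BUILD_BUG_ON(ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN != VLAN_ETH_FRAME_LEN);

	return len >= ETH_HLEN && len <= VLAN_ETH_FRAME_LEN;
}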
@@ -1344,6 +1344,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
 	} else {
 		u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
 		skb_push(skb, 4);
+		cpu_to_le32s(&csum_preamble);
 		memcpy(skb->data, &csum_preamble, 4);
 	}
 }
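The one-liner above is an endianness fix: csum_preamble is computed in CPU byte order, but the device expects it little-endian on the wire, so on big-endian machines the plain memcpy() wrote the four bytes reversed. cpu_to_le32s() converts a u32 in place (a byte swap on big-endian, a no-op on little-endian), after which the memcpy is safe everywhere. The idiom in isolation:

#include <asm/byteorder.h>
#include <linux/string.h>

/* Write a CPU-order u32 into a little-endian wire buffer. */
static void put_le32(void *wire, u32 val)
{
	cpu_to_le32s(&val);	/* swap on BE, no-op on LE */
	memcpy(wire, &val, 4);
}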
@@ -359,10 +359,12 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 void usbnet_defer_kevent (struct usbnet *dev, int work)
 {
 	set_bit (work, &dev->flags);
-	if (!schedule_work (&dev->kevent))
-		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
-	else
+	if (!schedule_work (&dev->kevent)) {
+		if (net_ratelimit())
+			netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+	} else {
 		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
+	}
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -382,7 +382,7 @@ static void cancel_transfers(struct b43legacy_pioqueue *queue)
 {
 	struct b43legacy_pio_txpacket *packet, *tmp_packet;

-	tasklet_disable(&queue->txtask);
+	tasklet_kill(&queue->txtask);

 	list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
 		free_txpacket(packet, 0);
@@ -20,6 +20,7 @@
 #include <linux/ctype.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/if_vlan.h>

 #include "u_ether.h"

@@ -295,7 +296,7 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 	while (skb2) {
 		if (status < 0
 				|| ETH_HLEN > skb2->len
-				|| skb2->len > ETH_FRAME_LEN) {
+				|| skb2->len > VLAN_ETH_FRAME_LEN) {
 			dev->net->stats.rx_errors++;
 			dev->net->stats.rx_length_errors++;
 			DBG(dev, "rx length %d\n", skb2->len);
@@ -54,7 +54,8 @@ struct ptp_clock_request {
  * clock operations
  *
  * @adjfreq:  Adjusts the frequency of the hardware clock.
- *            parameter delta: Desired period change in parts per billion.
+ *            parameter delta: Desired frequency offset from nominal frequency
+ *            in parts per billion
  *
  * @adjtime:  Shifts the time of the hardware clock.
  *            parameter delta: Desired change in nanoseconds.
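The reworded kernel-doc matters because a period change and a frequency offset have opposite signs: delta is now defined as an offset from the nominal frequency in parts per billion, so a positive delta asks the clock to run faster. Drivers typically scale their nominal tick increment by delta/1e9; a sketch of that computation (incr_nominal is a hypothetical hardware increment value, not a real register):

#include <linux/kernel.h>
#include <linux/math64.h>

static u64 scaled_increment(u64 incr_nominal, s32 delta_ppb)
{
	/* assumes incr_nominal * |delta| fits in 64 bits */
	u64 adj = div_u64(incr_nominal * abs(delta_ppb), 1000000000U);

	return delta_ppb < 0 ? incr_nominal - adj : incr_nominal + adj;
}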
@@ -1666,7 +1666,7 @@ static inline int deliver_skb(struct sk_buff *skb,

 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
 {
-	if (ptype->af_packet_priv == NULL)
+	if (!ptype->af_packet_priv || !skb->sk)
 		return false;

 	if (ptype->id_match)
@@ -2192,7 +2192,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
 			goto skip;

 		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
-					      portid, seq, 0, NTF_SELF);
+					      portid, seq,
+					      RTM_NEWNEIGH, NTF_SELF);
 		if (err < 0)
 			return err;
 skip:
@@ -892,13 +892,16 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 		struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
 	const struct inet_diag_handler *handler;
+	int err = 0;

 	handler = inet_diag_lock_handler(r->sdiag_protocol);
 	if (!IS_ERR(handler))
 		handler->dump(skb, cb, r, bc);
+	else
+		err = PTR_ERR(handler);
 	inet_diag_unlock_handler(handler);

-	return skb->len;
+	return err ? : skb->len;
 }

 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
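The new return statement uses the GNU `?:` extension: `err ? : skb->len` is `err ? err : skb->len` with err evaluated only once, so a negative error from inet_diag_lock_handler() is propagated while the success path keeps returning skb->len, the byte count a netlink dump callback reports to indicate how much it wrote. The operator in isolation:

/* GNU "elvis" operator, as used in the fix above. */
static int dump_result(int err, int len)
{
	return err ? : len;	/* == (err ? err : len), err evaluated once */
}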
@@ -1633,9 +1633,9 @@ static size_t ip6gre_get_size(const struct net_device *dev)
 		/* IFLA_GRE_OKEY */
 		nla_total_size(4) +
 		/* IFLA_GRE_LOCAL */
-		nla_total_size(4) +
+		nla_total_size(sizeof(struct in6_addr)) +
 		/* IFLA_GRE_REMOTE */
-		nla_total_size(4) +
+		nla_total_size(sizeof(struct in6_addr)) +
 		/* IFLA_GRE_TTL */
 		nla_total_size(1) +
 		/* IFLA_GRE_TOS */
@@ -1659,8 +1659,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
-	    nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) ||
-	    nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) ||
+	    nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
+	    nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
 	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
 	    /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
 	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
@@ -535,7 +535,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 {
 	struct inet6_dev *idev;
 	struct inet6_ifaddr *ifa;
-	struct in6_addr mcaddr;
+	struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT;

 	idev = in6_dev_get(dev);
 	if (!idev)
@@ -543,7 +543,6 @@ static void ndisc_send_unsol_na(struct net_device *dev)

 	read_lock_bh(&idev->lock);
 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
-		addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
 		ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
 			      /*router=*/ !!idev->cnf.forwarding,
 			      /*solicited=*/ false, /*override=*/ true,
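Per RFC 4861, section 7.2.6, unsolicited neighbour advertisements go to the all-nodes link-local multicast address ff02::1, which IN6ADDR_LINKLOCAL_ALLNODES_INIT expands to; the removed line instead computed each address's solicited-node group (ff02::1:ffxx:xxxx), which hosts join for address resolution, not for receiving gratuitous updates. A userspace sketch printing the correct destination, assuming glibc's in6_addr layout:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	/* ff02::1, the all-nodes link-local multicast group */
	struct in6_addr allnodes = { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0x01 } } };
	char buf[INET6_ADDRSTRLEN];

	puts(inet_ntop(AF_INET6, &allnodes, buf, sizeof(buf)));
	return 0;
}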
@@ -84,18 +84,19 @@
  * grp->index is the index of the group; and grp->slot_shift
  * is the shift for the corresponding (scaled) sigma_i.
  */
-#define QFQ_MAX_INDEX		19
-#define QFQ_MAX_WSHIFT		16
+#define QFQ_MAX_INDEX		24
+#define QFQ_MAX_WSHIFT		12

 #define QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT)
-#define QFQ_MAX_WSUM		(2*QFQ_MAX_WEIGHT)
+#define QFQ_MAX_WSUM		(16*QFQ_MAX_WEIGHT)

 #define FRAC_BITS		30	/* fixed point arithmetic */
 #define ONE_FP			(1UL << FRAC_BITS)
 #define IWSUM			(ONE_FP/QFQ_MAX_WSUM)

-#define QFQ_MTU_SHIFT		11
+#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
 #define QFQ_MIN_SLOT_SHIFT	(FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
+#define QFQ_MIN_LMAX		256	/* min possible lmax for a class */

 /*
  * Possible group states. These values are used as indexes for the bitmaps
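These constants trade weight resolution for a larger packet-size range: QFQ_MTU_SHIFT goes from 11 (2 KB) to 16 so a 64 KB GSO packet fits in one lmax, and the index/weight constants are rebalanced so the slot bound quoted later in this patch, 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM), still lands at 18, below QFQ_MAX_SLOTS - 2 (QFQ_MAX_SLOTS is 32 in sch_qfq.c). A standalone check of the arithmetic:

#include <assert.h>

int main(void)
{
	const unsigned long mtu      = 1UL << 16;	/* QFQ_MTU_SHIFT = 16 */
	const unsigned long min_lmax = 256;		/* QFQ_MIN_LMAX */
	const unsigned long max_w    = 1UL << 12;	/* QFQ_MAX_WSHIFT = 12 */
	const unsigned long max_wsum = 16 * max_w;	/* QFQ_MAX_WSUM */

	/* grouped so every division is exact: (65536/256) * 4096 / 65536 */
	unsigned long bound = 2 + (mtu / min_lmax) * max_w / max_wsum;

	assert(bound == 18);		/* matches the in-tree comment */
	assert(bound <= 32 - 2);	/* fits QFQ_MAX_SLOTS - 2 */
	return 0;
}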
@@ -231,6 +232,32 @@ static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
 	q->wsum += delta_w;
 }

+static void qfq_update_reactivate_class(struct qfq_sched *q,
+					struct qfq_class *cl,
+					u32 inv_w, u32 lmax, int delta_w)
+{
+	bool need_reactivation = false;
+	int i = qfq_calc_index(inv_w, lmax);
+
+	if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+		/*
+		 * shift cl->F back, to not charge the
+		 * class for the not-yet-served head
+		 * packet
+		 */
+		cl->F = cl->S;
+		/* remove class from its slot in the old group */
+		qfq_deactivate_class(q, cl);
+		need_reactivation = true;
+	}
+
+	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+	if (need_reactivation) /* activate in new group */
+		qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+}
+
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			    struct nlattr **tca, unsigned long *arg)
 {
@@ -238,7 +265,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	struct qfq_class *cl = (struct qfq_class *)*arg;
 	struct nlattr *tb[TCA_QFQ_MAX + 1];
 	u32 weight, lmax, inv_w;
-	int i, err;
+	int err;
 	int delta_w;

 	if (tca[TCA_OPTIONS] == NULL) {
@@ -270,16 +297,14 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,

 	if (tb[TCA_QFQ_LMAX]) {
 		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
-		if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
+		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
 			pr_notice("qfq: invalid max length %u\n", lmax);
 			return -EINVAL;
 		}
 	} else
-		lmax = 1UL << QFQ_MTU_SHIFT;
+		lmax = psched_mtu(qdisc_dev(sch));

 	if (cl != NULL) {
-		bool need_reactivation = false;
-
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
 						    qdisc_root_sleeping_lock(sch),
@@ -291,24 +316,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		if (lmax == cl->lmax && inv_w == cl->inv_w)
 			return 0; /* nothing to update */

-		i = qfq_calc_index(inv_w, lmax);
 		sch_tree_lock(sch);
-		if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
-			/*
-			 * shift cl->F back, to not charge the
-			 * class for the not-yet-served head
-			 * packet
-			 */
-			cl->F = cl->S;
-			/* remove class from its slot in the old group */
-			qfq_deactivate_class(q, cl);
-			need_reactivation = true;
-		}
-
-		qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
-
-		if (need_reactivation) /* activate in new group */
-			qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+		qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
 		sch_tree_unlock(sch);

 		return 0;
@@ -663,15 +672,48 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)


 /*
- * XXX we should make sure that slot becomes less than 32.
- * This is guaranteed by the input values.
  * roundedS is always cl->S rounded on grp->slot_shift bits.
+ * If the weight and lmax (max_pkt_size) of the classes do not change,
+ * then QFQ guarantees that the slot index is never higher than
+ * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM).
+ *
+ * With the current values of the above constants, the index is
+ * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18.
+ *
+ * When the weight of a class is increased or the lmax of the class is
+ * decreased, a new class with smaller slot size may happen to be
+ * activated. The activation of this class should be properly delayed
+ * to when the service of the class has finished in the ideal system
+ * tracked by QFQ. If the activation of the class is not delayed to
+ * this reference time instant, then this class may be unjustly served
+ * before other classes waiting for service. This may cause
+ * (unfrequently) the above bound to the slot index to be violated for
+ * some of these unlucky classes.
+ *
+ * Instead of delaying the activation of the new class, which is quite
+ * complex, the following inaccurate but simple solution is used: if
+ * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps
+ * of the class are shifted backward so as to let the slot index
+ * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if
+ * the slot index is above it, then the data structure implementing
+ * the bucket list either gets immediately corrupted or may get
+ * corrupted on a possible next packet arrival that causes the start
+ * time of the group to be shifted backward.
 */
 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
 			    u64 roundedS)
 {
 	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
-	unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
+	unsigned int i; /* slot index in the bucket list */
+
+	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
+		u64 deltaS = roundedS - grp->S -
+			     ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
+		cl->S -= deltaS;
+		cl->F -= deltaS;
+		slot = QFQ_MAX_SLOTS - 2;
+	}
+
+	i = (grp->front + slot) % QFQ_MAX_SLOTS;

 	hlist_add_head(&cl->next, &grp->slots[i]);
 	__set_bit(slot, &grp->full_slots);
@@ -892,6 +934,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

+	if (unlikely(cl->lmax < qdisc_pkt_len(skb))) {
+		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
+			 cl->lmax, qdisc_pkt_len(skb), cl->common.classid);
+		qfq_update_reactivate_class(q, cl, cl->inv_w,
+					    qdisc_pkt_len(skb), 0);
+	}
+
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -116,7 +116,6 @@ void tipc_handler_stop(void)
 		return;

 	handler_enabled = 0;
-	tasklet_disable(&tipc_tasklet);
 	tasklet_kill(&tipc_tasklet);

 	spin_lock_bh(&qitem_lock);