Merge ../from-linus

commit e872d4cace
@@ -102,7 +102,7 @@ Here is the list of words, from left to right:
 - URB Status. This field makes no sense for submissions, but is present
   to help scripts with parsing. In error case, it contains the error code.
   In case of a setup packet, it contains a Setup Tag. If scripts read a number
-  in this field, the proceed to read Data Length. Otherwise, they read
+  in this field, they proceed to read Data Length. Otherwise, they read
   the setup packet before reading the Data Length.
 - Setup packet, if present, consists of 5 words: one of each for bmRequestType,
   bRequest, wValue, wIndex, wLength, as specified by the USB Specification 2.0.
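
As a side note on the rule this hunk corrects: a consumer of the text format
decides how to continue based on whether the status word parses as a number.
A minimal user-space sketch of that branch (illustration only, not part of
this commit; the printed actions are placeholders):

    #include <stdio.h>
    #include <stdlib.h>

    /* Decide whether a usbmon status word is a numeric URB status
     * or a Setup Tag, per the documentation above. */
    static void parse_status_word(const char *word)
    {
            char *end;
            long status = strtol(word, &end, 10);

            if (end != word && *end == '\0')
                    printf("status %ld: proceed to Data Length\n", status);
            else
                    printf("tag '%s': read the setup packet first\n", word);
    }

    int main(void)
    {
            parse_status_word("-110");  /* a numeric error code */
            parse_status_word("s");     /* a setup tag */
            return 0;
    }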
@@ -350,8 +350,24 @@ pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 	region->end = res->end - offset;
 }
 
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			     struct pci_bus_region *region)
+{
+	struct pci_controller *hose = (struct pci_controller *)dev->sysdata;
+	unsigned long offset = 0;
+
+	if (res->flags & IORESOURCE_IO)
+		offset = hose->io_space->start;
+	else if (res->flags & IORESOURCE_MEM)
+		offset = hose->mem_space->start;
+
+	res->start = region->start + offset;
+	res->end = region->end + offset;
+}
+
 #ifdef CONFIG_HOTPLUG
 EXPORT_SYMBOL(pcibios_resource_to_bus);
+EXPORT_SYMBOL(pcibios_bus_to_resource);
 #endif
 
 int
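
The helpers added across the architectures in this merge are inverses of each
other: pcibios_resource_to_bus() subtracts a host-controller offset, while
pcibios_bus_to_resource() adds it back. A stand-alone sketch of the
arithmetic (simplified stand-in types and a hypothetical offset, not kernel
code):

    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures (illustration only). */
    struct pci_bus_region { unsigned long start, end; };
    struct resource { unsigned long start, end; };

    int main(void)
    {
            unsigned long offset = 0x10000;  /* e.g. hose->io_space->start */
            struct pci_bus_region region = { 0x200, 0x2ff };
            struct resource res;

            /* bus_to_resource: add the offset to get CPU-visible addresses */
            res.start = region.start + offset;
            res.end = region.end + offset;

            /* resource_to_bus would subtract the same offset again */
            printf("bus [%#lx-%#lx] <-> cpu [%#lx-%#lx]\n",
                   region.start, region.end, res.start, res.end);
            return 0;
    }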
@@ -447,9 +447,26 @@ pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 	region->end = res->end - offset;
 }
 
+void __devinit
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region)
+{
+	struct pci_sys_data *root = dev->sysdata;
+	unsigned long offset = 0;
+
+	if (res->flags & IORESOURCE_IO)
+		offset = root->io_offset;
+	if (res->flags & IORESOURCE_MEM)
+		offset = root->mem_offset;
+
+	res->start = region->start + offset;
+	res->end = region->end + offset;
+}
+
 #ifdef CONFIG_HOTPLUG
 EXPORT_SYMBOL(pcibios_fixup_bus);
 EXPORT_SYMBOL(pcibios_resource_to_bus);
+EXPORT_SYMBOL(pcibios_bus_to_resource);
 #endif
 
 /*
@@ -160,6 +160,21 @@ void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 }
 EXPORT_SYMBOL(pcibios_resource_to_bus);
 
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region)
+{
+	unsigned long offset = 0;
+	struct pci_controller *hose = dev->sysdata;
+
+	if (hose && res->flags & IORESOURCE_IO)
+		offset = (unsigned long)hose->io_base_virt - isa_io_base;
+	else if (hose && res->flags & IORESOURCE_MEM)
+		offset = hose->pci_mem_offset;
+	res->start = region->start + offset;
+	res->end = region->end + offset;
+}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
+
 /*
  * We need to avoid collisions with `mirrored' VGA ports
  * and other strange ISA hardware, so we always want the
@@ -108,8 +108,28 @@ void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region
 	region->end = res->end - offset;
 }
 
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			     struct pci_bus_region *region)
+{
+	unsigned long offset = 0;
+	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+	if (!hose)
+		return;
+
+	if (res->flags & IORESOURCE_IO)
+		offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
+	if (res->flags & IORESOURCE_MEM)
+		offset = hose->pci_mem_offset;
+
+	res->start = region->start + offset;
+	res->end = region->end + offset;
+}
+
 #ifdef CONFIG_HOTPLUG
 EXPORT_SYMBOL(pcibios_resource_to_bus);
+EXPORT_SYMBOL(pcibios_bus_to_resource);
 #endif
 
 /*
@@ -413,6 +413,12 @@ static int pci_assign_bus_resource(const struct pci_bus *bus,
 	return -EBUSY;
 }
 
+void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
+{
+	/* Not implemented for sparc64... */
+	BUG();
+}
+
 int pci_assign_resource(struct pci_dev *pdev, int resource)
 {
 	struct pcidev_cookie *pcp = pdev->sysdata;
@@ -1209,6 +1209,7 @@ static int rtc_proc_open(struct inode *inode, struct file *file)
 
 void rtc_get_rtc_time(struct rtc_time *rtc_tm)
 {
+	unsigned long uip_watchdog = jiffies;
 	unsigned char ctrl;
 #ifdef CONFIG_MACH_DECSTATION
 	unsigned int real_year;
@@ -1224,8 +1225,10 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
 	 * Once the read clears, read the RTC time (again via ioctl). Easy.
 	 */
 
-	if (rtc_is_updating() != 0)
-		msleep(20);
+	while (rtc_is_updating() != 0 && jiffies - uip_watchdog < 2*HZ/100) {
+		barrier();
+		cpu_relax();
+	}
 
 	/*
 	 * Only the values that we read from the RTC are set. We leave
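
A note on the bound in the replacement loop (not part of the commit): the
budget 2*HZ/100 is measured in jiffies, so it works out to 20 ms of wall time
regardless of the kernel's HZ setting. A quick user-space check of that
arithmetic:

    #include <stdio.h>

    int main(void)
    {
            const long hz[] = { 100, 250, 1000 };

            for (int i = 0; i < 3; i++)
                    printf("HZ=%-4ld -> budget %ld jiffies (= 20 ms)\n",
                           hz[i], 2 * hz[i] / 100);
            return 0;
    }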
@@ -221,6 +221,37 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 	return best;
 }
 
+/**
+ * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
+ * @dev: PCI device to have its BARs restored
+ *
+ * Restore the BAR values for a given device, so as to make it
+ * accessible by its driver.
+ */
+void
+pci_restore_bars(struct pci_dev *dev)
+{
+	int i, numres;
+
+	switch (dev->hdr_type) {
+	case PCI_HEADER_TYPE_NORMAL:
+		numres = 6;
+		break;
+	case PCI_HEADER_TYPE_BRIDGE:
+		numres = 2;
+		break;
+	case PCI_HEADER_TYPE_CARDBUS:
+		numres = 1;
+		break;
+	default:
+		/* Should never get here, but just in case... */
+		return;
+	}
+
+	for (i = 0; i < numres; i ++)
+		pci_update_resource(dev, &dev->resource[i], i);
+}
+
 /**
  * pci_set_power_state - Set the power state of a PCI device
  * @dev: PCI device to be suspended
@@ -239,7 +270,7 @@ int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);
 int
 pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
-	int pm;
+	int pm, need_restore = 0;
 	u16 pmcsr, pmc;
 
 	/* bound the state we're entering */
@@ -278,14 +309,17 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 		return -EIO;
 	}
 
+	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
+
 	/* If we're in D3, force entire word to 0.
 	 * This doesn't affect PME_Status, disables PME_En, and
 	 * sets PowerState to 0.
 	 */
-	if (dev->current_state >= PCI_D3hot)
+	if (dev->current_state >= PCI_D3hot) {
+		if (!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
+			need_restore = 1;
 		pmcsr = 0;
-	else {
-		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
+	} else {
 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 		pmcsr |= state;
 	}
@@ -308,6 +342,22 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 		platform_pci_set_power_state(dev, state);
 
 	dev->current_state = state;
 
+	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
+	 * from D3hot to D0 _may_ perform an internal reset, thereby
+	 * going to "D0 Uninitialized" rather than "D0 Initialized".
+	 * For example, at least some versions of the 3c905B and the
+	 * 3c556B exhibit this behaviour.
+	 *
+	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
+	 * devices in a D3hot state at boot.  Consequently, we need to
+	 * restore at least the BARs so that the device will be
+	 * accessible to its driver.
+	 */
+	if (need_restore)
+		pci_restore_bars(dev);
+
 	return 0;
 }
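
For driver authors, the upshot of the comment above: after this change a
D3hot->D0 transition through pci_set_power_state() rewrites the BARs when
the device does not advertise No_Soft_Reset. A hedged fragment of a 2.6-era
resume callback that relies on this behaviour (hypothetical driver, assumes
<linux/pci.h>, error handling trimmed):

    /* example_resume: hypothetical PCI driver resume hook. */
    static int example_resume(struct pci_dev *pdev)
    {
            /* The core calls pci_restore_bars() internally on the
             * D3hot->D0 edge when NO_SOFT_RESET is clear, so the BARs
             * are valid again by the time this call returns. */
            pci_set_power_state(pdev, PCI_D0);

            return pci_enable_device(pdev);
    }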
@@ -805,6 +855,7 @@ struct pci_dev *isa_bridge;
 EXPORT_SYMBOL(isa_bridge);
 #endif
 
+EXPORT_SYMBOL_GPL(pci_restore_bars);
 EXPORT_SYMBOL(pci_enable_device_bars);
 EXPORT_SYMBOL(pci_enable_device);
 EXPORT_SYMBOL(pci_disable_device);
@@ -26,7 +26,7 @@
 #include "pci.h"
 
 
-static void
+void
 pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
 {
 	struct pci_bus_region region;
@@ -605,9 +605,8 @@ static int yenta_search_res(struct yenta_socket *socket, struct resource *res,
 
 static void yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type, int addr_start, int addr_end)
 {
-	struct pci_bus *bus;
 	struct resource *root, *res;
-	u32 start, end;
+	struct pci_bus_region region;
 	unsigned mask;
 
 	res = socket->dev->resource + PCI_BRIDGE_RESOURCES + nr;
@@ -620,15 +619,13 @@ static void yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned typ
 	if (type & IORESOURCE_IO)
 		mask = ~3;
 
-	bus = socket->dev->subordinate;
-	res->name = bus->name;
+	res->name = socket->dev->subordinate->name;
 	res->flags = type;
 
-	start = config_readl(socket, addr_start) & mask;
-	end = config_readl(socket, addr_end) | ~mask;
-	if (start && end > start && !override_bios) {
-		res->start = start;
-		res->end = end;
+	region.start = config_readl(socket, addr_start) & mask;
+	region.end = config_readl(socket, addr_end) | ~mask;
+	if (region.start && region.end > region.start && !override_bios) {
+		pcibios_bus_to_resource(socket->dev, res, &region);
 		root = pci_find_parent_resource(socket->dev, res);
 		if (root && (request_resource(root, res) == 0))
 			return;
@@ -527,7 +527,7 @@ show_periodic (struct class_device *class_dev, char *buf)
 					p.qh->period,
 					le32_to_cpup (&p.qh->hw_info2)
 						/* uframe masks */
-						& 0xffff,
+						& (QH_CMASK | QH_SMASK),
 					p.qh);
 			size -= temp;
 			next += temp;
@@ -222,7 +222,7 @@ __acquires(ehci->lock)
 	struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
 
 	/* S-mask in a QH means it's an interrupt urb */
-	if ((qh->hw_info2 & __constant_cpu_to_le32 (0x00ff)) != 0) {
+	if ((qh->hw_info2 & __constant_cpu_to_le32 (QH_SMASK)) != 0) {
 
 		/* ... update hc-wide periodic stats (for usbfs) */
 		ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
@@ -428,7 +428,8 @@ halt:
 		/* should be rare for periodic transfers,
 		 * except maybe high bandwidth ...
 		 */
-		if (qh->period) {
+		if ((__constant_cpu_to_le32 (QH_SMASK)
+				& qh->hw_info2) != 0) {
 			intr_deschedule (ehci, qh);
 			(void) qh_schedule (ehci, qh);
 		} else
@@ -301,7 +301,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	dev_dbg (&qh->dev->dev,
 		"link qh%d-%04x/%p start %d [%d/%d us]\n",
-		period, le32_to_cpup (&qh->hw_info2) & 0xffff,
+		period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
 		qh, qh->start, qh->usecs, qh->c_usecs);
 
 	/* high bandwidth, or otherwise every microframe */
@@ -385,7 +385,8 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	dev_dbg (&qh->dev->dev,
 		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
-		qh->period, le32_to_cpup (&qh->hw_info2) & 0xffff,
+		qh->period,
+		le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
 		qh, qh->start, qh->usecs, qh->c_usecs);
 
 	/* qh->qh_next still "live" to HC */
@@ -411,7 +412,7 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	 * active high speed queues may need bigger delays...
 	 */
 	if (list_empty (&qh->qtd_list)
-			|| (__constant_cpu_to_le32 (0x0ff << 8)
+			|| (__constant_cpu_to_le32 (QH_CMASK)
 					& qh->hw_info2) != 0)
 		wait = 2;
 	else
@@ -533,7 +534,7 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	/* reuse the previous schedule slots, if we can */
 	if (frame < qh->period) {
-		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
+		uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK);
 		status = check_intr_schedule (ehci, frame, --uframe,
 				qh, &c_mask);
 	} else {
@@ -569,10 +570,10 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
 		qh->start = frame;
 
 		/* reset S-frame and (maybe) C-frame masks */
-		qh->hw_info2 &= __constant_cpu_to_le32 (~0xffff);
+		qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
 		qh->hw_info2 |= qh->period
 			? cpu_to_le32 (1 << uframe)
-			: __constant_cpu_to_le32 (0xff);
+			: __constant_cpu_to_le32 (QH_SMASK);
 		qh->hw_info2 |= c_mask;
 	} else
 		ehci_dbg (ehci, "reused qh %p schedule\n", qh);
@@ -385,6 +385,11 @@ struct ehci_qh {
 	__le32		hw_info1;	/* see EHCI 3.6.2 */
 #define	QH_HEAD		0x00008000
 	__le32		hw_info2;	/* see EHCI 3.6.2 */
+#define	QH_SMASK	0x000000ff
+#define	QH_CMASK	0x0000ff00
+#define	QH_HUBADDR	0x007f0000
+#define	QH_HUBPORT	0x3f800000
+#define	QH_MULT		0xc0000000
 	__le32		hw_current;	/* qtd list - see EHCI 3.6.4 */
 
 	/* qtd overlay (hardware parts of a struct ehci_qtd) */
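
These names replace the bare 0x00ff/0xff00 masks used throughout the EHCI
driver: the low byte of hw_info2 is the interrupt schedule's start-split mask
(S-mask) and the next byte the complete-split mask (C-mask). A small
stand-alone sketch of the extraction (hypothetical register value, host byte
order for brevity; the driver itself goes through le32 accessors):

    #include <stdio.h>
    #include <stdint.h>

    #define QH_SMASK 0x000000ff
    #define QH_CMASK 0x0000ff00

    int main(void)
    {
            uint32_t hw_info2 = 0x00001c01;  /* hypothetical value */

            printf("S-mask %#x, C-mask %#x\n",
                   (unsigned)(hw_info2 & QH_SMASK),
                   (unsigned)((hw_info2 & QH_CMASK) >> 8));
            return 0;
    }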
@@ -229,9 +229,11 @@ static void preproc_atl_queue(struct isp116x *isp116x)
 	struct isp116x_ep *ep;
 	struct urb *urb;
 	struct ptd *ptd;
-	u16 toggle = 0, dir = PTD_DIR_SETUP, len;
+	u16 len;
 
 	for (ep = isp116x->atl_active; ep; ep = ep->active) {
+		u16 toggle = 0, dir = PTD_DIR_SETUP;
+
 		BUG_ON(list_empty(&ep->hep->urb_list));
 		urb = container_of(ep->hep->urb_list.next,
 				   struct urb, urb_list);
@@ -9,9 +9,8 @@ config USB_MON
 	help
 	  If you say Y here, a component which captures the USB traffic
 	  between peripheral-specific drivers and HC drivers will be built.
-	  The USB_MON is similar in spirit and may be compatible with Dave
-	  Harding's USBMon.
+	  For more information, see <file:Documentation/usb/usbmon.txt>.
 
-	  This is somewhat experimental at this time, but it should be safe,
-	  as long as you aren't using modular USB and try to remove this
-	  module.
+	  This is somewhat experimental at this time, but it should be safe.
 
 	  If unsure, say Y.
@@ -4,4 +4,5 @@
 
 usbmon-objs := mon_main.o mon_stat.o mon_text.o
 
+# This does not use CONFIG_USB_MON because we want this to use a tristate.
 obj-$(CONFIG_USB) += usbmon.o
@@ -1874,14 +1874,9 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
 
 	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
 	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
-#if defined(CONFIG_INOTIFY) || defined(CONFIG_DNOTIFY)
-		dget(dentry);
+		struct inode *inode = dentry->d_inode;
 		d_delete(dentry);
-		fsnotify_unlink(dentry, dir);
-		dput(dentry);
-#else
-		d_delete(dentry);
-#endif
+		fsnotify_unlink(dentry, inode, dir);
 	}
 
 	return error;
@@ -251,6 +251,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *,
 				    struct resource *);
 
+extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+				    struct pci_bus_region *region);
+
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
 
 static inline int pci_proc_domain(struct pci_bus *bus)
@@ -60,6 +60,10 @@ extern void
 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 			struct resource *res);
 
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region);
+
 static inline void pcibios_add_platform_entries(struct pci_dev *dev)
 {
 }
@@ -22,6 +22,14 @@ pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 	region->end = res->end;
 }
 
+static inline void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region)
+{
+	res->start = region->start;
+	res->end = region->end;
+}
+
 #define pcibios_scan_all_fns(a, b)	0
 
 #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
@@ -253,6 +253,10 @@ extern void
 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 			struct resource *res);
 
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region);
+
 static inline void pcibios_add_platform_entries(struct pci_dev *dev)
 {
 }
@@ -105,6 +105,10 @@ extern void
 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 			struct resource *res);
 
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region);
+
 extern void pcibios_add_platform_entries(struct pci_dev *dev);
 
 struct file;
@@ -134,6 +134,10 @@
 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 			struct resource *res);
 
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region);
+
 extern int
 unmap_bus_range(struct pci_bus *bus);
 
@@ -46,10 +46,8 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
 /*
  * fsnotify_unlink - file was unlinked
  */
-static inline void fsnotify_unlink(struct dentry *dentry, struct inode *dir)
+static inline void fsnotify_unlink(struct dentry *dentry, struct inode *inode, struct inode *dir)
 {
-	struct inode *inode = dentry->d_inode;
-
 	inode_dir_notify(dir, DN_DELETE);
 	inotify_inode_queue_event(dir, IN_DELETE, 0, dentry->d_name.name);
 	inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL);
@@ -225,6 +225,7 @@
 #define  PCI_PM_CAP_PME_D3cold	0x8000	/* PME# from D3 (cold) */
 #define PCI_PM_CTRL		4	/* PM control and status register */
 #define  PCI_PM_CTRL_STATE_MASK	0x0003	/* Current power state (D0 to D3) */
+#define  PCI_PM_CTRL_NO_SOFT_RESET	0x0004	/* No reset for D3hot->D0 */
 #define  PCI_PM_CTRL_PME_ENABLE	0x0100	/* PME pin enable */
 #define  PCI_PM_CTRL_DATA_SEL_MASK	0x1e00	/* Data select (??) */
 #define  PCI_PM_CTRL_DATA_SCALE_MASK	0x6000	/* Data scale (??) */
@@ -816,7 +817,9 @@ int pci_set_mwi(struct pci_dev *dev);
 void pci_clear_mwi(struct pci_dev *dev);
 int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
 int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
+void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
 int pci_assign_resource(struct pci_dev *dev, int i);
+void pci_restore_bars(struct pci_dev *dev);
 
 /* ROM control related routines */
 void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size);
@@ -143,7 +143,11 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	   leave 3% of the size of this process for other processes */
 	allowed -= current->mm->total_vm / 32;
 
-	if (atomic_read(&vm_committed_space) < allowed)
+	/*
+	 * cast `allowed' as a signed long because vm_committed_space
+	 * sometimes has a negative value
+	 */
+	if (atomic_read(&vm_committed_space) < (long)allowed)
 		return 0;
 
 	vm_unacct_memory(pages);
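
The cast matters because of C's usual arithmetic conversions: comparing a
signed value against an unsigned one converts the signed side to unsigned,
so a negative vm_committed_space would compare as a huge positive number.
A user-space demonstration of the pitfall (illustration only):

    #include <stdio.h>

    int main(void)
    {
            long committed = -5;            /* can legitimately go negative */
            unsigned long allowed = 100;

            /* mixed comparison: -5 converts to a huge unsigned value -> 0 */
            printf("unsigned compare: %d\n",
                   (unsigned long)committed < allowed);
            /* with the cast, the comparison stays signed -> 1 */
            printf("signed compare:   %d\n", committed < (long)allowed);
            return 0;
    }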
@@ -1167,7 +1167,11 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
 	   leave 3% of the size of this process for other processes */
 	allowed -= current->mm->total_vm / 32;
 
-	if (atomic_read(&vm_committed_space) < allowed)
+	/*
+	 * cast `allowed' as a signed long because vm_committed_space
+	 * sometimes has a negative value
+	 */
+	if (atomic_read(&vm_committed_space) < (long)allowed)
 		return 0;
 
 	vm_unacct_memory(pages);
@@ -403,11 +403,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 	sk->sk_send_head = skb;
 }
 
-static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
+static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (skb->len <= tp->mss_cache ||
+	if (skb->len <= mss_now ||
 	    !(sk->sk_route_caps & NETIF_F_TSO)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
@@ -417,10 +415,10 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
 	} else {
 		unsigned int factor;
 
-		factor = skb->len + (tp->mss_cache - 1);
-		factor /= tp->mss_cache;
+		factor = skb->len + (mss_now - 1);
+		factor /= mss_now;
 		skb_shinfo(skb)->tso_segs = factor;
-		skb_shinfo(skb)->tso_size = tp->mss_cache;
+		skb_shinfo(skb)->tso_size = mss_now;
 	}
 }
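
The factor computation above is a ceiling division: the TSO segment count is
ceil(skb->len / mss). A stand-alone check of the idiom with hypothetical
numbers (not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 4000, mss_now = 1448;
            unsigned int factor = (len + mss_now - 1) / mss_now;

            /* 4000 bytes at an MSS of 1448 -> 3 segments */
            printf("%u bytes / mss %u -> %u segments\n", len, mss_now, factor);
            return 0;
    }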
@@ -429,7 +427,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
  * packet to the list.  This won't be called frequently, I hope.
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
+static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *buff;
@@ -492,8 +490,8 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
 	}
 
 	/* Fix up tso_factor for both original and new SKB.  */
-	tcp_set_skb_tso_segs(sk, skb);
-	tcp_set_skb_tso_segs(sk, buff);
+	tcp_set_skb_tso_segs(sk, skb, mss_now);
+	tcp_set_skb_tso_segs(sk, buff, mss_now);
 
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
 		tp->lost_out += tcp_skb_pcount(skb);
@@ -569,7 +567,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	 * factor and mss.
 	 */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb);
+		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
 
 	return 0;
 }
@@ -734,12 +732,14 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
 /* This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
-static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
-	if (!tso_segs) {
-		tcp_set_skb_tso_segs(sk, skb);
+	if (!tso_segs ||
+	    (tso_segs > 1 &&
+	     skb_shinfo(skb)->tso_size != mss_now)) {
+		tcp_set_skb_tso_segs(sk, skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
 	return tso_segs;
@@ -817,7 +817,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int cwnd_quota;
 
-	tcp_init_tso_segs(sk, skb);
+	tcp_init_tso_segs(sk, skb, cur_mss);
 
 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
 		return 0;
@@ -854,7 +854,7 @@ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
  * know that all the data is in scatter-gather pages, and that the
  * packet has never been sent out before (and thus is not cloned).
  */
-static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len)
+static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
 {
 	struct sk_buff *buff;
 	int nlen = skb->len - len;
@@ -887,8 +887,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len)
 	skb_split(skb, buff, len);
 
 	/* Fix up tso_factor for both original and new SKB.  */
-	tcp_set_skb_tso_segs(sk, skb);
-	tcp_set_skb_tso_segs(sk, buff);
+	tcp_set_skb_tso_segs(sk, skb, mss_now);
+	tcp_set_skb_tso_segs(sk, buff, mss_now);
 
 	/* Link BUFF into the send queue. */
 	skb_header_release(buff);
@@ -972,19 +972,18 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 	if (unlikely(sk->sk_state == TCP_CLOSE))
 		return 0;
 
-	skb = sk->sk_send_head;
-	if (unlikely(!skb))
-		return 0;
-
-	tso_segs = tcp_init_tso_segs(sk, skb);
-	cwnd_quota = tcp_cwnd_test(tp, skb);
-	if (unlikely(!cwnd_quota))
-		goto out;
-
 	sent_pkts = 0;
-	while (likely(tcp_snd_wnd_test(tp, skb, mss_now))) {
+	while ((skb = sk->sk_send_head)) {
+		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
+		BUG_ON(!tso_segs);
+
+		cwnd_quota = tcp_cwnd_test(tp, skb);
+		if (!cwnd_quota)
+			break;
+
+		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+			break;
+
 		if (tso_segs == 1) {
 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
 						     (tcp_skb_is_last(sk, skb) ?
@@ -1006,11 +1005,11 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 				limit = skb->len - trim;
 			}
 			if (skb->len > limit) {
-				if (tso_fragment(sk, skb, limit))
+				if (tso_fragment(sk, skb, limit, mss_now))
 					break;
 			}
 		} else if (unlikely(skb->len > mss_now)) {
-			if (unlikely(tcp_fragment(sk, skb, mss_now)))
+			if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
 				break;
 		}
 
@@ -1026,27 +1025,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 
 		tcp_minshall_update(tp, mss_now, skb);
 		sent_pkts++;
-
-		/* Do not optimize this to use tso_segs. If we chopped up
-		 * the packet above, tso_segs will no longer be valid.
-		 */
-		cwnd_quota -= tcp_skb_pcount(skb);
-
-		BUG_ON(cwnd_quota < 0);
-		if (!cwnd_quota)
-			break;
-
-		skb = sk->sk_send_head;
-		if (!skb)
-			break;
-		tso_segs = tcp_init_tso_segs(sk, skb);
 	}
 
 	if (likely(sent_pkts)) {
 		tcp_cwnd_validate(sk, tp);
 		return 0;
 	}
-out:
 	return !tp->packets_out && sk->sk_send_head;
 }
 
@@ -1076,7 +1060,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 
 	BUG_ON(!skb || skb->len < mss_now);
 
-	tso_segs = tcp_init_tso_segs(sk, skb);
+	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
 
 	if (likely(cwnd_quota)) {
@@ -1093,11 +1077,11 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 			limit = skb->len - trim;
 		}
 		if (skb->len > limit) {
-			if (unlikely(tso_fragment(sk, skb, limit)))
+			if (unlikely(tso_fragment(sk, skb, limit, mss_now)))
 				return;
 		}
 	} else if (unlikely(skb->len > mss_now)) {
-		if (unlikely(tcp_fragment(sk, skb, mss_now)))
+		if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
 			return;
 	}
 
@@ -1388,7 +1372,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		int old_factor = tcp_skb_pcount(skb);
 		int new_factor;
 
-		if (tcp_fragment(sk, skb, cur_mss))
+		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
 			return -ENOMEM; /* We'll try again later. */
 
 		/* New SKB created, account for it. */
@@ -1991,7 +1975,7 @@ int tcp_write_wakeup(struct sock *sk)
 		    skb->len > mss) {
 			seg_size = min(seg_size, mss);
 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
-			if (tcp_fragment(sk, skb, seg_size))
+			if (tcp_fragment(sk, skb, seg_size, mss))
 				return -1;
 			/* SWS override triggered forced fragmentation.
 			 * Disable TSO, the connection is too sick. */
@@ -2000,7 +1984,7 @@ int tcp_write_wakeup(struct sock *sk)
 				sk->sk_route_caps &= ~NETIF_F_TSO;
 			}
 		} else if (!tcp_skb_pcount(skb))
-			tcp_set_skb_tso_segs(sk, skb);
+			tcp_set_skb_tso_segs(sk, skb, mss);
 
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;