Merge tag 'omap-for-v3.9/usb-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/soc

These changes contain the OMAP USB related platform data changes
that were dropped from linux-next because of merge conflicts, as
requested by me and Olof. The reason was that at this point we
really should be able to make the arch/arm related changes
separately from driver changes, to avoid dependencies between
branches.

These patches were initially part of the USB related MFD patches.
Based on our comments, Roger Quadros quickly reworked them into a
branch shared between the ARM SoC tree and the MFD tree, followed
by separate patches for the OMAP platform data and the MFD driver.

Note that this branch will conflict with c1d1cd597f
("ARM: OMAP2+: omap_device: remove obsolete pm_lats and
early_device code"). Please see http://lkml.org/lkml/2013/2/11/16
for the merge resolution.
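At the board level the change boils down to the type rename; a
minimal sketch of a board's USB HS data after this series (names
taken from the hunks below; the board_usb_init() wrapper is
hypothetical):

    static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
            .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
            .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
            .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
    };

    static void __init board_usb_init(void)
    {
            usbhs_init(&usbhs_bdata);
    }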

[arnd - resolved the merge conflict]

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit 6e7f7cfce2
Arnd Bergmann <arnd@arndb.de>, 2013-02-19 20:54:15 +01:00
217 changed files with 1703 additions and 1116 deletions


@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
real-time workloads. It can also improve energy
efficiency for asymmetric multiprocessors.
rcu_nocbs_poll [KNL,BOOT]
rcu_nocb_poll [KNL,BOOT]
Rather than requiring that offloaded CPUs
(specified by rcu_nocbs= above) explicitly
awaken the corresponding "rcuoN" kthreads,


@ -57,7 +57,7 @@ Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment
Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover
protocol entry point.
Protocol 2.12: (Kernel 3.9) Added the xloadflags field and extension fields
Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
to struct boot_params for for loading bzImage and ramdisk
above 4G in 64bit.


@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE
M: Haavard Skinnemoen <hskinnemoen@gmail.com>
M: Hans-Christian Egtvedt <egtvedt@samfundet.no>
W: http://www.atmel.com/products/AVR32/
W: http://avr32linux.org/
W: http://mirror.egtvedt.no/avr32linux.org/
W: http://avrfreaks.net/
S: Maintained
F: arch/avr32/


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Unicycling Gorilla
# *DOCUMENTATION*


@ -37,7 +37,7 @@
*/
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3)
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
* The maximum size of a 26-bit user space task.
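For the default CONFIG_PAGE_OFFSET of 0xC0000000 the new definition
evaluates to the same address as the old one, while staying 16MB
aligned for non-default offsets; hedged arithmetic:

    /* TASK_SIZE          = 0xC0000000 - 0x01000000 = 0xBF000000
     * TASK_SIZE / 3      = 0x3FAAAAAA  (not section aligned)
     * ALIGN(..., SZ_16M) = 0x40000000  (== the old PAGE_OFFSET / 3) */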


@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
select CPU_EXYNOS4210
select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
select PINCTRL
select PINCTRL_EXYNOS4
select PINCTRL_EXYNOS
select USE_OF
help
Machine support for Samsung Exynos4 machine with device tree enabled.


@ -424,7 +424,7 @@ static void enable_board_wakeup_source(void)
OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
}
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
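The const/__initconst to __initdata change here is not cosmetic:
usbhs_init() now writes into the platform data it is handed, so the
board definitions can no longer live in rodata. A hedged illustration
of the write that forces this (see the usbhs_init() hunk further
down):

    /* usbhs_init() now mutates the caller's pdata on early silicon: */
    if (omap_rev() <= OMAP3430_REV_ES2_1)
            pdata->single_ulpi_bypass = true;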


@ -53,7 +53,7 @@ static void enable_board_wakeup_source(void)
OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
}
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,


@ -40,7 +40,7 @@ static struct omap_board_mux board_mux[] __initdata = {
};
#endif
static struct usbhs_omap_board_data usbhs_bdata __initdata = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -274,7 +274,7 @@ static __init void am3517_evm_mcbsp1_init(void)
omap_ctrl_writel(devconf0, OMAP2_CONTROL_DEVCONF0);
}
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)


@ -418,7 +418,7 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
static struct usbhs_omap_board_data usbhs_bdata __initdata = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -166,7 +166,7 @@ static inline void cm_t3517_init_rtc(void) {}
#define HSUSB2_RESET_GPIO (147)
#define USB_HUB_RESET_GPIO (152)
static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = {
static struct usbhs_omap_platform_data cm_t3517_ehci_pdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -435,7 +435,7 @@ static struct platform_device *devkit8000_devices[] __initdata = {
&omap_dm9000_dev,
};
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -526,7 +526,7 @@ static void __init igep_i2c_init(void)
omap3_pmic_init("twl4030", &igep_twldata);
}
static const struct usbhs_omap_board_data igep2_usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data igep2_usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
@ -537,7 +537,7 @@ static const struct usbhs_omap_board_data igep2_usbhs_bdata __initconst = {
.reset_gpio_port[2] = -EINVAL,
};
static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data igep3_usbhs_bdata __initdata = {
.port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -430,7 +430,7 @@ static struct platform_device *omap3_beagle_devices[] __initdata = {
&madc_hwmon,
};
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,


@ -538,7 +538,7 @@ static int __init omap3_evm_i2c_init(void)
return 0;
}
static struct usbhs_omap_board_data usbhs_bdata __initdata = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,


@ -567,7 +567,7 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
&pandora_backlight,
};
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,


@ -361,7 +361,7 @@ static struct platform_device *omap3_stalker_devices[] __initdata = {
&keys_gpio,
};
static struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -309,7 +309,7 @@ static struct platform_device *omap3_touchbook_devices[] __initdata = {
&keys_gpio,
};
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,


@ -139,7 +139,7 @@ static struct platform_device *panda_devices[] __initdata = {
&btwilink_device,
};
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -457,7 +457,7 @@ static int __init overo_spi_init(void)
return 0;
}
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -92,7 +92,7 @@ static struct mtd_partition zoom_nand_partitions[] = {
},
};
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,


@ -37,11 +37,6 @@
#define USBHS_UHH_HWMODNAME "usb_host_hs"
#define USBHS_TLL_HWMODNAME "usb_tll_hs"
static struct usbhs_omap_platform_data usbhs_data;
static struct usbtll_omap_platform_data usbtll_data;
static struct ehci_hcd_omap_platform_data ehci_data;
static struct ohci_hcd_omap_platform_data ohci_data;
/* MUX settings for EHCI pins */
/*
* setup_ehci_io_mux - initialize IO pad mux for USBHOST
@ -477,32 +472,18 @@ void __init setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
}
}
void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
void __init usbhs_init(struct usbhs_omap_platform_data *pdata)
{
struct omap_hwmod *uhh_hwm, *tll_hwm;
struct platform_device *pdev;
int bus_id = -1;
int i;
for (i = 0; i < OMAP3_HS_USB_PORTS; i++) {
usbhs_data.port_mode[i] = pdata->port_mode[i];
usbtll_data.port_mode[i] = pdata->port_mode[i];
ohci_data.port_mode[i] = pdata->port_mode[i];
ehci_data.port_mode[i] = pdata->port_mode[i];
ehci_data.reset_gpio_port[i] = pdata->reset_gpio_port[i];
ehci_data.regulator[i] = pdata->regulator[i];
}
ehci_data.phy_reset = pdata->phy_reset;
ohci_data.es2_compatibility = pdata->es2_compatibility;
usbhs_data.ehci_data = &ehci_data;
usbhs_data.ohci_data = &ohci_data;
if (cpu_is_omap34xx()) {
setup_ehci_io_mux(pdata->port_mode);
setup_ohci_io_mux(pdata->port_mode);
if (omap_rev() <= OMAP3430_REV_ES2_1)
usbhs_data.single_ulpi_bypass = true;
pdata->single_ulpi_bypass = true;
} else if (cpu_is_omap44xx()) {
setup_4430ehci_io_mux(pdata->port_mode);
@ -522,7 +503,7 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
}
pdev = omap_device_build(OMAP_USBTLL_DEVICE, bus_id, tll_hwm,
&usbtll_data, sizeof(usbtll_data));
pdata, sizeof(*pdata));
if (IS_ERR(pdev)) {
pr_err("Could not build hwmod device %s\n",
USBHS_TLL_HWMODNAME);
@ -530,7 +511,7 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
}
pdev = omap_device_build(OMAP_USBHS_DEVICE, bus_id, uhh_hwm,
&usbhs_data, sizeof(usbhs_data));
pdata, sizeof(*pdata));
if (IS_ERR(pdev)) {
pr_err("Could not build hwmod devices %s\n",
USBHS_UHH_HWMODNAME);
@ -540,7 +521,7 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
#else
void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
void __init usbhs_init(struct usbhs_omap_platform_data *pdata)
{
}


@ -53,26 +53,8 @@
#define USBPHY_OTGSESSEND_EN (1 << 20)
#define USBPHY_DATA_POLARITY (1 << 23)
struct usbhs_omap_board_data {
enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS];
/* have to be valid if phy_reset is true and portx is in phy mode */
int reset_gpio_port[OMAP3_HS_USB_PORTS];
/* Set this to true for ES2.x silicon */
unsigned es2_compatibility:1;
unsigned phy_reset:1;
/*
* Regulators for USB PHYs.
* Each PHY can have a separate regulator.
*/
struct regulator *regulator[OMAP3_HS_USB_PORTS];
};
extern void usb_musb_init(struct omap_musb_board_data *board_data);
extern void usbhs_init(const struct usbhs_omap_board_data *pdata);
extern void usbhs_init(struct usbhs_omap_platform_data *pdata);
extern void am35x_musb_reset(void);
extern void am35x_musb_phy_power(u8 on);


@ -115,7 +115,7 @@
/*
* Only define NR_IRQS if less than NR_IRQS_EB
*/
#define NR_IRQS_EB (IRQ_EB_GIC_START + 96)
#define NR_IRQS_EB (IRQ_EB_GIC_START + 128)
#if defined(CONFIG_MACH_REALVIEW_EB) \
&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))


@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (is_coherent || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (gfp & GFP_ATOMIC)
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
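The test above changed because GFP_ATOMIC is not a dedicated bit: in
kernels of this era it expands to __GFP_HIGH, so (gfp & GFP_ATOMIC)
misses other no-sleep masks such as GFP_NOWAIT (which is 0). The
reliable "may this allocation sleep?" check is the __GFP_WAIT flag;
a hedged restatement:

    /* sketch: atomic context == caller cannot sleep == __GFP_WAIT clear */
    if (!(gfp & __GFP_WAIT))
            addr = __alloc_from_pool(size, &page); /* never sleeps */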


@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif /* __ASM_AVR32_DMA_MAPPING_H */


@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
_dma_sync((dma_addr_t)vaddr, size, dir);
}
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif /* _BLACKFIN_DMA_MAPPING_H */


@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
#endif /* _ASM_C6X_DMA_MAPPING_H */


@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif


@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
flush_write_buffers();
}
/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
#endif /* _ASM_DMA_MAPPING_H */


@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
#include <asm-generic/dma-mapping-broken.h>
#endif
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size);
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
#endif /* _M68K_DMA_MAPPING_H */


@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,
mn10300_dcache_flush_inv();
}
/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
#endif


@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);
/* At the moment, we panic on error for IOMMU resource exaustion */
#define dma_mapping_error(dev, x) 0
/* This API cannot be supported on PA-RISC */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
#endif
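These per-architecture hunks all serve one goal: drivers can call
dma_mmap_coherent()/dma_get_sgtable() unconditionally. Architectures
with dma_common_mmap()/dma_common_get_sgtable() wire the macros to
them; the rest return -EINVAL. A hedged sketch of a caller (all foo_*
names are hypothetical):

    /* sketch: a driver mmap fop exporting a coherent DMA buffer */
    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct foo_dev *fd = file->private_data;

            return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
                                     fd->dma_handle, fd->size);
    }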


@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
sldi r29,r5,SID_SHIFT - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */
xor r28,r5,r0
/*
* Calculate hash value for primary slot and store it in r28
* r3 = va, r5 = vsid
* r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
*/
rldicl r0,r3,64-12,48
xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/*
* calculate hash value for primary slot and
* store it in r28 for 1T segment
* r3 = va, r5 = vsid
*/
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
xor r28,r28,r5
sldi r28,r5,25 /* vsid << 25 */
/* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
rldicl r0,r3,64-12,36
xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
*/
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */
xor r28,r5,r0
/*
* Calculate hash value for primary slot and store it in r28
* r3 = va, r5 = vsid
* r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
*/
rldicl r0,r3,64-12,48
xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/*
* Calculate hash value for primary slot and
* store it in r28 for 1T segment
* r3 = va, r5 = vsid
*/
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
xor r28,r28,r5
sldi r28,r5,25 /* vsid << 25 */
/* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
rldicl r0,r3,64-12,36
xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */
xor r28,r5,r0
/* Calculate hash value for primary slot and store it in r28
* r3 = va, r5 = vsid
* r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
*/
rldicl r0,r3,64-16,52
xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
or r29,r28,r29
/*
* calculate hash value for primary slot and
* store it in r28 for 1T segment
* r3 = va, r5 = vsid
*/
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
xor r28,r28,r5
sldi r28,r5,25 /* vsid << 25 */
/* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
rldicl r0,r3,64-16,40
xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
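Restated in C from the new comments, this is the hash the assembly
computes (a sketch for readability, not code from the patch):

    /* 256MB segment, 4K page: */
    hash = vsid ^ ((va >> 12) & ((1ul << (28 - 12)) - 1));
    /* 1TB segment, 4K page: */
    hash = vsid ^ (vsid << 25) ^ ((va >> 12) & ((1ul << (40 - 12)) - 1));
    /* the 64K-page hunks are identical except they shift by 16 */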


@ -207,7 +207,7 @@ sysexit_from_sys_call:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
sti
ENABLE_INTERRUPTS(CLBR_NONE)
movl %eax,%esi /* second arg, syscall return value */
cmpl $-MAX_ERRNO,%eax /* is it an error ? */
jbe 1f
@ -217,7 +217,7 @@ sysexit_from_sys_call:
call __audit_syscall_exit
movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
cli
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jz \exit


@ -298,8 +298,7 @@ struct _cache_attr {
unsigned int);
};
#ifdef CONFIG_AMD_NB
#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
* L3 cache descriptors
*/
@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
static struct _cache_attr subcaches =
__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
#else /* CONFIG_AMD_NB */
#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB */
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,


@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
break;
case 28: /* Atom */
case 54: /* Cedariew */
case 38: /* Lincroft */
case 39: /* Penwell */
case 53: /* Cloverview */
case 54: /* Cedarview */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, ");
break;
case 58: /* IvyBridge */
case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,


@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
};
static __initconst u64 p6_hw_cache_event_ids
static u64 p6_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =


@ -55,7 +55,7 @@ static FILE *input_file; /* Input file name */
static void usage(const char *err)
{
if (err)
fprintf(stderr, "Error: %s\n\n", err);
fprintf(stderr, "%s: Error: %s\n\n", prog, err);
fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
fprintf(stderr, "\t-y 64bit mode\n");
fprintf(stderr, "\t-n 32bit mode\n");
@ -269,7 +269,13 @@ int main(int argc, char **argv)
insns++;
}
fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
prog,
(errors) ? "Failure" : "Success",
insns,
(input_file) ? "given" : "random",
errors,
seed);
return errors ? 1 : 0;
}


@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
consistent_sync(vaddr, size, direction);
}
/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size)
{
return -EINVAL;
}
static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
return -EINVAL;
}
#endif /* _XTENSA_DMA_MAPPING_H */


@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr);
static struct device_type disk_type;
static void disk_check_events(struct disk_events *ev,
unsigned int *clearing_ptr);
static void disk_alloc_events(struct gendisk *disk);
static void disk_add_events(struct gendisk *disk);
static void disk_del_events(struct gendisk *disk);
@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
const struct block_device_operations *bdops = disk->fops;
struct disk_events *ev = disk->ev;
unsigned int pending;
unsigned int clearing = mask;
if (!ev) {
/* for drivers still using the old ->media_changed method */
@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
return 0;
}
/* tell the workfn about the events being cleared */
disk_block_events(disk);
/*
* store the union of mask and ev->clearing on the stack so that the
* race with disk_flush_events does not cause ambiguity (ev->clearing
* can still be modified even if events are blocked).
*/
spin_lock_irq(&ev->lock);
ev->clearing |= mask;
clearing |= ev->clearing;
ev->clearing = 0;
spin_unlock_irq(&ev->lock);
/* uncondtionally schedule event check and wait for it to finish */
disk_block_events(disk);
queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
flush_delayed_work(&ev->dwork);
__disk_unblock_events(disk, false);
disk_check_events(ev, &clearing);
/*
* if ev->clearing is not 0, the disk_flush_events got called in the
* middle of this function, so we want to run the workfn without delay.
*/
__disk_unblock_events(disk, ev->clearing ? true : false);
/* then, fetch and clear pending events */
spin_lock_irq(&ev->lock);
WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */
pending = ev->pending & mask;
ev->pending &= ~mask;
spin_unlock_irq(&ev->lock);
WARN_ON_ONCE(clearing & mask);
return pending;
}
/*
* Separate this part out so that a different pointer for clearing_ptr can be
* passed in for disk_clear_events.
*/
static void disk_events_workfn(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
disk_check_events(ev, &ev->clearing);
}
static void disk_check_events(struct disk_events *ev,
unsigned int *clearing_ptr)
{
struct gendisk *disk = ev->disk;
char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
unsigned int clearing = ev->clearing;
unsigned int clearing = *clearing_ptr;
unsigned int events;
unsigned long intv;
int nr_events = 0, i;
@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work)
events &= ~ev->pending;
ev->pending |= events;
ev->clearing &= ~clearing;
*clearing_ptr &= ~clearing;
intv = disk_events_poll_jiffies(disk);
if (!ev->block && intv)
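The rework follows a common locking pattern: absorb the shared
ev->clearing into a stack variable under the lock, zero the shared
copy, and let the checker operate on the stack copy through a
pointer, so a concurrent disk_flush_events() cannot create ambiguity.
A hedged distillation:

    unsigned int clearing = mask;

    spin_lock_irq(&ev->lock);
    clearing |= ev->clearing;         /* absorb anything already queued */
    ev->clearing = 0;
    spin_unlock_irq(&ev->lock);

    disk_check_events(ev, &clearing); /* works on the stack copy */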


@ -636,82 +636,82 @@ struct rx_buf_desc {
#define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
#define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
typedef volatile u_int freg_t;
typedef volatile u_int ffreg_t;
typedef u_int rreg_t;
typedef struct _ffredn_t {
freg_t idlehead_high; /* Idle cell header (high) */
freg_t idlehead_low; /* Idle cell header (low) */
freg_t maxrate; /* Maximum rate */
freg_t stparms; /* Traffic Management Parameters */
freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
freg_t rm_type; /* */
u_int filler5[0x17 - 0x06];
freg_t cmd_reg; /* Command register */
u_int filler18[0x20 - 0x18];
freg_t cbr_base; /* CBR Pointer Base */
freg_t vbr_base; /* VBR Pointer Base */
freg_t abr_base; /* ABR Pointer Base */
freg_t ubr_base; /* UBR Pointer Base */
u_int filler24;
freg_t vbrwq_base; /* VBR Wait Queue Base */
freg_t abrwq_base; /* ABR Wait Queue Base */
freg_t ubrwq_base; /* UBR Wait Queue Base */
freg_t vct_base; /* Main VC Table Base */
freg_t vcte_base; /* Extended Main VC Table Base */
u_int filler2a[0x2C - 0x2A];
freg_t cbr_tab_beg; /* CBR Table Begin */
freg_t cbr_tab_end; /* CBR Table End */
freg_t cbr_pointer; /* CBR Pointer */
u_int filler2f[0x30 - 0x2F];
freg_t prq_st_adr; /* Packet Ready Queue Start Address */
freg_t prq_ed_adr; /* Packet Ready Queue End Address */
freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
u_int filler38[0x40 - 0x38];
freg_t queue_base; /* Base address for PRQ and TCQ */
freg_t desc_base; /* Base address of descriptor table */
u_int filler42[0x45 - 0x42];
freg_t mode_reg_0; /* Mode register 0 */
freg_t mode_reg_1; /* Mode register 1 */
freg_t intr_status_reg;/* Interrupt Status register */
freg_t mask_reg; /* Mask Register */
freg_t cell_ctr_high1; /* Total cell transfer count (high) */
freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
freg_t state_reg; /* Status register */
u_int filler4c[0x58 - 0x4c];
freg_t curr_desc_num; /* Contains the current descriptor num */
freg_t next_desc; /* Next descriptor */
freg_t next_vc; /* Next VC */
u_int filler5b[0x5d - 0x5b];
freg_t present_slot_cnt;/* Present slot count */
u_int filler5e[0x6a - 0x5e];
freg_t new_desc_num; /* New descriptor number */
freg_t new_vc; /* New VC */
freg_t sched_tbl_ptr; /* Schedule table pointer */
freg_t vbrwq_wptr; /* VBR wait queue write pointer */
freg_t vbrwq_rptr; /* VBR wait queue read pointer */
freg_t abrwq_wptr; /* ABR wait queue write pointer */
freg_t abrwq_rptr; /* ABR wait queue read pointer */
freg_t ubrwq_wptr; /* UBR wait queue write pointer */
freg_t ubrwq_rptr; /* UBR wait queue read pointer */
freg_t cbr_vc; /* CBR VC */
freg_t vbr_sb_vc; /* VBR SB VC */
freg_t abr_sb_vc; /* ABR SB VC */
freg_t ubr_sb_vc; /* UBR SB VC */
freg_t vbr_next_link; /* VBR next link */
freg_t abr_next_link; /* ABR next link */
freg_t ubr_next_link; /* UBR next link */
u_int filler7a[0x7c-0x7a];
freg_t out_rate_head; /* Out of rate head */
u_int filler7d[0xca-0x7d]; /* pad out to full address space */
freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
u_int fillercc[0x100-0xcc]; /* pad out to full address space */
ffreg_t idlehead_high; /* Idle cell header (high) */
ffreg_t idlehead_low; /* Idle cell header (low) */
ffreg_t maxrate; /* Maximum rate */
ffreg_t stparms; /* Traffic Management Parameters */
ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
ffreg_t rm_type; /* */
u_int filler5[0x17 - 0x06];
ffreg_t cmd_reg; /* Command register */
u_int filler18[0x20 - 0x18];
ffreg_t cbr_base; /* CBR Pointer Base */
ffreg_t vbr_base; /* VBR Pointer Base */
ffreg_t abr_base; /* ABR Pointer Base */
ffreg_t ubr_base; /* UBR Pointer Base */
u_int filler24;
ffreg_t vbrwq_base; /* VBR Wait Queue Base */
ffreg_t abrwq_base; /* ABR Wait Queue Base */
ffreg_t ubrwq_base; /* UBR Wait Queue Base */
ffreg_t vct_base; /* Main VC Table Base */
ffreg_t vcte_base; /* Extended Main VC Table Base */
u_int filler2a[0x2C - 0x2A];
ffreg_t cbr_tab_beg; /* CBR Table Begin */
ffreg_t cbr_tab_end; /* CBR Table End */
ffreg_t cbr_pointer; /* CBR Pointer */
u_int filler2f[0x30 - 0x2F];
ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
u_int filler38[0x40 - 0x38];
ffreg_t queue_base; /* Base address for PRQ and TCQ */
ffreg_t desc_base; /* Base address of descriptor table */
u_int filler42[0x45 - 0x42];
ffreg_t mode_reg_0; /* Mode register 0 */
ffreg_t mode_reg_1; /* Mode register 1 */
ffreg_t intr_status_reg;/* Interrupt Status register */
ffreg_t mask_reg; /* Mask Register */
ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
ffreg_t state_reg; /* Status register */
u_int filler4c[0x58 - 0x4c];
ffreg_t curr_desc_num; /* Contains the current descriptor num */
ffreg_t next_desc; /* Next descriptor */
ffreg_t next_vc; /* Next VC */
u_int filler5b[0x5d - 0x5b];
ffreg_t present_slot_cnt;/* Present slot count */
u_int filler5e[0x6a - 0x5e];
ffreg_t new_desc_num; /* New descriptor number */
ffreg_t new_vc; /* New VC */
ffreg_t sched_tbl_ptr; /* Schedule table pointer */
ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
ffreg_t cbr_vc; /* CBR VC */
ffreg_t vbr_sb_vc; /* VBR SB VC */
ffreg_t abr_sb_vc; /* ABR SB VC */
ffreg_t ubr_sb_vc; /* UBR SB VC */
ffreg_t vbr_next_link; /* VBR next link */
ffreg_t abr_next_link; /* ABR next link */
ffreg_t ubr_next_link; /* UBR next link */
u_int filler7a[0x7c-0x7a];
ffreg_t out_rate_head; /* Out of rate head */
u_int filler7d[0xca-0x7d]; /* pad out to full address space */
ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
u_int fillercc[0x100-0xcc]; /* pad out to full address space */
} ffredn_t;
typedef struct _rfredn_t {


@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
#ifdef CONFIG_BCMA_DRIVER_GPIO
/* driver_gpio.c */
int bcma_gpio_init(struct bcma_drv_cc *cc);
int bcma_gpio_unregister(struct bcma_drv_cc *cc);
#else
static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
{
return -ENOTSUPP;
}
static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
{
return 0;
}
#endif /* CONFIG_BCMA_DRIVER_GPIO */
#endif


@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
cc->core->id.rev != 0x38) {
cc->core->id.rev != 38) {
bcma_err(bus, "NAND flash on unsupported board!\n");
return -ENOTSUPP;
}


@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
return gpiochip_add(chip);
}
int bcma_gpio_unregister(struct bcma_drv_cc *cc)
{
return gpiochip_remove(&cc->gpio);
}


@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)
void bcma_bus_unregister(struct bcma_bus *bus)
{
struct bcma_device *cores[3];
int err;
err = bcma_gpio_unregister(&bus->drv_cc);
if (err == -EBUSY)
bcma_err(bus, "Some GPIOs are still in use.\n");
else if (err)
bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);


@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {
}
/* must hold resource->req_lock */
static void start_new_tl_epoch(struct drbd_tconn *tconn)
void start_new_tl_epoch(struct drbd_tconn *tconn)
{
/* no point closing an epoch, if it is empty, anyways. */
if (tconn->current_tle_writes == 0)


@ -267,6 +267,7 @@ struct bio_and_error {
int error;
};
extern void start_new_tl_epoch(struct drbd_tconn *tconn);
extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);


@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
enum drbd_state_rv rv = SS_SUCCESS;
enum sanitize_state_warnings ssw;
struct after_state_chg_work *ascw;
bool did_remote, should_do_remote;
os = drbd_read_state(mdev);
@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
(os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
atomic_inc(&mdev->local_cnt);
did_remote = drbd_should_do_remote(mdev->state);
mdev->state.i = ns.i;
should_do_remote = drbd_should_do_remote(mdev->state);
mdev->tconn->susp = ns.susp;
mdev->tconn->susp_nod = ns.susp_nod;
mdev->tconn->susp_fen = ns.susp_fen;
/* put replicated vs not-replicated requests in seperate epochs */
if (did_remote != should_do_remote)
start_new_tl_epoch(mdev->tconn);
if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
drbd_print_uuids(mdev, "attached to UUIDs");


@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
}
}
if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
if (cmdto_cnt) {
print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
mtip_restart_port(port);
if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
mtip_restart_port(port);
wake_up_interruptible(&port->svc_wait);
}
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
}
if (port->ic_pause_timer) {
@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
* Delete our gendisk structure. This also removes the device
* from /dev
*/
del_gendisk(dd->disk);
if (dd->disk) {
if (dd->disk->queue)
del_gendisk(dd->disk);
else
put_disk(dd->disk);
}
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
"Shutting down %s ...\n", dd->disk->disk_name);
/* Delete our gendisk structure, and cleanup the blk queue. */
del_gendisk(dd->disk);
if (dd->disk) {
if (dd->disk->queue)
del_gendisk(dd->disk);
else
put_disk(dd->disk);
}
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);


@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st);
#define foreach_grant(pos, rbtree, node) \
for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \
#define foreach_grant_safe(pos, n, rbtree, node) \
for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
(n) = rb_next(&(pos)->node); \
&(pos)->node != NULL; \
(pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node))
(pos) = container_of(n, typeof(*(pos)), node), \
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
static void add_persistent_gnt(struct rb_root *root,
@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
struct rb_node *n;
int ret = 0;
int segs_to_unmap = 0;
foreach_grant(persistent_gnt, root, node) {
foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle ==
BLKBACK_INVALID_HANDLE);
gnttab_set_unmap_op(&unmap[segs_to_unmap],
@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
persistent_gnt->handle);
pages[segs_to_unmap] = persistent_gnt->page;
rb_erase(&persistent_gnt->node, root);
kfree(persistent_gnt);
num--;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) {
@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
BUG_ON(ret);
segs_to_unmap = 0;
}
rb_erase(&persistent_gnt->node, root);
kfree(persistent_gnt);
num--;
}
BUG_ON(num != 0);
}
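foreach_grant_safe() is the rbtree version of the usual "cache the
successor before erasing" idiom; a self-contained sketch of the same
idea using the plain rbtree API:

    /* sketch: erase-while-iterating over an rbtree */
    struct rb_node *node, *next;

    for (node = rb_first(root); node; node = next) {
            next = rb_next(node);   /* fetch the successor first */
            rb_erase(node, root);   /* now safe to unlink this one */
            kfree(rb_entry(node, struct persistent_gnt, node));
    }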


@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
{
struct llist_node *all_gnts;
struct grant *persistent_gnt;
struct llist_node *n;
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock);
@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
/* Remove all persistent grants */
if (info->persistent_gnts_c) {
all_gnts = llist_del_all(&info->persistent_gnts);
llist_for_each_entry(persistent_gnt, all_gnts, node) {
llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
__free_page(pfn_to_page(persistent_gnt->pfn));
kfree(persistent_gnt);
@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
struct blkif_response *bret)
{
int i;
int i = 0;
struct bio_vec *bvec;
struct req_iterator iter;
unsigned long flags;
@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
*/
rq_for_each_segment(bvec, s->request, iter) {
BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
i = offset >> PAGE_SHIFT;
if (bvec->bv_offset < offset)
i++;
BUG_ON(i >= s->req.u.rw.nr_segments);
shared_data = kmap_atomic(
pfn_to_page(s->grants_used[i]->pfn));
@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
bvec->bv_len);
bvec_kunmap_irq(bvec_data, &flags);
kunmap_atomic(shared_data);
offset += bvec->bv_len;
offset = bvec->bv_offset + bvec->bv_len;
}
}
/* Add the persistent grant into the list of free grants */


@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
/* Disable interrupts for vqs */
vdev->config->reset(vdev);
/* Finish up work that's lined up */
cancel_work_sync(&portdev->control_work);
if (use_multiport(portdev))
cancel_work_sync(&portdev->control_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
unplug_port(port);


@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
}
/* wait for the next frame */
@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
blackout &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
/* wait for the MC to settle */
udelay(100);
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
if ((rdev->config.evergreen.max_backends == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
if ((disabled_rb_mask & 3) == 1) {
/* RB0 disabled, RB1 enabled */
tmp = 0x11111111;
} else {
/* RB1 disabled, RB0 enabled */
tmp = 0x00000000;
}
} else {
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
}
WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);


@ -1462,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 disabled_rb_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
u32 pipe_rb_ratio, pipe_rb_remain;
u32 pipe_rb_ratio, pipe_rb_remain, tmp;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
/* mask out the RBs that don't exist on that asic */
disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
/* make sure at least one RB is available */
if ((tmp & 0xff) != 0xff)
disabled_rb_mask = tmp;
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);


@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {


@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
}
/* RV100 board with external TDMS bit mis-set.
* Actually uses internal TMDS, clear the bit.
*/
if (dev->pdev->device == 0x5159 &&
dev->pdev->subsystem_vendor == 0x1014 &&
dev->pdev->subsystem_device == 0x029A) {
tmp &= ~(1 << 4);
}
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,


@ -1115,8 +1115,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
}
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL)
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {


@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
{
int r;
/* make sure we aren't trying to allocate more space than there is on the ring */
if (ndw > (ring->ring_size / 4))
return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask;


@ -1,5 +1,6 @@
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
0x00008040 WAIT_UNTIL
0x000084FC CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE


@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
WREG32(R600_CITF_CNTL, blackout);
}
}
/* wait for the MC to settle */
udelay(100);
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)


@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_lock(&bdev->fence_lock);
if (bo->sync_obj)
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
else
fbo->sync_obj = NULL;
spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
/* ttm_buffer_object_transfer accesses bo->sync_obj */
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;


@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
struct qib_qp __rcu **qpp;
qpp = &dev->qp_table[n];
q = rcu_dereference_protected(*qpp,
lockdep_is_held(&dev->qpt_lock));
for (; q; qpp = &q->next) {
for (; (q = rcu_dereference_protected(*qpp,
lockdep_is_held(&dev->qpt_lock))) != NULL;
qpp = &q->next)
if (q == qp) {
atomic_dec(&qp->refcount);
*qpp = qp->next;
rcu_assign_pointer(qp->next, NULL);
q = rcu_dereference_protected(*qpp,
lockdep_is_held(&dev->qpt_lock));
break;
}
q = rcu_dereference_protected(*qpp,
lockdep_is_held(&dev->qpt_lock));
}
}
spin_unlock_irqrestore(&dev->qpt_lock, flags);


@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
tx_req->mapping = addr;
skb_orphan(skb);
skb_dst_drop(skb);
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
addr, skb->len);
if (unlikely(rc)) {
@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
dev->trans_start = jiffies;
++tx->tx_head;
skb_orphan(skb);
skb_dst_drop(skb);
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);


@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
netif_stop_queue(dev);
}
skb_orphan(skb);
skb_dst_drop(skb);
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, tx_req, phead, hlen);
if (unlikely(rc)) {
@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
address->last_send = priv->tx_head;
++priv->tx_head;
skb_orphan(skb);
skb_dst_drop(skb);
}
if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
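Both ipoib hunks make the same ordering fix: once post_send()
succeeds, the completion handler may free the skb at any moment, so
skb_orphan()/skb_dst_drop() must run before the post rather than
after. A hedged sketch of the corrected order:

    skb_orphan(skb);     /* detach from the socket's accounting */
    skb_dst_drop(skb);   /* drop the dst before the HW owns the skb */
    rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                   address->ah, qpn, tx_req, phead, hlen);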


@ -353,6 +353,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
void __iomem *base = gic_data_dist_base(gic);
u32 mask, i;
for (i = mask = 0; i < 32; i += 4) {
mask = readl_relaxed(base + GIC_DIST_TARGET + i);
mask |= mask >> 16;
mask |= mask >> 8;
if (mask)
break;
}
if (!mask)
pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
return mask;
}
static void __init gic_dist_init(struct gic_chip_data *gic)
{
unsigned int i;
@ -371,7 +390,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
/*
* Set all global interrupts to this CPU only.
*/
cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
cpumask = gic_get_cpumask(gic);
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
for (i = 32; i < gic_irqs; i += 4)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
@ -402,7 +423,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
* Get what the GIC says our CPU mask is.
*/
BUG_ON(cpu >= NR_GIC_CPU_IF);
cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
cpu_mask = gic_get_cpumask(gic);
gic_cpu_map[cpu] = cpu_mask;
/*
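gic_get_cpumask() returns an 8-bit per-interface mask; callers then
replicate it into every byte lane because each 32-bit GIC_DIST_TARGET
word carries the target bytes for four interrupts. A worked
illustration:

    u32 cpumask = 0x01;        /* e.g. this CPU is GIC interface 0 */
    cpumask |= cpumask << 8;   /* 0x00000101 */
    cpumask |= cpumask << 16;  /* 0x01010101: one byte per interrupt */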


@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
radio->vdev.vfl_dir = VFL_DIR_TX;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;


@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
.name = "radio-si4713",
.release = video_device_release,
.ioctl_ops = &radio_si4713_ioctl_ops,
.vfl_dir = VFL_DIR_TX,
};
/* Platform driver interface */


@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
.ioctl_ops = &wl1273_ioctl_ops,
.name = WL1273_FM_DRIVER_NAME,
.release = wl1273_vdev_release,
.vfl_dir = VFL_DIR_TX,
};
static int wl1273_fm_radio_remove(struct platform_device *pdev)


@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
.ioctl_ops = &fm_drv_ioctl_ops,
.name = FM_DRV_NAME,
.release = video_device_release,
/*
* To ensure both the tuner and modulator ioctls are accessible we
* set the vfl_dir to M2M to indicate this.
*
* It is not really a mem2mem device of course, but it can both receive
* and transmit using the same radio device. It's the only radio driver
* that does this and it should really be split in two radio devices,
* but that would affect applications using this driver.
*/
.vfl_dir = VFL_DIR_M2M,
};
int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)


@ -272,6 +272,7 @@ config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
select BCH_CONST_PARAMS
select BITREVERSE
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.


@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)
resource_size_t res_size;
struct mtd_part_parser_data ppdata;
bool map_indirect;
const char *mtd_name;
const char *mtd_name = NULL;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)


@ -17,8 +17,8 @@
#include "bcm47xxnflash.h"
/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
* shown 164 retries as maxiumum. */
#define NFLASH_READY_RETRIES 1000
* shown ~1000 retries as maxiumum. */
#define NFLASH_READY_RETRIES 10000
#define NFLASH_SECTOR_SIZE 512


@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
{},
}
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
static struct davinci_nand_pdata


@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int i;
int val;
/* ONFI need to be probed in 8 bits mode */
WARN_ON(chip->options & NAND_BUSWIDTH_16);
/* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
if (chip->options & NAND_BUSWIDTH_16) {
pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
return 0;
}
/* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||


@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
pr_info("%s: Setting primary slave to None.\n",
bond->dev->name);
bond->primary_slave = NULL;
memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
goto out;
}


@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
/* According to C_CAN documentation, the reserved bit
* in IFx_MASK2 register is fixed 1
*/
priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
IFX_WRITE_HIGH_16BIT(mask));
IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));


@ -36,13 +36,13 @@
#define DRV_VER "4.4.161.0u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
#define OC_NAME "Emulex OneConnect"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df


@ -25,7 +25,7 @@
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
static unsigned int num_vfs;


@ -232,6 +232,7 @@
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@ -389,6 +390,12 @@
#define E1000_PBS_16K E1000_PBA_16K
/* Uncorrectable/correctable ECC Error counts and enable bits */
#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
#define IFS_MAX 80
#define IFS_MIN 40
#define IFS_RATIO 4
@ -408,6 +415,7 @@
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
@ -443,6 +451,7 @@
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */

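PBECCSTS packs both ECC counters into its low 16 bits: the correctable count in bits 7:0, the uncorrectable count in bits 15:8, with bit 16 enabling the feature. A standalone decode of an example register value, reusing the mask values defined above:

	#include <stdint.h>
	#include <stdio.h>

	#define PBECCSTS_CORR_ERR_CNT_MASK	0x000000FF
	#define PBECCSTS_UNCORR_ERR_CNT_MASK	0x0000FF00
	#define PBECCSTS_UNCORR_ERR_CNT_SHIFT	8

	int main(void)
	{
		/* example: ECC enabled, 3 uncorrectable, 2 correctable */
		uint32_t pbeccsts = 0x00010302;

		printf("corr=%u uncorr=%u\n",
		       (unsigned)(pbeccsts & PBECCSTS_CORR_ERR_CNT_MASK),
		       (unsigned)((pbeccsts & PBECCSTS_UNCORR_ERR_CNT_MASK) >>
				  PBECCSTS_UNCORR_ERR_CNT_SHIFT));
		return 0;
	}

This prints "corr=2 uncorr=3".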

@ -309,6 +309,8 @@ struct e1000_adapter {
struct napi_struct napi;
unsigned int uncorr_errors; /* uncorrectable ECC errors */
unsigned int corr_errors; /* correctable ECC errors */
unsigned int restart_queue;
u32 txd_cmd;


@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("dropped_smbus", stats.mgpdc),
E1000_STAT("rx_dma_failed", rx_dma_failed),
E1000_STAT("tx_dma_failed", tx_dma_failed),
E1000_STAT("uncorr_ecc_errors", uncorr_errors),
E1000_STAT("corr_ecc_errors", corr_errors),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)


@ -77,6 +77,7 @@ enum e1e_registers {
#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
E1000_PBS = 0x01008, /* Packet Buffer Size */
E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
E1000_FLOP = 0x0103C, /* FLASH Opcode Register */


@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_ich8lan)
reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
ew32(RFCTL, reg);
/* Enable ECC on Lynxpoint */
if (hw->mac.type == e1000_pch_lpt) {
reg = er32(PBECCSTS);
reg |= E1000_PBECCSTS_ECC_ENABLE;
ew32(PBECCSTS, reg);
reg = er32(CTRL);
reg |= E1000_CTRL_MEHE;
ew32(CTRL, reg);
}
}
/**


@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return IRQ_HANDLED;
}
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return IRQ_HANDLED;
}
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
} else if (hw->mac.type == e1000_pch_lpt) {
ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
}
@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgptc += er32(MGTPTC);
adapter->stats.mgprc += er32(MGTPRC);
adapter->stats.mgpdc += er32(MGTPDC);
/* Correctable ECC Errors */
if (hw->mac.type == e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
}
}
/**

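Both interrupt handlers and the stats path decode PBECCSTS identically; the handlers additionally defer the recovery, since a full device reset cannot run in hard-irq context. A skeleton of that defer-to-process-context pattern, assuming the driver's own types (the driver binds the handler with INIT_WORK(&adapter->reset_task, ...) at probe time):

	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	/* Skeleton only: latch the counters, then punt the reset. */
	static irqreturn_t my_ecc_isr(int irq, void *data)
	{
		struct e1000_adapter *adapter = data;

		/* ... read PBECCSTS and update the counters here ... */
		schedule_work(&adapter->reset_task); /* may sleep later */
		return IRQ_HANDLED;	/* reset is imminent */
	}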

@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
if ((dev_cap->flags &
if ((dev->caps.flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;


@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
dev_kfree_skb_irq(rp->tx_skbuff[entry]);
dev_kfree_skb(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
if (intr_status & IntrPCIErr)
netif_warn(rp, hw, dev, "PCI error\n");
napi_disable(&rp->napi);
rhine_irq_disable(rp);
/* Slow and safe. Consider __napi_schedule as a replacement ? */
napi_enable(&rp->napi);
napi_schedule(&rp->napi);
iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
out_unlock:
mutex_unlock(&rp->task_lock);


@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
u16 queue_index)
struct tun_file *tfile)
{
struct hlist_head *head;
struct tun_flow_entry *e;
unsigned long delay = tun->ageing_time;
u16 queue_index = tfile->queue_index;
if (!rxhash)
return;
@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
rcu_read_lock();
if (tun->numqueues == 1)
/* We may get a very small possibility of OOO during switching, not
* worth to optimize.*/
if (tun->numqueues == 1 || tfile->detached)
goto unlock;
e = tun_flow_find(head, rxhash);
@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
if (tun) {
if (tun && !tfile->detached) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
dev = tun->dev;
rcu_assign_pointer(tun->tfiles[index],
tun->tfiles[tun->numqueues - 1]);
rcu_assign_pointer(tfile->tun, NULL);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
--tun->numqueues;
if (clean)
if (clean) {
rcu_assign_pointer(tfile->tun, NULL);
sock_put(&tfile->sk);
else
} else
tun_disable_queue(tun, tfile);
synchronize_net();
@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
}
if (clean) {
if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
!(tun->flags & TUN_PERSIST))
if (tun->dev->reg_state == NETREG_REGISTERED)
if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
netif_carrier_off(tun->dev);
if (!(tun->flags & TUN_PERSIST) &&
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
&tfile->socket.flags));
@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
rcu_assign_pointer(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
wake_up_all(&tfile->wq.wait);
rcu_assign_pointer(tfile->tun, NULL);
}
BUG_ON(tun->numqueues != 0);
synchronize_net();
@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
goto out;
err = -EINVAL;
if (rtnl_dereference(tfile->tun))
if (rtnl_dereference(tfile->tun) && !tfile->detached)
goto out;
err = -EBUSY;
@ -1199,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
tun_flow_update(tun, rxhash, tfile->queue_index);
tun_flow_update(tun, rxhash, tfile);
return total_len;
}
@ -1658,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
pr_err("Failed to create tun sysfs files\n");
netif_carrier_on(tun->dev);
}
netif_carrier_on(tun->dev);
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
if (ifr->ifr_flags & IFF_NO_PI)
@ -1813,7 +1823,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = tun_attach(tun, file);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & TUN_TAP_MQ))
if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
ret = -EINVAL;
else
__tun_detach(tfile, false);


@ -1215,6 +1215,9 @@ static const struct usb_device_id cdc_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
.driver_info = (unsigned long)&wwan_info,
},
/* Infineon(now Intel) HSPA Modem platform */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,


@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
.driver_info = (unsigned long)&qmi_wwan_info,
},
/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */
@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Pantech UML290, P4200 and more */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
@ -461,6 +473,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */


@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
unsigned long lockflags;
size_t size = dev->rx_urb_size;
/* prevent rx skb allocation when error ratio is high */
if (test_bit(EVENT_RX_KILL, &dev->flags)) {
usb_free_urb(urb);
return -ENOLINK;
}
skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@ -539,6 +545,17 @@ block:
break;
}
/* stop rx if packet error rate is high */
if (++dev->pkt_cnt > 30) {
dev->pkt_cnt = 0;
dev->pkt_err = 0;
} else {
if (state == rx_cleanup)
dev->pkt_err++;
if (dev->pkt_err > 20)
set_bit(EVENT_RX_KILL, &dev->flags);
}
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
(dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
"simple");
/* reset rx error state */
dev->pkt_cnt = 0;
dev->pkt_err = 0;
clear_bit(EVENT_RX_KILL, &dev->flags);
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
if (info->manage_power) {
@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
if (netif_msg_tx_err(dev)) {
netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
goto drop;
} else {
/* cdc_ncm collected packet; waits for more */
/* packet collected; minidriver waiting for more */
if (info->flags & FLAG_MULTI_PACKET)
goto not_drop;
}
netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
goto drop;
}
}
length = skb->len;
@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
}
}
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
// waiting for all pending urbs to complete?
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {

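The usbnet change is a simple error-rate breaker: packets are counted in windows of 30, rx_cleanup outcomes count as errors, and once more than 20 errors accumulate within a window EVENT_RX_KILL stops further rx skb allocation until usbnet_bh() clears it on its next pass. The same logic, sketched standalone (struct rx_window and rx_account() are illustrative names):

	#include <stdbool.h>

	struct rx_window {
		int pkt_cnt;	/* packets seen in the current window */
		int pkt_err;	/* errors seen in the current window */
		bool kill;	/* stop submitting rx urbs when set */
	};

	static void rx_account(struct rx_window *w, bool error)
	{
		if (++w->pkt_cnt > 30) {
			w->pkt_cnt = 0;	/* window full: start a new one */
			w->pkt_err = 0;
		} else {
			if (error)
				w->pkt_err++;
			if (w->pkt_err > 20)
				w->kill = true;
		}
	}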

@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
if (ret & 1) { /* Link is up. */
printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
adapter->netdev->name, adapter->link_speed);
if (!netif_carrier_ok(adapter->netdev))
netif_carrier_on(adapter->netdev);
netif_carrier_on(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
} else {
printk(KERN_INFO "%s: NIC Link is Down\n",
adapter->netdev->name);
if (netif_carrier_ok(adapter->netdev))
netif_carrier_off(adapter->netdev);
netif_carrier_off(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
netif_carrier_off(netdev);
err = register_netdev(netdev);
if (err) {


@ -36,6 +36,7 @@
#include "debug.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
/* Flags we support */
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
}
static bool brcms_tx_flush_completed(struct brcms_info *wl)
{
bool result;
spin_lock_bh(&wl->lock);
result = brcms_c_tx_flush_completed(wl->wlc);
spin_unlock_bh(&wl->lock);
return result;
}
static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
{
struct brcms_info *wl = hw->priv;
int ret;
no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
/* wait for packet queue and dma fifos to run empty */
spin_lock_bh(&wl->lock);
brcms_c_wait_for_tx_completion(wl->wlc, drop);
spin_unlock_bh(&wl->lock);
ret = wait_event_timeout(wl->tx_flush_wq,
brcms_tx_flush_completed(wl),
msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
brcms_dbg_mac80211(wl->wlc->hw->d11core,
"ret=%d\n", jiffies_to_msecs(ret));
}
static const struct ieee80211_ops brcms_ops = {
@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)
done:
spin_unlock_bh(&wl->lock);
wake_up(&wl->tx_flush_wq);
}
/*
@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
atomic_set(&wl->callbacks, 0);
init_waitqueue_head(&wl->tx_flush_wq);
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
@ -1609,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
spin_lock_bh(&wl->lock);
return blocked;
}
/*
* precondition: perimeter lock has been acquired
*/
void brcms_msleep(struct brcms_info *wl, uint ms)
{
spin_unlock_bh(&wl->lock);
msleep(ms);
spin_lock_bh(&wl->lock);
}

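The flush rework replaces a 1 ms msleep() polling loop (the now-removed brcms_msleep()) with a waitqueue: brcms_ops_flush() sleeps on tx_flush_wq until brcms_tx_flush_completed() holds or the 500 ms BRCMS_FLUSH_TIMEOUT expires, and brcms_dpc() wakes the queue after every tx-status pass. Both halves of the pattern, condensed from the hunks above:

	#include <linux/wait.h>
	#include <linux/jiffies.h>

	/* flush side (brcms_ops_flush): block until the predicate holds */
	ret = wait_event_timeout(wl->tx_flush_wq,
				 brcms_tx_flush_completed(wl),
				 msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
	/* ret == 0 means the timeout elapsed with frames still pending */

	/* completion side (brcms_dpc): re-test waiters after tx status */
	wake_up(&wl->tx_flush_wq);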

@ -68,6 +68,8 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
/* tx flush */
wait_queue_head_t tx_flush_wq;
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
extern void brcms_free_timer(struct brcms_timer *timer);
extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
extern bool brcms_del_timer(struct brcms_timer *timer);
extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
extern void brcms_fatal_error(struct brcms_info *wl);


@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
static bool
brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
bool morepending = false;
struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs = &txstatus;
core = wlc_hw->d11core;
*fatal = false;
s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
while (!(*fatal)
&& (s1 & TXS_V)) {
/* !give others some time to run! */
if (n >= max_tx_num) {
morepending = true;
break;
}
while (n < max_tx_num) {
s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (s1 == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
*fatal = true;
return false;
}
s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
/* only process when valid */
if (!(s1 & TXS_V))
break;
s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
txs->sequence = s2 & TXS_SEQ_MASK;
@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (*fatal == true)
return false;
n++;
}
if (*fatal)
return false;
return morepending;
return n >= max_tx_num;
}
static void brcms_c_tbtt(struct brcms_c_info *wlc)
@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
return wlc->band->bandunit;
}
void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
{
int timeout = 20;
int i;
/* Kick DMA to send any pending AMPDU */
for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
if (wlc->hw->di[i])
dma_txflush(wlc->hw->di[i]);
dma_kick_tx(wlc->hw->di[i]);
/* wait for queue and DMA fifos to run dry */
while (brcms_txpktpendtot(wlc) > 0) {
brcms_msleep(wlc->wl, 1);
if (--timeout == 0)
break;
}
WARN_ON_ONCE(timeout == 0);
return !brcms_txpktpendtot(wlc);
}
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)


@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
extern void brcms_c_scan_start(struct brcms_c_info *wlc);
extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
extern int brcms_c_get_curband(struct brcms_c_info *wlc);
extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
bool drop);
extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
#endif /* _BRCM_PUB_H_ */


@ -1153,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
next_reclaimed = ssn;
}
if (tid != IWL_TID_NON_QOS) {
priv->tid_data[sta_id][tid].next_reclaimed =
next_reclaimed;
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
next_reclaimed);
}
iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
iwlagn_check_ratid_empty(priv, sta_id, tid);
@ -1203,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
/*
* W/A for FW bug - the seq_ctl isn't updated when the
* queues are flushed. Fetch it from the packet itself
*/
if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
next_reclaimed =
SEQ_TO_SN(next_reclaimed + 0x10);
}
is_offchannel_skb =
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
freed++;
}
if (tid != IWL_TID_NON_QOS) {
priv->tid_data[sta_id][tid].next_reclaimed =
next_reclaimed;
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
next_reclaimed);
}
WARN_ON(!is_agg && freed != 1);
/*


@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
scan_rsp->number_of_sets);
ret = -1;
goto done;
goto check_next_scan;
}
bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (!beacon_size || beacon_size > bytes_left) {
bss_info += bytes_left;
bytes_left = 0;
return -1;
ret = -1;
goto check_next_scan;
}
/* Initialize the current working beacon pointer for this BSS
@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(priv->adapter->dev,
"%s: bytes left < IE length\n",
__func__);
goto done;
goto check_next_scan;
}
if (element_id == WLAN_EID_DS_PARAMS) {
channel = *(current_ptr + sizeof(struct ieee_types_header));
@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
}
check_next_scan:
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
}
done:
return ret;
}


@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
is_tx ? "Tx" : "Rx");
if (is_tx) {
rtl_lps_leave(hw);
schedule_work(&rtlpriv->
works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies =
jiffies;
}
@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
} else if (ETH_P_ARP == ether_type) {
if (is_tx) {
rtl_lps_leave(hw);
schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
"802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
if (is_tx) {
rtl_lps_leave(hw);
schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}

Some files were not shown because too many files have changed in this diff Show More