ASoC: Fixes for v3.14

A few fixes, all in drivers.  Nothing stands out particularly; the
 biggest set of fixes addresses build coverage issues from Sachin.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJS5+nmAAoJELSic+t+oim9aMEQAIdCxUw2kZBkvJx+P2Zoncid
 u56PW1BeYd5NnRfU/XsVNI6F2RGRwzTY5yFKHZ5fc1E0PObRJ4USWPcbR9V8eVQj
 6NDP/b01iKbTJvjWVJYyIsn/690PnwPCcb9Zu8SPtry+BcMRpM53Mfkg5T+zHkNm
 e4dTPFmy8O1m+RI2XT6Zi4YoxkF8HwsFtPbQhu5ASsL5WpdD1q4w+oL861/e0Kpo
 2j3kBWkPjj8msDFeTbA7nXPWQrOx5TF9x5Z0stOnqt0G9rOOvtbnH1cd3xyMqK7+
 eM8kMstmhChW/mrxPYJwh6NMFWaBnAtqZ6eHZNCDXJhdQ3tZtPCcqaHLgFONPKyu
 /OkxvlzN5P1OyLvSofaWO1nWf+D833pMGCknSNxuS7WXfVHYCJ7WPIKqNbDT4r0C
 E0czsKcTFq7D24F8TNm6YTIakhbA3hpETCZRXAkIuEII2vmgORzomx6y65JPQc2M
 Igya0bgzU5pV8lx/02UtrzaXvYwb89/DdMmp4gM5CGdepiRWMSx+j/Ao40vlEGHC
 ZNm7P6MsG2D1h/GgyEV6yH3PnSxZgY5Vxm/9CNDfJss155B5wowKR70RI5nk3NIS
 d/tuRpuiMWeC+wLemBAW5wudYaeLPVZIQ+NLjty/8agFn843ePKpCNnhyaPmhekx
 l7yUEfUoA1yDQiDJysVD
 =ayMS
 -----END PGP SIGNATURE-----

Merge tag 'asoc-v3.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v3.14

A few fixes, all in drivers.  Nothing stands out particularly; the
biggest set of fixes addresses build coverage issues from Sachin.
Takashi Iwai 2014-01-29 07:35:19 +01:00
commit a31886669f
93 changed files with 524 additions and 295 deletions


@ -43,7 +43,7 @@ Example:
sound {
compatible = "simple-audio-card";
simple-audio-card,format = "left_j";
simple-audio-routing =
simple-audio-card,routing =
"MIC_IN", "Mic Jack",
"Headphone Jack", "HP_OUT",
"Ext Spk", "LINE_OUT";


@ -9231,6 +9231,7 @@ F: include/media/videobuf2-*
VIRTIO CONSOLE DRIVER
M: Amit Shah <amit.shah@redhat.com>
L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/char/virtio_console.c
@ -9240,6 +9241,7 @@ F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
M: Rusty Russell <rusty@rustcorp.com.au>
M: "Michael S. Tsirkin" <mst@redhat.com>
L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/virtio/
@ -9252,6 +9254,7 @@ F: include/uapi/linux/virtio_*.h
VIRTIO HOST (VHOST)
M: "Michael S. Tsirkin" <mst@redhat.com>
L: kvm@vger.kernel.org
L: virtio-dev@lists.oasis-open.org
L: virtualization@lists.linux-foundation.org
L: netdev@vger.kernel.org
S: Maintained


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc8
EXTRAVERSION =
NAME = One Giant Leap for Frogkind
# *DOCUMENTATION*


@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
return phys_id == cpu_logical_map(cpu);
}
static const void * __init arch_get_next_mach(const char *const **match)


@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
int (*init_fn)(struct arm_pmu *);
const int (*init_fn)(struct arm_pmu *);
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
int ret = -ENODEV;


@ -431,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
instr2 = __mem_to_opcode_thumb16(instr2);
instr = __opcode_thumb32_compose(instr, instr2);
}
} else if (get_user(instr, (u32 __user *)pc)) {
} else {
if (get_user(instr, (u32 __user *)pc))
goto die_sig;
instr = __mem_to_opcode_arm(instr);
goto die_sig;
}
if (call_undef_hook(regs, instr) == 0)


@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void)
static void highbank_l2x0_disable(void)
{
outer_flush_all();
/* Disable PL310 L2 Cache controller */
highbank_smc1(0x102, 0x0);
}


@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void)
static void omap4_l2x0_disable(void)
{
outer_flush_all();
/* Disable PL310 L2 Cache controller */
omap_smc1(0x102, 0x0);
}


@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;


@ -641,10 +641,10 @@ load_ind:
emit(ARM_MUL(r_A, r_A, r_X), ctx);
break;
case BPF_S_ALU_DIV_K:
/* current k == reciprocal_value(userspace k) */
if (k == 1)
break;
emit_mov_i(r_scratch, k, ctx);
/* A = top 32 bits of the product */
emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
emit_udiv(r_A, r_A, r_scratch, ctx);
break;
case BPF_S_ALU_DIV_X:
update_on_xread(ctx);


@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
extern void __iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define PROT_DEFAULT (pgprot_default | PTE_DIRTY)
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))


@ -83,6 +83,6 @@
/*
* Loongson2-specific cacheops
*/
#define Hit_Invalidate_I_Loongson23 0x00
#define Hit_Invalidate_I_Loongson2 0x00
#endif /* __ASM_CACHEOPS_H */


@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr)
__iflush_prologue
switch (boot_cpu_type()) {
case CPU_LOONGSON2:
cache_op(Hit_Invalidate_I_Loongson23, addr);
cache_op(Hit_Invalidate_I_Loongson2, addr);
break;
default:
@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr)
{
switch (boot_cpu_type()) {
case CPU_LOONGSON2:
protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
break;
default:
@ -357,8 +357,8 @@ static inline void invalidate_tcache_page(unsigned long addr)
"i" (op));
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize(void) \
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
static inline void extra##blast_##pfx##cache##lsize(void) \
{ \
unsigned long start = INDEX_BASE; \
unsigned long end = start + current_cpu_data.desc.waysize; \
@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \
__##pfx##flush_epilogue \
} \
\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
unsigned long start = page; \
unsigned long end = page + PAGE_SIZE; \
@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
__##pfx##flush_epilogue \
} \
\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
unsigned long start = INDEX_BASE + (page & indexmask); \
@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
__##pfx##flush_epilogue \
}
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
protected_, loongson23_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
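The extra parameter added to __BUILD_BLAST_CACHE above is token-pasted in front of the generated function names, so one macro now emits both the default and the loongson2_-prefixed variants. A minimal stand-alone sketch of that pattern in plain C (the names below are illustrative only, not the kernel's):

#include <stdio.h>

/* An empty "extra" argument leaves the generated name unchanged; a
 * non-empty one becomes a prefix, as with the loongson2_ variants above. */
#define BUILD_BLAST(lsize, extra)                               \
static void extra##blast_icache##lsize(void)                    \
{                                                               \
        printf("%s\n", __func__);                               \
}

BUILD_BLAST(32, )               /* defines blast_icache32()           */
BUILD_BLAST(32, loongson2_)     /* defines loongson2_blast_icache32() */

int main(void)
{
        blast_icache32();
        loongson2_blast_icache32();
        return 0;
}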


@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void)
r4k_blast_icache_page = (void *)cache_noop;
else if (ic_lsize == 16)
r4k_blast_icache_page = blast_icache16_page;
else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
r4k_blast_icache_page = loongson2_blast_icache32_page;
else if (ic_lsize == 32)
r4k_blast_icache_page = blast_icache32_page;
else if (ic_lsize == 64)
@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void)
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache_page_indexed =
tx49_blast_icache32_page_indexed;
else if (current_cpu_type() == CPU_LOONGSON2)
r4k_blast_icache_page_indexed =
loongson2_blast_icache32_page_indexed;
else
r4k_blast_icache_page_indexed =
blast_icache32_page_indexed;
@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void)
r4k_blast_icache = blast_r4600_v1_icache32;
else if (TX49XX_ICACHE_INDEX_INV_WAR)
r4k_blast_icache = tx49_blast_icache32;
else if (current_cpu_type() == CPU_LOONGSON2)
r4k_blast_icache = loongson2_blast_icache32;
else
r4k_blast_icache = blast_icache32;
} else if (ic_lsize == 64)
@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
else {
switch (boot_cpu_type()) {
case CPU_LOONGSON2:
protected_blast_icache_range(start, end);
protected_loongson2_blast_icache_range(start, end);
break;
default:
protected_loongson23_blast_icache_range(start, end);
protected_blast_icache_range(start, end);
break;
}
}


@ -75,6 +75,6 @@
#define SO_BUSY_POLL 0x4027
#define SO_MAX_PACING_RATE 0x4048
#define SO_MAX_PACING_RATE 0x4028
#endif /* _UAPI_ASM_SOCKET_H */


@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void)
/* Get the full OF pathname of the stdout device */
memset(path, 0, 256);
call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
val = cpu_to_be32(stdout_node);
prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
&val, sizeof(val));
prom_printf("OF stdout device is: %s\n", of_stdout_device);
prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
path, strlen(path) + 1);
/* If it's a display, note it */
memset(type, 0, sizeof(type));
prom_getprop(stdout_node, "device_type", type, sizeof(type));
if (strcmp(type, "display") == 0)
prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
/* instance-to-package fails on PA-Semi */
stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
if (stdout_node != PROM_ERROR) {
val = cpu_to_be32(stdout_node);
prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
&val, sizeof(val));
/* If it's a display, note it */
memset(type, 0, sizeof(type));
prom_getprop(stdout_node, "device_type", type, sizeof(type));
if (strcmp(type, "display") == 0)
prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
}
}
static int __init prom_find_machine_type(void)


@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
}
PPC_DIVWU(r_A, r_A, r_X);
break;
case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
case BPF_S_ALU_DIV_K: /* A /= K */
if (K == 1)
break;
PPC_LI32(r_scratch1, K);
/* Top 32 bits of 64bit result -> A */
PPC_MULHWU(r_A, r_A, r_scratch1);
PPC_DIVWU(r_A, r_A, r_scratch1);
break;
case BPF_S_ALU_AND_X:
ctx->seen |= SEEN_XREG;


@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
/* dr %r4,%r12 */
EMIT2(0x1d4c);
/* dlr %r4,%r12 */
EMIT4(0xb997004c);
break;
case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
/* m %r4,<d(K)>(%r13) */
EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
/* lr %r5,%r4 */
EMIT2(0x1854);
case BPF_S_ALU_DIV_K: /* A /= K */
if (K == 1)
break;
/* lhi %r4,0 */
EMIT4(0xa7480000);
/* dl %r4,<d(K)>(%r13) */
EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
break;
case BPF_S_ALU_MOD_X: /* A %= X */
jit->seen |= SEEN_XREG | SEEN_RET0;
@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
/* dr %r4,%r12 */
EMIT2(0x1d4c);
/* dlr %r4,%r12 */
EMIT4(0xb997004c);
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
case BPF_S_ALU_MOD_K: /* A %= K */
if (K == 1) {
/* lhi %r5,0 */
EMIT4(0xa7580000);
break;
}
/* lhi %r4,0 */
EMIT4(0xa7480000);
/* d %r4,<d(K)>(%r13) */
EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
/* dl %r4,<d(K)>(%r13) */
EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
/* lr %r5,%r4 */
EMIT2(0x1854);
break;


@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ALU_MUL_K: /* A *= K */
emit_alu_K(MUL, K);
break;
case BPF_S_ALU_DIV_K: /* A /= K */
emit_alu_K(MUL, K);
emit_read_y(r_A);
case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/
if (K == 1)
break;
emit_write_y(G0);
#ifdef CONFIG_SPARC32
/* The Sparc v8 architecture requires
* three instructions between a %y
* register write and the first use.
*/
emit_nop();
emit_nop();
emit_nop();
#endif
emit_alu_K(DIV, K);
break;
case BPF_S_ALU_DIV_X: /* A /= X; */
emit_cmpi(r_X, 0);


@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. "m" is a random variable that should be in L1 */
alternative_input(
ASM_NOP8 ASM_NOP2,
"emms\n\t" /* clear stack tags */
"fildl %P[addr]", /* set F?P to defined value */
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (tsk->thread.fpu.has_fpu));
if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
asm volatile(
"fnclex\n\t"
"emms\n\t"
"fildl %P[addr]" /* set F?P to defined value */
: : [addr] "m" (tsk->thread.fpu.has_fpu));
}
return fpu_restore_checking(&tsk->thread.fpu);
}


@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <asm/apic.h>
@ -816,6 +817,18 @@ out:
return ret;
}
static void ibs_eilvt_setup(void)
{
/*
* Force LVT offset assignment for family 10h: The offsets are
* not assigned by the BIOS for this family, so the OS is
* responsible for doing it. If the OS assignment fails, fall
* back to BIOS settings and try to setup this.
*/
if (boot_cpu_data.x86 == 0x10)
force_ibs_eilvt_setup();
}
static inline int get_ibs_lvt_offset(void)
{
u64 val;
@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
#ifdef CONFIG_PM
static int perf_ibs_suspend(void)
{
clear_APIC_ibs(NULL);
return 0;
}
static void perf_ibs_resume(void)
{
ibs_eilvt_setup();
setup_APIC_ibs(NULL);
}
static struct syscore_ops perf_ibs_syscore_ops = {
.resume = perf_ibs_resume,
.suspend = perf_ibs_suspend,
};
static void perf_ibs_pm_init(void)
{
register_syscore_ops(&perf_ibs_syscore_ops);
}
#else
static inline void perf_ibs_pm_init(void) { }
#endif
static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
@ -877,18 +920,12 @@ static __init int amd_ibs_init(void)
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
/*
* Force LVT offset assignment for family 10h: The offsets are
* not assigned by the BIOS for this family, so the OS is
* responsible for doing it. If the OS assignment fails, fall
* back to BIOS settings and try to setup this.
*/
if (boot_cpu_data.x86 == 0x10)
force_ibs_eilvt_setup();
ibs_eilvt_setup();
if (!ibs_eilvt_valid())
goto out;
perf_ibs_pm_init();
get_online_cpus();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */


@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller)
pushl $0 /* Pass NULL as regs pointer */
movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx
leal function_trace_op, %ecx
movl function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call
@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call)


@ -88,7 +88,7 @@ END(function_hook)
MCOUNT_SAVE_FRAME \skip
/* Load the ftrace_ops into the 3rd parameter */
leaq function_trace_op, %rdx
movq function_trace_op(%rip), %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi


@ -1355,7 +1355,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
vcpu->arch.apic_base = value;
/* update jump label if enable bit changes */
if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
static_key_slow_dec_deferred(&apic_hw_disabled);
else


@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
/*
* Any interrupt that takes a fault gets the fixup. This makes
* the below recursive fault logic only apply to a faults from
* task context.
*/
if (in_interrupt())
return;
/*
* Per the above we're !in_interrupt(), aka. task context.
*
* In this case we need to make sure we're not recursively
* faulting through the emulate_vsyscall() logic.
*/
if (current_thread_info()->sig_on_uaccess_error && signal) {
tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code | PF_USER;
@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* XXX: hwpoison faults will set the wrong code. */
force_sig_info_fault(signal, si_code, address, tsk, 0);
}
/*
* Barring that, we can do the fixup and be happy.
*/
return;
}


@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp)
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
case BPF_S_ALU_MOD_K: /* A %= K; */
if (K == 1) {
CLEAR_A();
break;
}
EMIT2(0x31, 0xd2); /* xor %edx,%edx */
EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
EMIT2(0xf7, 0xf1); /* div %ecx */
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
EMIT(K, 4);
EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
case BPF_S_ALU_DIV_K: /* A /= K */
if (K == 1)
break;
EMIT2(0x31, 0xd2); /* xor %edx,%edx */
EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
EMIT2(0xf7, 0xf1); /* div %ecx */
break;
case BPF_S_ALU_AND_X:
seen |= SEEN_XREG;


@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
ts->tv_nsec = 0;
do {
seq = read_seqcount_begin_no_lockdep(&gtod->seq);
seq = raw_read_seqcount_begin(&gtod->seq);
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->wall_time_sec;
ns = gtod->wall_time_snsec;
@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts)
ts->tv_nsec = 0;
do {
seq = read_seqcount_begin_no_lockdep(&gtod->seq);
seq = raw_read_seqcount_begin(&gtod->seq);
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->monotonic_time_sec;
ns = gtod->monotonic_time_snsec;
@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts)
{
unsigned long seq;
do {
seq = read_seqcount_begin_no_lockdep(&gtod->seq);
seq = raw_read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_coarse.tv_sec;
ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts)
{
unsigned long seq;
do {
seq = read_seqcount_begin_no_lockdep(&gtod->seq);
seq = raw_read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));


@ -162,7 +162,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
{ "INT33B2", },
{ "INT33FC", },
{ "INT3430", (unsigned long)&lpt_dev_desc },
{ "INT3431", (unsigned long)&lpt_dev_desc },


@ -67,11 +67,13 @@
* struct ttc_timer - This definition defines local timer structure
*
* @base_addr: Base address of timer
* @freq: Timer input clock frequency
* @clk: Associated clock source
* @clk_rate_change_nb Notifier block for clock rate changes
*/
struct ttc_timer {
void __iomem *base_addr;
unsigned long freq;
struct clk *clk;
struct notifier_block clk_rate_change_nb;
};
@ -196,9 +198,8 @@ static void ttc_set_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
ttc_set_interval(timer,
DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk),
PRESCALE * HZ));
ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq,
PRESCALE * HZ));
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_UNUSED:
@ -273,6 +274,8 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
return;
}
ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
ttccs->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clocksource_cb;
ttccs->ttc.clk_rate_change_nb.next = NULL;
@ -298,16 +301,14 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
__raw_writel(CNT_CNTRL_RESET,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
err = clocksource_register_hz(&ttccs->cs,
clk_get_rate(ttccs->ttc.clk) / PRESCALE);
err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
if (WARN_ON(err)) {
kfree(ttccs);
return;
}
ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
setup_sched_clock(ttc_sched_clock_read, 16,
clk_get_rate(ttccs->ttc.clk) / PRESCALE);
setup_sched_clock(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE);
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@ -334,6 +335,9 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
ndata->new_rate / PRESCALE);
local_irq_restore(flags);
/* update cached frequency */
ttc->freq = ndata->new_rate;
/* fall through */
}
case PRE_RATE_CHANGE:
@ -367,6 +371,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
if (clk_notifier_register(ttcce->ttc.clk,
&ttcce->ttc.clk_rate_change_nb))
pr_warn("Unable to register clock notifier.\n");
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
ttcce->ttc.base_addr = base;
ttcce->ce.name = "ttc_clockevent";
@ -396,7 +401,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
}
clockevents_config_and_register(&ttcce->ce,
clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe);
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
}
/**


@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
/* if equal delete the probed mode */
mode->status = pmode->status;
/* Merge type bits together */
mode->type = pmode->type;
mode->type |= pmode->type;
list_del(&pmode->head);
drm_mode_destroy(connector->dev, pmode);
break;


@ -2713,6 +2713,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
#undef GEN8_IRQ_INIT_NDX
POSTING_READ(GEN8_PCU_IIR);
ibx_irq_preinstall(dev);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)


@ -1057,12 +1057,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *intel_crtc;
dev_priv->ddi_plls.spll_refcount = 0;
dev_priv->ddi_plls.wrpll1_refcount = 0;
dev_priv->ddi_plls.wrpll2_refcount = 0;
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
if (!intel_crtc->active)
if (!intel_crtc->active) {
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
continue;
}
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);


@ -11053,10 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
drm_modeset_lock_all(dev);
mutex_lock(&dev->mode_config.mutex);
drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
drm_modeset_unlock_all(dev);
mutex_unlock(&dev->mode_config.mutex);
}
void intel_modeset_cleanup(struct drm_device *dev)


@ -73,7 +73,7 @@ struct nouveau_i2c {
int (*identify)(struct nouveau_i2c *, int index,
const char *what, struct nouveau_i2c_board_info *,
bool (*match)(struct nouveau_i2c_port *,
struct i2c_board_info *));
struct i2c_board_info *, void *), void *);
struct list_head ports;
};


@ -50,6 +50,13 @@ struct nouveau_instmem {
static inline struct nouveau_instmem *
nouveau_instmem(void *obj)
{
/* nv04/nv40 impls need to create objects in their constructor,
* which is before the subdev pointer is valid
*/
if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
return obj;
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
}


@ -197,7 +197,7 @@ static int
nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
struct nouveau_i2c_board_info *info,
bool (*match)(struct nouveau_i2c_port *,
struct i2c_board_info *))
struct i2c_board_info *, void *), void *data)
{
struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
int i;
@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
}
if (nv_probe_i2c(port, info[i].dev.addr) &&
(!match || match(port, &info[i].dev))) {
(!match || match(port, &info[i].dev, data))) {
nv_info(i2c, "detected %s: %s\n", what,
info[i].dev.type);
return i;


@ -100,7 +100,7 @@ mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
static int
mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
{
struct nouveau_mxm *mxm = nouveau_mxm(bios);
struct nouveau_mxm *mxm = data;
struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
u8 type, i2cidx, link, ver, len;
u8 *conn;
@ -199,7 +199,7 @@ mxm_dcb_sanitise(struct nouveau_mxm *mxm)
return;
}
dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
dcb_outp_foreach(bios, mxm, mxm_dcb_sanitise_entry);
mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
}


@ -29,9 +29,9 @@
static bool
probe_monitoring_device(struct nouveau_i2c_port *i2c,
struct i2c_board_info *info)
struct i2c_board_info *info, void *data)
{
struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
struct nouveau_therm_priv *priv = data;
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
struct i2c_client *client;
@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
board, probe_monitoring_device);
board, probe_monitoring_device, therm);
if (priv->ic)
return;
}
@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
};
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
board, probe_monitoring_device);
board, probe_monitoring_device, therm);
if (priv->ic)
return;
}
@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
device. Let's try our static list.
*/
i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
nv_board_infos, probe_monitoring_device);
nv_board_infos, probe_monitoring_device, therm);
}


@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
get_tmds_slave(encoder))
return;
type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL);
type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
if (type < 0)
return;


@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
return i2c->identify(i2c, i2c_index, "TV encoder",
nv04_tv_encoder_info, NULL);
nv04_tv_encoder_info, NULL, NULL);
}


@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)


@ -1077,6 +1077,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
@ -1155,6 +1156,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@ -1563,6 +1566,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
@ -1645,6 +1649,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@ -2788,6 +2794,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
@ -5760,6 +5767,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
} else
@ -7706,7 +7714,8 @@ static int remove_and_add_spares(struct mddev *mddev,
if (test_bit(Faulty, &rdev->flags))
continue;
if (mddev->ro &&
rdev->saved_raid_disk < 0)
! (rdev->saved_raid_disk >= 0 &&
!test_bit(Bitmap_sync, &rdev->flags)))
continue;
rdev->recovery_offset = 0;
@ -7787,9 +7796,12 @@ void md_check_recovery(struct mddev *mddev)
* As we only add devices that are already in-sync,
* we can activate the spares immediately.
*/
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
remove_and_add_spares(mddev, NULL);
mddev->pers->spare_active(mddev);
/* There is no thread, but we need to call
* ->spare_active and clear saved_raid_disk
*/
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}


@ -129,6 +129,9 @@ struct md_rdev {
enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
Bitmap_sync, /* ..actually, not quite In_sync. Need a
* bitmap-based recovery to get fully in sync
*/
Unmerged, /* device is being added to array and should
* be considerred for bvec_merge_fn but not
* yet for actual IO


@ -924,9 +924,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
conf->next_window_requests++;
else
conf->current_window_requests++;
}
if (bio->bi_sector >= conf->start_next_window)
sector = conf->start_next_window;
}
}
conf->nr_pending++;


@ -1319,7 +1319,7 @@ read_again:
/* Could not read all from this device, so we will
* need another r10_bio.
*/
sectors_handled = (r10_bio->sectors + max_sectors
sectors_handled = (r10_bio->sector + max_sectors
- bio->bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
@ -1327,7 +1327,7 @@ read_again:
bio->bi_phys_segments = 2;
else
bio->bi_phys_segments++;
spin_unlock(&conf->device_lock);
spin_unlock_irq(&conf->device_lock);
/* Cannot call generic_make_request directly
* as that will be queued in __generic_make_request
* and subsequent mempool_alloc might block
@ -3218,10 +3218,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
put_buf(r10_bio);
if (rb2)
atomic_dec(&rb2->remaining);
r10_bio = rb2;
if (any_working) {
/* problem is that there are bad blocks
* on other device(s)
@ -3253,6 +3249,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
mirror->recovery_disabled
= mddev->recovery_disabled;
}
put_buf(r10_bio);
if (rb2)
atomic_dec(&rb2->remaining);
r10_bio = rb2;
break;
}
}


@ -687,7 +687,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
BUG_ON(list_empty(&sh->lru));
BUG_ON(list_empty(&sh->lru) &&
!test_bit(STRIPE_EXPANDING, &sh->state));
list_del_init(&sh->lru);
if (sh->group) {
sh->group->stripes_cnt--;
@ -3608,7 +3609,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
*/
set_bit(R5_Insync, &dev->flags);
if (rdev && test_bit(R5_WriteError, &dev->flags)) {
if (test_bit(R5_WriteError, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(
@ -3621,7 +3622,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
if (test_bit(R5_MadeGood, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(


@ -12942,25 +12942,26 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
if (bp->regview)
iounmap(bp->regview);
if (remove_netdev) {
if (bp->regview)
iounmap(bp->regview);
/* for vf doorbells are part of the regview and were unmapped along with
* it. FW is only loaded by PF.
*/
if (IS_PF(bp)) {
if (bp->doorbells)
iounmap(bp->doorbells);
/* For vfs, doorbells are part of the regview and were unmapped
* along with it. FW is only loaded by PF.
*/
if (IS_PF(bp)) {
if (bp->doorbells)
iounmap(bp->doorbells);
bnx2x_release_firmware(bp);
}
bnx2x_free_mem_bp(bp);
bnx2x_release_firmware(bp);
}
bnx2x_free_mem_bp(bp);
if (remove_netdev)
free_netdev(dev);
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
}
pci_disable_device(pdev);
}


@ -423,7 +423,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
ntuple |= (u64)l2t->lport << tp->port_shift;
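The (u64) cast added above matters because F_FT_VLAN_VLD | l2t->vlan is a 32-bit expression: without widening it before the shift, any bits pushed past bit 31 are dropped before the value is OR-ed into the 64-bit ntuple. A small stand-alone illustration, using made-up values for the field and the shift (not the hardware's real layout):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t field = 0x1005;        /* stand-in for F_FT_VLAN_VLD | vlan */
        int shift = 20;                 /* stand-in for tp->vlan_shift */

        uint64_t lost  = field << shift;                /* 32-bit shift: bit 12 -> bit 32 is dropped */
        uint64_t fixed = (uint64_t)field << shift;      /* widened first, as in the fix */

        printf("lost=%#llx fixed=%#llx\n",
               (unsigned long long)lost, (unsigned long long)fixed);
        /* prints lost=0x500000 fixed=0x100500000 */
        return 0;
}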


@ -1776,6 +1776,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &rxo->q;
struct page *pagep = NULL;
struct device *dev = &adapter->pdev->dev;
struct be_eth_rx_d *rxd;
u64 page_dmaaddr = 0, frag_dmaaddr;
u32 posted, page_offset = 0;
@ -1788,9 +1789,15 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
rx_stats(rxo)->rx_post_fail++;
break;
}
page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
0, adapter->big_page_size,
page_dmaaddr = dma_map_page(dev, pagep, 0,
adapter->big_page_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, page_dmaaddr)) {
put_page(pagep);
pagep = NULL;
rx_stats(rxo)->rx_post_fail++;
break;
}
page_info->page_offset = 0;
} else {
get_page(pagep);


@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int e1000_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
return __e1000_resume(pdev);
}
#endif /* CONFIG_PM */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
@ -7015,13 +7015,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
e1000_idle)
};
#endif
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
@ -7029,11 +7027,9 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
#ifdef CONFIG_PM
.driver = {
.pm = &e1000_pm_ops,
},
#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};


@ -4765,6 +4765,8 @@ static int qlge_probe(struct pci_dev *pdev,
NETIF_F_RXCSUM;
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
/* vlan gets same features (except vlan filter) */
ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;


@ -1618,6 +1618,7 @@ static void rhine_reset_task(struct work_struct *work)
goto out_unlock;
napi_disable(&rp->napi);
netif_tx_disable(dev);
spin_lock_bh(&rp->lock);
/* clear all descriptors */


@ -614,6 +614,18 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
{
USB_DEVICE(0x0a46, 0x9622), /* DM9622 USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
{
USB_DEVICE(0x0a46, 0x0269), /* DM962OA USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
{
USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
{}, // END
};


@ -1245,7 +1245,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
return -ENOMEM;
urb->num_sgs = num_sgs;
sg_init_table(urb->sg, urb->num_sgs);
sg_init_table(urb->sg, urb->num_sgs + 1);
sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
total_len += skb_headlen(skb);


@ -512,7 +512,6 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
static const struct acpi_device_id byt_gpio_acpi_match[] = {
{ "INT33B2", 0 },
{ "INT33FC", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);


@ -3061,8 +3061,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
* thus don't need to be hashed. They also don't need a name until a
* user wants to identify the object in /proc/pid/fd/. The little hack
* below allows us to generate a name for these objects on demand:
*
* Some pseudo inodes are mountable. When they are mounted
* path->dentry == path->mnt->mnt_root. In that case don't call d_dname
* and instead have d_path return the mounted path.
*/
if (path->dentry->d_op && path->dentry->d_op->d_dname)
if (path->dentry->d_op && path->dentry->d_op->d_dname &&
(!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
rcu_read_lock();


@ -516,13 +516,16 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
}
WARN_ON(inode->i_state & I_SYNC);
/*
* Skip inode if it is clean. We don't want to mess with writeback
* lists in this function since flusher thread may be doing for example
* sync in parallel and if we move the inode, it could get skipped. So
* here we make sure inode is on some writeback list and leave it there
* unless we have completely cleaned the inode.
* Skip inode if it is clean and we have no outstanding writeback in
* WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
* function since flusher thread may be doing for example sync in
* parallel and if we move the inode, it could get skipped. So here we
* make sure inode is on some writeback list and leave it there unless
* we have completely cleaned the inode.
*/
if (!(inode->i_state & I_DIRTY))
if (!(inode->i_state & I_DIRTY) &&
(wbc->sync_mode != WB_SYNC_ALL ||
!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
goto out;
inode->i_state |= I_SYNC;
spin_unlock(&inode->i_lock);


@ -2886,7 +2886,7 @@ bool fs_fully_visible(struct file_system_type *type)
struct inode *inode = child->mnt_mountpoint->d_inode;
if (!S_ISDIR(inode->i_mode))
goto next;
if (inode->i_nlink != 2)
if (inode->i_nlink > 2)
goto next;
}
visible = true;


@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
nilfs_clear_logs(&sci->sc_segbufs);
err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
if (unlikely(err))
return err;
if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
sci->sc_freesegs,
sci->sc_nfreesegs,
NULL);
WARN_ON(err); /* do not happen */
sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
}
err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
if (unlikely(err))
return err;
nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
sci->sc_stage = prev_stage;
}


@ -6,6 +6,8 @@
#include <linux/proc_fs.h>
#include <linux/elf.h>
#include <asm/pgtable.h> /* for pgprot_t */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)


@ -445,7 +445,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
static inline struct i2c_adapter *
i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
{
#if IS_ENABLED(I2C_MUX)
#if IS_ENABLED(CONFIG_I2C_MUX)
struct device *parent = adapter->dev.parent;
if (parent != NULL && parent->type == &i2c_adapter_type)


@ -117,15 +117,15 @@ repeat:
}
/**
* read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
* raw_read_seqcount_begin - start seq-read critical section w/o lockdep
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
*
* read_seqcount_begin_no_lockdep opens a read critical section of the given
* raw_read_seqcount_begin opens a read critical section of the given
* seqcount, but without any lockdep checking. Validity of the critical
* section is tested by checking read_seqcount_retry function.
*/
static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
unsigned ret = __read_seqcount_begin(s);
smp_rmb();
@ -144,7 +144,7 @@ static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
seqcount_lockdep_reader_access(s);
return read_seqcount_begin_no_lockdep(s);
return raw_read_seqcount_begin(s);
}
/**
@ -206,14 +206,26 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
}
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
smp_wmb();
}
static inline void raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
}
/*
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
s->sequence++;
smp_wmb();
raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
@ -225,8 +237,7 @@ static inline void write_seqcount_begin(seqcount_t *s)
static inline void write_seqcount_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, 1, _RET_IP_);
smp_wmb();
s->sequence++;
raw_write_seqcount_end(s);
}
/**


@ -165,7 +165,6 @@ struct inet6_dev {
struct net_device *dev;
struct list_head addr_list;
int valid_ll_addr_cnt;
struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;


@ -1172,7 +1172,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* do not allow it to share a thread group or signal handlers or
* parent with the forking task.
*/
if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
if (clone_flags & CLONE_SIGHAND) {
if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
(task_active_pid_ns(current) !=
current->nsproxy->pid_ns_for_children))


@ -3923,7 +3923,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
struct sched_entity *se = tg->se[cpu];
if (!tg->parent || !wl) /* the trivial, non-cgroup case */
if (!tg->parent) /* the trivial, non-cgroup case */
return wl;
for_each_sched_entity(se) {


@ -74,7 +74,7 @@ unsigned long long notrace sched_clock(void)
return cd.epoch_ns;
do {
seq = read_seqcount_begin(&cd.seq);
seq = raw_read_seqcount_begin(&cd.seq);
epoch_cyc = cd.epoch_cyc;
epoch_ns = cd.epoch_ns;
} while (read_seqcount_retry(&cd.seq, seq));
@ -99,10 +99,10 @@ static void notrace update_sched_clock(void)
cd.mult, cd.shift);
raw_local_irq_save(flags);
write_seqcount_begin(&cd.seq);
raw_write_seqcount_begin(&cd.seq);
cd.epoch_ns = ns;
cd.epoch_cyc = cyc;
write_seqcount_end(&cd.seq);
raw_write_seqcount_end(&cd.seq);
raw_local_irq_restore(flags);
}


@ -82,10 +82,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count;
__this_cpu_sub(*fbc->counters, count - amount);
raw_spin_unlock_irqrestore(&fbc->lock, flags);
__this_cpu_write(*fbc->counters, 0);
} else {
__this_cpu_write(*fbc->counters, count);
this_cpu_add(*fbc->counters, amount);
}
preempt_enable();
}


@ -390,7 +390,10 @@ struct address_space *page_mapping(struct page *page)
{
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
/* This happens if someone calls flush_dcache_page on slab page */
if (unlikely(PageSlab(page)))
return NULL;
if (unlikely(PageSwapCache(page))) {
swp_entry_t entry;


@ -277,7 +277,7 @@ int batadv_max_header_len(void)
sizeof(struct batadv_coded_packet));
#endif
return header_len;
return header_len + ETH_HLEN;
}
/**


@ -36,7 +36,6 @@
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
A /= X;
continue;
case BPF_S_ALU_DIV_K:
A = reciprocal_divide(A, K);
A /= K;
continue;
case BPF_S_ALU_MOD_X:
if (X == 0)
@ -553,11 +552,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
/* Some instructions need special checks */
switch (code) {
case BPF_S_ALU_DIV_K:
/* check for division by zero */
if (ftest->k == 0)
return -EINVAL;
ftest->k = reciprocal_value(ftest->k);
break;
case BPF_S_ALU_MOD_K:
/* check for division by zero */
if (ftest->k == 0)
@ -853,27 +847,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
to->code = decodes[code];
to->jt = filt->jt;
to->jf = filt->jf;
if (code == BPF_S_ALU_DIV_K) {
/*
* When loaded this rule user gave us X, which was
* translated into R = r(X). Now we calculate the
* RR = r(R) and report it back. If next time this
* value is loaded and RRR = r(RR) is calculated
* then the R == RRR will be true.
*
* One exception. X == 1 translates into R == 0 and
* we can't calculate RR out of it with r().
*/
if (filt->k == 0)
to->k = 1;
else
to->k = reciprocal_value(filt->k);
BUG_ON(reciprocal_value(to->k) != filt->k);
} else
to->k = filt->k;
to->k = filt->k;
}
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
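Together with the matching JIT updates, the interpreter change above replaces the reciprocal_divide() approximation with a true unsigned division, and sk_chk_filter() no longer rewrites K into reciprocal_value(K). A small user-space sketch of the corner case noted in the removed comment (both helpers are simplified re-implementations for illustration, not the kernel functions):

#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_value(uint32_t k)
{
        /* simplified model of the old attach-time conversion */
        return (uint32_t)((((uint64_t)1 << 32) + k - 1) / k);
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
        /* old run-time divide: high 32 bits of a 32x32 multiply */
        return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
        uint32_t a = 12345;

        /* K == 1: reciprocal_value(1) truncates to 0, so the old scheme
         * returned 0 instead of A.  The new code divides directly, and
         * the JITs skip the division entirely when K == 1. */
        printf("old: %u  new: %u\n",
               reciprocal_divide(a, reciprocal_value(1)), a / 1);
        return 0;
}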


@ -221,8 +221,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
if (type >= __IEEE802154_DEV_MAX)
return -EINVAL;
if (type >= __IEEE802154_DEV_MAX) {
rc = -EINVAL;
goto nla_put_failure;
}
}
dev = phy->add_iface(phy, devname, type);


@ -930,12 +930,15 @@ skip_listen_ht:
spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &head->chain) {
int res;
int state;
if (!net_eq(sock_net(sk), net))
continue;
if (num < s_num)
goto next_normal;
if (!(r->idiag_states & (1 << sk->sk_state)))
state = (sk->sk_state == TCP_TIME_WAIT) ?
inet_twsk(sk)->tw_substate : sk->sk_state;
if (!(r->idiag_states & (1 << state)))
goto next_normal;
if (r->sdiag_family != AF_UNSPEC &&
sk->sk_family != r->sdiag_family)


@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
struct mr_table **mrt)
{
struct ipmr_result res;
struct fib_lookup_arg arg = { .result = &res, };
int err;
struct ipmr_result res;
struct fib_lookup_arg arg = {
.result = &res,
.flags = FIB_LOOKUP_NOREF,
};
err = fib_rules_lookup(net->ipv4.mr_rules_ops,
flowi4_to_flowi(flp4), 0, &arg);


@ -22,6 +22,9 @@
int sysctl_tcp_nometrics_save __read_mostly;
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
struct net *net, unsigned int hash);
struct tcp_fastopen_metrics {
u16 mss;
u16 syn_loss:10; /* Recurring Fast Open SYN losses */
@ -130,16 +133,41 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
}
}
#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
tcpm_suck_dst(tm, dst, false);
}
#define TCP_METRICS_RECLAIM_DEPTH 5
#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
struct inetpeer_addr *addr,
unsigned int hash,
bool reclaim)
unsigned int hash)
{
struct tcp_metrics_block *tm;
struct net *net;
bool reclaim = false;
spin_lock_bh(&tcp_metrics_lock);
net = dev_net(dst->dev);
/* While waiting for the spin-lock the cache might have been populated
* with this entry and so we have to check again.
*/
tm = __tcp_get_metrics(addr, net, hash);
if (tm == TCP_METRICS_RECLAIM_PTR) {
reclaim = true;
tm = NULL;
}
if (tm) {
tcpm_check_stamp(tm, dst);
goto out_unlock;
}
if (unlikely(reclaim)) {
struct tcp_metrics_block *oldest;
@ -169,17 +197,6 @@ out_unlock:
return tm;
}
#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
tcpm_suck_dst(tm, dst, false);
}
#define TCP_METRICS_RECLAIM_DEPTH 5
#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
if (tm)
@ -282,7 +299,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
struct inetpeer_addr addr;
unsigned int hash;
struct net *net;
bool reclaim;
addr.family = sk->sk_family;
switch (addr.family) {
@ -304,13 +320,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
tm = __tcp_get_metrics(&addr, net, hash);
reclaim = false;
if (tm == TCP_METRICS_RECLAIM_PTR) {
reclaim = true;
if (tm == TCP_METRICS_RECLAIM_PTR)
tm = NULL;
}
if (!tm && create)
tm = tcpm_new(dst, &addr, hash, reclaim);
tm = tcpm_new(dst, &addr, hash);
else
tcpm_check_stamp(tm, dst);


@ -3189,6 +3189,22 @@ out:
in6_ifa_put(ifp);
}
/* ifp->idev must be at least read locked */
static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
{
struct inet6_ifaddr *ifpiter;
struct inet6_dev *idev = ifp->idev;
list_for_each_entry(ifpiter, &idev->addr_list, if_list) {
if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
(ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
IFA_F_PERMANENT)
return false;
}
return true;
}
static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
{
struct net_device *dev = ifp->idev->dev;
@ -3208,14 +3224,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
*/
read_lock_bh(&ifp->idev->lock);
spin_lock(&ifp->lock);
send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL &&
ifp->idev->valid_ll_addr_cnt == 1;
send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
send_rs = send_mld &&
ipv6_accept_ra(ifp->idev) &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0;
spin_unlock(&ifp->lock);
read_unlock_bh(&ifp->idev->lock);
/* While dad is in progress mld report's source address is in6_addrany.
@ -4512,19 +4525,6 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
}
static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count)
{
write_lock_bh(&ifp->idev->lock);
spin_lock(&ifp->lock);
if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|
IFA_F_DADFAILED)) == IFA_F_PERMANENT) &&
(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL))
ifp->idev->valid_ll_addr_cnt += count;
WARN_ON(ifp->idev->valid_ll_addr_cnt < 0);
spin_unlock(&ifp->lock);
write_unlock_bh(&ifp->idev->lock);
}
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
{
struct net *net = dev_net(ifp->idev->dev);
@ -4533,8 +4533,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
switch (event) {
case RTM_NEWADDR:
update_valid_ll_addr_cnt(ifp, 1);
/*
* If the address was optimistic
* we inserted the route at the start of
@ -4550,8 +4548,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
ifp->idev->dev, 0, 0);
break;
case RTM_DELADDR:
update_valid_ll_addr_cnt(ifp, -1);
if (ifp->idev->cnf.forwarding)
addrconf_leave_anycast(ifp);
addrconf_leave_solict(ifp->idev, &ifp->addr);


@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
struct mr6_table **mrt)
{
struct ip6mr_result res;
struct fib_lookup_arg arg = { .result = &res, };
int err;
struct ip6mr_result res;
struct fib_lookup_arg arg = {
.result = &res,
.flags = FIB_LOOKUP_NOREF,
};
err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
flowi6_to_flowi(flp6), 0, &arg);


@ -421,8 +421,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
struct rds_ib_refill_cache *cache)
{
unsigned long flags;
struct list_head *old;
struct list_head __percpu *chpfirst;
struct list_head *old, *chpfirst;
local_irq_save(flags);
@ -432,7 +431,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
else /* put on front */
list_add_tail(new_item, chpfirst);
__this_cpu_write(chpfirst, new_item);
__this_cpu_write(cache->percpu->first, new_item);
__this_cpu_inc(cache->percpu->count);
if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
@ -452,7 +451,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
} while (old);
__this_cpu_write(chpfirst, NULL);
__this_cpu_write(cache->percpu->first, NULL);
__this_cpu_write(cache->percpu->count, 0);
end:
local_irq_restore(flags);
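[Editor's note, not part of the patch: the RDS hunks above stop passing the local chpfirst copy to __this_cpu_write() and instead store through cache->percpu->first directly. The userspace sketch below models why the old form was a bug class; the names (struct cache_slot, put_wrong, put_fixed) are invented for illustration.]

#include <stdio.h>

struct cache_slot { void *first; };

static struct cache_slot slot;		/* stands in for one CPU's per-cpu slot */

static void put_wrong(void *item)
{
	void *first = slot.first;	/* local copy, like the old chpfirst */

	first = item;			/* updates only the local copy */
	(void)first;
}

static void put_fixed(void *item)
{
	slot.first = item;		/* updates the real slot, as the fix does */
}

int main(void)
{
	int x;

	put_wrong(&x);
	printf("after wrong put: %p\n", slot.first);	/* still the old NULL value */
	put_fixed(&x);
	printf("after fixed put: %p\n", slot.first);	/* now points at x */
	return 0;
}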

View File

@ -450,6 +450,17 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
}
snd_soc_write(codec, AIC32X4_IFACE1, data);
if (params_channels(params) == 1) {
data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
} else {
if (aic32x4->swapdacs)
data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
else
data = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
}
snd_soc_update_bits(codec, AIC32X4_DACSETUP, AIC32X4_DAC_CHAN_MASK,
data);
return 0;
}
@ -606,20 +617,15 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
}
snd_soc_write(codec, AIC32X4_CMMODE, tmp_reg);
/* Do DACs need to be swapped? */
if (aic32x4->swapdacs) {
snd_soc_write(codec, AIC32X4_DACSETUP, AIC32X4_LDAC2RCHN | AIC32X4_RDAC2LCHN);
} else {
snd_soc_write(codec, AIC32X4_DACSETUP, AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN);
}
/* Mic PGA routing */
if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K) {
if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K)
snd_soc_write(codec, AIC32X4_LMICPGANIN, AIC32X4_LMICPGANIN_IN2R_10K);
}
if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K) {
else
snd_soc_write(codec, AIC32X4_LMICPGANIN, AIC32X4_LMICPGANIN_CM1L_10K);
if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K)
snd_soc_write(codec, AIC32X4_RMICPGANIN, AIC32X4_RMICPGANIN_IN1L_10K);
}
else
snd_soc_write(codec, AIC32X4_RMICPGANIN, AIC32X4_RMICPGANIN_CM1R_10K);
aic32x4_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
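[Editor's note, not part of the patch: the aic32x4 hunks above move the DAC-to-channel routing decision into hw_params() and apply it with snd_soc_update_bits() against AIC32X4_DAC_CHAN_MASK, so only the routing field of DACSETUP is rewritten. Below is a self-contained sketch of that read-modify-write pattern; the register value and the 0x14 routing value are made up for the example.]

#include <stdio.h>

#define DAC_CHAN_MASK 0x3c	/* mirrors AIC32X4_DAC_CHAN_MASK */

/* Rewrite only the bits covered by mask, keep everything else. */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int dacsetup = 0xff;	/* pretend current register contents */

	dacsetup = update_bits(dacsetup, DAC_CHAN_MASK, 0x14);
	printf("0x%02x\n", dacsetup);	/* 0xd7: only bits 2..5 changed */
	return 0;
}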

View File

@ -120,7 +120,9 @@
#define AIC32X4_MICBIAS_2075V 0x60
#define AIC32X4_LMICPGANIN_IN2R_10K 0x10
#define AIC32X4_LMICPGANIN_CM1L_10K 0x40
#define AIC32X4_RMICPGANIN_IN1L_10K 0x10
#define AIC32X4_RMICPGANIN_CM1R_10K 0x40
#define AIC32X4_LMICPGAVOL_NOGAIN 0x80
#define AIC32X4_RMICPGAVOL_NOGAIN 0x80
@ -138,6 +140,7 @@
#define AIC32X4_LDAC2RCHN (0x02 << 4)
#define AIC32X4_LDAC2LCHN (0x01 << 4)
#define AIC32X4_RDAC2RCHN (0x01 << 2)
#define AIC32X4_DAC_CHAN_MASK 0x3c
#define AIC32X4_SSTEP2WCLK 0x01
#define AIC32X4_MUTEON 0x0C

View File

@ -14,6 +14,7 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/gcd.h>
#include <linux/gpio.h>
@ -2141,6 +2142,7 @@ int wm5100_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
return 0;
}
EXPORT_SYMBOL_GPL(wm5100_detect);
static irqreturn_t wm5100_irq(int irq, void *data)
{

View File

@ -81,6 +81,54 @@ static const struct reg_default wm5110_sysclk_revd_patch[] = {
{ 0x3133, 0x1201 },
{ 0x3183, 0x1501 },
{ 0x31D3, 0x1401 },
{ 0x0049, 0x01ea },
{ 0x004a, 0x01f2 },
{ 0x0057, 0x01e7 },
{ 0x0058, 0x01fb },
{ 0x33ce, 0xc4f5 },
{ 0x33cf, 0x1361 },
{ 0x33d0, 0x0402 },
{ 0x33d1, 0x4700 },
{ 0x33d2, 0x026d },
{ 0x33d3, 0xff00 },
{ 0x33d4, 0x026d },
{ 0x33d5, 0x0101 },
{ 0x33d6, 0xc4f5 },
{ 0x33d7, 0x0361 },
{ 0x33d8, 0x0402 },
{ 0x33d9, 0x6701 },
{ 0x33da, 0xc4f5 },
{ 0x33db, 0x136f },
{ 0x33dc, 0xc4f5 },
{ 0x33dd, 0x134f },
{ 0x33de, 0xc4f5 },
{ 0x33df, 0x131f },
{ 0x33e0, 0x026d },
{ 0x33e1, 0x4f01 },
{ 0x33e2, 0x026d },
{ 0x33e3, 0xf100 },
{ 0x33e4, 0x026d },
{ 0x33e5, 0x0001 },
{ 0x33e6, 0xc4f5 },
{ 0x33e7, 0x0361 },
{ 0x33e8, 0x0402 },
{ 0x33e9, 0x6601 },
{ 0x33ea, 0xc4f5 },
{ 0x33eb, 0x136f },
{ 0x33ec, 0xc4f5 },
{ 0x33ed, 0x134f },
{ 0x33ee, 0xc4f5 },
{ 0x33ef, 0x131f },
{ 0x33f0, 0x026d },
{ 0x33f1, 0x4e01 },
{ 0x33f2, 0x026d },
{ 0x33f3, 0xf000 },
{ 0x33f6, 0xc4f5 },
{ 0x33f7, 0x1361 },
{ 0x33f8, 0x0402 },
{ 0x33f9, 0x4600 },
{ 0x33fa, 0x026d },
{ 0x33fb, 0xfe00 },
};
static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,

View File

@ -1262,18 +1262,13 @@ static int fsl_ssi_probe(struct platform_device *pdev)
return -EINVAL;
hw_type = (enum fsl_ssi_type) of_id->data;
/* We only support the SSI in "I2S Slave" mode */
sprop = of_get_property(np, "fsl,mode", NULL);
if (!sprop) {
dev_err(&pdev->dev, "fsl,mode property is necessary\n");
return -EINVAL;
}
if (!strcmp(sprop, "ac97-slave")) {
if (!strcmp(sprop, "ac97-slave"))
ac97 = true;
} else if (strcmp(sprop, "i2s-slave")) {
dev_notice(&pdev->dev, "mode %s is unsupported\n", sprop);
return -ENODEV;
}
/* The DAI name is the last part of the full name of the node. */
p = strrchr(np->full_name, '/') + 1;
@ -1391,7 +1386,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
*/
ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
if (IS_ERR(ssi_private->baudclk))
dev_warn(&pdev->dev, "could not get baud clock: %ld\n",
dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
PTR_ERR(ssi_private->baudclk));
else
clk_prepare_enable(ssi_private->baudclk);
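[Editor's note, not part of the patch: the fsl_ssi hunk above reworks how the "fsl,mode" string property selects AC'97 versus I2S handling. The sketch below models that string dispatch in plain C; parse_mode() and its return conventions are invented for the example, not the driver's API.]

#include <stdio.h>
#include <string.h>

static int parse_mode(const char *sprop, int *ac97)
{
	if (!sprop)
		return -1;		/* property missing */
	*ac97 = 0;
	if (!strcmp(sprop, "ac97-slave"))
		*ac97 = 1;
	else if (strcmp(sprop, "i2s-slave"))
		return -1;		/* any other mode is unsupported */
	return 0;
}

int main(void)
{
	int ac97;

	printf("%d\n", parse_mode("ac97-slave", &ac97));	/* 0, ac97 == 1 */
	printf("%d\n", parse_mode("spdif", &ac97));		/* -1 */
	return 0;
}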

View File

@ -30,6 +30,7 @@ config SND_OMAP_SOC_RX51
select SND_OMAP_SOC_MCBSP
select SND_SOC_TLV320AIC3X
select SND_SOC_TPA6130A2
depends on GPIOLIB
help
Say Y if you want to add support for SoC audio on Nokia RX-51
hardware. This is also known as Nokia N900 product.

View File

@ -19,7 +19,7 @@ config SND_S3C_DMA_LEGACY
config SND_S3C24XX_I2S
tristate
select S3C2410_DMA
select S3C24XX_DMA
config SND_S3C_I2SV2_SOC
tristate
@ -210,7 +210,7 @@ config SND_SOC_TOBERMORY
config SND_SOC_BELLS
tristate "Audio support for Wolfson Bells"
depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410
depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA
select SND_SAMSUNG_I2S
select SND_SOC_WM5102
select SND_SOC_WM5110

View File

@ -23,6 +23,7 @@
#include "regs-iis.h"
#include <asm/mach-types.h>
#include <mach/gpio-samsung.h>
#include "s3c24xx-i2s.h"
static unsigned int rates[] = {

View File

@ -22,8 +22,6 @@
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <mach/dma.h>
#include <linux/platform_data/asoc-s3c.h>
#include "dma.h"
@ -1268,7 +1266,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
return 0;
err:
release_mem_region(regs_base, resource_size(res));
if (res)
release_mem_region(regs_base, resource_size(res));
return ret;
}

View File

@ -20,6 +20,7 @@
#include <sound/soc.h>
#include <mach/gpio-samsung.h>
#include <asm/mach-types.h>
#include "regs-iis.h"

View File

@ -24,6 +24,7 @@
#include <sound/soc.h>
#include <sound/jack.h>
#include <mach/gpio-samsung.h>
#include "regs-iis.h"
#include <asm/mach-types.h>

View File

@ -729,7 +729,7 @@ int s3c_i2sv2_register_component(struct device *dev, int id,
struct snd_soc_component_driver *cmp_drv,
struct snd_soc_dai_driver *dai_drv)
{
struct snd_soc_dai_ops *ops = drv->ops;
struct snd_soc_dai_ops *ops = dai_drv->ops;
ops->trigger = s3c2412_i2s_trigger;
if (!ops->hw_params)
@ -742,8 +742,8 @@ int s3c_i2sv2_register_component(struct device *dev, int id,
if (!ops->delay)
ops->delay = s3c2412_i2s_delay;
drv->suspend = s3c2412_i2s_suspend;
drv->resume = s3c2412_i2s_resume;
dai_drv->suspend = s3c2412_i2s_suspend;
dai_drv->resume = s3c2412_i2s_resume;
return snd_soc_register_component(dev, cmp_drv, dai_drv, 1);
}
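[Editor's note, not part of the patch: the s3c-i2sv2 hunk above fixes the helper to take the ops and suspend/resume hooks from the dai_drv argument it was actually passed, instead of an unrelated variable. A minimal userspace model of that "fill in default callbacks on the passed-in driver" pattern follows; all types and names here are invented.]

#include <stdio.h>

struct dai_ops { void (*trigger)(void); void (*delay)(void); };
struct dai_driver { struct dai_ops *ops; };

static void default_trigger(void) { puts("trigger"); }
static void default_delay(void)   { puts("delay"); }

static void register_dai(struct dai_driver *dai_drv)
{
	struct dai_ops *ops = dai_drv->ops;	/* take ops from the argument */

	ops->trigger = default_trigger;		/* always overridden */
	if (!ops->delay)			/* only fill in if unset */
		ops->delay = default_delay;
}

int main(void)
{
	struct dai_ops ops = { 0 };
	struct dai_driver drv = { .ops = &ops };

	register_dai(&drv);
	drv.ops->trigger();
	drv.ops->delay();
	return 0;
}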

View File

@ -26,6 +26,8 @@
#include <sound/pcm_params.h>
#include <mach/dma.h>
#include <mach/gpio-samsung.h>
#include <plat/gpio-cfg.h>
#include "dma.h"
#include "regs-i2s-v2.h"

View File

@ -24,6 +24,8 @@
#include <sound/pcm_params.h>
#include <mach/dma.h>
#include <mach/gpio-samsung.h>
#include <plat/gpio-cfg.h>
#include "regs-iis.h"
#include "dma.h"

View File

@ -19,6 +19,7 @@
#include <sound/soc.h>
#include <sound/jack.h>
#include <mach/gpio-samsung.h>
#include <asm/mach-types.h>
#include "i2s.h"

View File

@ -152,13 +152,11 @@ static struct snd_soc_card smdk = {
.num_links = ARRAY_SIZE(smdk_dai),
};
#ifdef CONFIG_OF
static const struct of_device_id samsung_wm8994_of_match[] = {
{ .compatible = "samsung,smdk-wm8994", .data = &smdk_board_data },
{},
};
MODULE_DEVICE_TABLE(of, samsung_wm8994_of_match);
#endif /* CONFIG_OF */
static int smdk_audio_probe(struct platform_device *pdev)
{
@ -188,7 +186,7 @@ static int smdk_audio_probe(struct platform_device *pdev)
smdk_dai[0].platform_of_node = smdk_dai[0].cpu_of_node;
}
id = of_match_device(samsung_wm8994_of_match, &pdev->dev);
id = of_match_device(of_match_ptr(samsung_wm8994_of_match), &pdev->dev);
if (id)
*board = *((struct smdk_wm8994_data *)id->data);