Merge branch 'vexpress-v3.4' of git://git.linaro.org/people/pawelmoll/linux into late/soc

A few device tree updates and an include file fix for versatile.

* 'vexpress-v3.4' of git://git.linaro.org/people/pawelmoll/linux:
  ARM: vexpress: Remove twice included header files
  ARM: vexpress: Device Tree updates
  + update to 3.4

Signed-off-by: Olof Johansson <olof@lixom.net>
Commit: 67e7ebc21f
Author: Olof Johansson
Date:   2012-05-26 13:30:41 -07:00
127 changed files with 1028 additions and 557 deletions


@ -539,3 +539,13 @@ When: 3.6
Why: setitimer is not returning -EFAULT if user pointer is NULL. This
violates the spec.
Who: Sasikantha Babu <sasikanth.v19@gmail.com>
----------------------------
What: V4L2_CID_HCENTER, V4L2_CID_VCENTER V4L2 controls
When: 3.7
Why: The V4L2_CID_VCENTER, V4L2_CID_HCENTER controls have been deprecated
for about 4 years and they are not used by any mainline driver.
There are newer controls (V4L2_CID_PAN*, V4L2_CID_TILT*) that provide
similar functionality.
Who: Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
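For code migrating off the deprecated controls, a minimal user-space sketch of the replacement pan/tilt interface is shown below (control values and the two-control batch are illustrative; error handling elided):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Set pan/tilt via the controls that supersede V4L2_CID_HCENTER/VCENTER. */
static int set_pan_tilt(int fd, int pan, int tilt)
{
	struct v4l2_ext_control c[2] = {
		{ .id = V4L2_CID_PAN_ABSOLUTE,  .value = pan  },
		{ .id = V4L2_CID_TILT_ABSOLUTE, .value = tilt },
	};
	struct v4l2_ext_controls ctrls = {
		.ctrl_class = V4L2_CTRL_CLASS_CAMERA,
		.count = 2,
		.controls = c,
	};

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}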


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION =
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
@ -442,7 +442,7 @@ asm-generic:
no-dot-config-targets := clean mrproper distclean \
cscope gtags TAGS tags help %docs check% coccicheck \
include/linux/version.h headers_% archheaders \
include/linux/version.h headers_% archheaders archscripts \
kernelversion %src-pkg
config-targets := 0
@ -979,7 +979,7 @@ prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
include/config/auto.conf
$(cmd_crmodverdir)
archprepare: archheaders prepare1 scripts_basic
archprepare: archheaders archscripts prepare1 scripts_basic
prepare0: archprepare FORCE
$(Q)$(MAKE) $(build)=.
@ -1049,8 +1049,11 @@ hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
PHONY += archheaders
archheaders:
PHONY += archscripts
archscripts:
PHONY += __headers
__headers: include/linux/version.h scripts_basic asm-generic archheaders FORCE
__headers: include/linux/version.h scripts_basic asm-generic archheaders archscripts FORCE
$(Q)$(MAKE) $(build)=scripts build_unifdef
PHONY += headers_install_all


@ -73,7 +73,10 @@
#address-cells = <0>;
interrupt-controller;
reg = <0x2c001000 0x1000>,
<0x2c002000 0x100>;
<0x2c002000 0x1000>,
<0x2c004000 0x2000>,
<0x2c006000 0x2000>;
interrupts = <1 9 0xf04>;
};
memory-controller@7ffd0000 {
@ -93,6 +96,14 @@
<0 91 4>;
};
timer {
compatible = "arm,armv7-timer";
interrupts = <1 13 0xf08>,
<1 14 0xf08>,
<1 11 0xf08>,
<1 10 0xf08>;
};
pmu {
compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
interrupts = <0 68 4>,


@ -77,13 +77,18 @@
timer@2c000600 {
compatible = "arm,cortex-a5-twd-timer";
reg = <0x2c000600 0x38>;
interrupts = <1 2 0x304>,
<1 3 0x304>;
reg = <0x2c000600 0x20>;
interrupts = <1 13 0x304>;
};
watchdog@2c000620 {
compatible = "arm,cortex-a5-twd-wdt";
reg = <0x2c000620 0x20>;
interrupts = <1 14 0x304>;
};
gic: interrupt-controller@2c001000 {
compatible = "arm,corex-a5-gic", "arm,cortex-a9-gic";
compatible = "arm,cortex-a5-gic", "arm,cortex-a9-gic";
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;


@ -105,8 +105,13 @@
timer@1e000600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x1e000600 0x20>;
interrupts = <1 2 0xf04>,
<1 3 0xf04>;
interrupts = <1 13 0xf04>;
};
watchdog@1e000620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0x1e000620 0x20>;
interrupts = <1 14 0xf04>;
};
gic: interrupt-controller@1e001000 {


@ -42,7 +42,8 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
static __init void sirfsoc_irq_init(void)
{
sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32);
sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32);
sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32,
SIRFSOC_INTENAL_IRQ_END + 1 - 32);
writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0);
writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1);
@ -68,7 +69,8 @@ void __init sirfsoc_of_irq_init(void)
if (!sirfsoc_intc_base)
panic("unable to map intc cpu registers\n");
irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);
irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0,
&irq_domain_simple_ops, NULL);
of_node_put(np);


@ -53,10 +53,10 @@ static void flowctrl_update(u8 offset, u32 value)
void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
{
return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
}
void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
{
return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
}


@ -14,7 +14,6 @@
#include <linux/ata_platform.h>
#include <linux/smsc911x.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/usb/isp1760.h>
#include <linux/clkdev.h>
#include <linux/mtd/physmap.h>
@ -29,7 +28,6 @@
#include <asm/hardware/gic.h>
#include <asm/hardware/timer-sp.h>
#include <asm/hardware/sp810.h>
#include <asm/hardware/gic.h>
#include <mach/ct-ca9x4.h>
#include <mach/motherboard.h>


@ -247,7 +247,9 @@ good_area:
return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
/* Don't allow expansion below FIRST_USER_ADDRESS */
if (vma->vm_flags & VM_GROWSDOWN &&
addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
goto good_area;
out:
return fault;


@ -489,6 +489,7 @@ static void __init build_mem_type_table(void)
*/
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_pte |= PTE_EXT_AF;
if (mem_types[i].prot_sect)
mem_types[i].prot_sect |= PMD_SECT_AF;
}
kern_pgprot |= PTE_EXT_AF;


@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
@ -432,7 +433,10 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
static void vfp_enable(void *unused)
{
u32 access = get_copro_access();
u32 access;
BUG_ON(preemptible());
access = get_copro_access();
/*
* Enable full access to VFP (cp10 and cp11)
@ -573,12 +577,6 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
* entry.
*/
hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
/*
* Disable VFP in the hwstate so that we can detect if it gets
* used.
*/
hwstate->fpexc &= ~FPEXC_EN;
return 0;
}
@ -591,11 +589,7 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
unsigned long fpexc;
int err = 0;
/*
* If VFP has been used, then disable it to avoid corrupting
* the new thread state.
*/
if (hwstate->fpexc & FPEXC_EN)
/* Disable VFP to avoid corrupting the new thread state. */
vfp_flush_hwstate(thread);
/*
@ -657,7 +651,7 @@ static int __init vfp_init(void)
unsigned int cpu_arch = cpu_architecture();
if (cpu_arch >= CPU_ARCH_ARMv6)
vfp_enable(NULL);
on_each_cpu(vfp_enable, NULL, 1);
/*
* First check that there is a VFP that we can use.
@ -678,8 +672,6 @@ static int __init vfp_init(void)
} else {
hotcpu_notifier(vfp_hotplug, 0);
smp_call_function(vfp_enable, NULL, 1);
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,


@ -135,10 +135,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
/* Allocation and freeing of basic task resources. */
extern struct task_struct *alloc_task_struct_node(int node);
extern void free_task_struct(struct task_struct *p);
#define cpu_relax() barrier()
/* data cache prefetch */


@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
@ -38,7 +39,6 @@
#include "internal.h"
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpu.h>
#include <asm/cacheflush.h>
static unsigned long sleep_mode[NR_CPUS];
@ -874,10 +874,13 @@ static void __init smp_online(void)
cpu = smp_processor_id();
local_irq_enable();
notify_cpu_starting(cpu);
ipi_call_lock();
set_cpu_online(cpu, true);
smp_wmb();
ipi_call_unlock();
local_irq_enable();
}
/**


@ -21,7 +21,12 @@
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *addr)
{
__asm__("ldw 0(%0), %%r0" : : "r" (addr));
__asm__(
#ifndef CONFIG_PA20
/* Need to avoid prefetch of NULL on PA7300LC */
" extrw,u,= %0,31,32,%%r0\n"
#endif
" ldw 0(%0), %%r0" : : "r" (addr));
}
/* LDD is a PA2.0 addition. */


@ -581,7 +581,11 @@
*/
cmpiclr,= 0x01,\tmp,%r0
ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
#ifdef CONFIG_64BIT
depd,z \prot,8,7,\prot
#else
depw,z \prot,8,7,\prot
#endif
/*
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.


@ -692,7 +692,7 @@ ENTRY(flush_icache_page_asm)
/* Purge any old translation */
pitlb (%sr0,%r28)
pitlb (%sr4,%r28)
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r1
@ -706,27 +706,29 @@ ENTRY(flush_icache_page_asm)
sub %r25, %r1, %r25
1: fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
/* fic only has the type 26 form on PA1.1, requiring an
* explicit space specification, so use %sr4 */
1: fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
fic,m %r1(%sr4,%r28)
cmpb,COND(<<) %r28, %r25,1b
fic,m %r1(%r28)
fic,m %r1(%sr4,%r28)
sync
bv %r0(%r2)
pitlb (%sr0,%r25)
pitlb (%sr4,%r25)
.exit
.procend


@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/atomic.h>
#include <asm/current.h>
@ -296,7 +297,12 @@ smp_cpu_init(int cpunum)
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
machine_halt();
}
notify_cpu_starting(cpunum);
ipi_call_lock();
set_cpu_online(cpunum, true);
ipi_call_unlock();
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);


@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s {
u64 sdr1;
u64 hior;
u64 msr_mask;
u64 vsid_next;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
u32 vsid_next;
#else
u64 vsid_first;
u64 vsid_max;
u64 proto_vsid_first;
u64 proto_vsid_max;
u64 proto_vsid_next;
#endif
int context_id[SID_CONTEXTS];


@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
backwards_map = !backwards_map;
/* Uh-oh ... out of mappings. Let's flush! */
if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
memset(vcpu_book3s->sid_map, 0,
sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
kvmppc_mmu_pte_flush(vcpu, 0, 0);
kvmppc_mmu_flush_segments(vcpu);
}
map->host_vsid = vcpu_book3s->vsid_next++;
map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);
map->guest_vsid = gvsid;
map->valid = true;
@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
return -1;
vcpu3s->context_id[0] = err;
vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
vcpu3s->vsid_next = vcpu3s->vsid_first;
vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
<< USER_ESID_BITS) - 1;
vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu);


@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
/* insert R and C bits from PTE */
rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
args[j] |= rcbits << (56 - 5);
hp[0] = 0;
continue;
}


@ -198,6 +198,7 @@ kvmppc_interrupt:
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
andi. r0, r12, 0x2
cmpwi cr1, r0, 0
beq 1f
mfspr r3,SPRN_HSRR0
mfspr r4,SPRN_HSRR1
@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
beq ld_last_prev_inst
cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT
beq- ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
beq- ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
b no_ld_last_inst
@ -316,23 +323,17 @@ no_dcbz32_off:
* Having set up SRR0/1 with the address where we want
* to continue with relocation on (potentially in module
* space), we either just go straight there with rfi[d],
* or we jump to an interrupt handler with bctr if there
* is an interrupt to be handled first. In the latter
* case, the rfi[d] at the end of the interrupt handler
* will get us back to where we want to continue.
* or we jump to an interrupt handler if there is an
* interrupt to be handled first. In the latter case,
* the rfi[d] at the end of the interrupt handler will
* get us back to where we want to continue.
*/
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beq 1f
cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
beq 1f
cmpwi r12, BOOK3S_INTERRUPT_PERFMON
1: mtctr r12
/* Register usage at this point:
*
* R1 = host R1
* R2 = host R2
* R10 = raw exit handler id
* R12 = exit handler id
* R13 = shadow vcpu (32-bit) or PACA (64-bit)
* SVCPU.* = guest *
@ -342,12 +343,25 @@ no_dcbz32_off:
PPC_LL r6, HSTATE_HOST_MSR(r13)
PPC_LL r8, HSTATE_VMHANDLER(r13)
/* Restore host msr -> SRR1 */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
beq cr1, 1f
mtspr SPRN_HSRR1, r6
mtspr SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1: /* Restore host msr -> SRR1 */
mtsrr1 r6
/* Load highmem handler address */
mtsrr0 r8
/* RFI into the highmem handler, or jump to interrupt handler */
beqctr
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beqa BOOK3S_INTERRUPT_EXTERNAL
cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
beqa BOOK3S_INTERRUPT_DECREMENTER
cmpwi r12, BOOK3S_INTERRUPT_PERFMON
beqa BOOK3S_INTERRUPT_PERFMON
RFI
kvmppc_handler_trampoline_exit_end:


@ -11,6 +11,7 @@ config TILE
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
select HAVE_SYSCALL_WRAPPERS if TILEGX
select SYS_HYPERVISOR
select ARCH_HAVE_NMI_SAFE_CMPXCHG


@ -100,9 +100,14 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
#else /* __ASSEMBLY__ */
/* how to get the thread information struct from ASM */
/*
* How to get the thread information struct from assembly.
* Note that we use different macros since different architectures
* have different semantics in their "mm" instruction and we would
* like to guarantee that the macro expands to exactly one instruction.
*/
#ifdef __tilegx__
#define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63
#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
#else
#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
#endif


@ -403,19 +403,17 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* Set up registers for signal handler.
* Registers that we don't modify keep the value they had from
* user-space at the time we took the signal.
* We always pass siginfo and mcontext, regardless of SA_SIGINFO,
* since some things rely on this (e.g. glibc's debug/segfault.c).
*/
regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
regs->sp = ptr_to_compat_reg(frame);
regs->lr = restorer;
regs->regs[0] = (unsigned long) usig;
if (ka->sa.sa_flags & SA_SIGINFO) {
/* Need extra arguments, so mark to restore caller-saves. */
regs->regs[1] = ptr_to_compat_reg(&frame->info);
regs->regs[2] = ptr_to_compat_reg(&frame->uc);
regs->flags |= PT_FLAGS_CALLER_SAVES;
}
/*
* Notify any tracer that was single-stepping it.


@ -838,6 +838,18 @@ STD_ENTRY(interrupt_return)
.Lresume_userspace:
FEEDBACK_REENTER(interrupt_return)
/*
* Use r33 to hold whether we have already loaded the callee-saves
* into ptregs. We don't want to do it twice in this loop, since
* then we'd clobber whatever changes are made by ptrace, etc.
* Get base of stack in r32.
*/
{
GET_THREAD_INFO(r32)
movei r33, 0
}
.Lretry_work_pending:
/*
* Disable interrupts so as to make sure we don't
* miss an interrupt that sets any of the thread flags (like
@ -848,9 +860,6 @@ STD_ENTRY(interrupt_return)
IRQ_DISABLE(r20, r21)
TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
/* Get base of stack in r32; note r30/31 are used as arguments here. */
GET_THREAD_INFO(r32)
/* Check to see if there is any work to do before returning to user. */
{
@ -866,16 +875,18 @@ STD_ENTRY(interrupt_return)
/*
* Make sure we have all the registers saved for signal
* handling or single-step. Call out to C code to figure out
* exactly what we need to do for each flag bit, then if
* necessary, reload the flags and recheck.
* handling, notify-resume, or single-step. Call out to C
* code to figure out exactly what we need to do for each flag bit,
* then if necessary, reload the flags and recheck.
*/
push_extra_callee_saves r0
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal do_work_pending
bnz r33, 1f
}
bnz r0, .Lresume_userspace
push_extra_callee_saves r0
movei r33, 1
1: jal do_work_pending
bnz r0, .Lretry_work_pending
/*
* In the NMI case we
@ -1180,10 +1191,12 @@ handle_syscall:
add r20, r20, tp
lw r21, r20
addi r21, r21, 1
{
sw r20, r21
GET_THREAD_INFO(r31)
}
/* Trace syscalls, if requested. */
GET_THREAD_INFO(r31)
addi r31, r31, THREAD_INFO_FLAGS_OFFSET
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
@ -1362,7 +1375,10 @@ handle_ill:
3:
/* set PC and continue */
lw r26, r24
{
sw r28, r26
GET_THREAD_INFO(r0)
}
/*
* Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
@ -1370,7 +1386,6 @@ handle_ill:
* need to clear it here and can't really impose on all other arches.
* So what's another write between friends?
*/
GET_THREAD_INFO(r0)
addi r1, r0, THREAD_INFO_FLAGS_OFFSET
{


@ -646,6 +646,20 @@ STD_ENTRY(interrupt_return)
.Lresume_userspace:
FEEDBACK_REENTER(interrupt_return)
/*
* Use r33 to hold whether we have already loaded the callee-saves
* into ptregs. We don't want to do it twice in this loop, since
* then we'd clobber whatever changes are made by ptrace, etc.
*/
{
movei r33, 0
move r32, sp
}
/* Get base of stack in r32. */
EXTRACT_THREAD_INFO(r32)
.Lretry_work_pending:
/*
* Disable interrupts so as to make sure we don't
* miss an interrupt that sets any of the thread flags (like
@ -656,9 +670,6 @@ STD_ENTRY(interrupt_return)
IRQ_DISABLE(r20, r21)
TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
/* Get base of stack in r32; note r30/31 are used as arguments here. */
GET_THREAD_INFO(r32)
/* Check to see if there is any work to do before returning to user. */
{
@ -674,16 +685,18 @@ STD_ENTRY(interrupt_return)
/*
* Make sure we have all the registers saved for signal
* handling or single-step. Call out to C code to figure out
* handling or notify-resume. Call out to C code to figure out
* exactly what we need to do for each flag bit, then if
* necessary, reload the flags and recheck.
*/
push_extra_callee_saves r0
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal do_work_pending
bnez r33, 1f
}
bnez r0, .Lresume_userspace
push_extra_callee_saves r0
movei r33, 1
1: jal do_work_pending
bnez r0, .Lretry_work_pending
/*
* In the NMI case we
@ -968,11 +981,16 @@ handle_syscall:
shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
add r20, r20, tp
ld4s r21, r20
{
addi r21, r21, 1
move r31, sp
}
{
st4 r20, r21
EXTRACT_THREAD_INFO(r31)
}
/* Trace syscalls, if requested. */
GET_THREAD_INFO(r31)
addi r31, r31, THREAD_INFO_FLAGS_OFFSET
ld r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE


@ -567,6 +567,10 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
*/
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
/* If we enter in kernel mode, do nothing and exit the caller loop. */
if (!user_mode(regs))
return 0;
if (thread_info_flags & _TIF_NEED_RESCHED) {
schedule();
return 1;
@ -589,7 +593,6 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
return 1;
}
if (thread_info_flags & _TIF_SINGLESTEP) {
if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
single_step_once(regs);
return 0;
}
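Read together with the interrupt-return assembly above, the contract is easier to see in C. A rough sketch of the loop the assembly implements follows; apart from do_work_pending() itself, the identifiers are made up for illustration and this is not the real Tile code (which lives in intvec_32.S/intvec_64.S):

/* Illustrative only. */
static void return_to_user_sketch(struct pt_regs *regs)
{
	int callee_saves_pushed = 0;	/* the role played by r33 above */

	for (;;) {
		u32 flags;

		local_irq_disable();
		flags = current_thread_info()->flags;
		if (!(flags & WORK_PENDING_MASK))	/* hypothetical mask name */
			return;				/* nothing to do, back to user */

		if (!callee_saves_pushed) {
			/* Spill the extra callee-saves into pt_regs once, so a
			 * second pass does not clobber ptrace's modifications. */
			callee_saves_pushed = 1;
		}

		if (!do_work_pending(regs, flags))
			return;		/* handled, no recheck required */
		/* do_work_pending() returned 1: flags may have changed, retry. */
	}
}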


@ -134,6 +134,9 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)
archscripts:
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
###
# Syscall table generation


@ -40,13 +40,12 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
targets += vmlinux.bin.all vmlinux.relocs
targets += vmlinux.bin.all vmlinux.relocs relocs
hostprogs-$(CONFIG_X86_NEED_RELOCS) += relocs
CMD_RELOCS = arch/x86/tools/relocs
quiet_cmd_relocs = RELOCS $@
cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
$(obj)/vmlinux.relocs: vmlinux FORCE
$(call if_changed,relocs)
vmlinux.bin.all-y := $(obj)/vmlinux.bin


@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
unsigned int eax, ebx, ecx, edx;
char signature[13];
if (boot_cpu_data.cpuid_level < 0)
return 0; /* So we don't blow up on old processors */
cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
memcpy(signature + 0, &ebx, 4);
memcpy(signature + 4, &ecx, 4);


@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;


@ -945,9 +945,10 @@ struct mce_info {
atomic_t inuse;
struct task_struct *t;
__u64 paddr;
int restartable;
} mce_info[MCE_INFO_MAX];
static void mce_save_info(__u64 addr)
static void mce_save_info(__u64 addr, int c)
{
struct mce_info *mi;
@ -955,6 +956,7 @@ static void mce_save_info(__u64 addr)
if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
mi->t = current;
mi->paddr = addr;
mi->restartable = c;
return;
}
}
@ -1130,7 +1132,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
mce_panic("Fatal machine check on current CPU", &m, msg);
if (worst == MCE_AR_SEVERITY) {
/* schedule action before return to userland */
mce_save_info(m.addr);
mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
set_thread_flag(TIF_MCE_NOTIFY);
} else if (kill_it) {
force_sig(SIGBUS, current);
@ -1179,7 +1181,13 @@ void mce_notify_process(void)
pr_err("Uncorrected hardware memory error in user-access at %llx",
mi->paddr);
if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0) {
/*
* We must call memory_failure() here even if the current process is
* doomed. We still need to mark the page as poisoned and alert any
* other users of the page.
*/
if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
mi->restartable == 0) {
pr_err("Memory error not recovered");
force_sig(SIGBUS, current);
}
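The effect of carrying MCG_STATUS_RIPV around as mi->restartable can be summarised in a small decision table derived from the code above:

/*
 *  memory_failure() result | RIPV at #MC time | outcome in mce_notify_process()
 * -------------------------+------------------+--------------------------------
 *  success (>= 0)          | set              | return to the interrupted task
 *  success (>= 0)          | clear            | force SIGBUS (cannot restart)
 *  failure (< 0)           | set or clear     | force SIGBUS (not recovered)
 */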


@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
memset(csig, 0, sizeof(*csig));
if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
cpu_has(c, X86_FEATURE_IA64)) {
pr_err("CPU%d not a capable Intel processor\n", cpu_num);
return -1;
}
csig->sig = cpuid_eax(0x00000001);
if ((c->x86_model >= 5) || (c->x86 > 6)) {
@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = {
struct microcode_ops * __init init_intel_microcode(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
cpu_has(c, X86_FEATURE_IA64)) {
pr_err("Intel CPU family 0x%x not supported\n", c->x86);
return NULL;
}
return &microcode_intel_ops;
}

arch/x86/tools/.gitignore (new file, 1 line)

@ -0,0 +1 @@
relocs


@ -36,3 +36,7 @@ HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x
$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-y += relocs
relocs: $(obj)/relocs


@ -18,6 +18,8 @@ static void die(char *fmt, ...);
static Elf32_Ehdr ehdr;
static unsigned long reloc_count, reloc_idx;
static unsigned long *relocs;
static unsigned long reloc16_count, reloc16_idx;
static unsigned long *relocs16;
struct section {
Elf32_Shdr shdr;
@ -28,52 +30,86 @@ struct section {
};
static struct section *secs;
enum symtype {
S_ABS,
S_REL,
S_SEG,
S_LIN,
S_NSYMTYPES
};
static const char * const sym_regex_kernel[S_NSYMTYPES] = {
/*
* Following symbols have been audited. There values are constant and do
* not change if bzImage is loaded at a different physical address than
* the address for which it has been compiled. Don't warn user about
* absolute relocations present w.r.t these symbols.
*/
static const char abs_sym_regex[] =
[S_ABS] =
"^(xen_irq_disable_direct_reloc$|"
"xen_save_fl_direct_reloc$|"
"VDSO|"
"__crc_)";
static regex_t abs_sym_regex_c;
static int is_abs_reloc(const char *sym_name)
{
return !regexec(&abs_sym_regex_c, sym_name, 0, NULL, 0);
}
"__crc_)",
/*
* These symbols are known to be relative, even if the linker marks them
* as absolute (typically defined outside any section in the linker script.)
*/
static const char rel_sym_regex[] =
"^_end$";
static regex_t rel_sym_regex_c;
static int is_rel_reloc(const char *sym_name)
[S_REL] =
"^(__init_(begin|end)|"
"__x86_cpu_dev_(start|end)|"
"(__parainstructions|__alt_instructions)(|_end)|"
"(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
"_end)$"
};
static const char * const sym_regex_realmode[S_NSYMTYPES] = {
/*
* These are 16-bit segment symbols when compiling 16-bit code.
*/
[S_SEG] =
"^real_mode_seg$",
/*
* These are offsets belonging to segments, as opposed to linear addresses,
* when compiling 16-bit code.
*/
[S_LIN] =
"^pa_",
};
static const char * const *sym_regex;
static regex_t sym_regex_c[S_NSYMTYPES];
static int is_reloc(enum symtype type, const char *sym_name)
{
return !regexec(&rel_sym_regex_c, sym_name, 0, NULL, 0);
return sym_regex[type] &&
!regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
}
static void regex_init(void)
static void regex_init(int use_real_mode)
{
char errbuf[128];
int err;
int i;
err = regcomp(&abs_sym_regex_c, abs_sym_regex,
if (use_real_mode)
sym_regex = sym_regex_realmode;
else
sym_regex = sym_regex_kernel;
for (i = 0; i < S_NSYMTYPES; i++) {
if (!sym_regex[i])
continue;
err = regcomp(&sym_regex_c[i], sym_regex[i],
REG_EXTENDED|REG_NOSUB);
if (err) {
regerror(err, &abs_sym_regex_c, errbuf, sizeof errbuf);
regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
die("%s", errbuf);
}
err = regcomp(&rel_sym_regex_c, rel_sym_regex,
REG_EXTENDED|REG_NOSUB);
if (err) {
regerror(err, &rel_sym_regex_c, errbuf, sizeof errbuf);
die("%s", errbuf);
}
}
@ -154,6 +190,10 @@ static const char *rel_type(unsigned type)
REL_TYPE(R_386_RELATIVE),
REL_TYPE(R_386_GOTOFF),
REL_TYPE(R_386_GOTPC),
REL_TYPE(R_386_8),
REL_TYPE(R_386_PC8),
REL_TYPE(R_386_16),
REL_TYPE(R_386_PC16),
#undef REL_TYPE
};
const char *name = "unknown type rel type name";
@ -189,7 +229,7 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
name = sym_strtab + sym->st_name;
}
else {
name = sec_name(secs[sym->st_shndx].shdr.sh_name);
name = sec_name(sym->st_shndx);
}
return name;
}
@ -472,7 +512,7 @@ static void print_absolute_relocs(void)
* Before warning check if this absolute symbol
* relocation is harmless.
*/
if (is_abs_reloc(name) || is_rel_reloc(name))
if (is_reloc(S_ABS, name) || is_reloc(S_REL, name))
continue;
if (!printed) {
@ -496,7 +536,8 @@ static void print_absolute_relocs(void)
printf("\n");
}
static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
int use_real_mode)
{
int i;
/* Walk through the relocations */
@ -521,30 +562,67 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
Elf32_Rel *rel;
Elf32_Sym *sym;
unsigned r_type;
const char *symname;
int shn_abs;
rel = &sec->reltab[j];
sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
r_type = ELF32_R_TYPE(rel->r_info);
/* Don't visit relocations to absolute symbols */
if (sym->st_shndx == SHN_ABS &&
!is_rel_reloc(sym_name(sym_strtab, sym))) {
continue;
}
shn_abs = sym->st_shndx == SHN_ABS;
switch (r_type) {
case R_386_NONE:
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
/*
* NONE can be ignored and and PC relative
* relocations don't need to be adjusted.
*/
break;
case R_386_16:
symname = sym_name(sym_strtab, sym);
if (!use_real_mode)
goto bad;
if (shn_abs) {
if (is_reloc(S_ABS, symname))
break;
else if (!is_reloc(S_SEG, symname))
goto bad;
} else {
if (is_reloc(S_LIN, symname))
goto bad;
else
break;
}
visit(rel, sym);
break;
case R_386_32:
/* Visit relocations that need to be adjusted */
symname = sym_name(sym_strtab, sym);
if (shn_abs) {
if (is_reloc(S_ABS, symname))
break;
else if (!is_reloc(S_REL, symname))
goto bad;
} else {
if (use_real_mode &&
!is_reloc(S_LIN, symname))
break;
}
visit(rel, sym);
break;
default:
die("Unsupported relocation type: %s (%d)\n",
rel_type(r_type), r_type);
break;
bad:
symname = sym_name(sym_strtab, sym);
die("Invalid %s %s relocation: %s\n",
shn_abs ? "absolute" : "relative",
rel_type(r_type), symname);
}
}
}
@ -552,12 +630,18 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
{
reloc_count += 1;
if (ELF32_R_TYPE(rel->r_info) == R_386_16)
reloc16_count++;
else
reloc_count++;
}
static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
{
/* Remember the address that needs to be adjusted. */
if (ELF32_R_TYPE(rel->r_info) == R_386_16)
relocs16[reloc16_idx++] = rel->r_offset;
else
relocs[reloc_idx++] = rel->r_offset;
}
@ -568,23 +652,41 @@ static int cmp_relocs(const void *va, const void *vb)
return (*a == *b)? 0 : (*a > *b)? 1 : -1;
}
static void emit_relocs(int as_text)
static int write32(unsigned int v, FILE *f)
{
unsigned char buf[4];
put_unaligned_le32(v, buf);
return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
}
static void emit_relocs(int as_text, int use_real_mode)
{
int i;
/* Count how many relocations I have and allocate space for them. */
reloc_count = 0;
walk_relocs(count_reloc);
walk_relocs(count_reloc, use_real_mode);
relocs = malloc(reloc_count * sizeof(relocs[0]));
if (!relocs) {
die("malloc of %d entries for relocs failed\n",
reloc_count);
}
relocs16 = malloc(reloc16_count * sizeof(relocs[0]));
if (!relocs16) {
die("malloc of %d entries for relocs16 failed\n",
reloc16_count);
}
/* Collect up the relocations */
reloc_idx = 0;
walk_relocs(collect_reloc);
walk_relocs(collect_reloc, use_real_mode);
if (reloc16_count && !use_real_mode)
die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs);
/* Print the relocations */
if (as_text) {
@ -593,58 +695,83 @@ static void emit_relocs(int as_text)
*/
printf(".section \".data.reloc\",\"a\"\n");
printf(".balign 4\n");
if (use_real_mode) {
printf("\t.long %lu\n", reloc16_count);
for (i = 0; i < reloc16_count; i++)
printf("\t.long 0x%08lx\n", relocs16[i]);
printf("\t.long %lu\n", reloc_count);
for (i = 0; i < reloc_count; i++) {
printf("\t.long 0x%08lx\n", relocs[i]);
}
} else {
/* Print a stop */
printf("\t.long 0x%08lx\n", (unsigned long)0);
for (i = 0; i < reloc_count; i++) {
printf("\t.long 0x%08lx\n", relocs[i]);
}
}
printf("\n");
}
else {
unsigned char buf[4];
if (use_real_mode) {
write32(reloc16_count, stdout);
for (i = 0; i < reloc16_count; i++)
write32(relocs16[i], stdout);
write32(reloc_count, stdout);
/* Now print each relocation */
for (i = 0; i < reloc_count; i++)
write32(relocs[i], stdout);
} else {
/* Print a stop */
fwrite("\0\0\0\0", 4, 1, stdout);
write32(0, stdout);
/* Now print each relocation */
for (i = 0; i < reloc_count; i++) {
put_unaligned_le32(relocs[i], buf);
fwrite(buf, 4, 1, stdout);
write32(relocs[i], stdout);
}
}
}
}
static void usage(void)
{
die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n");
die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
}
int main(int argc, char **argv)
{
int show_absolute_syms, show_absolute_relocs;
int as_text;
int as_text, use_real_mode;
const char *fname;
FILE *fp;
int i;
regex_init();
show_absolute_syms = 0;
show_absolute_relocs = 0;
as_text = 0;
use_real_mode = 0;
fname = NULL;
for (i = 1; i < argc; i++) {
char *arg = argv[i];
if (*arg == '-') {
if (strcmp(argv[1], "--abs-syms") == 0) {
if (strcmp(arg, "--abs-syms") == 0) {
show_absolute_syms = 1;
continue;
}
if (strcmp(argv[1], "--abs-relocs") == 0) {
if (strcmp(arg, "--abs-relocs") == 0) {
show_absolute_relocs = 1;
continue;
}
else if (strcmp(argv[1], "--text") == 0) {
if (strcmp(arg, "--text") == 0) {
as_text = 1;
continue;
}
if (strcmp(arg, "--realmode") == 0) {
use_real_mode = 1;
continue;
}
}
else if (!fname) {
fname = arg;
@ -655,6 +782,7 @@ int main(int argc, char **argv)
if (!fname) {
usage();
}
regex_init(use_real_mode);
fp = fopen(fname, "r");
if (!fp) {
die("Cannot open %s: %s\n",
@ -673,6 +801,6 @@ int main(int argc, char **argv)
print_absolute_relocs();
return 0;
}
emit_relocs(as_text);
emit_relocs(as_text, use_real_mode);
return 0;
}
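Based on the write32() sequences above, the binary stream the tool emits with --realmode can be consumed as sketched below; this is illustrative only (not kernel code), assumes a little-endian host, and elides error handling:

#include <stdint.h>
#include <stdio.h>

static void read_realmode_reloc_table(FILE *f)
{
	uint32_t n16, n32, off;
	uint32_t i;

	fread(&n16, 4, 1, f);		/* count of R_386_16 segment fixups */
	for (i = 0; i < n16; i++)
		fread(&off, 4, 1, f);	/* offset of a 16-bit segment reference */

	fread(&n32, 4, 1, f);		/* count of R_386_32 linear fixups */
	for (i = 0; i < n32; i++)
		fread(&off, 4, 1, f);	/* offset of a 32-bit linear reference */
}

/* Without --realmode the stream is instead a single zero "stop" word followed
 * by the 32-bit relocation offsets, as in the else branch above. */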


@ -743,7 +743,7 @@ void __init printk_all_partitions(void)
struct hd_struct *part;
char name_buf[BDEVNAME_SIZE];
char devt_buf[BDEVT_SIZE];
u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];
/*
* Don't show empty devices or things that have been
@ -762,14 +762,16 @@ void __init printk_all_partitions(void)
while ((part = disk_part_iter_next(&piter))) {
bool is_part0 = part == &disk->part0;
uuid[0] = 0;
uuid_buf[0] = '\0';
if (part->info)
part_unpack_uuid(part->info->uuid, uuid);
snprintf(uuid_buf, sizeof(uuid_buf), "%pU",
part->info->uuid);
printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
bdevt_str(part_devt(part), devt_buf),
(unsigned long long)part->nr_sects >> 1,
disk_name(disk, part->partno, name_buf), uuid);
disk_name(disk, part->partno, name_buf),
uuid_buf);
if (is_part0) {
if (disk->driverfs_dev != NULL &&
disk->driverfs_dev->driver != NULL)


@ -250,6 +250,10 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
return -ENODEV;
}
/* For D3cold we should execute _PS3, not _PS4. */
if (state == ACPI_STATE_D3_COLD)
object_name[3] = '3';
/*
* Transition Power
* ----------------


@ -660,7 +660,7 @@ int acpi_power_on_resources(struct acpi_device *device, int state)
int acpi_power_transition(struct acpi_device *device, int state)
{
int result;
int result = 0;
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
@ -679,8 +679,11 @@ int acpi_power_transition(struct acpi_device *device, int state)
* (e.g. so the device doesn't lose power while transitioning). Then,
* we dereference all power resources used in the current list.
*/
result = acpi_power_on_list(&device->power.states[state].resources);
if (!result)
if (state < ACPI_STATE_D3_COLD)
result = acpi_power_on_list(
&device->power.states[state].resources);
if (!result && device->power.state < ACPI_STATE_D3_COLD)
acpi_power_off_list(
&device->power.states[device->power.state].resources);


@ -908,6 +908,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
device->power.states[ACPI_STATE_D3].flags.valid = 1;
device->power.states[ACPI_STATE_D3].power = 0;
/* Set D3cold's explicit_set flag if _PS3 exists. */
if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
acpi_bus_init_power(device);
return 0;


@ -6580,14 +6580,11 @@ static const struct file_operations dac960_user_command_proc_fops = {
static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
{
struct proc_dir_entry *StatusProcEntry;
struct proc_dir_entry *ControllerProcEntry;
struct proc_dir_entry *UserCommandProcEntry;
if (DAC960_ProcDirectoryEntry == NULL) {
DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
StatusProcEntry = proc_create("status", 0,
DAC960_ProcDirectoryEntry,
proc_create("status", 0, DAC960_ProcDirectoryEntry,
&dac960_proc_fops);
}
@ -6596,7 +6593,7 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
DAC960_ProcDirectoryEntry);
proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
Controller->ControllerProcEntry = ControllerProcEntry;
}


@ -2510,8 +2510,10 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
up(&dd->port->cmd_slot);
return NULL;
}
if (unlikely(*tag < 0))
if (unlikely(*tag < 0)) {
up(&dd->port->cmd_slot);
return NULL;
}
return dd->port->commands[*tag].sg;
}


@ -1895,6 +1895,13 @@ static int virtcons_restore(struct virtio_device *vdev)
/* Get port open/close status on the host */
send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
/*
* If a port was open at the time of suspending, we
* have to let the host know that it's still open.
*/
if (port->guest_connected)
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
}
return 0;
}


@ -164,6 +164,7 @@ config CRYPTO_DEV_MV_CESA
select CRYPTO_ALGAPI
select CRYPTO_AES
select CRYPTO_BLKCIPHER2
select CRYPTO_HASH
help
This driver allows you to utilize the Cryptographic Engines and
Security Accelerator (CESA) which can be found on the Marvell Orion


@ -245,6 +245,8 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
/* mark the descriptor as complete for non cyclic cases only */
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);
/* move children to free_list */

View File

@ -703,6 +703,8 @@ static void ep93xx_dma_tasklet(unsigned long data)
desc = ep93xx_dma_get_active(edmac);
if (desc) {
if (desc->complete) {
/* mark descriptor complete for non cyclic case only */
if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
dma_cookie_complete(&desc->txd);
list_splice_init(&edmac->active, &list);
}


@ -2322,6 +2322,7 @@ static void pl330_tasklet(unsigned long data)
/* Pick up ripe tomatoes */
list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
if (desc->status == DONE) {
if (pch->cyclic)
dma_cookie_complete(&desc->txd);
list_move_tail(&desc->node, &list);
}


@ -1632,6 +1632,21 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
pool->low_water_blocks = pt->low_water_blocks;
pool->pf = pt->pf;
/*
* If discard_passdown was enabled verify that the data device
* supports discards. Disable discard_passdown if not; otherwise
* -EOPNOTSUPP will be returned.
*/
if (pt->pf.discard_passdown) {
struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
if (!q || !blk_queue_discard(q)) {
char buf[BDEVNAME_SIZE];
DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
bdevname(pt->data_dev->bdev, buf));
pool->pf.discard_passdown = 0;
}
}
return 0;
}
@ -1988,19 +2003,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out_flags_changed;
}
/*
* If discard_passdown was enabled verify that the data device
* supports discards. Disable discard_passdown if not; otherwise
* -EOPNOTSUPP will be returned.
*/
if (pf.discard_passdown) {
struct request_queue *q = bdev_get_queue(data_dev->bdev);
if (!q || !blk_queue_discard(q)) {
DMWARN("Discard unsupported by data device: Disabling discard passdown.");
pf.discard_passdown = 0;
}
}
pt->pool = pool;
pt->ti = ti;
pt->metadata_dev = metadata_dev;
@ -2385,7 +2387,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
(unsigned long long)pt->low_water_blocks);
count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
!pool->pf.discard_passdown;
!pt->pf.discard_passdown;
DMEMIT("%u ", count);
if (!pool->pf.zero_new_blocks)
@ -2394,7 +2396,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
if (!pool->pf.discard_enabled)
DMEMIT("ignore_discard ");
if (!pool->pf.discard_passdown)
if (!pt->pf.discard_passdown)
DMEMIT("no_discard_passdown ");
break;


@ -391,6 +391,8 @@ void mddev_suspend(struct mddev *mddev)
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);


@ -3164,12 +3164,40 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return size << conf->chunk_shift;
}
static void calc_sectors(struct r10conf *conf, sector_t size)
{
/* Calculate the number of sectors-per-device that will
* actually be used, and set conf->dev_sectors and
* conf->stride
*/
size = size >> conf->chunk_shift;
sector_div(size, conf->far_copies);
size = size * conf->raid_disks;
sector_div(size, conf->near_copies);
/* 'size' is now the number of chunks in the array */
/* calculate "used chunks per device" */
size = size * conf->copies;
/* We need to round up when dividing by raid_disks to
* get the stride size.
*/
size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
conf->dev_sectors = size << conf->chunk_shift;
if (conf->far_offset)
conf->stride = 1 << conf->chunk_shift;
else {
sector_div(size, conf->far_copies);
conf->stride = size << conf->chunk_shift;
}
}
static struct r10conf *setup_conf(struct mddev *mddev)
{
struct r10conf *conf = NULL;
int nc, fc, fo;
sector_t stride, size;
int err = -EINVAL;
if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@ -3219,28 +3247,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
if (!conf->r10bio_pool)
goto out;
size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc);
size = size * conf->raid_disks;
sector_div(size, nc);
/* 'size' is now the number of chunks in the array */
/* calculate "used chunks per device" in 'stride' */
stride = size * conf->copies;
/* We need to round up when dividing by raid_disks to
* get the stride size.
*/
stride += conf->raid_disks - 1;
sector_div(stride, conf->raid_disks);
conf->dev_sectors = stride << conf->chunk_shift;
if (fo)
stride = 1;
else
sector_div(stride, fc);
conf->stride = stride << conf->chunk_shift;
calc_sectors(conf, mddev->dev_sectors);
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
@ -3468,7 +3475,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
mddev->recovery_cp = oldsize;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
calc_sectors(conf, sectors);
mddev->dev_sectors = conf->dev_sectors;
mddev->resync_max_sectors = size;
return 0;
}
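A worked example of the new calc_sectors(), assuming conf->copies is near_copies * far_copies as derived in setup_conf(); the numbers are illustrative:

/*
 *   raid_disks = 3, near_copies = 2, far_copies = 1, per-device size = 100 chunks
 *
 *   100 / far_copies                 = 100
 *   100 * raid_disks                 = 300
 *   300 / near_copies                = 150   chunks of data in the array
 *   150 * copies (nc * fc = 2)       = 300
 *   DIV_ROUND_UP(300, 3 raid_disks)  = 100   chunks actually used per device
 *
 *   dev_sectors = 100 chunks; stride = 100 / far_copies = 100 chunks
 *   (with far_offset set, stride would instead be a single chunk).
 */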


@ -1921,6 +1921,10 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
} else {
/* default values */
switch (c->delivery_system) {
case SYS_DVBS:
case SYS_DVBS2:
case SYS_ISDBS:
case SYS_TURBO:
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
fepriv->min_delay = HZ / 20;


@ -1018,22 +1018,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
spin_lock_init(&dev->hw_lock);
/* claim the resources */
error = -EBUSY;
dev->hw_io = pnp_port_start(pnp_dev, 0);
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
dev->hw_io = -1;
dev->irq = -1;
goto error;
}
dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
dev->irq = -1;
goto error;
}
pnp_set_drvdata(pnp_dev, dev);
dev->pnp_dev = pnp_dev;
@ -1086,6 +1070,22 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
device_set_wakeup_capable(&pnp_dev->dev, true);
device_set_wakeup_enable(&pnp_dev->dev, true);
/* claim the resources */
error = -EBUSY;
dev->hw_io = pnp_port_start(pnp_dev, 0);
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
dev->hw_io = -1;
dev->irq = -1;
goto error;
}
dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
dev->irq = -1;
goto error;
}
error = rc_register_device(rdev);
if (error < 0)
goto error;


@ -197,7 +197,7 @@ static int fintek_hw_detect(struct fintek_dev *fintek)
/*
* Newer reviews of this chipset uses port 8 instead of 5
*/
if ((chip != 0x0408) || (chip != 0x0804))
if ((chip != 0x0408) && (chip != 0x0804))
fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV2;
else
fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV1;
@ -514,16 +514,6 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
spin_lock_init(&fintek->fintek_lock);
ret = -EBUSY;
/* now claim resources */
if (!request_region(fintek->cir_addr,
fintek->cir_port_len, FINTEK_DRIVER_NAME))
goto failure;
if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
FINTEK_DRIVER_NAME, (void *)fintek))
goto failure;
pnp_set_drvdata(pdev, fintek);
fintek->pdev = pdev;
@ -558,6 +548,16 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
ret = -EBUSY;
/* now claim resources */
if (!request_region(fintek->cir_addr,
fintek->cir_port_len, FINTEK_DRIVER_NAME))
goto failure;
if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
FINTEK_DRIVER_NAME, (void *)fintek))
goto failure;
ret = rc_register_device(rdev);
if (ret)
goto failure;


@ -1515,16 +1515,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* initialize raw event */
init_ir_raw_event(&itdev->rawir);
ret = -EBUSY;
/* now claim resources */
if (!request_region(itdev->cir_addr,
dev_desc->io_region_size, ITE_DRIVER_NAME))
goto failure;
if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
ITE_DRIVER_NAME, (void *)itdev))
goto failure;
/* set driver data into the pnp device */
pnp_set_drvdata(pdev, itdev);
itdev->pdev = pdev;
@ -1600,6 +1590,16 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->driver_name = ITE_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
ret = -EBUSY;
/* now claim resources */
if (!request_region(itdev->cir_addr,
dev_desc->io_region_size, ITE_DRIVER_NAME))
goto failure;
if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
ITE_DRIVER_NAME, (void *)itdev))
goto failure;
ret = rc_register_device(rdev);
if (ret)
goto failure;


@ -1021,24 +1021,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
spin_lock_init(&nvt->nvt_lock);
spin_lock_init(&nvt->tx.lock);
ret = -EBUSY;
/* now claim resources */
if (!request_region(nvt->cir_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
goto failure;
if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
goto failure;
if (!request_region(nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
goto failure;
if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
goto failure;
pnp_set_drvdata(pdev, nvt);
nvt->pdev = pdev;
@ -1085,6 +1067,24 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
rdev->tx_resolution = XYZ;
#endif
ret = -EBUSY;
/* now claim resources */
if (!request_region(nvt->cir_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
goto failure;
if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
goto failure;
if (!request_region(nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
goto failure;
if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
goto failure;
ret = rc_register_device(rdev);
if (ret)
goto failure;


@ -991,39 +991,10 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
"(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
data->wbase, data->ebase, data->sbase, data->irq);
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_free_data;
}
if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_release_wbase;
}
if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->sbase, data->sbase + SP_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_release_ebase;
}
err = request_irq(data->irq, wbcir_irq_handler,
IRQF_DISABLED, DRVNAME, device);
if (err) {
dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
err = -EBUSY;
goto exit_release_sbase;
}
led_trigger_register_simple("cir-tx", &data->txtrigger);
if (!data->txtrigger) {
err = -ENOMEM;
goto exit_free_irq;
goto exit_free_data;
}
led_trigger_register_simple("cir-rx", &data->rxtrigger);
@ -1062,9 +1033,38 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_free_rc;
}
if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_release_wbase;
}
if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
data->sbase, data->sbase + SP_IOMEM_LEN - 1);
err = -EBUSY;
goto exit_release_ebase;
}
err = request_irq(data->irq, wbcir_irq_handler,
IRQF_DISABLED, DRVNAME, device);
if (err) {
dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
err = -EBUSY;
goto exit_release_sbase;
}
err = rc_register_device(data->dev);
if (err)
goto exit_free_rc;
goto exit_free_irq;
device_init_wakeup(&device->dev, 1);
@ -1072,14 +1072,6 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
return 0;
exit_free_rc:
rc_free_device(data->dev);
exit_unregister_led:
led_classdev_unregister(&data->led);
exit_unregister_rxtrigger:
led_trigger_unregister_simple(data->rxtrigger);
exit_unregister_txtrigger:
led_trigger_unregister_simple(data->txtrigger);
exit_free_irq:
free_irq(data->irq, device);
exit_release_sbase:
@ -1088,6 +1080,14 @@ exit_release_ebase:
release_region(data->ebase, EHFUNC_IOMEM_LEN);
exit_release_wbase:
release_region(data->wbase, WAKEUP_IOMEM_LEN);
exit_free_rc:
rc_free_device(data->dev);
exit_unregister_led:
led_classdev_unregister(&data->led);
exit_unregister_rxtrigger:
led_trigger_unregister_simple(data->rxtrigger);
exit_unregister_txtrigger:
led_trigger_unregister_simple(data->txtrigger);
exit_free_data:
kfree(data);
pnp_set_drvdata(device, NULL);
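The ene_ir, fintek, ite-cir, nvt and winbond-cir changes above all make the same move: claim the I/O regions and the (often shared) IRQ only after the rc_dev and driver state are fully set up, immediately before registration, presumably so an early interrupt cannot hit a half-initialised driver. A rough sketch of the resulting probe shape; all identifiers other than the rc-core and resource APIs are illustrative:

static int cir_probe_sketch(struct pnp_dev *pdev)
{
	struct rc_dev *rdev = rc_allocate_device();
	/* ... allocate driver state, init locks, fill in rdev fields ... */

	if (!request_region(io_base, io_len, DRIVER_NAME))
		goto fail;
	if (request_irq(irq, cir_isr, IRQF_SHARED, DRIVER_NAME, priv))
		goto fail;

	return rc_register_device(rdev);

fail:
	/* release whatever was acquired, free rdev */
	return -EBUSY;
}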


@ -2923,6 +2923,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
* not the JPEG end of frame ('ff d9').
*/
/* count the packets and their size */
sd->npkt++;
sd->pktsz += len;
/*fixme: assumption about the following code:
* - there can be only one marker in a packet
*/
@ -2945,10 +2949,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
data += i;
}
/* count the packets and their size */
sd->npkt++;
sd->pktsz += len;
/* search backwards if there is a marker in the packet */
for (i = len - 1; --i >= 0; ) {
if (data[i] != 0xff) {


@ -181,7 +181,6 @@ static int mmpcam_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&cam->devlist);
mcam = &cam->mcam;
mcam->platform = MHP_Armada610;
mcam->plat_power_up = mmpcam_power_up;
mcam->plat_power_down = mmpcam_power_down;
mcam->dev = &pdev->dev;


@ -246,28 +246,37 @@ int fimc_capture_resume(struct fimc_dev *fimc)
}
static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
{
if (!fr || plane >= fr->fmt->memplanes)
return 0;
return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
}
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *allocators[])
{
const struct v4l2_pix_format_mplane *pixm = NULL;
struct fimc_ctx *ctx = vq->drv_priv;
struct fimc_fmt *fmt = ctx->d_frame.fmt;
struct fimc_frame *frame = &ctx->d_frame;
struct fimc_fmt *fmt = frame->fmt;
unsigned long wh;
int i;
if (!fmt)
if (pfmt) {
pixm = &pfmt->fmt.pix_mp;
fmt = fimc_find_format(&pixm->pixelformat, NULL,
FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1);
wh = pixm->width * pixm->height;
} else {
wh = frame->f_width * frame->f_height;
}
if (fmt == NULL)
return -EINVAL;
*num_planes = fmt->memplanes;
for (i = 0; i < fmt->memplanes; i++) {
sizes[i] = get_plane_size(&ctx->d_frame, i);
unsigned int size = (wh * fmt->depth[i]) / 8;
if (pixm)
sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
else
sizes[i] = size;
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
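One note for context on the queue_setup() change above:

/* In videobuf2, queue_setup() is passed a non-NULL format only for
 * VIDIOC_CREATE_BUFS; plain VIDIOC_REQBUFS passes NULL, which is why the
 * code falls back to the currently configured frame size in that case. */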
@ -1383,7 +1392,7 @@ static int fimc_subdev_set_crop(struct v4l2_subdev *sd,
fimc_capture_try_crop(ctx, r, crop->pad);
if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
mutex_lock(&fimc->lock);
mutex_unlock(&fimc->lock);
*v4l2_subdev_get_try_crop(fh, crop->pad) = *r;
return 0;
}


@ -1048,14 +1048,14 @@ static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
* @mask: the color flags to match
* @index: offset in the fimc_formats array, ignored if negative
*/
struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code,
struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
unsigned int mask, int index)
{
struct fimc_fmt *fmt, *def_fmt = NULL;
unsigned int i;
int id = 0;
if (index >= ARRAY_SIZE(fimc_formats))
if (index >= (int)ARRAY_SIZE(fimc_formats))
return NULL;
for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {


@ -718,7 +718,7 @@ void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f);
void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
struct v4l2_pix_format_mplane *pix);
struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code,
struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
unsigned int mask, int index);
int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,


@ -530,7 +530,10 @@ static int soc_camera_open(struct file *file)
if (icl->reset)
icl->reset(icd->pdev);
/* Don't mess with the host during probe */
mutex_lock(&ici->host_lock);
ret = ici->ops->add(icd);
mutex_unlock(&ici->host_lock);
if (ret < 0) {
dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
goto eiciadd;
@ -956,7 +959,7 @@ static void scan_add_host(struct soc_camera_host *ici)
{
struct soc_camera_device *icd;
mutex_lock(&list_lock);
mutex_lock(&ici->host_lock);
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
@ -967,7 +970,7 @@ static void scan_add_host(struct soc_camera_host *ici)
}
}
mutex_unlock(&list_lock);
mutex_unlock(&ici->host_lock);
}
#ifdef CONFIG_I2C_BOARDINFO
@ -1313,6 +1316,7 @@ int soc_camera_host_register(struct soc_camera_host *ici)
list_add_tail(&ici->list, &hosts);
mutex_unlock(&list_lock);
mutex_init(&ici->host_lock);
scan_add_host(ici);
return 0;


@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
struct vb2_dc_conf {
@ -85,7 +86,7 @@ static void *vb2_dma_contig_vaddr(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
if (!buf)
return 0;
return NULL;
return buf->vaddr;
}

View File

@ -55,6 +55,7 @@ struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
return vma_copy;
}
EXPORT_SYMBOL_GPL(vb2_get_vma);
/**
* vb2_put_userptr() - release a userspace virtual memory area

View File

@ -376,7 +376,7 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
* Make a fake call to mtd_read_fact_prot_reg() to check if OTP
* operations are supported.
*/
if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP)
if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP)
return -EOPNOTSUPP;
switch (mode) {

View File

@ -212,18 +212,17 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
/* Link the private data with the MTD structure */
ams_delta_mtd->priv = this;
if (!request_mem_region(res->start, resource_size(res),
dev_name(&pdev->dev))) {
dev_err(&pdev->dev, "request_mem_region failed\n");
err = -EBUSY;
goto out_free;
}
/*
* Don't try to request the memory region from here,
* it should already have been requested by the
* gpio-omap driver and requesting it again would fail.
*/
io_base = ioremap(res->start, resource_size(res));
if (io_base == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
err = -EIO;
goto out_release_io;
goto out_free;
}
this->priv = io_base;
@ -271,8 +270,6 @@ out_gpio:
platform_set_drvdata(pdev, NULL);
gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
out_release_io:
release_mem_region(res->start, resource_size(res));
out_free:
kfree(ams_delta_mtd);
out:
@ -285,7 +282,6 @@ out_free:
static int __devexit ams_delta_cleanup(struct platform_device *pdev)
{
void __iomem *io_base = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* Release resources, unregister device */
nand_release(ams_delta_mtd);
@ -293,7 +289,6 @@ static int __devexit ams_delta_cleanup(struct platform_device *pdev)
gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
release_mem_region(res->start, resource_size(res));
/* Free the MTD device structure */
kfree(ams_delta_mtd);

View File

@ -342,26 +342,26 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_unlock_rx_hashtbl_bh(bond);
}
static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arp_pkt *arp;
if (skb->protocol != cpu_to_be16(ETH_P_ARP))
return;
goto out;
arp = (struct arp_pkt *) skb->data;
if (!arp) {
pr_debug("Packet has no ARP data\n");
return;
goto out;
}
if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
return;
goto out;
if (skb->len < sizeof(struct arp_pkt)) {
pr_debug("Packet is too small to be an ARP\n");
return;
goto out;
}
if (arp->op_code == htons(ARPOP_REPLY)) {
@ -369,6 +369,8 @@ static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
rlb_update_entry_from_arp(bond, arp);
pr_debug("Server received an ARP Reply from client\n");
}
out:
return RX_HANDLER_ANOTHER;
}
/* Caller must hold bond lock for read */

View File

@ -218,7 +218,7 @@ struct bonding {
struct slave *primary_slave;
bool force_primary;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
void (*recv_probe)(struct sk_buff *, struct bonding *,
int (*recv_probe)(struct sk_buff *, struct bonding *,
struct slave *);
rwlock_t lock;
rwlock_t curr_slave_lock;

View File

@ -493,7 +493,11 @@ out:
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
set_bit(__E1000_DOWN, &adapter->flags);
/* Only kill reset task if adapter is not resetting */
if (!test_bit(__E1000_RESETTING, &adapter->flags))
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);

View File

@ -584,7 +584,6 @@ struct pch_gbe_hw_stats {
/**
* struct pch_gbe_adapter - board specific private data structure
* @stats_lock: Spinlock structure for status
* @tx_queue_lock: Spinlock structure for transmit
* @ethtool_lock: Spinlock structure for ethtool
* @irq_sem: Semaphore for interrupt
* @netdev: Pointer of network device structure
@ -609,7 +608,6 @@ struct pch_gbe_hw_stats {
struct pch_gbe_adapter {
spinlock_t stats_lock;
spinlock_t tx_queue_lock;
spinlock_t ethtool_lock;
atomic_t irq_sem;
struct net_device *netdev;

View File

@ -640,14 +640,11 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
*/
static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
{
int size;
size = (int)sizeof(struct pch_gbe_tx_ring);
adapter->tx_ring = kzalloc(size, GFP_KERNEL);
adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
if (!adapter->tx_ring)
return -ENOMEM;
size = (int)sizeof(struct pch_gbe_rx_ring);
adapter->rx_ring = kzalloc(size, GFP_KERNEL);
adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
if (!adapter->rx_ring) {
kfree(adapter->tx_ring);
return -ENOMEM;
@ -1162,7 +1159,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
struct sk_buff *tmp_skb;
unsigned int frame_ctrl;
unsigned int ring_num;
unsigned long flags;
/*-- Set frame control --*/
frame_ctrl = 0;
@ -1211,14 +1207,14 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
}
}
}
spin_lock_irqsave(&tx_ring->tx_lock, flags);
ring_num = tx_ring->next_to_use;
if (unlikely((ring_num + 1) == tx_ring->count))
tx_ring->next_to_use = 0;
else
tx_ring->next_to_use = ring_num + 1;
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
buffer_info = &tx_ring->buffer_info[ring_num];
tmp_skb = buffer_info->skb;
@ -1518,7 +1514,7 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
&rx_ring->rx_buff_pool_logic,
GFP_KERNEL);
if (!rx_ring->rx_buff_pool) {
pr_err("Unable to allocate memory for the receive poll buffer\n");
pr_err("Unable to allocate memory for the receive pool buffer\n");
return -ENOMEM;
}
memset(rx_ring->rx_buff_pool, 0, size);
@ -1637,15 +1633,17 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
cleaned_count);
/* Recover from running out of Tx resources in xmit_frame */
spin_lock(&tx_ring->tx_lock);
if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
netif_wake_queue(adapter->netdev);
adapter->stats.tx_restart_count++;
pr_debug("Tx wake queue\n");
}
spin_lock(&adapter->tx_queue_lock);
tx_ring->next_to_clean = i;
spin_unlock(&adapter->tx_queue_lock);
pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
spin_unlock(&tx_ring->tx_lock);
return cleaned;
}
@ -2037,7 +2035,6 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
return -ENOMEM;
}
spin_lock_init(&adapter->hw.miim_lock);
spin_lock_init(&adapter->tx_queue_lock);
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->ethtool_lock);
atomic_set(&adapter->irq_sem, 0);
@ -2142,10 +2139,10 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->next_to_use, tx_ring->next_to_clean);
return NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
/* CRC,ITAG no support */
pch_gbe_tx_queue(adapter, tx_ring, skb);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}

View File

@ -485,6 +485,7 @@ static const struct driver_info wwan_info = {
/*-------------------------------------------------------------------------*/
#define HUAWEI_VENDOR_ID 0x12D1
#define NOVATEL_VENDOR_ID 0x1410
static const struct usb_device_id products [] = {
/*
@ -602,6 +603,21 @@ static const struct usb_device_id products [] = {
* because of bugs/quirks in a given product (like Zaurus, above).
*/
{
/* Novatel USB551L */
/* This match must come *before* the generic CDC-ETHER match so that
* we get FLAG_WWAN set on the device, since its descriptors are
* generic CDC-ETHER.
*/
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = NOVATEL_VENDOR_ID,
.idProduct = 0xB001,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = (unsigned long)&wwan_info,
}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,

View File

@ -282,17 +282,32 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
/* The caller must hold list->lock */
static void __usbnet_queue_skb(struct sk_buff_head *list,
struct sk_buff *newsk, enum skb_state state)
{
struct skb_data *entry = (struct skb_data *) newsk->cb;
__skb_queue_tail(list, newsk);
entry->state = state;
}
/*-------------------------------------------------------------------------*/
/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
* completion callbacks. 2.5 should have fixed those bugs...
*/
static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
struct sk_buff_head *list, enum skb_state state)
{
unsigned long flags;
enum skb_state old_state;
struct skb_data *entry = (struct skb_data *) skb->cb;
spin_lock_irqsave(&list->lock, flags);
old_state = entry->state;
entry->state = state;
__skb_unlink(skb, list);
spin_unlock(&list->lock);
spin_lock(&dev->done.lock);
@ -300,6 +315,7 @@ static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_hea
if (dev->done.qlen == 1)
tasklet_schedule(&dev->bh);
spin_unlock_irqrestore(&dev->done.lock, flags);
return old_state;
}
/* some work can't be done in tasklets, so we use keventd
@ -340,7 +356,6 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
entry = (struct skb_data *) skb->cb;
entry->urb = urb;
entry->dev = dev;
entry->state = rx_start;
entry->length = 0;
usb_fill_bulk_urb (urb, dev->udev, dev->in,
@ -372,7 +387,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
tasklet_schedule (&dev->bh);
break;
case 0:
__skb_queue_tail (&dev->rxq, skb);
__usbnet_queue_skb(&dev->rxq, skb, rx_start);
}
} else {
netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@ -423,16 +438,17 @@ static void rx_complete (struct urb *urb)
struct skb_data *entry = (struct skb_data *) skb->cb;
struct usbnet *dev = entry->dev;
int urb_status = urb->status;
enum skb_state state;
skb_put (skb, urb->actual_length);
entry->state = rx_done;
state = rx_done;
entry->urb = NULL;
switch (urb_status) {
/* success */
case 0:
if (skb->len < dev->net->hard_header_len) {
entry->state = rx_cleanup;
state = rx_cleanup;
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
netif_dbg(dev, rx_err, dev->net,
@ -471,7 +487,7 @@ static void rx_complete (struct urb *urb)
"rx throttle %d\n", urb_status);
}
block:
entry->state = rx_cleanup;
state = rx_cleanup;
entry->urb = urb;
urb = NULL;
break;
@ -482,17 +498,18 @@ block:
// FALLTHROUGH
default:
entry->state = rx_cleanup;
state = rx_cleanup;
dev->net->stats.rx_errors++;
netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
break;
}
defer_bh(dev, skb, &dev->rxq);
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
if (netif_running (dev->net) &&
!test_bit (EVENT_RX_HALT, &dev->flags)) {
!test_bit (EVENT_RX_HALT, &dev->flags) &&
state != unlink_start) {
rx_submit (dev, urb, GFP_ATOMIC);
usb_mark_last_busy(dev->udev);
return;
@ -579,16 +596,23 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
unsigned long flags;
struct sk_buff *skb, *skbnext;
struct sk_buff *skb;
int count = 0;
spin_lock_irqsave (&q->lock, flags);
skb_queue_walk_safe(q, skb, skbnext) {
while (!skb_queue_empty(q)) {
struct skb_data *entry;
struct urb *urb;
int retval;
skb_queue_walk(q, skb) {
entry = (struct skb_data *) skb->cb;
if (entry->state != unlink_start)
goto found;
}
break;
found:
entry->state = unlink_start;
urb = entry->urb;
/*
@ -1039,8 +1063,7 @@ static void tx_complete (struct urb *urb)
}
usb_autopm_put_interface_async(dev->intf);
entry->state = tx_done;
defer_bh(dev, skb, &dev->txq);
(void) defer_bh(dev, skb, &dev->txq, tx_done);
}
/*-------------------------------------------------------------------------*/
@ -1096,7 +1119,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
entry = (struct skb_data *) skb->cb;
entry->urb = urb;
entry->dev = dev;
entry->state = tx_start;
entry->length = length;
usb_fill_bulk_urb (urb, dev->udev, dev->out,
@ -1155,7 +1177,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
break;
case 0:
net->trans_start = jiffies;
__skb_queue_tail (&dev->txq, skb);
__usbnet_queue_skb(&dev->txq, skb, tx_start);
if (dev->txq.qlen >= TX_QLEN (dev))
netif_stop_queue (net);
}
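Returning the old state from defer_bh() lets rx_complete() above avoid resubmitting an URB whose skb is concurrently being torn down by unlink_urbs(). A minimal sketch of that decision with stand-in types (not the usbnet structures):

#include <stdbool.h>

enum skb_state { illegal = 0, tx_start, tx_done,
		 rx_start, rx_done, rx_cleanup, unlink_start };

/* Decide whether a completed receive URB may be resubmitted: only if the
 * interface is still up, RX is not halted, and unlink_urbs() has not
 * already claimed this entry (old_state == unlink_start). */
bool may_resubmit(bool netif_running, bool rx_halted,
		  enum skb_state old_state)
{
	return netif_running && !rx_halted && old_state != unlink_start;
}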

View File

@ -492,7 +492,9 @@ static void virtnet_napi_enable(struct virtnet_info *vi)
* We synchronize against interrupts via NAPI_STATE_SCHED */
if (napi_schedule_prep(&vi->napi)) {
virtqueue_disable_cb(vi->rvq);
local_bh_disable();
__napi_schedule(&vi->napi);
local_bh_enable();
}
}

View File

@ -1851,14 +1851,6 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
/*like read eeprom and so on */
rtlpriv->cfg->ops->read_eeprom_info(hw);
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
err = -ENODEV;
goto fail3;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
/*aspm */
rtl_pci_init_aspm(hw);
@ -1877,6 +1869,14 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
goto fail3;
}
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
err = -ENODEV;
goto fail3;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,

View File

@ -971,11 +971,6 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
rtlpriv->cfg->ops->read_chip_version(hw);
/*like read eeprom and so on */
rtlpriv->cfg->ops->read_eeprom_info(hw);
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
goto error_out;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
err = _rtl_usb_init(hw);
if (err)
goto error_out;
@ -987,6 +982,11 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
"Can't allocate sw for mac80211\n");
goto error_out;
}
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
goto error_out;
}
rtlpriv->cfg->ops->init_sw_leds(hw);
return 0;
error_out:

View File

@ -223,7 +223,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
[PCI_D2] = ACPI_STATE_D2,
[PCI_D3hot] = ACPI_STATE_D3_HOT,
[PCI_D3hot] = ACPI_STATE_D3,
[PCI_D3cold] = ACPI_STATE_D3
};
int error = -EINVAL;

View File

@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/slab.h>
#define STATION_ADDR_LEN 20
#define PCI_DEVICE_ID_PCH_1588 0x8819

View File

@ -354,7 +354,7 @@ static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i)
{
struct rproc *rproc = rvdev->rproc;
for (i--; i > 0; i--) {
for (i--; i >= 0; i--) {
struct rproc_vring *rvring = &rvdev->vring[i];
int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));

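This one-character change fixes an off-by-one: with i vrings set up (indices 0 to i-1), the old "i > 0" bound stopped before index 0 and leaked the first vring. A stand-alone illustration of the corrected reverse-order cleanup (helper names are made up):

#include <stdio.h>

static void free_entry(int idx)
{
	printf("freeing entry %d\n", idx);	/* stand-in for the real teardown */
}

/* Free entries 0..count-1 in reverse order; note the ">= 0" bound,
 * which is exactly what the fix above restores. */
static void free_all(int count)
{
	int i = count;

	for (i--; i >= 0; i--)
		free_entry(i);
}

int main(void)
{
	free_all(3);	/* prints 2, 1, 0; with "> 0" it would skip entry 0 */
	return 0;
}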
View File

@ -312,6 +312,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
int ret;
struct pl031_local *ldata;
struct rtc_class_ops *ops = id->data;
unsigned long time;
ret = amba_request_regions(adev, NULL);
if (ret)
@ -343,6 +344,23 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
ldata->base + RTC_CR);
/*
* On ST PL031 variants, the RTC reset value does not provide the correct
* weekday for 2000-01-01. Correct the erroneous Sunday to Saturday.
*/
if (ldata->hw_designer == AMBA_VENDOR_ST) {
if (readl(ldata->base + RTC_YDR) == 0x2000) {
time = readl(ldata->base + RTC_DR);
if ((time &
(RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK))
== 0x02120000) {
time = time | (0x7 << RTC_WDAY_SHIFT);
writel(0x2000, ldata->base + RTC_YLR);
writel(time, ldata->base + RTC_LR);
}
}
}
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {

View File

@ -169,6 +169,7 @@ static struct se_device *fd_create_virtdevice(
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct request_queue *q;
unsigned long long dev_size;
/*
* Setup the local scope queue_limits from struct request_queue->limits
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
@ -183,13 +184,12 @@ static struct se_device *fd_create_virtdevice(
* one (1) logical sector from underlying struct block_device
*/
fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@ -605,10 +605,20 @@ static u32 fd_get_device_type(struct se_device *dev)
static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
dev->se_sub_dev->se_dev_attrib.block_size);
struct file *f = fd_dev->fd_file;
struct inode *i = f->f_mapping->host;
unsigned long long dev_size;
/*
* When using a file that references an underlying struct block_device,
* ensure dev_size is always based on the current inode size in order
* to handle underlying block_device resize operations.
*/
if (S_ISBLK(i->i_mode))
dev_size = (i_size_read(i) - fd_dev->fd_block_size);
else
dev_size = fd_dev->fd_dev_size;
return blocks_long;
return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
}
static struct se_subsystem_api fileio_template = {

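As a worked example of the calculation above (a sketch with made-up numbers, not the target code): for a block device whose inode now reports 1 GiB with 512-byte logical blocks, dev_size is re-read from the inode on every call, so a resize of the underlying block device is picked up the next time the block count is queried.

#include <stdio.h>

int main(void)
{
	unsigned long long inode_size = 1024ULL * 1024 * 1024;	/* 1 GiB, re-read each call */
	unsigned int block_size = 512;
	unsigned long long dev_size = inode_size - block_size;

	/* div_u64() in the kernel; plain division is enough here. */
	printf("blocks = %llu\n", dev_size / block_size);	/* 2097151 */
	return 0;
}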
View File

@ -220,6 +220,9 @@ int target_scsi2_reservation_release(struct se_task *task)
if (dev->dev_reserved_node_acl != sess->se_node_acl)
goto out_unlock;
if (dev->dev_res_bin_isid != sess->sess_bin_isid)
goto out_unlock;
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {

View File

@ -2044,7 +2044,7 @@ int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm)
kbd->default_ledflagstate = ((arg >> 4) & 7);
set_leds();
spin_unlock_irqrestore(&kbd_event_lock, flags);
break;
return 0;
/* the ioctls below only set the lights, not the functions */
/* for those, see KDGKBLED and KDSKBLED above */

View File

@ -390,6 +390,7 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
/* There might be pages left in the balloon: free them. */
while (vb->num_pages)
leak_balloon(vb, vb->num_pages);
update_balloon_size(vb);
/* Now we reset the device so we can clean up the queues. */
vdev->config->reset(vdev);

View File

@ -505,9 +505,14 @@ EXPORT_SYMBOL(bio_clone);
int bio_get_nr_vecs(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
return min_t(unsigned,
int nr_pages;
nr_pages = min_t(unsigned,
queue_max_segments(q),
queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
}
EXPORT_SYMBOL(bio_get_nr_vecs);
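To see the effect of the added clamp with hypothetical queue limits (the BIO_MAX_PAGES value of 256 is assumed here, matching kernels of this era): with 4 KiB pages a page spans 8 sectors, so a queue advertising 2560 max sectors and 512 max segments previously produced 321 vectors; capping the result keeps it within what a single bio can actually hold.

#include <stdio.h>

#define BIO_MAX_PAGES	256	/* assumed value for illustration */

static unsigned min_u(unsigned a, unsigned b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned max_segments = 512;		/* hypothetical queue limits */
	unsigned max_sectors = 2560;
	unsigned sectors_per_page = 4096 >> 9;	/* PAGE_SIZE >> 9 on 4 KiB pages */

	unsigned nr = min_u(max_segments, max_sectors / sectors_per_page + 1);

	printf("uncapped %u, capped %u\n", nr, min_u(nr, BIO_MAX_PAGES));
	return 0;
}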

View File

@ -70,7 +70,7 @@ static void bdev_inode_switch_bdi(struct inode *inode,
spin_unlock(&dst->wb.list_lock);
}
static sector_t max_block(struct block_device *bdev)
sector_t blkdev_max_block(struct block_device *bdev)
{
sector_t retval = ~((sector_t)0);
loff_t sz = i_size_read(bdev->bd_inode);
@ -163,7 +163,7 @@ static int
blkdev_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
if (iblock >= max_block(I_BDEV(inode))) {
if (iblock >= blkdev_max_block(I_BDEV(inode))) {
if (create)
return -EIO;
@ -185,7 +185,7 @@ static int
blkdev_get_blocks(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
sector_t end_block = max_block(I_BDEV(inode));
sector_t end_block = blkdev_max_block(I_BDEV(inode));
unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
if ((iblock + max_blocks) > end_block) {

View File

@ -921,6 +921,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
struct buffer_head *head = page_buffers(page);
struct buffer_head *bh = head;
int uptodate = PageUptodate(page);
sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
do {
if (!buffer_mapped(bh)) {
@ -929,6 +930,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
bh->b_blocknr = block;
if (uptodate)
set_buffer_uptodate(bh);
if (block < end_block)
set_buffer_mapped(bh);
}
block++;

View File

@ -164,7 +164,8 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_sign, "sign" },
{ Opt_seal, "seal" },
{ Opt_direct, "direct" },
{ Opt_direct, "forceddirectio" },
{ Opt_direct, "directio" },
{ Opt_direct, "forcedirectio" },
{ Opt_strictcache, "strictcache" },
{ Opt_noac, "noac" },
{ Opt_fsc, "fsc" },

View File

@ -234,8 +234,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
return 0;
jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
spin_lock(&c->erase_completion_lock);
mutex_lock(&c->alloc_sem);
spin_lock(&c->erase_completion_lock);
}
/* First, work out which block we're garbage-collecting */

View File

@ -1799,10 +1799,15 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
if (task) {
files = get_files_struct(task);
if (files) {
struct file *file;
rcu_read_lock();
if (fcheck_files(files, fd)) {
file = fcheck_files(files, fd);
if (file) {
unsigned i_mode, f_mode = file->f_mode;
rcu_read_unlock();
put_files_struct(files);
if (task_dumpable(task)) {
rcu_read_lock();
cred = __task_cred(task);
@ -1813,7 +1818,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
inode->i_uid = 0;
inode->i_gid = 0;
}
inode->i_mode &= ~(S_ISUID | S_ISGID);
i_mode = S_IFLNK;
if (f_mode & FMODE_READ)
i_mode |= S_IRUSR | S_IXUSR;
if (f_mode & FMODE_WRITE)
i_mode |= S_IWUSR | S_IXUSR;
inode->i_mode = i_mode;
security_task_to_inode(task, inode);
put_task_struct(task);
return 1;
@ -1837,8 +1849,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
struct dentry *dentry, struct task_struct *task, const void *ptr)
{
unsigned fd = *(const unsigned *)ptr;
struct file *file;
struct files_struct *files;
struct inode *inode;
struct proc_inode *ei;
struct dentry *error = ERR_PTR(-ENOENT);
@ -1848,25 +1858,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
goto out;
ei = PROC_I(inode);
ei->fd = fd;
files = get_files_struct(task);
if (!files)
goto out_iput;
inode->i_mode = S_IFLNK;
/*
* We are not taking a ref to the file structure, so we must
* hold ->file_lock.
*/
spin_lock(&files->file_lock);
file = fcheck_files(files, fd);
if (!file)
goto out_unlock;
if (file->f_mode & FMODE_READ)
inode->i_mode |= S_IRUSR | S_IXUSR;
if (file->f_mode & FMODE_WRITE)
inode->i_mode |= S_IWUSR | S_IXUSR;
spin_unlock(&files->file_lock);
put_files_struct(files);
inode->i_op = &proc_pid_link_inode_operations;
inode->i_size = 64;
@ -1879,12 +1870,6 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
out:
return error;
out_unlock:
spin_unlock(&files->file_lock);
put_files_struct(files);
out_iput:
iput(inode);
goto out;
}
static struct dentry *proc_lookupfd_common(struct inode *dir,
@ -2177,16 +2162,16 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
goto out;
result = ERR_PTR(-EACCES);
if (lock_trace(task))
if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto out_put_task;
result = ERR_PTR(-ENOENT);
if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
goto out_unlock;
goto out_put_task;
mm = get_task_mm(task);
if (!mm)
goto out_unlock;
goto out_put_task;
down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
@ -2198,8 +2183,6 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
out_no_vma:
up_read(&mm->mmap_sem);
mmput(mm);
out_unlock:
unlock_trace(task);
out_put_task:
put_task_struct(task);
out:
@ -2233,7 +2216,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
goto out;
ret = -EACCES;
if (lock_trace(task))
if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto out_put_task;
ret = 0;
@ -2241,12 +2224,12 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
case 0:
ino = inode->i_ino;
if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
goto out_unlock;
goto out_put_task;
filp->f_pos++;
case 1:
ino = parent_ino(dentry);
if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
goto out_unlock;
goto out_put_task;
filp->f_pos++;
default:
{
@ -2257,7 +2240,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
mm = get_task_mm(task);
if (!mm)
goto out_unlock;
goto out_put_task;
down_read(&mm->mmap_sem);
nr_files = 0;
@ -2287,7 +2270,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
flex_array_free(fa);
up_read(&mm->mmap_sem);
mmput(mm);
goto out_unlock;
goto out_put_task;
}
for (i = 0, vma = mm->mmap, pos = 2; vma;
vma = vma->vm_next) {
@ -2332,8 +2315,6 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
}
out_unlock:
unlock_trace(task);
out_put_task:
put_task_struct(task);
out:

View File

@ -1,9 +1,10 @@
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
#include <linux/sched.h>
#ifdef CONFIG_BLOCK
#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>

View File

@ -2051,6 +2051,7 @@ extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern sector_t blkdev_max_block(struct block_device *bdev);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);

View File

@ -179,6 +179,7 @@ enum {
TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
};
enum {
@ -187,6 +188,7 @@ enum {
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
};
struct ftrace_event_call {

View File

@ -222,12 +222,6 @@ static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
}
}
static inline char *part_unpack_uuid(const u8 *uuid, char *out)
{
sprintf(out, "%pU", uuid);
return out;
}
static inline int disk_max_parts(struct gendisk *disk)
{
if (disk->flags & GENHD_FL_EXT_DEVT)

View File

@ -99,6 +99,22 @@ struct ip_set_hash {
#endif
};
static size_t
htable_size(u8 hbits)
{
size_t hsize;
/* We must fit both into u32 in jhash and size_t */
if (hbits > 31)
return 0;
hsize = jhash_size(hbits);
if ((((size_t)-1) - sizeof(struct htable))/sizeof(struct hbucket)
< hsize)
return 0;
return hsize * sizeof(struct hbucket) + sizeof(struct htable);
}
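A quick check of the guard above, as a stand-alone sketch (the two struct definitions are size stand-ins, not the ip_set ones): hbits = 20 yields a table of 2^20 buckets, while hbits above 31, or any bucket count whose array would overflow size_t, makes the helper return 0 so the caller can refuse the allocation.

#include <stdio.h>
#include <stddef.h>

struct hbucket { void *value; };		/* stand-in sizes only */
struct htable { unsigned char htable_bits; };

#define jhash_size(n)	((unsigned int)1 << (n))

size_t htable_size_demo(unsigned char hbits)
{
	size_t hsize;

	if (hbits > 31)			/* must fit a u32 for jhash */
		return 0;
	hsize = jhash_size(hbits);
	if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket) < hsize)
		return 0;		/* bucket array would overflow size_t */
	return hsize * sizeof(struct hbucket) + sizeof(struct htable);
}

int main(void)
{
	printf("hbits=20 -> %zu bytes\n", htable_size_demo(20));
	printf("hbits=63 -> %zu bytes\n", htable_size_demo(63));	/* rejected: 0 */
	return 0;
}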
/* Compute htable_bits from the user input parameter hashsize */
static u8
htable_bits(u32 hashsize)

View File

@ -191,7 +191,8 @@ extern void usbnet_cdc_status(struct usbnet *, struct urb *);
enum skb_state {
illegal = 0,
tx_start, tx_done,
rx_start, rx_done, rx_cleanup
rx_start, rx_done, rx_cleanup,
unlink_start
};
struct skb_data { /* skb->cb is one of these */

View File

@ -59,6 +59,7 @@ struct soc_camera_device {
struct soc_camera_host {
struct v4l2_device v4l2_dev;
struct list_head list;
struct mutex host_lock; /* Protect during probing */
unsigned char nr; /* Host number */
void *priv;
const char *drv_name;

View File

@ -191,6 +191,7 @@ struct bt_sock {
struct list_head accept_q;
struct sock *parent;
u32 defer_setup;
bool suspended;
};
struct bt_sock_list {

View File

@ -518,6 +518,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**

View File

@ -112,6 +112,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
{
return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);
static void delete_irq_desc(unsigned int irq)
{

Some files were not shown because too many files have changed in this diff.