Merge v3.9-rc5 into char-misc-next

This picks up the fixes in 3.9-rc5 that we need here.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2013-04-01 10:50:58 -07:00
commit 974857266a
262 changed files with 2868 additions and 1412 deletions


@ -15,6 +15,13 @@ amemthresh - INTEGER
enabled and the variable is automatically set to 2, otherwise
the strategy is disabled and the variable is set to 1.
backup_only - BOOLEAN
0 - disabled (default)
not 0 - enabled
If set, disable the director function while the server is
in backup mode to avoid packet loops for DR/TUN methods.
conntrack - BOOLEAN
0 - disabled (default)
not 0 - enabled
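
The backup_only switch documented above can be toggled at runtime through the IPVS sysctl tree. A minimal userspace sketch, assuming the conventional /proc/sys/net/ipv4/vs/ path (the path and value here are illustrative, not part of the patch):

	/* sketch: enable backup_only so a backup director stops forwarding,
	 * which avoids the DR/TUN packet loops described above */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/vs/backup_only", "w");

		if (!f) {
			perror("backup_only");
			return 1;
		}
		fputs("1\n", f);	/* "not 0 - enabled" */
		fclose(f);
		return 0;
	}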


@ -3243,6 +3243,12 @@ F: Documentation/firmware_class/
F: drivers/base/firmware*.c
F: include/linux/firmware.h
FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com>
M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
S: Maintained
F: drivers/block/rsxx/
FLOPPY DRIVER
M: Jiri Kosina <jkosina@suse.cz>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
@ -5684,7 +5690,7 @@ S: Maintained
F: arch/arm/*omap*/*clock*
OMAP POWER MANAGEMENT SUPPORT
M: Kevin Hilman <khilman@ti.com>
M: Kevin Hilman <khilman@deeprootsystems.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/*omap*/*pm*
@ -5778,7 +5784,7 @@ F: arch/arm/*omap*/usb*
OMAP GPIO DRIVER
M: Santosh Shilimkar <santosh.shilimkar@ti.com>
M: Kevin Hilman <khilman@ti.com>
M: Kevin Hilman <khilman@deeprootsystems.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-omap.c
@ -6210,7 +6216,7 @@ F: include/linux/power_supply.h
F: drivers/power/
PNP SUPPORT
M: Adam Belay <abelay@mit.edu>
M: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
M: Bjorn Helgaas <bhelgaas@google.com>
S: Maintained
F: drivers/pnp/
@ -6552,12 +6558,6 @@ S: Maintained
F: Documentation/blockdev/ramdisk.txt
F: drivers/block/brd.c
RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com>
M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
S: Maintained
F: drivers/block/rsxx/
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
S: Maintained
@ -7174,7 +7174,7 @@ F: arch/arm/mach-s3c2410/bast-irq.c
TI DAVINCI MACHINE SUPPORT
M: Sekhar Nori <nsekhar@ti.com>
M: Kevin Hilman <khilman@ti.com>
M: Kevin Hilman <khilman@deeprootsystems.com>
L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
T: git git://gitorious.org/linux-davinci/linux-davinci.git
Q: http://patchwork.kernel.org/project/linux-davinci/list/
@ -7707,9 +7707,10 @@ F: include/linux/swiotlb.h
SYNOPSYS ARC ARCHITECTURE
M: Vineet Gupta <vgupta@synopsys.com>
L: linux-snps-arc@vger.kernel.org
S: Supported
F: arch/arc/
F: Documentation/devicetree/bindings/arc/
F: drivers/tty/serial/arc-uart.c
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Unicycling Gorilla
# *DOCUMENTATION*


@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i)
sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
return nents;


@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
*/
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#endif


@ -415,7 +415,7 @@
*-------------------------------------------------------------*/
.macro SAVE_ALL_EXCEPTION marker
st \marker, [sp, 8]
st \marker, [sp, 8] /* orig_r8 */
st r0, [sp, 4] /* orig_r0, needed only for sys calls */
/* Restore r9 used to code the early prologue */


@ -13,7 +13,7 @@
#ifdef CONFIG_KGDB
#include <asm/user.h>
#include <asm/ptrace.h>
/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
* register API yet */
@ -53,9 +53,7 @@ enum arc700_linux_regnums {
};
#else
static inline void kgdb_trap(struct pt_regs *regs, int param)
{
}
#define kgdb_trap(regs, param)
#endif
#endif /* __ARC_KGDB_H__ */


@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
#define orig_r8_IS_SCALL 0x0001
#define orig_r8_IS_SCALL_RESTARTED 0x0002
#define orig_r8_IS_BRKPT 0x0004
#define orig_r8_IS_EXCPN 0x0004
#define orig_r8_IS_EXCPN 0x0008
#define orig_r8_IS_IRQ1 0x0010
#define orig_r8_IS_IRQ2 0x0020


@ -16,8 +16,6 @@
#include <linux/types.h>
int sys_clone_wrapper(int, int, int, int, int);
int sys_fork_wrapper(void);
int sys_vfork_wrapper(void);
int sys_cacheflush(uint32_t, uint32_t, uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);


@ -28,14 +28,14 @@
*/
struct user_regs_struct {
struct scratch {
struct {
long pad;
long bta, lp_start, lp_end, lp_count;
long status32, ret, blink, fp, gp;
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp;
} scratch;
struct callee {
struct {
long pad;
long r25, r24, r23, r22, r21, r20;
long r19, r18, r17, r16, r15, r14, r13;


@ -452,7 +452,7 @@ tracesys:
; using ERET won't work since next-PC has already committed
lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR]
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
; PRE Sys Call Ptrace hook
mov r0, sp ; pt_regs needed
@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
;################### Special Sys Call Wrappers ##########################
; TBD: call do_fork directly from here
ARC_ENTRY sys_fork_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_fork
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_fork_wrapper
ARC_ENTRY sys_vfork_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_vfork
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_vfork_wrapper
ARC_ENTRY sys_clone_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_clone


@ -9,6 +9,7 @@
*/
#include <linux/kgdb.h>
#include <linux/sched.h>
#include <asm/disasm.h>
#include <asm/cacheflush.h>


@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
n += scnprintf(buf + n, len - n, "\n");
#ifdef _ASM_GENERIC_UNISTD_H
n += scnprintf(buf + n, len - n,
"OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
#endif
"OS ABI [v3]\t: no-legacy-syscalls\n");
return buf;
}


@ -6,8 +6,6 @@
#include <asm/syscalls.h>
#define sys_clone sys_clone_wrapper
#define sys_fork sys_fork_wrapper
#define sys_vfork sys_vfork_wrapper
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),


@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
DEBUG_IMX53_UART || \
DEBUG_IMX6Q_UART
default 1
depends on ARCH_MXC
help
Choose UART port on which kernel low-level debug messages
should be output.


@ -385,7 +385,7 @@
spi@7000d800 {
compatible = "nvidia,tegra20-slink";
reg = <0x7000d480 0x200>;
reg = <0x7000d800 0x200>;
interrupts = <0 83 0x04>;
nvidia,dma-request-selector = <&apbdma 17>;
#address-cells = <1>;


@ -372,7 +372,7 @@
spi@7000d800 {
compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
reg = <0x7000d480 0x200>;
reg = <0x7000d800 0x200>;
interrupts = <0 83 0x04>;
nvidia,dma-request-selector = <&apbdma 17>;
#address-cells = <1>;


@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
.lower_margin = 4,
.hsync_len = 1,
.vsync_len = 1,
.sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
FB_SYNC_DOTCLK_FAILING_ACT,
},
};
@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
.lower_margin = 10,
.hsync_len = 10,
.vsync_len = 10,
.sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
FB_SYNC_DOTCLK_FAILING_ACT,
},
};
@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
.lower_margin = 45,
.hsync_len = 1,
.vsync_len = 1,
.sync = FB_SYNC_DATA_ENABLE_HIGH_ACT,
},
};
@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
.lower_margin = 13,
.hsync_len = 48,
.vsync_len = 3,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
FB_SYNC_DATA_ENABLE_HIGH_ACT |
FB_SYNC_DOTCLK_FAILING_ACT,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
};
@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
.lower_margin = 0x15,
.hsync_len = 64,
.vsync_len = 4,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
FB_SYNC_DATA_ENABLE_HIGH_ACT |
FB_SYNC_DOTCLK_FAILING_ACT,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
};
@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
.lower_margin = 2,
.hsync_len = 15,
.vsync_len = 15,
.sync = FB_SYNC_DATA_ENABLE_HIGH_ACT
},
};
@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
static inline void enable_clk_enet_out(void)
@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
MXSFB_SYNC_DOTCLK_FAILING_ACT;
mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
}
@ -297,6 +291,7 @@ static void __init m28evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
mxsfb_pdata.default_bpp = 16;
mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
}
static void __init sc_sps1_init(void)
@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0)
@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
}
static void __init cfa10037_init(void)
@ -423,6 +421,8 @@ static void __init apf28_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
mxsfb_pdata.default_bpp = 16;
mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
static void __init mxs_machine_init(void)


@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
unsigned long size, mask;
bool page64k = IS_ENABLED(ARM64_64K_PAGES);
bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;


@ -291,7 +291,6 @@ cpu_idle (void)
}
if (!need_resched()) {
void (*idle)(void);
#ifdef CONFIG_SMP
min_xtp();
#endif
@ -299,9 +298,7 @@ cpu_idle (void)
if (mark_idle)
(*mark_idle)(1);
if (!idle)
idle = default_idle;
(*idle)();
default_idle();
if (mark_idle)
(*mark_idle)(0);
#ifdef CONFIG_SMP


@ -23,8 +23,10 @@
#include <asm/code-patching.h>
#include <asm/machdep.h>
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
extern u32 epapr_ev_idle_start[];
#endif
bool epapr_paravirt_enabled;
@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
for (i = 0; i < (len / 4); i++) {
patch_instruction(epapr_hypercall_start + i, insts[i]);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, insts[i]);
#endif
}
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (of_get_property(hyper_node, "has-idle", NULL))
ppc_md.power_save = epapr_ev_idle;
#endif
epapr_paravirt_enabled = true;


@ -1066,78 +1066,6 @@ unrecov_user_slb:
#endif /* __DISABLED__ */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
* r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
* r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
_GLOBAL(slb_miss_realmode)
mflr r10
#ifdef CONFIG_RELOCATABLE
mtctr r11
#endif
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
bl .slb_allocate_realmode
/* All done -- return from exception. */
ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- 2f
.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
RESTORE_PPR_PACA(PACA_EXSLB, r9)
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */
2: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b .
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
DISABLE_INTS
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
std r10,_NIP(r1) /* equivalent of a blr */
blr
#endif
.align 7
.globl alignment_common
alignment_common:
@ -1335,6 +1263,78 @@ _GLOBAL(opal_mc_secondary_handler)
#endif /* CONFIG_PPC_POWERNV */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
* r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
* r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
_GLOBAL(slb_miss_realmode)
mflr r10
#ifdef CONFIG_RELOCATABLE
mtctr r11
#endif
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
bl .slb_allocate_realmode
/* All done -- return from exception. */
ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- 2f
.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
RESTORE_PPR_PACA(PACA_EXSLB, r9)
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */
2: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b .
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
DISABLE_INTS
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
std r10,_NIP(r1) /* equivalent of a blr */
blr
#endif
/*
* Hash table stuff
*/


@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
return _hypercall3(int, console_io, cmd, count, str);
}
extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
extern int __must_check xen_physdev_op_compat(int, void *);
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
int rc = _hypercall2(int, physdev_op, cmd, arg);
if (unlikely(rc == -ENOSYS))
rc = HYPERVISOR_physdev_op_compat(cmd, arg);
rc = xen_physdev_op_compat(cmd, arg);
return rc;
}


@ -44,6 +44,7 @@
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e


@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
struct microcode_intel ***mc_saved;
mc_saved = (struct microcode_intel ***)
__pa_symbol(&mc_saved_data->mc_saved);
__pa_nodebug(&mc_saved_data->mc_saved);
for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
struct microcode_intel *p;
p = *(struct microcode_intel **)
__pa(mc_saved_data->mc_saved + i);
mc_saved_tmp[i] = (struct microcode_intel *)__pa(p);
__pa_nodebug(mc_saved_data->mc_saved + i);
mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
}
}
#endif
@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
struct cpio_data cd;
long offset = 0;
#ifdef CONFIG_X86_32
char *p = (char *)__pa_symbol(ucode_name);
char *p = (char *)__pa_nodebug(ucode_name);
#else
char *p = ucode_name;
#endif
@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
if (mc_intel == NULL)
return;
delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info);
current_mc_date_p = (int *)__pa_symbol(&current_mc_date);
delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
*delay_ucode_info_p = 1;
*current_mc_date_p = mc_intel->hdr.date;
@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
}
#endif
static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
struct ucode_cpu_info *uci)
static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
struct ucode_cpu_info *uci)
{
struct microcode_intel *mc_intel;
unsigned int val[2];
@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
#ifdef CONFIG_X86_32
struct boot_params *boot_params_p;
boot_params_p = (struct boot_params *)__pa_symbol(&boot_params);
boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
ramdisk_image = boot_params_p->hdr.ramdisk_image;
ramdisk_size = boot_params_p->hdr.ramdisk_size;
initrd_start_early = ramdisk_image;
initrd_end_early = initrd_start_early + ramdisk_size;
_load_ucode_intel_bsp(
(struct mc_saved_data *)__pa_symbol(&mc_saved_data),
(unsigned long *)__pa_symbol(&mc_saved_in_initrd),
(struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
(unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
initrd_start_early, initrd_end_early, &uci);
#else
ramdisk_image = boot_params.hdr.ramdisk_image;
@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
unsigned long *initrd_start_p;
mc_saved_in_initrd_p =
(unsigned long *)__pa_symbol(mc_saved_in_initrd);
mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data);
initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start);
initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p);
(unsigned long *)__pa_nodebug(mc_saved_in_initrd);
mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
#else
mc_saved_data_p = &mc_saved_data;
mc_saved_in_initrd_p = mc_saved_in_initrd;


@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
char c;
unsigned zero_len;
for (; len; --len) {
for (; len; --len, to++) {
if (__get_user_nocheck(c, from++, sizeof(char)))
break;
if (__put_user_nocheck(c, to++, sizeof(char)))
if (__put_user_nocheck(c, to, sizeof(char)))
break;
}


@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
__xen_write_cr3(true, cr3);
xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
pv_mmu_ops.write_cr3 = &xen_write_cr3;
}
#endif
@ -2122,6 +2120,7 @@ static void __init xen_post_allocator_init(void)
#endif
#ifdef CONFIG_X86_64
pv_mmu_ops.write_cr3 = &xen_write_cr3;
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_mark_init_mm_pinned();


@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
* copied from blk_rq_pos(rq).
*/
if (error_sector)
*error_sector = bio->bi_sector;
*error_sector = bio->bi_sector;
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;


@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno)
hd_struct_put(part);
}
EXPORT_SYMBOL(delete_partition);
static ssize_t whole_disk_show(struct device *dev,
struct device_attribute *attr, char *buf)


@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
return rc;
data_len = estatus->data_length;
gdata = (struct acpi_hest_generic_data *)(estatus + 1);
while (data_len > sizeof(*gdata)) {
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
if (gedata_len > data_len - sizeof(*gdata))
return -EINVAL;


@ -646,6 +646,7 @@ static void handle_root_bridge_insertion(acpi_handle handle)
static void handle_root_bridge_removal(struct acpi_device *device)
{
acpi_status status;
struct acpi_eject_event *ej_event;
ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
@ -661,7 +662,9 @@ static void handle_root_bridge_removal(struct acpi_device *device)
ej_event->device = device;
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
acpi_bus_hot_remove_device(ej_event);
status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
if (ACPI_FAILURE(status))
kfree(ej_event);
}
static void _handle_hotplug_event_root(struct work_struct *work)
@ -676,8 +679,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
handle = hp_work->handle;
type = hp_work->type;
root = acpi_pci_find_root(handle);
acpi_scan_lock_acquire();
root = acpi_pci_find_root(handle);
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
@ -711,6 +715,7 @@ static void _handle_hotplug_event_root(struct work_struct *work)
break;
}
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
kfree(buffer.pointer);
}


@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW21M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VPCEB17FX",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),


@ -532,11 +532,11 @@ config BLK_DEV_RBD
If unsure, say N.
config BLK_DEV_RSXX
tristate "RamSam PCIe Flash SSD Device Driver"
tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
depends on PCI
help
Device driver for IBM's high speed PCIe SSD
storage devices: RamSan-70 and RamSan-80.
storage devices: FlashSystem-70 and FlashSystem-80.
To compile this driver as a module, choose M here: the
module will be called rsxx.


@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h)
if (rc)
return rc;
h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
if (!h->cfgtable)
return -ENOMEM;
rc = write_driver_ver_to_cfgtable(h->cfgtable);


@ -1044,12 +1044,29 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
ioctl_by_bdev(bdev, BLKRRPART, 0);
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
mutex_unlock(&lo->lo_ctl_mutex);
/*
* Remove all partitions, since BLKRRPART won't remove user
* added partitions when max_part=0
*/
if (bdev) {
struct disk_part_iter piter;
struct hd_struct *part;
mutex_lock_nested(&bdev->bd_mutex, 1);
invalidate_partition(bdev->bd_disk, 0);
disk_part_iter_init(&piter, bdev->bd_disk,
DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter)))
delete_partition(bdev->bd_disk, part->partno);
disk_part_iter_exit(&piter);
mutex_unlock(&bdev->bd_mutex);
}
/*
* Need not hold lo_ctl_mutex to fput backing file.
* Calling fput holding lo_ctl_mutex triggers a circular
@ -1623,6 +1640,7 @@ static int loop_add(struct loop_device **l, int i)
goto out_free_dev;
i = err;
err = -ENOMEM;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
goto out_free_dev;


@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev)
gpio_direction_output(host->rst, 1);
/* reset out pin */
if (!(prv_data->dev_attr & MG_DEV_MASK))
if (!(prv_data->dev_attr & MG_DEV_MASK)) {
err = -EINVAL;
goto probe_err_3a;
}
if (prv_data->dev_attr != MG_BOOT_DEV) {
rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,


@ -4224,6 +4224,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
dd->isr_workq = create_workqueue(dd->workq_name);
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
goto block_initialize_err;
}
@ -4282,7 +4283,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
pci_set_master(pdev);
if (pci_enable_msi(pdev)) {
rv = pci_enable_msi(pdev);
if (rv) {
dev_warn(&pdev->dev,
"Unable to enable MSI interrupt.\n");
goto block_initialize_err;


@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request)
return atomic_read(&obj_request->done) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
obj_request, obj_request->img_request, obj_request->result,
obj_request->xferred, obj_request->length);
/*
* ENOENT means a hole in the image. We zero-fill the
* entire length of the request. A short read also implies
* zero-fill to the end of the request. Either way we
* update the xferred count to indicate the whole request
* was satisfied.
*/
BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
if (obj_request->result == -ENOENT) {
zero_bio_chain(obj_request->bio_list, 0);
obj_request->result = 0;
obj_request->xferred = obj_request->length;
} else if (obj_request->xferred < obj_request->length &&
!obj_request->result) {
zero_bio_chain(obj_request->bio_list, obj_request->xferred);
obj_request->xferred = obj_request->length;
}
obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p cb %p\n", __func__, obj_request,
@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
obj_request->result, obj_request->xferred, obj_request->length);
/*
* ENOENT means a hole in the object. We zero-fill the
* entire length of the request. A short read also implies
* zero-fill to the end of the request. Either way we
* update the xferred count to indicate the whole request
* was satisfied.
*/
if (obj_request->result == -ENOENT) {
zero_bio_chain(obj_request->bio_list, 0);
obj_request->result = 0;
obj_request->xferred = obj_request->length;
} else if (obj_request->xferred < obj_request->length &&
!obj_request->result) {
zero_bio_chain(obj_request->bio_list, obj_request->xferred);
obj_request->xferred = obj_request->length;
}
obj_request_done_set(obj_request);
if (obj_request->img_request)
rbd_img_obj_request_read_callback(obj_request);
else
obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)


@ -1,2 +1,2 @@
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
rsxx-y := config.o core.o cregs.o dev.o dma.o
rsxx-objs := config.o core.o cregs.o dev.o dma.o


@ -29,15 +29,13 @@
#include "rsxx_priv.h"
#include "rsxx_cfg.h"
static void initialize_config(void *config)
static void initialize_config(struct rsxx_card_cfg *cfg)
{
struct rsxx_card_cfg *cfg = config;
cfg->hdr.version = RSXX_CFG_VERSION;
cfg->data.block_size = RSXX_HW_BLK_SIZE;
cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM;
cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
cfg->data.cache_order = (-1);
cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
cfg->data.intr_coal.count = 0;
@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card)
} else {
dev_info(CARD_TO_DEV(card),
"Initializing card configuration.\n");
initialize_config(card);
initialize_config(&card->config);
st = rsxx_save_config(card);
if (st)
return st;


@ -30,6 +30,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/genhd.h>
#include <linux/idr.h>
@ -39,8 +40,8 @@
#define NO_LEGACY 0
MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
MODULE_AUTHOR("IBM <support@ramsan.com>");
MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);
/*----------------- Interrupt Control & Handling -------------------*/
static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
{
card->isr_mask = 0;
card->ier_mask = 0;
}
static void __enable_intr(unsigned int *mask, unsigned int intr)
{
*mask |= intr;
@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr)
*/
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
if (unlikely(card->halt))
if (unlikely(card->halt) ||
unlikely(card->eeh_state))
return;
__enable_intr(&card->ier_mask, intr);
@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
if (unlikely(card->eeh_state))
return;
__disable_intr(&card->ier_mask, intr);
iowrite32(card->ier_mask, card->regmap + IER);
}
@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
unsigned int intr)
{
if (unlikely(card->halt))
if (unlikely(card->halt) ||
unlikely(card->eeh_state))
return;
__enable_intr(&card->isr_mask, intr);
@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
unsigned int intr)
{
if (unlikely(card->eeh_state))
return;
__disable_intr(&card->isr_mask, intr);
__disable_intr(&card->ier_mask, intr);
iowrite32(card->ier_mask, card->regmap + IER);
@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
do {
reread_isr = 0;
if (unlikely(card->eeh_state))
break;
isr = ioread32(card->regmap + ISR);
if (isr == 0xffffffff) {
/*
@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
}
/*----------------- Card Event Handler -------------------*/
static char *rsxx_card_state_to_str(unsigned int state)
static const char * const rsxx_card_state_to_str(unsigned int state)
{
static char *state_strings[] = {
static const char * const state_strings[] = {
"Unknown", "Shutdown", "Starting", "Formatting",
"Uninitialized", "Good", "Shutting Down",
"Fault", "Read Only Fault", "dStroying"
@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card)
return 0;
}
static int rsxx_eeh_frozen(struct pci_dev *dev)
{
struct rsxx_cardinfo *card = pci_get_drvdata(dev);
int i;
int st;
dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
card->eeh_state = 1;
rsxx_mask_interrupts(card);
/*
* We need to guarantee that the write for eeh_state and masking
* interrupts does not become reordered. This will prevent a possible
* race condition with the EEH code.
*/
wmb();
pci_disable_device(dev);
st = rsxx_eeh_save_issued_dmas(card);
if (st)
return st;
rsxx_eeh_save_issued_creg(card);
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
card->ctrl[i].status.buf,
card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
card->ctrl[i].cmd.buf,
card->ctrl[i].cmd.dma_addr);
}
return 0;
}
static void rsxx_eeh_failure(struct pci_dev *dev)
{
struct rsxx_cardinfo *card = pci_get_drvdata(dev);
int i;
dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
card->eeh_state = 1;
for (i = 0; i < card->n_targets; i++)
del_timer_sync(&card->ctrl[i].activity_timer);
rsxx_eeh_cancel_dmas(card);
}
static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
{
unsigned int status;
int iter = 0;
/* We need to wait for the hardware to reset */
while (iter++ < 10) {
status = ioread32(card->regmap + PCI_RECONFIG);
if (status & RSXX_FLUSH_BUSY) {
ssleep(1);
continue;
}
if (status & RSXX_FLUSH_TIMEOUT)
dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
return 0;
}
/* Hardware failed resetting itself. */
return -1;
}
static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
int st;
if (dev->revision < RSXX_EEH_SUPPORT)
return PCI_ERS_RESULT_NONE;
if (error == pci_channel_io_perm_failure) {
rsxx_eeh_failure(dev);
return PCI_ERS_RESULT_DISCONNECT;
}
st = rsxx_eeh_frozen(dev);
if (st) {
dev_err(&dev->dev, "Slot reset setup failed\n");
rsxx_eeh_failure(dev);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
struct rsxx_cardinfo *card = pci_get_drvdata(dev);
unsigned long flags;
int i;
int st;
dev_warn(&dev->dev,
"IBM FlashSystem PCI: recovering from slot reset.\n");
st = pci_enable_device(dev);
if (st)
goto failed_hw_setup;
pci_set_master(dev);
st = rsxx_eeh_fifo_flush_poll(card);
if (st)
goto failed_hw_setup;
rsxx_dma_queue_reset(card);
for (i = 0; i < card->n_targets; i++) {
st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
if (st)
goto failed_hw_buffers_init;
}
if (card->config_valid)
rsxx_dma_configure(card);
/* Clears the ISR register from spurious interrupts */
st = ioread32(card->regmap + ISR);
card->eeh_state = 0;
st = rsxx_eeh_remap_dmas(card);
if (st)
goto failed_remap_dmas;
spin_lock_irqsave(&card->irq_lock, flags);
if (card->n_targets & RSXX_MAX_TARGETS)
rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
else
rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
spin_unlock_irqrestore(&card->irq_lock, flags);
rsxx_kick_creg_queue(card);
for (i = 0; i < card->n_targets; i++) {
spin_lock(&card->ctrl[i].queue_lock);
if (list_empty(&card->ctrl[i].queue)) {
spin_unlock(&card->ctrl[i].queue_lock);
continue;
}
spin_unlock(&card->ctrl[i].queue_lock);
queue_work(card->ctrl[i].issue_wq,
&card->ctrl[i].issue_dma_work);
}
dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
return PCI_ERS_RESULT_RECOVERED;
failed_hw_buffers_init:
failed_remap_dmas:
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
pci_free_consistent(card->dev,
STATUS_BUFFER_SIZE8,
card->ctrl[i].status.buf,
card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
pci_free_consistent(card->dev,
COMMAND_BUFFER_SIZE8,
card->ctrl[i].cmd.buf,
card->ctrl[i].cmd.dma_addr);
}
failed_hw_setup:
rsxx_eeh_failure(dev);
return PCI_ERS_RESULT_DISCONNECT;
}
/*----------------- Driver Initialization & Setup -------------------*/
/* Returns: 0 if the driver is compatible with the device
-1 if the driver is NOT compatible with the device */
@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
spin_lock_init(&card->irq_lock);
card->halt = 0;
card->eeh_state = 0;
spin_lock_irq(&card->irq_lock);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev)
rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
spin_unlock_irqrestore(&card->irq_lock, flags);
/* Prevent work_structs from re-queuing themselves. */
card->halt = 1;
cancel_work_sync(&card->event_work);
rsxx_destroy_dev(card);
@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
spin_lock_irqsave(&card->irq_lock, flags);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
spin_unlock_irqrestore(&card->irq_lock, flags);
/* Prevent work_structs from re-queuing themselves. */
card->halt = 1;
free_irq(dev->irq, card);
if (!force_legacy)
@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev)
card_shutdown(card);
}
static const struct pci_error_handlers rsxx_err_handler = {
.error_detected = rsxx_error_detected,
.slot_reset = rsxx_slot_reset,
};
static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
{0,},
};
@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = {
.remove = rsxx_pci_remove,
.suspend = rsxx_pci_suspend,
.shutdown = rsxx_pci_shutdown,
.err_handler = &rsxx_err_handler,
};
static int __init rsxx_core_init(void)


@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool;
#error Unknown endianness!!! Aborting...
#endif
static void copy_to_creg_data(struct rsxx_cardinfo *card,
static int copy_to_creg_data(struct rsxx_cardinfo *card,
int cnt8,
void *buf,
unsigned int stream)
@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
int i = 0;
u32 *data = buf;
if (unlikely(card->eeh_state))
return -EIO;
for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
/*
* Firmware implementation makes it necessary to byte swap on
@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
else
iowrite32(data[i], card->regmap + CREG_DATA(i));
}
return 0;
}
static void copy_from_creg_data(struct rsxx_cardinfo *card,
static int copy_from_creg_data(struct rsxx_cardinfo *card,
int cnt8,
void *buf,
unsigned int stream)
@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
int i = 0;
u32 *data = buf;
if (unlikely(card->eeh_state))
return -EIO;
for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
/*
* Firmware implementation makes it necessary to byte swap on
@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
else
data[i] = ioread32(card->regmap + CREG_DATA(i));
}
}
static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
{
struct creg_cmd *cmd;
/*
* Spin lock is needed because this can be called in atomic/interrupt
* context.
*/
spin_lock_bh(&card->creg_ctrl.lock);
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
spin_unlock_bh(&card->creg_ctrl.lock);
return cmd;
return 0;
}
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
int st;
if (unlikely(card->eeh_state))
return;
iowrite32(cmd->addr, card->regmap + CREG_ADD);
iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
if (cmd->op == CREG_OP_WRITE) {
if (cmd->buf)
copy_to_creg_data(card, cmd->cnt8,
cmd->buf, cmd->stream);
if (cmd->buf) {
st = copy_to_creg_data(card, cmd->cnt8,
cmd->buf, cmd->stream);
if (st)
return;
}
}
/*
* Data copy must complete before initiating the command. This is
* needed for weakly ordered processors (i.e. PowerPC), so that all
* necessary registers are written before we kick the hardware.
*/
wmb();
if (unlikely(card->eeh_state))
return;
/* Setting the valid bit will kick off the command. */
iowrite32(cmd->op, card->regmap + CREG_CMD);
@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
cmd->cb_private = cb_private;
cmd->status = 0;
spin_lock(&card->creg_ctrl.lock);
spin_lock_bh(&card->creg_ctrl.lock);
list_add_tail(&cmd->list, &card->creg_ctrl.queue);
card->creg_ctrl.q_depth++;
creg_kick_queue(card);
spin_unlock(&card->creg_ctrl.lock);
spin_unlock_bh(&card->creg_ctrl.lock);
return 0;
}
@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data)
struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
struct creg_cmd *cmd;
cmd = pop_active_cmd(card);
spin_lock(&card->creg_ctrl.lock);
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
spin_unlock(&card->creg_ctrl.lock);
if (cmd == NULL) {
card->creg_ctrl.creg_stats.creg_timeout++;
dev_warn(CARD_TO_DEV(card),
@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work)
if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
card->creg_ctrl.creg_stats.failed_cancel_timer++;
cmd = pop_active_cmd(card);
spin_lock_bh(&card->creg_ctrl.lock);
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
spin_unlock_bh(&card->creg_ctrl.lock);
if (cmd == NULL) {
dev_err(CARD_TO_DEV(card),
"Spurious creg interrupt!\n");
@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work)
goto creg_done;
}
copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
}
creg_done:
@ -296,10 +302,10 @@ creg_done:
kmem_cache_free(creg_cmd_pool, cmd);
spin_lock(&card->creg_ctrl.lock);
spin_lock_bh(&card->creg_ctrl.lock);
card->creg_ctrl.active = 0;
creg_kick_queue(card);
spin_unlock(&card->creg_ctrl.lock);
spin_unlock_bh(&card->creg_ctrl.lock);
}
static void creg_reset(struct rsxx_cardinfo *card)
@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
"Resetting creg interface for recovery\n");
/* Cancel outstanding commands */
spin_lock(&card->creg_ctrl.lock);
spin_lock_bh(&card->creg_ctrl.lock);
list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
list_del(&cmd->list);
card->creg_ctrl.q_depth--;
@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
card->creg_ctrl.active = 0;
}
spin_unlock(&card->creg_ctrl.lock);
spin_unlock_bh(&card->creg_ctrl.lock);
card->creg_ctrl.reset = 0;
spin_lock_irqsave(&card->irq_lock, flags);
@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
return st;
/*
* This timeout is neccessary for unresponsive hardware. The additional
* This timeout is necessary for unresponsive hardware. The additional
* 20 seconds is used to guarantee that each creg request has time to
* complete.
*/
timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
card->creg_ctrl.q_depth) + 20000);
timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
card->creg_ctrl.q_depth + 20000);
/*
* The creg interface is guaranteed to complete. It has a timeout
@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card,
return 0;
}
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
struct creg_cmd *cmd = NULL;
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
if (cmd) {
del_timer_sync(&card->creg_ctrl.cmd_timer);
spin_lock_bh(&card->creg_ctrl.lock);
list_add(&cmd->list, &card->creg_ctrl.queue);
card->creg_ctrl.q_depth++;
card->creg_ctrl.active = 0;
spin_unlock_bh(&card->creg_ctrl.lock);
}
}
void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
spin_lock_bh(&card->creg_ctrl.lock);
if (!list_empty(&card->creg_ctrl.queue))
creg_kick_queue(card);
spin_unlock_bh(&card->creg_ctrl.lock);
}
/*------------ Initialization & Setup --------------*/
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
int cnt = 0;
/* Cancel outstanding commands */
spin_lock(&card->creg_ctrl.lock);
spin_lock_bh(&card->creg_ctrl.lock);
list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
list_del(&cmd->list);
if (cmd->cb)
@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
"Canceled active creg command\n");
kmem_cache_free(creg_cmd_pool, cmd);
}
spin_unlock(&card->creg_ctrl.lock);
spin_unlock_bh(&card->creg_ctrl.lock);
cancel_work_sync(&card->creg_ctrl.done_work);
}


@ -28,7 +28,7 @@
struct rsxx_dma {
struct list_head list;
u8 cmd;
unsigned int laddr; /* Logical address on the ramsan */
unsigned int laddr; /* Logical address */
struct {
u32 off;
u32 cnt;
@ -81,9 +81,6 @@ enum rsxx_hw_status {
HW_STATUS_FAULT = 0x08,
};
#define STATUS_BUFFER_SIZE8 4096
#define COMMAND_BUFFER_SIZE8 4096
static struct kmem_cache *rsxx_dma_pool;
struct dma_tracker {
@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
return tgt;
}
static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
/* Reset all DMA Command/Status Queues */
iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
u32 q_depth = 0;
u32 intr_coal;
if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
unlikely(card->eeh_state))
return;
for (i = 0; i < card->n_targets; i++)
@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
}
/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_complete_dma(struct rsxx_cardinfo *card,
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
struct rsxx_dma *dma,
unsigned int status)
{
if (status & DMA_SW_ERR)
printk_ratelimited(KERN_ERR
"SW Error in DMA(cmd x%02x, laddr x%08x)\n",
dma->cmd, dma->laddr);
ctrl->stats.dma_sw_err++;
if (status & DMA_HW_FAULT)
printk_ratelimited(KERN_ERR
"HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
dma->cmd, dma->laddr);
ctrl->stats.dma_hw_fault++;
if (status & DMA_CANCELLED)
printk_ratelimited(KERN_ERR
"DMA Cancelled(cmd x%02x, laddr x%08x)\n",
dma->cmd, dma->laddr);
ctrl->stats.dma_cancelled++;
if (dma->dma_addr)
pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
pci_unmap_page(ctrl->card->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
if (dma->cb)
dma->cb(card, dma->cb_data, status ? 1 : 0);
dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
kmem_cache_free(rsxx_dma_pool, dma);
}
@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
if (requeue_cmd)
rsxx_requeue_dma(ctrl, dma);
else
rsxx_complete_dma(ctrl->card, dma, status);
rsxx_complete_dma(ctrl, dma, status);
}
static void dma_engine_stalled(unsigned long data)
{
struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
unlikely(ctrl->card->eeh_state))
return;
if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
hw_cmd_buf = ctrl->cmd.buf;
if (unlikely(ctrl->card->halt))
if (unlikely(ctrl->card->halt) ||
unlikely(ctrl->card->eeh_state))
return;
while (1) {
@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
*/
if (unlikely(ctrl->card->dma_fault)) {
push_tracker(ctrl->trackers, tag);
rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
continue;
}
@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
/* Let HW know we've queued commands. */
if (cmds_pending) {
/*
* We must guarantee that the CPU writes to 'ctrl->cmd.buf'
* (which is in PCI-consistent system-memory) from the loop
* above make it into the coherency domain before the
* following PIO "trigger" updating the cmd.idx. A WMB is
* sufficient. We need not explicitly CPU cache-flush since
* the memory is a PCI-consistent (ie; coherent) mapping.
*/
wmb();
atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
mod_timer(&ctrl->activity_timer,
jiffies + DMA_ACTIVITY_TIMEOUT);
if (unlikely(ctrl->card->eeh_state)) {
del_timer_sync(&ctrl->activity_timer);
return;
}
iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
}
}
@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
hw_st_buf = ctrl->status.buf;
if (unlikely(ctrl->card->halt) ||
unlikely(ctrl->card->dma_fault))
unlikely(ctrl->card->dma_fault) ||
unlikely(ctrl->card->eeh_state))
return;
count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
if (status)
rsxx_handle_dma_error(ctrl, dma, status);
else
rsxx_complete_dma(ctrl->card, dma, 0);
rsxx_complete_dma(ctrl, dma, 0);
push_tracker(ctrl->trackers, tag);
@ -727,20 +719,54 @@ bvec_err:
/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
&ctrl->status.dma_addr);
ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
&ctrl->cmd.dma_addr);
if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
return -ENOMEM;
memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
iowrite32(lower_32_bits(ctrl->status.dma_addr),
ctrl->regmap + SB_ADD_LO);
iowrite32(upper_32_bits(ctrl->status.dma_addr),
ctrl->regmap + SB_ADD_HI);
memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
ctrl->status.idx);
return -EINVAL;
}
iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
ctrl->status.idx);
return -EINVAL;
}
iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
return 0;
}
static int rsxx_dma_ctrl_init(struct pci_dev *dev,
struct rsxx_dma_ctrl *ctrl)
{
int i;
int st;
memset(&ctrl->stats, 0, sizeof(ctrl->stats));
ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
&ctrl->status.dma_addr);
ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
&ctrl->cmd.dma_addr);
if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
return -ENOMEM;
ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
if (!ctrl->trackers)
return -ENOMEM;
@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
iowrite32(lower_32_bits(ctrl->status.dma_addr),
ctrl->regmap + SB_ADD_LO);
iowrite32(upper_32_bits(ctrl->status.dma_addr),
ctrl->regmap + SB_ADD_HI);
memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
ctrl->status.idx);
return -EINVAL;
}
iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
ctrl->status.idx);
return -EINVAL;
}
iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
wmb();
st = rsxx_hw_buffers_init(dev, ctrl);
if (st)
return st;
return 0;
}
@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
return 0;
}
static int rsxx_dma_configure(struct rsxx_cardinfo *card)
int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
u32 intr_coal;
@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
}
}
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
int i;
int j;
int cnt;
struct rsxx_dma *dma;
struct list_head *issued_dmas;
issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
GFP_KERNEL);
if (!issued_dmas)
return -ENOMEM;
for (i = 0; i < card->n_targets; i++) {
INIT_LIST_HEAD(&issued_dmas[i]);
cnt = 0;
for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
dma = get_tracker_dma(card->ctrl[i].trackers, j);
if (dma == NULL)
continue;
if (dma->cmd == HW_CMD_BLK_WRITE)
card->ctrl[i].stats.writes_issued--;
else if (dma->cmd == HW_CMD_BLK_DISCARD)
card->ctrl[i].stats.discards_issued--;
else
card->ctrl[i].stats.reads_issued--;
list_add_tail(&dma->list, &issued_dmas[i]);
push_tracker(card->ctrl[i].trackers, j);
cnt++;
}
spin_lock(&card->ctrl[i].queue_lock);
list_splice(&issued_dmas[i], &card->ctrl[i].queue);
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
card->ctrl[i].stats.sw_q_depth += cnt;
card->ctrl[i].e_cnt = 0;
list_for_each_entry(dma, &card->ctrl[i].queue, list) {
if (dma->dma_addr)
pci_unmap_page(card->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
}
spin_unlock(&card->ctrl[i].queue_lock);
}
kfree(issued_dmas);
return 0;
}
void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
{
struct rsxx_dma *dma;
struct rsxx_dma *tmp;
int i;
for (i = 0; i < card->n_targets; i++) {
spin_lock(&card->ctrl[i].queue_lock);
list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
list_del(&dma->list);
rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
}
spin_unlock(&card->ctrl[i].queue_lock);
}
}
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
{
struct rsxx_dma *dma;
int i;
for (i = 0; i < card->n_targets; i++) {
spin_lock(&card->ctrl[i].queue_lock);
list_for_each_entry(dma, &card->ctrl[i].queue, list) {
dma->dma_addr = pci_map_page(card->dev, dma->page,
dma->pg_off, get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
if (!dma->dma_addr) {
spin_unlock(&card->ctrl[i].queue_lock);
kmem_cache_free(rsxx_dma_pool, dma);
return -ENOMEM;
}
}
spin_unlock(&card->ctrl[i].queue_lock);
}
return 0;
}
int rsxx_dma_init(void)
{


@ -27,15 +27,17 @@
/*----------------- IOCTL Definitions -------------------*/
#define RSXX_MAX_DATA 8
struct rsxx_reg_access {
__u32 addr;
__u32 cnt;
__u32 stat;
__u32 stream;
__u32 data[8];
__u32 data[RSXX_MAX_DATA];
};
#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32)))
#define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32)))
#define RSXX_IOC_MAGIC 'r'


@ -58,7 +58,7 @@ struct rsxx_card_cfg {
};
/* Vendor ID Values */
#define RSXX_VENDOR_ID_TMS_IBM 0
#define RSXX_VENDOR_ID_IBM 0
#define RSXX_VENDOR_ID_DSI 1
#define RSXX_VENDOR_COUNT 2


@ -45,16 +45,13 @@
struct proc_cmd;
#define PCI_VENDOR_ID_TMS_IBM 0x15B6
#define PCI_DEVICE_ID_RS70_FLASH 0x0019
#define PCI_DEVICE_ID_RS70D_FLASH 0x001A
#define PCI_DEVICE_ID_RS80_FLASH 0x001C
#define PCI_DEVICE_ID_RS81_FLASH 0x001E
#define PCI_DEVICE_ID_FS70_FLASH 0x04A9
#define PCI_DEVICE_ID_FS80_FLASH 0x04AA
#define RS70_PCI_REV_SUPPORTED 4
#define DRIVER_NAME "rsxx"
#define DRIVER_VERSION "3.7"
#define DRIVER_VERSION "4.0"
/* Block size is 4096 */
#define RSXX_HW_BLK_SHIFT 12
@ -67,6 +64,9 @@ struct proc_cmd;
#define RSXX_MAX_OUTSTANDING_CMDS 255
#define RSXX_CS_IDX_MASK 0xff
#define STATUS_BUFFER_SIZE8 4096
#define COMMAND_BUFFER_SIZE8 4096
#define RSXX_MAX_TARGETS 8
struct dma_tracker_list;
@ -91,6 +91,9 @@ struct rsxx_dma_stats {
u32 discards_failed;
u32 done_rescheduled;
u32 issue_rescheduled;
u32 dma_sw_err;
u32 dma_hw_fault;
u32 dma_cancelled;
u32 sw_q_depth; /* Number of DMAs on the SW queue. */
atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
};
@ -116,6 +119,7 @@ struct rsxx_dma_ctrl {
struct rsxx_cardinfo {
struct pci_dev *dev;
unsigned int halt;
unsigned int eeh_state;
void __iomem *regmap;
spinlock_t irq_lock;
@ -224,6 +228,7 @@ enum rsxx_pci_regmap {
PERF_RD512_HI = 0xac,
PERF_WR512_LO = 0xb0,
PERF_WR512_HI = 0xb4,
PCI_RECONFIG = 0xb8,
};
enum rsxx_intr {
@ -237,6 +242,8 @@ enum rsxx_intr {
CR_INTR_DMA5 = 0x00000080,
CR_INTR_DMA6 = 0x00000100,
CR_INTR_DMA7 = 0x00000200,
CR_INTR_ALL_C = 0x0000003f,
CR_INTR_ALL_G = 0x000003ff,
CR_INTR_DMA_ALL = 0x000003f5,
CR_INTR_ALL = 0xffffffff,
};
@ -253,8 +260,14 @@ enum rsxx_pci_reset {
DMA_QUEUE_RESET = 0x00000001,
};
enum rsxx_hw_fifo_flush {
RSXX_FLUSH_BUSY = 0x00000002,
RSXX_FLUSH_TIMEOUT = 0x00000004,
};
enum rsxx_pci_revision {
RSXX_DISCARD_SUPPORT = 2,
RSXX_EEH_SUPPORT = 3,
};
enum rsxx_creg_cmd {
@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card);
void rsxx_dma_destroy(struct rsxx_cardinfo *card);
int rsxx_dma_init(void);
void rsxx_dma_cleanup(void);
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
int rsxx_dma_configure(struct rsxx_cardinfo *card);
int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
struct bio *bio,
atomic_t *n_dmas,
rsxx_dma_cb cb,
void *cb_data);
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
/***** cregs.c *****/
int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card);
void rsxx_creg_destroy(struct rsxx_cardinfo *card);
int rsxx_creg_init(void);
void rsxx_creg_cleanup(void);
int rsxx_reg_access(struct rsxx_cardinfo *card,
struct rsxx_reg_access __user *ucmd,
int read);
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);


@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
#define foreach_grant_safe(pos, n, rbtree, node) \
for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
(n) = rb_next(&(pos)->node); \
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
&(pos)->node != NULL; \
(pos) = container_of(n, typeof(*(pos)), node), \
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
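
The new initializer above guards against an empty tree: rb_first() returns NULL in that case, and the old unconditional rb_next(&(pos)->node) walked from that NULL before the loop condition could stop the iteration; the conditional form mirrors the guard already used in the increment step.
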
@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
static void print_stats(struct xen_blkif *blkif)
{
pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d"
" | ds %4d\n",
pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
" | ds %4llu\n",
current->comm, blkif->st_oo_req,
blkif->st_rd_req, blkif->st_wr_req,
blkif->st_f_req, blkif->st_ds_req);
@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg)
}
struct seg_buf {
unsigned long buf;
unsigned int offset;
unsigned int nsec;
};
/*
@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req,
* If this is a new persistent grant
* save the handler
*/
persistent_gnts[i]->handle = map[j].handle;
persistent_gnts[i]->dev_bus_addr =
map[j++].dev_bus_addr;
persistent_gnts[i]->handle = map[j++].handle;
}
pending_handle(pending_req, i) =
persistent_gnts[i]->handle;
if (ret)
continue;
seg[i].buf = persistent_gnts[i]->dev_bus_addr |
(req->u.rw.seg[i].first_sect << 9);
} else {
pending_handle(pending_req, i) = map[j].handle;
pending_handle(pending_req, i) = map[j++].handle;
bitmap_set(pending_req->unmap_seg, i, 1);
if (ret) {
j++;
if (ret)
continue;
}
seg[i].buf = map[j++].dev_bus_addr |
(req->u.rw.seg[i].first_sect << 9);
}
seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
}
return ret;
}
@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
return err;
}
static int dispatch_other_io(struct xen_blkif *blkif,
struct blkif_request *req,
struct pending_req *pending_req)
{
free_req(pending_req);
make_response(blkif, req->u.other.id, req->operation,
BLKIF_RSP_EOPNOTSUPP);
return -EIO;
}
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
atomic_set(&blkif->drain, 1);
@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif)
/* Apply all sanity checks to /private copy/ of request. */
barrier();
if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
switch (req.operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
if (dispatch_rw_block_io(blkif, &req, pending_req))
goto done;
break;
case BLKIF_OP_DISCARD:
free_req(pending_req);
if (dispatch_discard_io(blkif, &req))
break;
} else if (dispatch_rw_block_io(blkif, &req, pending_req))
goto done;
break;
default:
if (dispatch_other_io(blkif, &req, pending_req))
goto done;
break;
}
/* Yield point for this unbounded loop. */
cond_resched();
}
done:
return more_to_do;
}
@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
operation == READ ? "read" : "write",
preq.sector_number,
preq.sector_number + preq.nr_sects, preq.dev);
preq.sector_number + preq.nr_sects,
blkif->vbd.pdevice);
goto fail_response;
}
@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
(bio_add_page(bio,
pages[i],
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
seg[i].offset) == 0)) {
bio = bio_alloc(GFP_KERNEL, nseg-i);
if (unlikely(bio == NULL))
@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
bio->bi_end_io = end_block_io_op;
}
/*
* We set it one so that the last submit_bio does not have to call
* atomic_inc.
*/
atomic_set(&pending_req->pendcnt, nbio);
/* Get a reference count for the disk queue and start sending I/O */
blk_start_plug(&plug);
for (i = 0; i < nbio; i++)
@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
fail_put_bio:
for (i = 0; i < nbio; i++)
bio_put(biolist[i]);
atomic_set(&pending_req->pendcnt, 1);
__end_block_io_op(pending_req, -EINVAL);
msleep(1); /* back off a bit */
return -EIO;


@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard {
uint64_t nr_sectors;
} __attribute__((__packed__));
struct blkif_x86_32_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2;
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
struct blkif_x86_32_request {
uint8_t operation; /* BLKIF_OP_??? */
union {
struct blkif_x86_32_request_rw rw;
struct blkif_x86_32_request_discard discard;
struct blkif_x86_32_request_other other;
} u;
} __attribute__((__packed__));
@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard {
uint64_t nr_sectors;
} __attribute__((__packed__));
struct blkif_x86_64_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2;
uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
struct blkif_x86_64_request {
uint8_t operation; /* BLKIF_OP_??? */
union {
struct blkif_x86_64_request_rw rw;
struct blkif_x86_64_request_discard discard;
struct blkif_x86_64_request_other other;
} u;
} __attribute__((__packed__));
@ -172,7 +187,6 @@ struct persistent_gnt {
struct page *page;
grant_ref_t gnt;
grant_handle_t handle;
uint64_t dev_bus_addr;
struct rb_node node;
};
@ -208,13 +222,13 @@ struct xen_blkif {
/* statistics */
unsigned long st_print;
int st_rd_req;
int st_wr_req;
int st_oo_req;
int st_f_req;
int st_ds_req;
int st_rd_sect;
int st_wr_sect;
unsigned long long st_rd_req;
unsigned long long st_wr_req;
unsigned long long st_oo_req;
unsigned long long st_f_req;
unsigned long long st_ds_req;
unsigned long long st_rd_sect;
unsigned long long st_wr_sect;
wait_queue_head_t waiting_to_free;
};
@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}


@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void)
} \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req);
VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req);
VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req);
VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req);
VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect);
static struct attribute *xen_vbdstat_attrs[] = {
&dev_attr_oo_req.attr,


@ -44,7 +44,7 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <linux/list.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@ -68,13 +68,12 @@ enum blkif_state {
struct grant {
grant_ref_t gref;
unsigned long pfn;
struct llist_node node;
struct list_head node;
};
struct blk_shadow {
struct blkif_request req;
struct request *request;
unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
@ -105,7 +104,7 @@ struct blkfront_info
struct work_struct work;
struct gnttab_free_callback callback;
struct blk_shadow shadow[BLK_RING_SIZE];
struct llist_head persistent_gnts;
struct list_head persistent_gnts;
unsigned int persistent_gnts_c;
unsigned long shadow_free;
unsigned int feature_flush;
@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info,
return 0;
}
static int fill_grant_buffer(struct blkfront_info *info, int num)
{
struct page *granted_page;
struct grant *gnt_list_entry, *n;
int i = 0;
while(i < num) {
gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
if (!gnt_list_entry)
goto out_of_memory;
granted_page = alloc_page(GFP_NOIO);
if (!granted_page) {
kfree(gnt_list_entry);
goto out_of_memory;
}
gnt_list_entry->pfn = page_to_pfn(granted_page);
gnt_list_entry->gref = GRANT_INVALID_REF;
list_add(&gnt_list_entry->node, &info->persistent_gnts);
i++;
}
return 0;
out_of_memory:
list_for_each_entry_safe(gnt_list_entry, n,
&info->persistent_gnts, node) {
list_del(&gnt_list_entry->node);
__free_page(pfn_to_page(gnt_list_entry->pfn));
kfree(gnt_list_entry);
i--;
}
BUG_ON(i != 0);
return -ENOMEM;
}
static struct grant *get_grant(grant_ref_t *gref_head,
struct blkfront_info *info)
{
struct grant *gnt_list_entry;
unsigned long buffer_mfn;
BUG_ON(list_empty(&info->persistent_gnts));
gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
node);
list_del(&gnt_list_entry->node);
if (gnt_list_entry->gref != GRANT_INVALID_REF) {
info->persistent_gnts_c--;
return gnt_list_entry;
}
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
info->xbdev->otherend_id,
buffer_mfn, 0);
return gnt_list_entry;
}
static const char *op_name(int op)
{
static const char *const names[] = {
@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
static int blkif_queue_request(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
struct blkif_request *ring_req;
unsigned long id;
unsigned int fsect, lsect;
@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req)
*/
bool new_persistent_gnts;
grant_ref_t gref_head;
struct page *granted_page;
struct grant *gnt_list_entry = NULL;
struct scatterlist *sg;
@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req)
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
if (info->persistent_gnts_c) {
BUG_ON(llist_empty(&info->persistent_gnts));
gnt_list_entry = llist_entry(
llist_del_first(&info->persistent_gnts),
struct grant, node);
ref = gnt_list_entry->gref;
buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
info->persistent_gnts_c--;
} else {
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
gnt_list_entry =
kmalloc(sizeof(struct grant),
GFP_ATOMIC);
if (!gnt_list_entry)
return -ENOMEM;
granted_page = alloc_page(GFP_ATOMIC);
if (!granted_page) {
kfree(gnt_list_entry);
return -ENOMEM;
}
gnt_list_entry->pfn =
page_to_pfn(granted_page);
gnt_list_entry->gref = ref;
buffer_mfn = pfn_to_mfn(page_to_pfn(
granted_page));
gnttab_grant_foreign_access_ref(ref,
info->xbdev->otherend_id,
buffer_mfn, 0);
}
gnt_list_entry = get_grant(&gref_head, info);
ref = gnt_list_entry->gref;
info->shadow[id].grants_used[i] = gnt_list_entry;
@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req)
kunmap_atomic(shared_data);
}
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
ring_req->u.rw.seg[i] =
(struct blkif_request_segment) {
.gref = ref,
@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work)
static void blkif_free(struct blkfront_info *info, int suspend)
{
struct llist_node *all_gnts;
struct grant *persistent_gnt, *tmp;
struct llist_node *n;
struct grant *persistent_gnt;
struct grant *n;
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock);
@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend)
blk_stop_queue(info->rq);
/* Remove all persistent grants */
if (info->persistent_gnts_c) {
all_gnts = llist_del_all(&info->persistent_gnts);
persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
while (persistent_gnt) {
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
if (!list_empty(&info->persistent_gnts)) {
list_for_each_entry_safe(persistent_gnt, n,
&info->persistent_gnts, node) {
list_del(&persistent_gnt->node);
if (persistent_gnt->gref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(persistent_gnt->gref,
0, 0UL);
info->persistent_gnts_c--;
}
__free_page(pfn_to_page(persistent_gnt->pfn));
tmp = persistent_gnt;
n = persistent_gnt->node.next;
if (n)
persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
else
persistent_gnt = NULL;
kfree(tmp);
kfree(persistent_gnt);
}
info->persistent_gnts_c = 0;
}
BUG_ON(info->persistent_gnts_c != 0);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
}
/* Add the persistent grant into the list of free grants */
for (i = 0; i < s->req.u.rw.nr_segments; i++) {
llist_add(&s->grants_used[i]->node, &info->persistent_gnts);
list_add(&s->grants_used[i]->node, &info->persistent_gnts);
info->persistent_gnts_c++;
}
}
@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev,
sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
/* Allocate memory for grants */
err = fill_grant_buffer(info, BLK_RING_SIZE *
BLKIF_MAX_SEGMENTS_PER_REQUEST);
if (err)
goto fail;
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) {
free_page((unsigned long)sring);
@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev,
spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
init_llist_head(&info->persistent_gnts);
INIT_LIST_HEAD(&info->persistent_gnts);
info->persistent_gnts_c = 0;
info->connected = BLKIF_STATE_DISCONNECTED;
INIT_WORK(&info->work, blkif_restart_queue);
@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info)
int j;
/* Stage 1: Make a safe copy of the shadow state. */
copy = kmalloc(sizeof(info->shadow),
copy = kmemdup(info->shadow, sizeof(info->shadow),
GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
if (!copy)
return -ENOMEM;
memcpy(copy, info->shadow, sizeof(info->shadow));
/* Stage 2: Set up free list. */
memset(&info->shadow, 0, sizeof(info->shadow));
@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info)
gnttab_grant_foreign_access_ref(
req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
pfn_to_mfn(copy[i].grants_used[j]->pfn),
0);
}
info->shadow[req->u.rw.id].req = *req;


@ -73,9 +73,11 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x03F0, 0x311D) },
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x0036) },
{ USB_DEVICE(0x0CF3, 0x3004) },
{ USB_DEVICE(0x0CF3, 0x3008) },
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x0CF3, 0x817a) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@ -107,9 +109,11 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
static struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },


@ -131,9 +131,11 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
/* Atheros 3012 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },


@ -730,7 +730,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
cpumask_copy(policy->cpus, perf->shared_cpu_map);
}
cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
@ -742,7 +741,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr_info_once(PFX "overriding BIOS provided _PSD data\n");
}


@ -180,15 +180,19 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
if (!cpufreq_frequency_get_table(cpu))
if (!policy)
return;
if (policy && !policy_is_shared(policy)) {
if (!cpufreq_frequency_get_table(cpu))
goto put_ref;
if (!policy_is_shared(policy)) {
pr_debug("%s: Free sysfs stat\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
}
if (policy)
cpufreq_cpu_put(policy);
put_ref:
cpufreq_cpu_put(policy);
}
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,


@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void)
static int intel_pstate_min_pstate(void)
{
u64 value;
rdmsrl(0xCE, value);
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 40) & 0xFF;
}
static int intel_pstate_max_pstate(void)
{
u64 value;
rdmsrl(0xCE, value);
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 8) & 0xFF;
}
@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void)
{
u64 value;
int nont, ret;
rdmsrl(0x1AD, value);
rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
nont = intel_pstate_max_pstate();
ret = ((value) & 255);
if (ret <= nont)
@ -454,7 +454,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
sample->idletime_us * 100,
sample->duration_us);
core_pct = div64_u64(sample->aperf * 100, sample->mperf);
sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000;
sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
100);
@ -752,6 +752,29 @@ static struct cpufreq_driver intel_pstate_driver = {
static int __initdata no_load;
static int intel_pstate_msrs_not_valid(void)
{
/* Check that all the msr's we are using are valid. */
u64 aperf, mperf, tmp;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
if (!intel_pstate_min_pstate() ||
!intel_pstate_max_pstate() ||
!intel_pstate_turbo_pstate())
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
if (!(tmp - aperf))
return -ENODEV;
rdmsrl(MSR_IA32_MPERF, tmp);
if (!(tmp - mperf))
return -ENODEV;
return 0;
}
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
@ -764,6 +787,9 @@ static int __init intel_pstate_init(void)
if (!id)
return -ENODEV;
if (intel_pstate_msrs_not_valid())
return -ENODEV;
pr_info("Intel P-state driver initializing.\n");
all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
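
For reference, a sketch of the bit fields the pstate helpers above decode (the sample value is illustrative, not taken from a particular CPU):

	/*
	 * MSR_PLATFORM_INFO (0xCE), as read above:
	 *   bits 15:8   max (non-turbo) pstate: (value >> 8) & 0xFF
	 *   bits 47:40  min pstate:             (value >> 40) & 0xFF
	 * Example: value = 0x0000100000002400 gives max = 0x24 (36)
	 * and min = 0x10 (16); with the usual 100 MHz bus clock those
	 * ratios correspond to 3.6 GHz and 1.6 GHz.
	 */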


@ -1650,11 +1650,7 @@ struct caam_alg_template {
};
static struct caam_alg_template driver_algs[] = {
/*
* single-pass ipsec_esp descriptor
* authencesn(*,*) is also registered, although not present
* explicitly here.
*/
/* single-pass ipsec_esp descriptor */
{
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
@ -2217,9 +2213,7 @@ static int __init caam_algapi_init(void)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
/* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
bool done = false;
authencesn:
t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@ -2233,25 +2227,8 @@ authencesn:
dev_warn(ctrldev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
} else {
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
!memcmp(driver_algs[i].name, "authenc", 7) &&
!done) {
char *name;
name = driver_algs[i].name;
memmove(name + 10, name + 7, strlen(name) - 7);
memcpy(name + 7, "esn", 3);
name = driver_algs[i].driver_name;
memmove(name + 10, name + 7, strlen(name) - 7);
memcpy(name + 7, "esn", 3);
done = true;
goto authencesn;
}
}
}
if (!list_empty(&priv->alg_list))
dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",


@ -23,7 +23,6 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/string.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>


@ -38,7 +38,6 @@
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
@ -1974,11 +1973,7 @@ struct talitos_alg_template {
};
static struct talitos_alg_template driver_algs[] = {
/*
* AEAD algorithms. These use a single-pass ipsec_esp descriptor.
* authencesn(*,*) is also registered, although not present
* explicitly here.
*/
/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
@ -2820,9 +2815,7 @@ static int talitos_probe(struct platform_device *ofdev)
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
char *name = NULL;
bool authenc = false;
authencesn:
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@ -2837,8 +2830,6 @@ authencesn:
err = crypto_register_alg(
&t_alg->algt.alg.crypto);
name = t_alg->algt.alg.crypto.cra_driver_name;
authenc = authenc ? !authenc :
!(bool)memcmp(name, "authenc", 7);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = crypto_register_ahash(
@ -2851,25 +2842,8 @@ authencesn:
dev_err(dev, "%s alg registration failed\n",
name);
kfree(t_alg);
} else {
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
if (authenc) {
struct crypto_alg *alg =
&driver_algs[i].alg.crypto;
name = alg->cra_name;
memmove(name + 10, name + 7,
strlen(name) - 7);
memcpy(name + 7, "esn", 3);
name = alg->cra_driver_name;
memmove(name + 10, name + 7,
strlen(name) - 7);
memcpy(name + 7, "esn", 3);
goto authencesn;
}
}
}
}
if (!list_empty(&priv->alg_list))


@ -1001,6 +1001,13 @@ static inline void convert_burst(u32 *maxburst)
*maxburst = 0;
}
static inline void convert_slave_id(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
dwc->dma_sconfig.slave_id -= dw->request_line_base;
}
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
@ -1015,6 +1022,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
convert_burst(&dwc->dma_sconfig.src_maxburst);
convert_burst(&dwc->dma_sconfig.dst_maxburst);
convert_slave_id(dwc);
return 0;
}
@ -1276,9 +1284,9 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 3)
return NULL;
fargs.req = be32_to_cpup(dma_spec->args+0);
fargs.src = be32_to_cpup(dma_spec->args+1);
fargs.dst = be32_to_cpup(dma_spec->args+2);
fargs.req = dma_spec->args[0];
fargs.src = dma_spec->args[1];
fargs.dst = dma_spec->args[2];
if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
fargs.src >= dw->nr_masters ||
@ -1628,6 +1636,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
static int dw_probe(struct platform_device *pdev)
{
const struct platform_device_id *match;
struct dw_dma_platform_data *pdata;
struct resource *io;
struct dw_dma *dw;
@ -1711,6 +1720,11 @@ static int dw_probe(struct platform_device *pdev)
memcpy(dw->data_width, pdata->data_width, 4);
}
/* Get the base request line if set */
match = platform_get_device_id(pdev);
if (match)
dw->request_line_base = (unsigned int)match->driver_data;
/* Calculate all channel mask before DMA setup */
dw->all_chan_mask = (1 << nr_channels) - 1;
@ -1906,7 +1920,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
static const struct platform_device_id dw_dma_ids[] = {
{ "INTL9C60", 0 },
/* Name, Request Line Base */
{ "INTL9C60", (kernel_ulong_t)16 },
{ }
};
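
A worked example of the remapping above: the id table now carries a request line base of 16 for INTL9C60, so convert_slave_id() turns a platform-provided slave_id of, say, 23 into 23 - 16 = 7, the controller-relative request line (23 is an illustrative value, not taken from a real board).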


@ -247,6 +247,7 @@ struct dw_dma {
/* hardware configuration */
unsigned char nr_masters;
unsigned char data_width[4];
unsigned int request_line_base;
struct dw_dma_chan chan[0];
};


@ -53,6 +53,24 @@ config EFI_VARS
Subsequent efibootmgr releases may be found at:
<http://linux.dell.com/efibootmgr>
config EFI_VARS_PSTORE
bool "Register efivars backend for pstore"
depends on EFI_VARS && PSTORE
default y
help
Say Y here to enable use of efivars as a backend to pstore. This
will allow writing console messages, crash dumps, or anything
else supported by pstore to EFI variables.
config EFI_VARS_PSTORE_DEFAULT_DISABLE
bool "Disable using efivars as a pstore backend by default"
depends on EFI_VARS_PSTORE
default n
help
Saying Y here will disable the use of efivars as a storage
backend for pstore by default. This setting can be overridden
using the efivars module's pstore_disable parameter.
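
Even with this option selected, the backend can be re-enabled at runtime through the parameter named in the help text; for example (assuming efivars is built in, so the parameter takes the module-name prefix):

	efivars.pstore_disable=0

on the kernel command line, or via /sys/module/efivars/parameters/pstore_disable after boot, since the parameter is declared with mode 0644.
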
config EFI_PCDP
bool "Console device selection via EFI PCDP or HCDP table"
depends on ACPI && EFI && IA64


@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION);
*/
#define GUID_LEN 36
static bool efivars_pstore_disable =
IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
@ -165,6 +170,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
static void efivar_update_sysfs_entries(struct work_struct *);
static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
static bool efivar_wq_enabled = true;
/* Return the number of unicode characters in data */
static unsigned long
@ -1309,9 +1315,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = {
.create = efivarfs_create,
};
static struct pstore_info efi_pstore_info;
#ifdef CONFIG_PSTORE
#ifdef CONFIG_EFI_VARS_PSTORE
static int efi_pstore_open(struct pstore_info *psi)
{
@ -1441,7 +1445,7 @@ static int efi_pstore_write(enum pstore_type_id type,
spin_unlock_irqrestore(&efivars->lock, flags);
if (reason == KMSG_DUMP_OOPS)
if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled)
schedule_work(&efivar_work);
*id = part;
@ -1514,38 +1518,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
return 0;
}
#else
static int efi_pstore_open(struct pstore_info *psi)
{
return 0;
}
static int efi_pstore_close(struct pstore_info *psi)
{
return 0;
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *timespec,
char **buf, struct pstore_info *psi)
{
return -1;
}
static int efi_pstore_write(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, int count, size_t size,
struct pstore_info *psi)
{
return 0;
}
static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
struct timespec time, struct pstore_info *psi)
{
return 0;
}
#endif
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
@ -1557,6 +1529,24 @@ static struct pstore_info efi_pstore_info = {
.erase = efi_pstore_erase,
};
static void efivar_pstore_register(struct efivars *efivars)
{
efivars->efi_pstore_info = efi_pstore_info;
efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
if (efivars->efi_pstore_info.buf) {
efivars->efi_pstore_info.bufsize = 1024;
efivars->efi_pstore_info.data = efivars;
spin_lock_init(&efivars->efi_pstore_info.buf_lock);
pstore_register(&efivars->efi_pstore_info);
}
}
#else
static void efivar_pstore_register(struct efivars *efivars)
{
return;
}
#endif
static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
@ -1716,6 +1706,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
return found;
}
/*
* Returns the size of variable_name, in bytes, including the
* terminating NULL character, or variable_name_size if no NULL
* character is found among the first variable_name_size bytes.
*/
static unsigned long var_name_strnsize(efi_char16_t *variable_name,
unsigned long variable_name_size)
{
unsigned long len;
efi_char16_t c;
/*
* The variable name is, by definition, a NULL-terminated
* string, so make absolutely sure that variable_name_size is
* the value we expect it to be. If not, return the real size.
*/
for (len = 2; len <= variable_name_size; len += sizeof(c)) {
c = variable_name[(len / sizeof(c)) - 1];
if (!c)
break;
}
return min(len, variable_name_size);
}
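
A worked example of the helper above: for the UTF-16 name L"Boot0000" (8 characters plus the NUL terminator), the loop breaks at len = 18, i.e. (8 + 1) * sizeof(efi_char16_t) bytes, and that is what is returned as long as the firmware-reported variable_name_size is no smaller.
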
static void efivar_update_sysfs_entries(struct work_struct *work)
{
struct efivars *efivars = &__efivars;
@ -1756,10 +1771,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
if (!found) {
kfree(variable_name);
break;
} else
} else {
variable_name_size = var_name_strnsize(variable_name,
variable_name_size);
efivar_create_sysfs_entry(efivars,
variable_name_size,
variable_name, &vendor);
}
}
}
@ -1958,6 +1976,35 @@ void unregister_efivars(struct efivars *efivars)
}
EXPORT_SYMBOL_GPL(unregister_efivars);
/*
* Print a warning when duplicate EFI variables are encountered and
* disable the sysfs workqueue since the firmware is buggy.
*/
static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
unsigned long len16)
{
size_t i, len8 = len16 / sizeof(efi_char16_t);
char *s8;
/*
* Disable the workqueue since the algorithm it uses for
* detecting new variables won't work with this buggy
* implementation of GetNextVariableName().
*/
efivar_wq_enabled = false;
s8 = kzalloc(len8, GFP_KERNEL);
if (!s8)
return;
for (i = 0; i < len8; i++)
s8[i] = s16[i];
printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
s8, vendor_guid);
kfree(s8);
}
int register_efivars(struct efivars *efivars,
const struct efivar_operations *ops,
struct kobject *parent_kobj)
@ -2006,6 +2053,24 @@ int register_efivars(struct efivars *efivars,
&vendor_guid);
switch (status) {
case EFI_SUCCESS:
variable_name_size = var_name_strnsize(variable_name,
variable_name_size);
/*
* Some firmware implementations return the
* same variable name on multiple calls to
* get_next_variable(). Terminate the loop
* immediately as there is no guarantee that
* we'll ever see a different variable name,
* and may end up looping here forever.
*/
if (variable_is_present(variable_name, &vendor_guid)) {
dup_variable_bug(variable_name, &vendor_guid,
variable_name_size);
status = EFI_NOT_FOUND;
break;
}
efivar_create_sysfs_entry(efivars,
variable_name_size,
variable_name,
@ -2025,15 +2090,8 @@ int register_efivars(struct efivars *efivars,
if (error)
unregister_efivars(efivars);
efivars->efi_pstore_info = efi_pstore_info;
efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
if (efivars->efi_pstore_info.buf) {
efivars->efi_pstore_info.bufsize = 1024;
efivars->efi_pstore_info.data = efivars;
spin_lock_init(&efivars->efi_pstore_info.buf_lock);
pstore_register(&efivars->efi_pstore_info);
}
if (!efivars_pstore_disable)
efivar_pstore_register(efivars);
register_filesystem(&efivarfs_type);


@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!np)
return;
do {
for (;; index++) {
ret = of_parse_phandle_with_args(np, "gpio-ranges",
"#gpio-range-cells", index, &pinspec);
if (ret)
@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (ret)
break;
} while (index++);
}
}
#else

View File

@ -38,11 +38,12 @@
/* position control register for hardware window 0, 2 ~ 4.*/
#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
/* size control register for hardware window 0. */
#define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08)
/* alpha control register for hardware window 1 ~ 4. */
#define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16)
/* size control register for hardware window 1 ~ 4. */
/*
* size control register for hardware windows 0 and alpha control register
* for hardware windows 1 ~ 4
*/
#define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16)
/* size control register for hardware windows 1 ~ 2. */
#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
@ -50,9 +51,9 @@
#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
/* color key control register for hardware window 1 ~ 4. */
#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8))
#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8))
/* color key value register for hardware window 1 ~ 4. */
#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8))
#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
@ -109,9 +110,9 @@ struct fimd_context {
#ifdef CONFIG_OF
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,exynos4-fimd",
{ .compatible = "samsung,exynos4210-fimd",
.data = &exynos4_fimd_driver_data },
{ .compatible = "samsung,exynos5-fimd",
{ .compatible = "samsung,exynos5250-fimd",
.data = &exynos5_fimd_driver_data },
{},
};
@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos)
if (win != 3 && win != 4) {
u32 offset = VIDOSD_D(win);
if (win == 0)
offset = VIDOSD_C_SIZE_W0;
offset = VIDOSD_C(win);
val = win_data->ovl_width * win_data->ovl_height;
writel(val, ctx->regs + offset);


@ -48,8 +48,14 @@
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
#define G2D_SRC_COLOR_MODE 0x030C
#define G2D_SRC_LEFT_TOP 0x0310
#define G2D_SRC_RIGHT_BOTTOM 0x0314
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
#define G2D_DST_COLOR_MODE 0x040C
#define G2D_DST_LEFT_TOP 0x0410
#define G2D_DST_RIGHT_BOTTOM 0x0414
#define G2D_DST_PLANE2_BASE_ADDR 0x0418
#define G2D_PAT_BASE_ADDR 0x0500
#define G2D_MSK_BASE_ADDR 0x0520
@ -82,7 +88,7 @@
#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
/* G2D_DMA_HOLD_CMD */
#define G2D_USET_HOLD (1 << 2)
#define G2D_USER_HOLD (1 << 2)
#define G2D_LIST_HOLD (1 << 1)
#define G2D_BITBLT_HOLD (1 << 0)
@ -91,13 +97,27 @@
#define G2D_START_NHOLT (1 << 1)
#define G2D_START_BITBLT (1 << 0)
/* buffer color format */
#define G2D_FMT_XRGB8888 0
#define G2D_FMT_ARGB8888 1
#define G2D_FMT_RGB565 2
#define G2D_FMT_XRGB1555 3
#define G2D_FMT_ARGB1555 4
#define G2D_FMT_XRGB4444 5
#define G2D_FMT_ARGB4444 6
#define G2D_FMT_PACKED_RGB888 7
#define G2D_FMT_A8 11
#define G2D_FMT_L8 12
/* buffer valid length */
#define G2D_LEN_MIN 1
#define G2D_LEN_MAX 8000
#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
#define G2D_CMDLIST_NUM 64
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
#define MAX_BUF_ADDR_NR 6
/* maximum buffer pool size of userptr is 64MB as default */
#define MAX_POOL (64 * 1024 * 1024)
@ -106,6 +126,17 @@ enum {
BUF_TYPE_USERPTR,
};
enum g2d_reg_type {
REG_TYPE_NONE = -1,
REG_TYPE_SRC,
REG_TYPE_SRC_PLANE2,
REG_TYPE_DST,
REG_TYPE_DST_PLANE2,
REG_TYPE_PAT,
REG_TYPE_MSK,
MAX_REG_TYPE_NR
};
/* cmdlist data structure */
struct g2d_cmdlist {
u32 head;
@ -113,6 +144,42 @@ struct g2d_cmdlist {
u32 last; /* last data offset */
};
/*
* A structure of buffer description
*
* @format: color format
* @left_x: the x coordinates of left top corner
* @top_y: the y coordinates of left top corner
* @right_x: the x coordinates of right bottom corner
* @bottom_y: the y coordinates of right bottom corner
*
*/
struct g2d_buf_desc {
unsigned int format;
unsigned int left_x;
unsigned int top_y;
unsigned int right_x;
unsigned int bottom_y;
};
/*
* A structure of buffer information
*
* @map_nr: manages the number of mapped buffers
* @reg_types: stores register type in the order of requested command
* @handles: stores buffer handle in its reg_type position
* @types: stores buffer type in its reg_type position
* @descs: stores buffer description in its reg_type position
*
*/
struct g2d_buf_info {
unsigned int map_nr;
enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
unsigned long handles[MAX_REG_TYPE_NR];
unsigned int types[MAX_REG_TYPE_NR];
struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};
struct drm_exynos_pending_g2d_event {
struct drm_pending_event base;
struct drm_exynos_g2d_event event;
@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr {
bool in_pool;
bool out_of_list;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
unsigned int map_nr;
unsigned long handles[MAX_BUF_ADDR_NR];
unsigned int obj_type[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
struct g2d_buf_info buf_info;
struct drm_exynos_pending_g2d_event *event;
};
@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
struct g2d_buf_info *buf_info;
init_dma_attrs(&g2d->cmdlist_dma_attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
}
for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
unsigned int i;
node[nr].cmdlist =
g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
node[nr].dma_addr =
g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
buf_info = &node[nr].buf_info;
for (i = 0; i < MAX_REG_TYPE_NR; i++)
buf_info->reg_types[i] = REG_TYPE_NONE;
list_add_tail(&node[nr].list, &g2d->free_cmdlist);
}
@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
DMA_BIDIRECTIONAL);
if (ret < 0) {
DRM_ERROR("failed to map sgt with dma region.\n");
goto err_free_sgt;
goto err_sg_free_table;
}
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
return &g2d_userptr->dma_addr;
err_free_sgt:
err_sg_free_table:
sg_free_table(sgt);
err_free_sgt:
kfree(sgt);
sgt = NULL;
@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev,
g2d->current_pool = 0;
}
static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
{
enum g2d_reg_type reg_type;
switch (reg_offset) {
case G2D_SRC_BASE_ADDR:
case G2D_SRC_COLOR_MODE:
case G2D_SRC_LEFT_TOP:
case G2D_SRC_RIGHT_BOTTOM:
reg_type = REG_TYPE_SRC;
break;
case G2D_SRC_PLANE2_BASE_ADDR:
reg_type = REG_TYPE_SRC_PLANE2;
break;
case G2D_DST_BASE_ADDR:
case G2D_DST_COLOR_MODE:
case G2D_DST_LEFT_TOP:
case G2D_DST_RIGHT_BOTTOM:
reg_type = REG_TYPE_DST;
break;
case G2D_DST_PLANE2_BASE_ADDR:
reg_type = REG_TYPE_DST_PLANE2;
break;
case G2D_PAT_BASE_ADDR:
reg_type = REG_TYPE_PAT;
break;
case G2D_MSK_BASE_ADDR:
reg_type = REG_TYPE_MSK;
break;
default:
reg_type = REG_TYPE_NONE;
DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
break;
};
return reg_type;
}
static unsigned long g2d_get_buf_bpp(unsigned int format)
{
unsigned long bpp;
switch (format) {
case G2D_FMT_XRGB8888:
case G2D_FMT_ARGB8888:
bpp = 4;
break;
case G2D_FMT_RGB565:
case G2D_FMT_XRGB1555:
case G2D_FMT_ARGB1555:
case G2D_FMT_XRGB4444:
case G2D_FMT_ARGB4444:
bpp = 2;
break;
case G2D_FMT_PACKED_RGB888:
bpp = 3;
break;
default:
bpp = 1;
break;
}
return bpp;
}
static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
enum g2d_reg_type reg_type,
unsigned long size)
{
unsigned int width, height;
unsigned long area;
/*
* check source and destination buffers only.
* so the others are always valid.
*/
if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
return true;
width = buf_desc->right_x - buf_desc->left_x;
if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
DRM_ERROR("width[%u] is out of range!\n", width);
return false;
}
height = buf_desc->bottom_y - buf_desc->top_y;
if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
DRM_ERROR("height[%u] is out of range!\n", height);
return false;
}
area = (unsigned long)width * (unsigned long)height *
g2d_get_buf_bpp(buf_desc->format);
if (area > size) {
DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
return false;
}
return true;
}
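
A worked example of the validation above (all numbers illustrative):

	/*
	 * G2D_FMT_ARGB8888 -> 4 bytes per pixel (g2d_get_buf_bpp).
	 * left_top (0,0), right_bottom (100,100):
	 *   width  = 100, height = 100   (both within 1..8000)
	 *   area   = 100 * 100 * 4 = 40000 bytes
	 * The backing GEM or userptr buffer must be at least 40000
	 * bytes, otherwise the caller rejects the cmdlist with -EFAULT.
	 */
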
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_device *drm_dev,
struct drm_file *file)
{
struct g2d_cmdlist *cmdlist = node->cmdlist;
struct g2d_buf_info *buf_info = &node->buf_info;
int offset;
int ret;
int i;
for (i = 0; i < node->map_nr; i++) {
for (i = 0; i < buf_info->map_nr; i++) {
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
int reg_pos;
unsigned long handle;
dma_addr_t *addr;
offset = cmdlist->last - (i * 2 + 1);
handle = cmdlist->data[offset];
reg_pos = cmdlist->last - 2 * (i + 1);
offset = cmdlist->data[reg_pos];
handle = cmdlist->data[reg_pos + 1];
reg_type = g2d_get_reg_type(offset);
if (reg_type == REG_TYPE_NONE) {
ret = -EFAULT;
goto err;
}
buf_desc = &buf_info->descs[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
unsigned long size;
size = exynos_drm_gem_get_size(drm_dev, handle, file);
if (!size) {
ret = -EFAULT;
goto err;
}
if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
size)) {
ret = -EFAULT;
goto err;
}
if (node->obj_type[i] == BUF_TYPE_GEM) {
addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
file);
if (IS_ERR(addr)) {
node->map_nr = i;
return -EFAULT;
ret = -EFAULT;
goto err;
}
} else {
struct drm_exynos_g2d_userptr g2d_userptr;
if (copy_from_user(&g2d_userptr, (void __user *)handle,
sizeof(struct drm_exynos_g2d_userptr))) {
node->map_nr = i;
return -EFAULT;
ret = -EFAULT;
goto err;
}
if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
g2d_userptr.size)) {
ret = -EFAULT;
goto err;
}
addr = g2d_userptr_get_dma_addr(drm_dev,
@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
file,
&handle);
if (IS_ERR(addr)) {
node->map_nr = i;
return -EFAULT;
ret = -EFAULT;
goto err;
}
}
cmdlist->data[offset] = *addr;
node->handles[i] = handle;
cmdlist->data[reg_pos + 1] = *addr;
buf_info->reg_types[i] = reg_type;
buf_info->handles[reg_type] = handle;
}
return 0;
err:
buf_info->map_nr = i;
return ret;
}
static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
struct drm_file *filp)
{
struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
struct g2d_buf_info *buf_info = &node->buf_info;
int i;
for (i = 0; i < node->map_nr; i++) {
unsigned long handle = node->handles[i];
for (i = 0; i < buf_info->map_nr; i++) {
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
unsigned long handle;
if (node->obj_type[i] == BUF_TYPE_GEM)
reg_type = buf_info->reg_types[i];
buf_desc = &buf_info->descs[reg_type];
handle = buf_info->handles[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM)
exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
filp);
else
g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
false);
node->handles[i] = 0;
buf_info->reg_types[i] = REG_TYPE_NONE;
buf_info->handles[reg_type] = 0;
buf_info->types[reg_type] = 0;
memset(buf_desc, 0x00, sizeof(*buf_desc));
}
node->map_nr = 0;
buf_info->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d,
pm_runtime_get_sync(g2d->dev);
clk_enable(g2d->gate_clk);
/* interrupt enable */
writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
g2d->regs + G2D_INTEN);
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
struct g2d_data *g2d = container_of(work, struct g2d_data,
runqueue_work);
mutex_lock(&g2d->runqueue_mutex);
clk_disable(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev,
int i;
for (i = 0; i < nr; i++) {
struct g2d_buf_info *buf_info = &node->buf_info;
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
unsigned long value;
index = cmdlist->last - 2 * (i + 1);
if (for_addr) {
/* check userptr buffer type. */
reg_offset = (cmdlist->data[index] &
~0x7fffffff) >> 31;
if (reg_offset) {
node->obj_type[i] = BUF_TYPE_USERPTR;
cmdlist->data[index] &= ~G2D_BUF_USERPTR;
}
}
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
goto err;
if (reg_offset % 4)
@ -753,8 +967,60 @@ static int g2d_check_reg_offset(struct device *dev,
if (!for_addr)
goto err;
if (node->obj_type[i] != BUF_TYPE_USERPTR)
node->obj_type[i] = BUF_TYPE_GEM;
reg_type = g2d_get_reg_type(reg_offset);
if (reg_type == REG_TYPE_NONE)
goto err;
/* check userptr buffer type. */
if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
buf_info->types[reg_type] = BUF_TYPE_USERPTR;
cmdlist->data[index] &= ~G2D_BUF_USERPTR;
} else
buf_info->types[reg_type] = BUF_TYPE_GEM;
break;
case G2D_SRC_COLOR_MODE:
case G2D_DST_COLOR_MODE:
if (for_addr)
goto err;
reg_type = g2d_get_reg_type(reg_offset);
if (reg_type == REG_TYPE_NONE)
goto err;
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
buf_desc->format = value & 0xf;
break;
case G2D_SRC_LEFT_TOP:
case G2D_DST_LEFT_TOP:
if (for_addr)
goto err;
reg_type = g2d_get_reg_type(reg_offset);
if (reg_type == REG_TYPE_NONE)
goto err;
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
buf_desc->left_x = value & 0x1fff;
buf_desc->top_y = (value & 0x1fff0000) >> 16;
break;
case G2D_SRC_RIGHT_BOTTOM:
case G2D_DST_RIGHT_BOTTOM:
if (for_addr)
goto err;
reg_type = g2d_get_reg_type(reg_offset);
if (reg_type == REG_TYPE_NONE)
goto err;
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
buf_desc->right_x = value & 0x1fff;
buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
break;
default:
if (for_addr)
@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
cmdlist->data[cmdlist->last++] = 0;
/*
* 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG
* and GCF bit should be set to INTEN register if user wants
* G2D interrupt event once current command list execution is
* finished.
* Otherwise only ACF bit should be set to INTEN register so
* that one interrupt occurs after all command lists
* have been completed.
*/
if (node->event) {
cmdlist->data[cmdlist->last++] = G2D_INTEN;
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
} else {
cmdlist->data[cmdlist->last++] = G2D_INTEN;
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
}
/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
if (ret < 0)
goto err_free_event;
node->map_nr = req->cmd_buf_nr;
node->buf_info.map_nr = req->cmd_buf_nr;
if (req->cmd_buf_nr) {
struct drm_exynos_g2d_cmd *cmd_buf;


@ -164,6 +164,27 @@ out:
exynos_gem_obj = NULL;
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return 0;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
drm_gem_object_unreference_unlocked(obj);
return exynos_gem_obj->buffer->size;
}
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{


@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* get buffer size to gem handle. */
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *file_priv);
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);


@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev,
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kzalloc(edid_len, GFP_KERNEL);
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEBUG_KMS("failed to allocate edid\n");
return ERR_PTR(-ENOMEM);
}
memcpy(edid, ctx->raw_edid, edid_len);
return edid;
}
@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
if (!ctx->raw_edid) {
DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
return -ENOMEM;
}
memcpy(ctx->raw_edid, raw_edid, edid_len);
} else {
/*
* with connection = 0, free raw_edid


@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win)
mixer_ctx->win_data[win].enabled = false;
}
int mixer_check_timing(void *ctx, struct fb_videomode *timing)
static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
{
struct mixer_context *mixer_ctx = ctx;
u32 w, h;


@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support,
"Enable Haswell and ValleyView Support. "
"(default: false)");
int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: false)");
static struct drm_driver driver;
extern int intel_agp_enabled;


@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);


@ -5771,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
if (is_cpu_edp)
intel_crtc->cpu_transcoder = TRANSCODER_EDP;
else
intel_crtc->cpu_transcoder = pipe;
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
@ -5837,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int ret;
if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
intel_crtc->cpu_transcoder = TRANSCODER_EDP;
else
intel_crtc->cpu_transcoder = pipe;
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,


@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
dev_priv->backlight_enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
}
set_level:
/* Check the current backlight level and try to set again if it's zero.
* On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
* when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
* BLC_PWM_CPU_CTL may be cleared to zero automatically when these
* registers are set.
*/
if (!intel_panel_get_backlight(dev))
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
dev_priv->backlight_enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
static void intel_panel_init_backlight(struct drm_device *dev)


@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
if (!IS_HASWELL(dev))
return;
if (!i915_disable_power_well && !enable)
return;
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE;
enable_requested = tmp & HSW_PWR_WELL_ENABLE;


@ -590,6 +590,9 @@
#define USB_VENDOR_ID_MONTEREY 0x0566
#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
#define USB_VENDOR_ID_MSI 0x1770
#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00
#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
#define USB_DEVICE_ID_N_S_HARMONY 0xc359
@ -684,6 +687,9 @@
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
#define USB_VENDOR_ID_REALTEK 0x0bda
#define USB_DEVICE_ID_REALTEK_READER 0x0152
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c


@ -621,6 +621,7 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
{
struct mt_device *td = hid_get_drvdata(hid);
__s32 quirks = td->mtclass.quirks;
struct input_dev *input = field->hidinput->input;
if (hid->claimed & HID_CLAIMED_INPUT) {
switch (usage->hid) {
@ -670,13 +671,16 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
break;
default:
if (usage->type)
input_event(input, usage->type, usage->code,
value);
return;
}
if (usage->usage_index + 1 == field->report_count) {
/* we only take into account the last report. */
if (usage->hid == td->last_slot_field)
mt_complete_slot(td, field->hidinput->input);
mt_complete_slot(td, input);
if (field->index == td->last_field_index
&& td->num_received >= td->num_expected)


@ -73,6 +73,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
@ -80,6 +81,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },


@ -186,8 +186,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
wq->rq.memsize, &(wq->rq.dma_addr),
GFP_KERNEL);
if (!wq->rq.queue)
if (!wq->rq.queue) {
ret = -ENOMEM;
goto free_sq;
}
PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
__func__, wq->sq.queue,
(unsigned long long)virt_to_phys(wq->sq.queue),
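
The fix above closes a silent-failure path: when dma_alloc_coherent() for the RQ fails, the old code jumped to free_sq with a stale ret value left over from the earlier (successful) setup steps, so the failure could be misreported as success; it now returns -ENOMEM.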


@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
goto bail;
}
opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
dev->opstats[opcode].n_bytes += tlen;
dev->opstats[opcode].n_packets++;
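
The added mask keeps the statistics index in range: the BTH opcode field is 8 bits wide, while the mask restricts it to 0x00-0x7f so the dev->opstats[] lookups cannot index past the table (which appears to be sized for 128 opcodes).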


@ -1,7 +1,7 @@
config INFINIBAND_QIB
tristate "QLogic PCIe HCA support"
tristate "Intel PCIe HCA support"
depends on 64BIT
---help---
This is a low-level driver for QLogic PCIe QLE InfiniBand host
channel adapters. This driver does not support the QLogic
This is a low-level driver for Intel PCIe QLE InfiniBand host
channel adapters. This driver does not support the Intel
HyperTransport card (model QHT7140).


@ -1,4 +1,5 @@
/*
* Copyright (c) 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
"Attempt pre-IBTA 1.2 DDR speed negotiation");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic IB driver");
MODULE_AUTHOR("Intel <ibsupport@intel.com>");
MODULE_DESCRIPTION("Intel IB driver");
MODULE_VERSION(QIB_DRIVER_VERSION);
/*


@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
/*
* This file contains all the chip-specific register information and
- * access functions for the QLogic QLogic_IB PCI-Express chip.
+ * access functions for the Intel Intel_IB PCI-Express chip.
*
*/


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
-#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dd = qib_init_iba6120_funcs(pdev, ent);
#else
qib_early_err(&pdev->dev,
- "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
+ "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
ent->device);
dd = ERR_PTR(-ENODEV);
#endif
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
qib_early_err(&pdev->dev,
- "Failing on unknown QLogic deviceid 0x%x\n",
+ "Failing on unknown Intel deviceid 0x%x\n",
ent->device);
ret = -ENODEV;
}


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -44,7 +44,7 @@
#include "qib.h"
#include "qib_7220.h"
-#define SD7220_FW_NAME "qlogic/sd7220.fw"
+#define SD7220_FW_NAME "intel/sd7220.fw"
MODULE_FIRMWARE(SD7220_FW_NAME);
/*


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
ibdev->dma_ops = &qib_dma_mapping_ops;
snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
- "QLogic Infiniband HCA %s", init_utsname()->nodename);
+ "Intel Infiniband HCA %s", init_utsname()->nodename);
ret = ib_register_device(ibdev, qib_create_port_files);
if (ret)


@@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
- ipoib_warn(priv, "request notify on send CQ failed\n");
netif_stop_queue(dev);
+ rc = ib_req_notify_cq(priv->send_cq,
+ IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+ if (rc < 0)
+ ipoib_warn(priv, "request notify on send CQ failed\n");
+ else if (rc)
+ ipoib_send_comp_handler(priv->send_cq, dev);
}
}
}
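This hunk closes a transmit-queue stall: if a send completion lands between the last CQ poll and the re-arm call, a plain IB_CQ_NEXT_COMP request never fires and the stopped queue is never woken. With IB_CQ_REPORT_MISSED_EVENTS, ib_req_notify_cq() returns a positive value exactly when completions slipped in before arming, so the driver can run its completion handler by hand. A hedged kernel-style sketch of the generic pattern (the handler is a placeholder, not an ipoib symbol):

    extern void my_comp_handler(struct ib_cq *cq, void *ctx);   /* placeholder */

    /* Arm the CQ for the next completion, catching events that
     * arrived before the CQ was actually armed. */
    static void arm_cq_or_poll(struct ib_cq *cq, void *ctx)
    {
            int rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS);
            if (rc < 0)
                    pr_warn("request notify on CQ failed\n");
            else if (rc)
                    /* completions arrived before arming: no interrupt
                     * will come for them, so poll by hand */
                    my_comp_handler(cq, ctx);
    }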


@@ -130,7 +130,7 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP2PLUS
select IOMMU_API
config OMAP_IOVMM


@@ -2466,18 +2466,16 @@ static int device_change_notifier(struct notifier_block *nb,
/* allocate a protection domain if a device is added */
dma_domain = find_protection_domain(devid);
- if (dma_domain)
- goto out;
- dma_domain = dma_ops_domain_alloc();
- if (!dma_domain)
- goto out;
- dma_domain->target_dev = devid;
+ if (!dma_domain) {
+ dma_domain = dma_ops_domain_alloc();
+ if (!dma_domain)
+ goto out;
+ dma_domain->target_dev = devid;
- spin_lock_irqsave(&iommu_pd_list_lock, flags);
- list_add_tail(&dma_domain->list, &iommu_pd_list);
- spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
- dev_data = get_dev_data(dev);
+ spin_lock_irqsave(&iommu_pd_list_lock, flags);
+ list_add_tail(&dma_domain->list, &iommu_pd_list);
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+ }
dev->archdata.dma_ops = &amd_iommu_dma_ops;
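The restructure above is the usual "allocate only when the lookup misses, then fall through to a shared tail" shape; its point is that dev->archdata.dma_ops is now assigned on every add event, whereas the old early goto out skipped that assignment whenever a protection domain already existed, as for a hot-plugged device. A toy userspace analog, with every name invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct domain { int id; };
    static struct domain *cache[16];             /* toy stand-in for iommu_pd_list */

    static struct domain *get_domain(int id)
    {
            struct domain *d = cache[id & 15];   /* the "find" step */

            if (!d) {                            /* allocate only on a miss */
                    d = malloc(sizeof(*d));
                    if (!d)
                            return NULL;
                    d->id = id;
                    cache[id & 15] = d;          /* the "register" step */
            }
            /* shared tail: reached for found *and* fresh objects, like the
             * now-unconditional dma_ops assignment in the hunk above */
            return d;
    }

    int main(void)
    {
            printf("%p %p\n", (void *)get_domain(3), (void *)get_domain(3));
            return 0;
    }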


@@ -980,7 +980,7 @@ static void __init free_iommu_all(void)
* BIOS should disable L2B micellaneous clock gating by setting
* L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
*/
-static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
u32 value;
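Dropping __init here is a correctness fix rather than a cleanup: __init code lives in .init.text, which the kernel discards once boot completes, so calling the workaround from any later path (it is also reachable when the IOMMU is brought up again after boot, for instance on resume, which is presumably why the annotation had to go) would jump into freed memory. The rule of thumb, sketched below in kernel style: __init is safe only when every caller is itself __init.

    /* Runs only from early setup, so it may keep __init. */
    static void __init parse_early_options(void) { /* ... */ }

    /* Also reachable after boot (e.g. from a resume handler), so it
     * must NOT be __init: .init.text is freed once boot finishes. */
    static void apply_erratum_workaround(void) { /* ... */ }

    static int my_driver_resume(void)
    {
            apply_erratum_workaround();   /* still mapped: safe to call */
            return 0;
    }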


@@ -2,7 +2,6 @@
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/msi.h>
#include <linux/irq.h>


@@ -724,7 +724,7 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
if (enable) {
if (is_code(code, M5MOLS_RESTYPE_MONITOR))
ret = m5mols_start_monitor(info);
- if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
+ else if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
ret = m5mols_start_capture(info);
else
ret = -EINVAL;
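The else matters because the chain ends in a catch-all: without it, a successful m5mols_start_monitor() call is followed by the second if, whose else branch then overwrites ret with -EINVAL. Chained tests make the three outcomes mutually exclusive, as in this small runnable illustration (codes and values invented):

    #include <errno.h>
    #include <stdio.h>

    static int start(int code)
    {
            int ret;

            if (code == 0)
                    ret = 0;            /* "start_monitor" succeeded */
            else if (code == 1)
                    ret = 0;            /* "start_capture" succeeded */
            else
                    ret = -EINVAL;      /* only reached when nothing matched */

            return ret;
    }

    int main(void)
    {
            printf("%d\n", start(0));   /* prints 0; without "else" it would
                                           fall through and print -EINVAL */
            return 0;
    }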


@@ -250,17 +250,19 @@ static u8 SRAM_Table[][60] =
vdelay start of active video in 2 * field lines relative to
trailing edge of /VRESET pulse (VDELAY register).
sheight height of active video in 2 * field lines.
+ extraheight Added to sheight for cropcap.bounds.height only
videostart0 ITU-R frame line number of the line corresponding
to vdelay in the first field. */
#define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \
- vdelay, sheight, videostart0) \
+ vdelay, sheight, extraheight, videostart0) \
.cropcap.bounds.left = minhdelayx1, \
/* * 2 because vertically we count field lines times two, */ \
/* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \
.cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \
/* 4 is a safety margin at the end of the line. */ \
.cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \
- .cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY, \
+ .cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) - \
+ MIN_VDELAY, \
.cropcap.defrect.left = hdelayx1, \
.cropcap.defrect.top = (videostart0) * 2, \
.cropcap.defrect.width = swidth, \
@@ -301,9 +303,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* totalwidth */ 1135,
/* sqwidth */ 944,
/* vdelay */ 0x20,
- /* bt878 (and bt848?) can capture another
- line below active video. */
- /* sheight */ (576 + 2) + 0x20 - 2,
+ /* sheight */ 576,
+ /* bt878 (and bt848?) can capture another
+ line below active video. */
+ /* extraheight */ 2,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
@@ -330,6 +333,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 780,
/* vdelay */ 0x1a,
/* sheight */ 480,
+ /* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_SECAM,
@@ -355,6 +359,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 944,
/* vdelay */ 0x20,
/* sheight */ 576,
+ /* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_PAL_Nc,
@@ -380,6 +385,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 780,
/* vdelay */ 0x1a,
/* sheight */ 576,
+ /* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_PAL_M,
@@ -405,6 +411,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 780,
/* vdelay */ 0x1a,
/* sheight */ 480,
+ /* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_PAL_N,
@@ -430,6 +437,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 944,
/* vdelay */ 0x20,
/* sheight */ 576,
+ /* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_NTSC_M_JP,
@@ -455,6 +463,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 780,
/* vdelay */ 0x16,
/* sheight */ 480,
+ /* extraheight */ 0,
/* videostart0 */ 23)
},{
/* that one hopefully works with the strange timing
@@ -484,6 +493,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 944,
/* vdelay */ 0x1a,
/* sheight */ 480,
+ /* extraheight */ 0,
/* videostart0 */ 23)
}
};


@@ -1054,16 +1054,18 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
static int gsc_m2m_resume(struct gsc_dev *gsc)
{
struct gsc_ctx *ctx;
+ unsigned long flags;
+ spin_lock_irqsave(&gsc->slock, flags);
+ /* Clear for full H/W setup in first run after resume */
+ ctx = gsc->m2m.ctx;
+ gsc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&gsc->slock, flags);
if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
- gsc_m2m_job_finish(gsc->m2m.ctx,
- VB2_BUF_STATE_ERROR);
+ gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
return 0;
}
@@ -1204,7 +1206,7 @@ static int gsc_resume(struct device *dev)
/* Do not resume if the device was idle before system suspend */
spin_lock_irqsave(&gsc->slock, flags);
if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
- !gsc_m2m_active(gsc)) {
+ !gsc_m2m_opened(gsc)) {
spin_unlock_irqrestore(&gsc->slock, flags);
return 0;
}
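This hunk and the matching s5p-fimc change below fix the same race: the m2m context pointer is shared with the interrupt path, so resume now snapshots it and clears the shared field inside the spinlock, then finishes the job on the private copy with the lock dropped. A hedged sketch of the pattern (struct and helper names are placeholders, not driver symbols):

    static void my_m2m_resume(struct my_dev *dev)
    {
            struct my_ctx *ctx;
            unsigned long flags;

            spin_lock_irqsave(&dev->slock, flags);
            ctx = dev->m2m_ctx;       /* take a private copy... */
            dev->m2m_ctx = NULL;      /* ...and clear the shared field */
            spin_unlock_irqrestore(&dev->slock, flags);

            /* the IRQ path can no longer observe this context, so it is
             * safe to finish the job without holding the lock */
            if (test_and_clear_bit(ST_M2M_SUSPENDED, &dev->state))
                    my_job_finish(ctx, VB2_BUF_STATE_ERROR);
    }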


@@ -850,16 +850,18 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc)
static int fimc_m2m_resume(struct fimc_dev *fimc)
{
struct fimc_ctx *ctx;
+ unsigned long flags;
+ spin_lock_irqsave(&fimc->slock, flags);
+ /* Clear for full H/W setup in first run after resume */
+ ctx = fimc->m2m.ctx;
+ fimc->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&fimc->slock, flags);
if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
- fimc_m2m_job_finish(fimc->m2m.ctx,
- VB2_BUF_STATE_ERROR);
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
return 0;
}


@@ -128,10 +128,10 @@ static const u32 src_pixfmt_map[8][3] = {
void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
{
enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
- unsigned int i = ARRAY_SIZE(src_pixfmt_map);
+ int i = ARRAY_SIZE(src_pixfmt_map);
u32 cfg;
- while (i-- >= 0) {
+ while (--i >= 0) {
if (src_pixfmt_map[i][0] == pixelcode)
break;
}
@@ -224,9 +224,9 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
{ V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
};
u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
- unsigned int i = ARRAY_SIZE(pixcode);
+ int i = ARRAY_SIZE(pixcode);
- while (i-- >= 0)
+ while (--i >= 0)
if (pixcode[i][0] == dev->fmt->mbus_code)
break;
cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
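Both loops had the same bug: with an unsigned i, the condition i-- >= 0 is always true, so after the i == 0 pass i wraps to UINT_MAX and the next table lookup reads far out of bounds. A signed i with pre-decrement tests the decremented value and visits exactly indices N-1 down to 0. A runnable comparison:

    #include <stdio.h>

    int main(void)
    {
            static const int table[4] = { 10, 20, 30, 40 };
            int i = sizeof(table) / sizeof(table[0]);

            /* Correct reverse scan: tests 3, 2, 1, 0 and then stops,
             * because --i makes the test see the decremented value. */
            while (--i >= 0)
                    printf("table[%d] = %d\n", i, table[i]);

            /* With "unsigned int i" and "while (i-- >= 0)" the condition
             * is always true (unsigned values are never < 0), i wraps to
             * UINT_MAX after the i == 0 pass, and table[i] would read out
             * of bounds on the next iteration. */
            return 0;
    }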


@@ -1408,6 +1408,7 @@ static const struct v4l2_ctrl_config fimc_lite_ctrl = {
.id = V4L2_CTRL_CLASS_USER | 0x1001,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Test Pattern 640x480",
+ .step = 1,
};
static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)


@@ -827,7 +827,7 @@ static int fimc_md_link_notify(struct media_pad *source,
struct fimc_pipeline *pipeline;
struct v4l2_subdev *sd;
struct mutex *lock;
- int ret = 0;
+ int i, ret = 0;
int ref_count;
if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
@@ -854,29 +854,28 @@ static int fimc_md_link_notify(struct media_pad *source,
return 0;
}
+ mutex_lock(lock);
+ ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- int i;
- mutex_lock(lock);
- ret = __fimc_pipeline_close(pipeline);
+ if (ref_count > 0) {
+ ret = __fimc_pipeline_close(pipeline);
+ if (!ret && fimc)
+ fimc_ctrls_delete(fimc->vid_cap.ctx);
+ }
for (i = 0; i < IDX_MAX; i++)
pipeline->subdevs[i] = NULL;
- if (fimc)
- fimc_ctrls_delete(fimc->vid_cap.ctx);
- mutex_unlock(lock);
- return ret;
+ } else if (ref_count > 0) {
+ /*
+ * Link activation. Enable power of pipeline elements only if
+ * the pipeline is already in use, i.e. its video node is open.
+ * Recreate the controls destroyed during the link deactivation.
+ */
+ ret = __fimc_pipeline_open(pipeline,
+ source->entity, true);
+ if (!ret && fimc)
+ ret = fimc_capture_ctrls_create(fimc);
}
- /*
- * Link activation. Enable power of pipeline elements only if the
- * pipeline is already in use, i.e. its video node is opened.
- * Recreate the controls destroyed during the link deactivation.
- */
- mutex_lock(lock);
- ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
- if (ref_count > 0)
- ret = __fimc_pipeline_open(pipeline, source->entity, true);
- if (!ret && fimc)
- ret = fimc_capture_ctrls_create(fimc);
mutex_unlock(lock);
return ret ? -EPIPE : ret;


@@ -276,7 +276,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
unsigned int frame_type;
dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
- frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
+ frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx);
/* If frame is same as previous then skip and do not dequeue */
if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {


@@ -232,6 +232,7 @@ static struct mfc_control controls[] = {
.minimum = 0,
.maximum = 1,
.default_value = 0,
+ .step = 1,
.menu_skip_mask = 0,
},
{


@@ -291,7 +291,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
- depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM
+ depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM
---help---
Say Y or M here if you want to enable support for the IR
transmitter diode built in the Nokia N900 (RX51) device.

Some files were not shown because too many files have changed in this diff.