commit 234d15def9
Merge remote-tracking branch 'origin/master' into next

Merge upstream to get the audit fixes
@@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			is selected automatically. Check
			Documentation/kdump/kdump.txt for further details.

	crashkernel_low=size[KMG]
			[KNL, x86] parts under 4G.

	crashkernel=range1:size1[,range2:size2,...][@offset]
			[KNL] Same as above, but depends on the memory
			in the running system. The syntax of range is
@@ -606,6 +603,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			a memory unit (amount[KMG]). See also
			Documentation/kdump/kdump.txt for an example.

	crashkernel=size[KMG],high
			[KNL, x86_64] range could be above 4G. Allow kernel
			to allocate physical memory region from top, so could
			be above 4G if system have more than 4G ram installed.
			Otherwise memory region will be allocated below 4G, if
			available.
			It will be ignored if crashkernel=X is specified.
	crashkernel=size[KMG],low
			[KNL, x86_64] range under 4G. When crashkernel=X,high
			is passed, kernel could allocate physical memory region
			above 4G, that cause second kernel crash on system
			that require some amount of low memory, e.g. swiotlb
			requires at least 64M+32K low memory. Kernel would
			try to allocate 72M below 4G automatically.
			This one let user to specify own low range under 4G
			for second kernel instead.
			0: to disable low allocation.
			It will be ignored when crashkernel=X,high is not used
			or memory reserved is below 4G.

	cs89x0_dma=	[HW,NET]
			Format: <dma>

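For illustration, a hypothetical command line (not part of the patch) that
reserves 256M of crash kernel memory above 4G together with an explicit 72M
low region for swiotlb would combine the two new options:

	crashkernel=256M,high crashkernel=72M,low
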
@@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
	edd=		[EDD]
			Format: {"off" | "on" | "skip[mbr]"}

	efi_no_storage_paranoia [EFI; X86]
			Using this parameter you can use more than 50% of
			your efi variable storage. Use this parameter only if
			you are really sure that your UEFI does sane gc and
			fulfills the spec otherwise your board may brick.

	eisa_irq_edge=	[PARISC,HW]
			See header of drivers/parisc/eisa.c.

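As with the other entries in this file, the option is simply appended to the
kernel command line; an illustrative example (the surrounding parameters are
hypothetical, not from the patch):

	BOOT_IMAGE=/vmlinuz root=/dev/sda1 efi_no_storage_paranoia
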
Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Unicycling Gorilla

# *DOCUMENTATION*
@@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
	 asm-generic
	$(Q)$(MAKE) $(build)=$(@)

# Objects we will link into vmlinux / subdirs we need to visit
@@ -19,14 +19,6 @@
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
 * IOP3XX processor registers
 */
#define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000
#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000
#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfedfe000
#define IOP3XX_PERIPHERAL_SIZE		0x00002000
#define IOP3XX_PERIPHERAL_UPPER_PA	(IOP3XX_PERIPHERAL_PHYS_BASE +\
					IOP3XX_PERIPHERAL_SIZE - 1)
@@ -111,7 +111,7 @@
#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1] */
#define L_PTE_S2_RDWR			(_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */

/*
 * Hyp-mode PL2 PTE definitions for LPAE.
@@ -14,7 +14,6 @@
#include <asm/glue.h>

#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
@@ -22,7 +21,6 @@
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
@@ -52,7 +50,6 @@
 * =============
 *
 * We have the following to choose from:
 *   v3    - ARMv3
 *   v4    - ARMv4 without write buffer
 *   v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *   v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
	if (tlb_flag(TLB_WB))
		dsb();

	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
	if (tlb_flag(TLB_WB))
		dsb();

	if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
			tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
	if (tlb_flag(TLB_WB))
		dsb();

	if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
	if (tlb_flag(TLB_WB))
		dsb();

	tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
static struct notifier_block dbg_cpu_pm_nb = {
	.notifier_call = dbg_cpu_pm_notify,
};

@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;
@@ -56,7 +56,6 @@
#include <asm/virt.h>

#include "atags.h"
#include "tcm.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -798,8 +797,6 @@ void __init setup_arch(char **cmdline_p)

	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif
@@ -17,7 +17,6 @@
#include <asm/mach/map.h>
#include <asm/memory.h>
#include <asm/system_info.h>
#include "tcm.h"

static struct gen_pool *tcm_pool;
static bool dtcm_present;
@@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext)
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
	u32 val;
	int cpu;

	cpu = get_cpu();

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

@@ -28,13 +28,11 @@ extern void secondary_startup(void);
 */
void __ref highbank_cpu_die(unsigned int cpu)
{
	flush_cache_all();

	highbank_set_cpu_jump(cpu, phys_to_virt(0));

	flush_cache_louis();
	highbank_set_core_pwr();

	cpu_do_idle();

	/* We should never return from idle */
	panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
	while (1)
		cpu_do_idle();
}

@@ -43,7 +43,7 @@ config CPU_ARM740T
	depends on !MMU
	select CPU_32v4T
	select CPU_ABRT_LV4T
	select CPU_CACHE_V3	# although the core is v4t
	select CPU_CACHE_V4
	select CPU_CP15_MPU
	select CPU_PABRT_LEGACY
	help
@@ -469,9 +469,6 @@ config CPU_PABRT_V7
	bool

# The cache model
config CPU_CACHE_V3
	bool

config CPU_CACHE_V4
	bool

@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6)	+= pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7)	+= pabort-v7.o

obj-$(CONFIG_CPU_CACHE_V3)	+= cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4)	+= cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT)	+= cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
	outer_cache.inv_range = feroceon_l2_inv_range;
	outer_cache.clean_range = feroceon_l2_clean_range;
	outer_cache.flush_range = feroceon_l2_flush_range;
	outer_cache.inv_all = l2_inv_all;

	enable_l2();
@@ -1,137 +0,0 @@
/*
 *  linux/arch/arm/mm/cache-v3.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v3_flush_icache_all)
	mov	pc, lr
ENDPROC(v3_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v3_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v3_flush_kern_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v3_flush_user_cache_range)
	mov	ip, #0
	mcreq	p15, 0, ip, c7, c0, 0		@ flush ID cache
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v3_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v3_coherent_user_range)
	mov	r0, #0
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *page, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v3_flush_kern_dcache_area)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v3_dma_flush_range)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
	mov	pc, lr

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v3_dma_unmap_area)
	teq	r2, #DMA_TO_DEVICE
	bne	v3_dma_flush_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v3_dma_map_area)
	mov	pc, lr
ENDPROC(v3_dma_unmap_area)
ENDPROC(v3_dma_map_area)

	.globl	v3_flush_kern_cache_louis
	.equ	v3_flush_kern_cache_louis, v3_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v3
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
	mcreq	p15, 0, ip, c7, c7, 0		@ flush ID cache
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	mov	pc, lr
#else
	/* FALLTHROUGH */
@@ -34,6 +34,7 @@
#include <asm/mach/pci.h>

#include "mm.h"
#include "tcm.h"

/*
 * empty_zero_page is a special page that is used for
@@ -1277,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc)
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();

	top_pmd = pmd_off_k(0xffff0000);
@@ -77,24 +77,27 @@ __arm740_setup:
	mcr	p15, 0, r0, c6, c0		@ set area 0, default

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000)	@ base[31:12] of RAM
	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	mov	r1, r1, lsr #1
	ldr	r3, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
	mov	r4, #10				@ 11 is the minimum (4KB)
1:	add	r4, r4, #1			@ area size *= 2
	movs	r3, r3, lsr #1
	bne	1b				@ count not zero r-shift
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, r4, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c1		@ set area 1, RAM

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	mov	r1, r1, lsr #1
	ldr	r3, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
	cmp	r3, #0
	moveq	r0, #0
	beq	2f
	mov	r4, #10				@ 11 is the minimum (4KB)
1:	add	r4, r4, #1			@ area size *= 2
	movs	r3, r3, lsr #1
	bne	1b				@ count not zero r-shift
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, r4, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c2		@ set area 2, ROM/FLASH
2:	mcr	p15, 0, r0, c6, c2		@ set area 2, ROM/FLASH

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0		@ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
	.long	0x41807400
	.long	0xfffffff0
	.long	0
	.long	0
	b	__arm740_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
	.long	cpu_arm740_name
	.long	arm740_processor_functions
	.long	0
	.long	0
	.long	v3_cache_fns			@ cache model
	.long	v4_cache_fns			@ cache model
	.size	__arm740_proc_info, . - __arm740_proc_info
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl	cpu_arm920_suspend_size
.equ	cpu_arm920_suspend_size, 4 * 3
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl	cpu_arm926_suspend_size
.equ	cpu_arm926_suspend_size, 4 * 3
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm926_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)

.globl	cpu_mohawk_suspend_size
.equ	cpu_mohawk_suspend_size, 4 * 6
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_mohawk_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)

.globl	cpu_sa1100_suspend_size
.equ	cpu_sa1100_suspend_size, 4 * 3
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_sa1100_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c3, c0, 0		@ domain ID
@@ -17,7 +17,9 @@

#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(cpu_set_pte_ext);
#endif
#else
EXPORT_SYMBOL(processor);
#endif

@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl	cpu_v6_suspend_size
.equ	cpu_v6_suspend_size, 4 * 6
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v6_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)

.globl	cpu_xsc3_suspend_size
.equ	cpu_xsc3_suspend_size, 4 * 6
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)

.globl	cpu_xscale_suspend_size
.equ	cpu_xscale_suspend_size, 4 * 6
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
@@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)
#define readw_be	__raw_readw
#define readl_be	__raw_readl

#define writeb_relaxed	writeb
#define writew_relaxed	writew
#define writel_relaxed	writel

#define writeb_be	__raw_writeb
#define writew_be	__raw_writew
#define writel_be	__raw_writel
@@ -304,7 +304,7 @@ syscall_exit_work:
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	SET_DEFAULT_THREAD_PPR(r3, r9)		/* Set thread.ppr = 3 */
	SET_DEFAULT_THREAD_PPR(r3, r10)		/* Set thread.ppr = 3 */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

@@ -657,7 +657,7 @@ resume_kernel:
	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
	ldarx	r4,0,r5
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		regs->msr |= MSR_VEC;
	}
#endif

	return 0;
}

@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		regs->msr |= MSR_VEC;
	}
#endif

	return err;
}

@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
	or	r5, r6, r5			/* Set MSR.FP+.VSX/.VEC */
	mtmsr	r5

#ifdef CONFIG_ALTIVEC
	/* FP and VEC registers:  These are recheckpointed from thread.fpr[]
	 * and thread.vr[] respectively.  The thread.transact_fpr[] version
	 * is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
	REST_32VRS(0, r5, r3)			/* r5 scratch, r3 THREAD ptr */
	ld	r5, THREAD_VRSAVE(r3)
	mtspr	SPRN_VRSAVE, r5
#endif

dont_restore_vec:
	andi.	r0, r4, MSR_FP
@@ -26,17 +26,20 @@
#define E500_PID_NUM   3
#define E500_TLB_NUM   2

#define E500_TLB_VALID 1
#define E500_TLB_BITMAP 2
/* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID		(1 << 0)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP		(1 << 1)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0		(1 << 2)

struct tlbe_ref {
	pfn_t pfn;
	unsigned int flags;	/* E500_TLB_* */
	pfn_t pfn;		/* valid only for TLB0, except briefly */
	unsigned int flags;	/* E500_TLB_* */
};

struct tlbe_priv {
	struct tlbe_ref ref;	/* TLB0 only -- TLB1 uses tlb_refs */
	struct tlbe_ref ref;
};

#ifdef CONFIG_KVM_E500V2
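A minimal illustrative sketch (hypothetical helper, not from the patch) of how
the reworked flags are meant to be tested now that E500_TLB_VALID,
E500_TLB_BITMAP and E500_TLB_TLB0 are distinct bits that can be set together:

	static inline bool ref_host_mapped_via_tlb0(const struct tlbe_ref *ref)
	{
		/* valid and backed by a host TLB0 entry at the same time */
		return (ref->flags & (E500_TLB_VALID | E500_TLB_TLB0)) ==
		       (E500_TLB_VALID | E500_TLB_TLB0);
	}
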
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {

	unsigned int gtlb_nv[E500_TLB_NUM];

	/*
	 * information associated with each host TLB entry --
	 * TLB1 only for now.  If/when guest TLB1 entries can be
	 * mapped with host TLB0, this will be used for that too.
	 *
	 * We don't want to use this for guest TLB0 because then we'd
	 * have the overhead of doing the translation again even if
	 * the entry is still in the guest TLB (e.g. we swapped out
	 * and back, and our host TLB entries got evicted).
	 */
	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
	unsigned int host_tlb1_nv;

	u32 svr;
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID))
		return;
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;
	ref->flags |= E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel = 0;
	int tlbsel;
	int i;

	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
		kvmppc_e500_ref_release(ref);
	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel = 1;
	int i;

	kvmppc_e500_tlbil_all(vcpu_e500);

	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->tlb_refs[stlbsel][i];
		kvmppc_e500_ref_release(ref);
	}

	clear_tlb_privs(vcpu_e500);
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	clear_tlb_refs(vcpu_e500);
	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	/* Drop old ref and setup new one. */
	kvmppc_e500_ref_release(ref);
	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	vcpu_e500->tlb_refs[1][sesel] = *ref;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref ref;
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	ref.flags = 0;
	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   &ref);
				   ref);
	if (r)
		return r;

@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_refs or on initial mapping */
		/* Triggers after clear_tlb_privs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->tlb_refs[0] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[0])
		goto err;

	vcpu_e500->tlb_refs[1] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[1])
		goto err;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		goto err;
		return -EINVAL;

	return 0;

err:
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	return -EINVAL;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
}
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
	mtspr(SPRN_GESR, vcpu->arch.shared->esr);

	if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
	    __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
		kvmppc_e500_tlbil_all(vcpu_e500);
		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
	}

	kvmppc_load_guest_fp(vcpu);
}
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
#define ioremap_nocache(addr, size)	ioremap(addr, size)
#define ioremap_wc			ioremap_nocache

/* TODO: s390 cannot support io_remap_pfn_range... */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	return (void __iomem *) offset;
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;
	 (((unsigned long)(vaddr)) &zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* !__ASSEMBLY__ */

/*
@@ -2,11 +2,16 @@

generic-y += clkdev.h
generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += local64.h
generic-y += mutex.h
generic-y += irq_regs.h
generic-y += local.h
generic-y += module.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
@@ -1,6 +0,0 @@
#ifndef __SPARC_CPUTIME_H
#define __SPARC_CPUTIME_H

#include <asm-generic/cputime.h>

#endif /* __SPARC_CPUTIME_H */
@@ -1,6 +0,0 @@
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H

#include <asm-generic/emergency-restart.h>

#endif /* _ASM_EMERGENCY_RESTART_H */
@@ -1,9 +0,0 @@
/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 */

#include <asm-generic/mutex-dec.h>
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}

#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
@@ -1,6 +0,0 @@
#ifndef __SPARC_SERIAL_H
#define __SPARC_SERIAL_H

#define BASE_BAUD ( 1843200 / 16 )

#endif /* __SPARC_SERIAL_H */
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
		       unsigned long, unsigned long);

void cpu_panic(void);
extern void smp4m_irq_rotate(int cpu);

/*
 *	General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);

#define raw_smp_processor_id()		(current_thread_info()->cpu)

#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter

void smp_setup_cpu_possible_map(void);

#endif /* !(__ASSEMBLY__) */
@@ -18,8 +18,7 @@ do {						\
	 * and 2 stores in this critical code path.  -DaveM
	 */
#define switch_to(prev, next, last)					\
do {	flush_tlb_pending();						\
	save_and_clear_fpu();						\
do {	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
@@ -11,24 +11,40 @@
struct tlb_batch {
	struct mm_struct *mm;
	unsigned long tlb_nr;
	unsigned long active;
	unsigned long vaddrs[TLB_BATCH_NR];
};

extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tsb_user(struct tlb_batch *tb);
extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);

/* TLB flush operations. */

extern void flush_tlb_pending(void);
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

#define flush_tlb_range(vma,start,end)	\
	do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_page(vma,addr)	flush_tlb_pending()
#define flush_tlb_mm(mm)		flush_tlb_pending()
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

extern void flush_tlb_pending(void);
extern void arch_enter_lazy_mmu_mode(void);
extern void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()      do {} while (0)

/* Local cpu only.  */
extern void __flush_tlb_all(void);
extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do {	flush_tsb_kernel_range(start,end); \
	__flush_tlb_kernel_range(start,end); \
} while (0)

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define flush_tlb_kernel_range(start, end) \
do {	flush_tsb_kernel_range(start,end); \
	smp_flush_tlb_kernel_range(start, end); \
} while (0)

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */

@@ -44,7 +44,6 @@ header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += traps.h
header-y += types.h
header-y += uctx.h
header-y += unistd.h
header-y += utrap.h
@@ -1,17 +0,0 @@
#ifndef _SPARC_TYPES_H
#define _SPARC_TYPES_H
/*
 * This file is never included by application software unless
 * explicitly requested (e.g., via linux/types.h) in which case the
 * application is Linux specific so (user-) name space pollution is
 * not a major issue.  However, for interoperability, libraries still
 * need to be careful to avoid a name clashes.
 */

#if defined(__sparc__)

#include <asm-generic/int-ll64.h>

#endif /* defined(__sparc__) */

#endif /* defined(_SPARC_TYPES_H) */
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,19 +1074,52 @@ local_flush_and_out:
	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));

	__flush_tlb_pending(ctx, nr, vaddrs);
	__flush_tlb_page(context, vaddr);

	put_cpu();
}
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)

void bit_map_init(struct bit_map *t, unsigned long *map, int size)
{
	if ((size & 07) != 0)
		BUG();
	memset(map, 0, size>>3);

	bitmap_zero(map, size);
	memset(t, 0, sizeof *t);
	spin_lock_init(&t->lock);
	t->map = map;
@@ -34,7 +34,7 @@
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 265KB */
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

/* srmmu.c */
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
				 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	srmmu_nocache_bitmap =
		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
				SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
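A worked example with hypothetical numbers (not from the patch) of why the
bitmap allocation is now sized in whole longs: helpers such as bitmap_zero()
read and write entire unsigned longs, so for, say, bitmap_bits = 100 on a
64-bit kernel the old "bitmap_bits >> 3" yields only 12 bytes, while
BITS_TO_LONGS(100) * sizeof(long) = 2 * 8 = 16 bytes also covers the final
partial word.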
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (tb->tlb_nr) {
		flush_tsb_user(tb);
	if (!tb->tlb_nr)
		goto out;

		if (CTX_VALID(tb->mm->context)) {
	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
		nr = 0;
	}

	if (!tb->active) {
		global_flush_tlb_page(mm, vaddr);
		flush_tsb_user_page(mm, vaddr);
		return;
	}

	if (nr == 0)
		tb->mm = mm;

@@ -7,11 +7,10 @@
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
	}
}

static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++) {
		unsigned long v = tb->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
@@ -52,6 +52,33 @@ __flush_tlb_mm:		/* 18 insns */
	nop
	nop

	.align		32
	.globl		__flush_tlb_page
__flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0
	andn		%o1, 1, %o3
	be,pn		%icc, 1f
	 or		%o3, 0x10, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_pending
__flush_tlb_pending:	/* 26 insns */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
	retl
	 wrpr		%g7, 0x0, %pstate

__cheetah_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0
	be,pn		%icc, 1f
	 andn		%o1, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr		%pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
	retl
	 nop

__hypervisor_flush_tlb_page: /* 11 insns */
	/* %o0 = context, %o1 = vaddr */
	mov		%o0, %g2
	mov		%o1, %o0	/* ARG0: vaddr + IMMU-bit */
	mov		%g2, %o1	/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	 nop

__hypervisor_flush_tlb_pending: /* 16 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx		%o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__cheetah_flush_tlb_page), %o1
	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		22, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm:	/* 21 insns */
	nop
	nop

	.globl		xcall_flush_tlb_pending
xcall_flush_tlb_pending:	/* 21 insns */
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	sllx		%g1, 3, %g1
	.globl		xcall_flush_tlb_page
xcall_flush_tlb_page:	/* 17 insns */
	/* %g5=context, %g1=vaddr */
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending:	/* 21 insns */
	or		%g5, %g4, %g5
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %g5
	andcc		%g5, 0x1, %g0
	andcc		%g1, 0x1, %g0
	be,pn		%icc, 2f
	 andn		%g5, 0x1, %g5
	 andn		%g1, 0x1, %g5
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%g1, 1b
	 nop
	stxa		%g2, [%g4] ASI_DMMU
	retry
	nop
	nop

	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
	sllx		%g1, 3, %g1
	.globl		__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
	/* %g5=ctx, %g1=vaddr */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */
	mov		%g1, %o0		/* ARG0: virtual address */
	mov		%g5, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	brnz,pt		%g1, 1b
	 nop
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
	call		tlb_patch_one
	 mov		10, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__hypervisor_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		11, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
	call		tlb_patch_one
	 mov		21, %o2

	sethi		%hi(xcall_flush_tlb_pending), %o0
	or		%o0, %lo(xcall_flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
	sethi		%hi(xcall_flush_tlb_page), %o0
	or		%o0, %lo(xcall_flush_tlb_page), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		21, %o2
	 mov		17, %o2

	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
@@ -1549,6 +1549,7 @@ config X86_SMAP
config EFI
	bool "EFI runtime service support"
	depends on ACPI
	select UCS2_STRING
	---help---
	  This enables the kernel to use EFI runtime services that are
	  available (such as the EFI variable services).
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
	*size = len;
}

static efi_status_t setup_efi_vars(struct boot_params *params)
{
	struct setup_data *data;
	struct efi_var_bootdata *efidata;
	u64 store_size, remaining_size, var_size;
	efi_status_t status;

	if (!sys_table->runtime->query_variable_info)
		return EFI_UNSUPPORTED;

	data = (struct setup_data *)(unsigned long)params->hdr.setup_data;

	while (data && data->next)
		data = (struct setup_data *)(unsigned long)data->next;

	status = efi_call_phys4(sys_table->runtime->query_variable_info,
				EFI_VARIABLE_NON_VOLATILE |
				EFI_VARIABLE_BOOTSERVICE_ACCESS |
				EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
				&remaining_size, &var_size);

	if (status != EFI_SUCCESS)
		return status;

	status = efi_call_phys3(sys_table->boottime->allocate_pool,
				EFI_LOADER_DATA, sizeof(*efidata), &efidata);

	if (status != EFI_SUCCESS)
		return status;

	efidata->data.type = SETUP_EFI_VARS;
	efidata->data.len = sizeof(struct efi_var_bootdata) -
		sizeof(struct setup_data);
	efidata->data.next = 0;
	efidata->store_size = store_size;
	efidata->remaining_size = remaining_size;
	efidata->max_var_size = var_size;

	if (data)
		data->next = (unsigned long)efidata;
	else
		params->hdr.setup_data = (unsigned long)efidata;

	return status;
}

static efi_status_t setup_efi_pci(struct boot_params *params)
{
	efi_pci_io_protocol *pci;
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,

	setup_graphics(boot_params);

	setup_efi_vars(boot_params);

	setup_efi_pci(boot_params);

	status = efi_call_phys3(sys_table->boottime->allocate_pool,
@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);

struct efi_var_bootdata {
    struct setup_data data;
    u64 store_size;
    u64 remaining_size;
    u64 max_var_size;
};

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)

@ -6,6 +6,7 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
#define SETUP_EFI_VARS 4

/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF

@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
    if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
        return false;

    /*
     * Xen emulates Hyper-V to support enlightened Windows.
     * Check to see first if we are on a Xen Hypervisor.
     */
    if (xen_cpuid_base())
        return false;

    cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
          &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);

@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)

    if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
        clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
#if IS_ENABLED(CONFIG_HYPERV)
    /*
     * Setup the IDT for hypervisor callback.
     */
    alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
#endif
}

const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;

void hv_register_vmbus_handler(int irq, irq_handler_t handler)
{
    /*
     * Setup the IDT for hypervisor callback.
     */
    alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);

    vmbus_irq = irq;
    vmbus_isr = handler;
}

@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
    INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
    INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
    INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
    INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
    EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
    INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
    INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
    EVENT_EXTRA_END
};

@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void)
        x86_pmu.event_constraints = intel_snb_event_constraints;
        x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
        x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
        x86_pmu.extra_regs = intel_snb_extra_regs;
        if (boot_cpu_data.x86_model == 45)
            x86_pmu.extra_regs = intel_snbep_extra_regs;
        else
            x86_pmu.extra_regs = intel_snb_extra_regs;
        /* all extra regs are per-cpu when HT is on */
        x86_pmu.er_flags |= ERF_HAS_RSP_1;
        x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void)
        x86_pmu.event_constraints = intel_ivb_event_constraints;
        x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
        x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
        x86_pmu.extra_regs = intel_snb_extra_regs;
        if (boot_cpu_data.x86_model == 62)
            x86_pmu.extra_regs = intel_snbep_extra_regs;
        else
            x86_pmu.extra_regs = intel_snb_extra_regs;
        /* all extra regs are per-cpu when HT is on */
        x86_pmu.er_flags |= ERF_HAS_RSP_1;
        x86_pmu.er_flags |= ERF_NO_HT_SHARING;

@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)
    u32 eax = 0x00000000;
    u32 ebx, ecx = 0, edx;

    if (!have_cpuid_p())
        return X86_VENDOR_UNKNOWN;

    native_cpuid(&eax, &ebx, &ecx, &edx);

    if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)
    return X86_VENDOR_UNKNOWN;
}

static int __cpuinit x86_family(void)
{
    u32 eax = 0x00000001;
    u32 ebx, ecx = 0, edx;
    int x86;

    native_cpuid(&eax, &ebx, &ecx, &edx);

    x86 = (eax >> 8) & 0xf;
    if (x86 == 15)
        x86 += (eax >> 20) & 0xff;

    return x86;
}

void __init load_ucode_bsp(void)
{
    int vendor = x86_vendor();
    int vendor, x86;

    if (vendor == X86_VENDOR_INTEL)
    if (!have_cpuid_p())
        return;

    vendor = x86_vendor();
    x86 = x86_family();

    if (vendor == X86_VENDOR_INTEL && x86 >= 6)
        load_ucode_intel_bsp();
}

void __cpuinit load_ucode_ap(void)
{
    int vendor = x86_vendor();
    int vendor, x86;

    if (vendor == X86_VENDOR_INTEL)
    if (!have_cpuid_p())
        return;

    vendor = x86_vendor();
    x86 = x86_family();

    if (vendor == X86_VENDOR_INTEL && x86 >= 6)
        load_ucode_intel_ap();
}
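
[Illustrative note: x86_family() above implements the usual CPUID family
decoding - the base family sits in bits 11:8 of CPUID(1).EAX and the extended
family (bits 27:20) is added only when the base family is 0xf. A worked example
with a hypothetical signature value:]

u32 eax = 0x000206a7;           /* e.g. family 6, model 0x2a, stepping 7 */
int x86 = (eax >> 8) & 0xf;     /* base family = 6 */
if (x86 == 15)                  /* only family 0xf adds the extended field */
    x86 += (eax >> 20) & 0xff;
/* x86 == 6, so the "vendor == X86_VENDOR_INTEL && x86 >= 6" check passes */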
@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)
/*
 * Keep the crash kernel below this limit. On 32 bits earlier kernels
 * would limit the kernel to the low 512 MiB due to mapping restrictions.
 * On 64bit, old kexec-tools need to under 896MiB.
 */
#ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_MAX (512 << 20)
# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20)
# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20)
#else
# define CRASH_KERNEL_ADDR_MAX MAXMEM
# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20)
# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM
#endif

static void __init reserve_crashkernel_low(void)
@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void)
    unsigned long long low_base = 0, low_size = 0;
    unsigned long total_low_mem;
    unsigned long long base;
    bool auto_set = false;
    int ret;

    total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
    /* crashkernel=Y,low */
    ret = parse_crashkernel_low(boot_command_line, total_low_mem,
                &low_size, &base);
    if (ret != 0 || low_size <= 0)
        return;
    if (ret != 0) {
        /*
         * two parts from lib/swiotlb.c:
         * swiotlb size: user specified with swiotlb= or default.
         * swiotlb overflow buffer: now is hardcoded to 32k.
         * We round it to 8M for other buffers that
         * may need to stay low too.
         */
        low_size = swiotlb_size_or_default() + (8UL<<20);
        auto_set = true;
    } else {
        /* passed with crashkernel=0,low ? */
        if (!low_size)
            return;
    }

    low_base = memblock_find_in_range(low_size, (1ULL<<32),
                    low_size, alignment);

    if (!low_base) {
        pr_info("crashkernel low reservation failed - No suitable area found.\n");
        if (!auto_set)
            pr_info("crashkernel low reservation failed - No suitable area found.\n");

        return;
    }
@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void)
    const unsigned long long alignment = 16<<20; /* 16M */
    unsigned long long total_mem;
    unsigned long long crash_size, crash_base;
    bool high = false;
    int ret;

    total_mem = memblock_phys_mem_size();

    /* crashkernel=XM */
    ret = parse_crashkernel(boot_command_line, total_mem,
                &crash_size, &crash_base);
    if (ret != 0 || crash_size <= 0)
        return;
    if (ret != 0 || crash_size <= 0) {
        /* crashkernel=X,high */
        ret = parse_crashkernel_high(boot_command_line, total_mem,
                    &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0)
            return;
        high = true;
    }

    /* 0 means: find the address automatically */
    if (crash_base <= 0) {
@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void)
         * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
         */
        crash_base = memblock_find_in_range(alignment,
                    CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
                    high ? CRASH_KERNEL_ADDR_HIGH_MAX :
                           CRASH_KERNEL_ADDR_LOW_MAX,
                    crash_size, alignment);

        if (!crash_base) {
            pr_info("crashkernel reservation failed - No suitable area found.\n");
@ -41,6 +41,7 @@
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>
#include <linux/ucs2_string.h>

#include <asm/setup.h>
#include <asm/efi.h>
@ -51,6 +52,13 @@

#define EFI_DEBUG 1

/*
 * There's some additional metadata associated with each
 * variable. Intel's reference implementation is 60 bytes - bump that
 * to account for potential alignment constraints
 */
#define VAR_METADATA_SIZE 64

struct efi __read_mostly efi = {
    .mps = EFI_INVALID_TABLE_ADDR,
    .acpi = EFI_INVALID_TABLE_ADDR,
@ -69,6 +77,13 @@ struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;

static u64 efi_var_store_size;
static u64 efi_var_remaining_size;
static u64 efi_var_max_var_size;
static u64 boot_used_size;
static u64 boot_var_size;
static u64 active_size;

unsigned long x86_efi_facility;

/*
@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg)
}
early_param("add_efi_memmap", setup_add_efi_memmap);

static bool efi_no_storage_paranoia;

static int __init setup_storage_paranoia(char *arg)
{
    efi_no_storage_paranoia = true;
    return 0;
}
early_param("efi_no_storage_paranoia", setup_storage_paranoia);

static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
                           efi_char16_t *name,
                           efi_guid_t *vendor)
{
    return efi_call_virt3(get_next_variable,
                  name_size, name, vendor);
    efi_status_t status;
    static bool finished = false;
    static u64 var_size;

    status = efi_call_virt3(get_next_variable,
                  name_size, name, vendor);

    if (status == EFI_NOT_FOUND) {
        finished = true;
        if (var_size < boot_used_size) {
            boot_var_size = boot_used_size - var_size;
            active_size += boot_var_size;
        } else {
            printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
        }
    }

    if (boot_used_size && !finished) {
        unsigned long size;
        u32 attr;
        efi_status_t s;
        void *tmp;

        s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);

        if (s != EFI_BUFFER_TOO_SMALL || !size)
            return status;

        tmp = kmalloc(size, GFP_ATOMIC);

        if (!tmp)
            return status;

        s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);

        if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
            var_size += size;
            var_size += ucs2_strsize(name, 1024);
            active_size += size;
            active_size += VAR_METADATA_SIZE;
            active_size += ucs2_strsize(name, 1024);
        }

        kfree(tmp);
    }

    return status;
}

static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
                      unsigned long data_size,
                      void *data)
{
    return efi_call_virt5(set_variable,
                  name, vendor, attr,
                  data_size, data);
    efi_status_t status;
    u32 orig_attr = 0;
    unsigned long orig_size = 0;

    status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
                       NULL);

    if (status != EFI_BUFFER_TOO_SMALL)
        orig_size = 0;

    status = efi_call_virt5(set_variable,
                  name, vendor, attr,
                  data_size, data);

    if (status == EFI_SUCCESS) {
        if (orig_size) {
            active_size -= orig_size;
            active_size -= ucs2_strsize(name, 1024);
            active_size -= VAR_METADATA_SIZE;
        }
        if (data_size) {
            active_size += data_size;
            active_size += ucs2_strsize(name, 1024);
            active_size += VAR_METADATA_SIZE;
        }
    }

    return status;
}

static efi_status_t virt_efi_query_variable_info(u32 attr,
@ -682,6 +776,9 @@ void __init efi_init(void)
    char vendor[100] = "unknown";
    int i = 0;
    void *tmp;
    struct setup_data *data;
    struct efi_var_bootdata *efi_var_data;
    u64 pa_data;

#ifdef CONFIG_X86_32
    if (boot_params.efi_info.efi_systab_hi ||
@ -699,6 +796,22 @@ void __init efi_init(void)
    if (efi_systab_init(efi_phys.systab))
        return;

    pa_data = boot_params.hdr.setup_data;
    while (pa_data) {
        data = early_ioremap(pa_data, sizeof(*efi_var_data));
        if (data->type == SETUP_EFI_VARS) {
            efi_var_data = (struct efi_var_bootdata *)data;

            efi_var_store_size = efi_var_data->store_size;
            efi_var_remaining_size = efi_var_data->remaining_size;
            efi_var_max_var_size = efi_var_data->max_var_size;
        }
        pa_data = data->next;
        early_iounmap(data, sizeof(*efi_var_data));
    }

    boot_used_size = efi_var_store_size - efi_var_remaining_size;

    set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);

    /*
@ -999,3 +1112,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)
    }
    return 0;
}

/*
 * Some firmware has serious problems when using more than 50% of the EFI
 * variable store, i.e. it triggers bugs that can brick machines. Ensure that
 * we never use more than this safe limit.
 *
 * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
 * store.
 */
efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
{
    efi_status_t status;
    u64 storage_size, remaining_size, max_size;

    status = efi.query_variable_info(attributes, &storage_size,
                     &remaining_size, &max_size);
    if (status != EFI_SUCCESS)
        return status;

    if (!max_size && remaining_size > size)
        printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
                " is returning MaxVariableSize=0\n");
    /*
     * Some firmware implementations refuse to boot if there's insufficient
     * space in the variable store. We account for that by refusing the
     * write if permitting it would reduce the available space to under
     * 50%. However, some firmware won't reclaim variable space until
     * after the used (not merely the actively used) space drops below
     * a threshold. We can approximate that case with the value calculated
     * above. If both the firmware and our calculations indicate that the
     * available space would drop below 50%, refuse the write.
     */

    if (!storage_size || size > remaining_size ||
        (max_size && size > max_size))
        return EFI_OUT_OF_RESOURCES;

    if (!efi_no_storage_paranoia &&
        ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
        (remaining_size - size < storage_size / 2)))
        return EFI_OUT_OF_RESOURCES;

    return EFI_SUCCESS;
}
EXPORT_SYMBOL_GPL(efi_query_variable_store);
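
[Illustrative note: a worked instance of the 50% check above, with made-up
sizes. Take storage_size = 512K, remaining_size = 260K, active_size = 250K and
a 16K write: the write would leave 244K remaining (below half the store) while
pushing the accounted active use above half, so it is refused unless
efi_no_storage_paranoia was passed on the command line.]

u64 storage_size = 512 << 10, remaining_size = 260 << 10;
u64 active_size = 250 << 10, size = 16 << 10;
bool refuse = (active_size + size + 64 > storage_size / 2) &&  /* 266K+ > 256K */
              (remaining_size - size < storage_size / 2);      /* 244K  < 256K */
/* refuse == true -> EFI_OUT_OF_RESOURCES */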
@ -39,6 +39,7 @@

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
    struct rbd_device *rbd_dev = img_request->rbd_dev;
    struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
    struct rbd_obj_request *obj_request;
    struct rbd_obj_request *next_obj_request;

    dout("%s: img %p\n", __func__, img_request);
    for_each_obj_request(img_request, obj_request) {
    for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
        int ret;

        obj_request->callback = rbd_img_obj_callback;

@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
    struct hpet_dev *devp;
    unsigned long addr;

    if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
        return -EINVAL;

    devp = file->private_data;
    addr = devp->hd_hpets->hp_hpet_phys;

    if (addr & (PAGE_SIZE - 1))
        return -ENOSYS;

    vma->vm_flags |= VM_IO;
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

    if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
                PAGE_SIZE, vma->vm_page_prot)) {
        printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
            __func__);
        return -EAGAIN;
    }

    return 0;
    return vm_iomap_memory(vma, addr, PAGE_SIZE);
#else
    return -ENOSYS;
#endif
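
[Illustrative note: the hunk above swaps the open-coded io_remap_pfn_range()
sequence for vm_iomap_memory(), which performs the offset/size validation and
pfn arithmetic itself. A hedged sketch of the resulting mmap shape, with a
hypothetical MMIO base:]

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
    phys_addr_t phys = 0xfed00000;  /* hypothetical device register base */

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    /* fails with -EINVAL if the vma does not fit in [phys, phys + PAGE_SIZE) */
    return vm_iomap_memory(vma, phys, PAGE_SIZE);
}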
@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan)

    dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

    BUG_ON(atc_chan_is_enabled(atchan));

    /*
     * Submit queued descriptors ASAP, i.e. before we go through
     * the completed ones.
@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan)
{
    dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

    if (atc_chan_is_enabled(atchan))
        return;

    if (list_empty(&atchan->active_list) ||
        list_is_singular(&atchan->active_list)) {
        atc_complete_all(atchan);
@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan)
        return;

    spin_lock_irqsave(&atchan->lock, flags);
    if (!atc_chan_is_enabled(atchan)) {
        atc_advance_work(atchan);
    }
    atc_advance_work(atchan);
    spin_unlock_irqrestore(&atchan->lock, flags);
}

@ -39,6 +39,7 @@ config FIRMWARE_MEMMAP
config EFI_VARS
    tristate "EFI Variable Support via sysfs"
    depends on EFI
    select UCS2_STRING
    default n
    help
      If you say Y here, you are able to get EFI (Extensible Firmware

@ -80,6 +80,7 @@
#include <linux/slab.h>
#include <linux/pstore.h>
#include <linux/ctype.h>
#include <linux/ucs2_string.h>

#include <linux/fs.h>
#include <linux/ramfs.h>
@ -172,51 +173,6 @@ static void efivar_update_sysfs_entries(struct work_struct *);
static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
static bool efivar_wq_enabled = true;

/* Return the number of unicode characters in data */
static unsigned long
utf16_strnlen(efi_char16_t *s, size_t maxlength)
{
    unsigned long length = 0;

    while (*s++ != 0 && length < maxlength)
        length++;
    return length;
}

static inline unsigned long
utf16_strlen(efi_char16_t *s)
{
    return utf16_strnlen(s, ~0UL);
}

/*
 * Return the number of bytes is the length of this string
 * Note: this is NOT the same as the number of unicode characters
 */
static inline unsigned long
utf16_strsize(efi_char16_t *data, unsigned long maxlength)
{
    return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
}

static inline int
utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
{
    while (1) {
        if (len == 0)
            return 0;
        if (*a < *b)
            return -1;
        if (*a > *b)
            return 1;
        if (*a == 0) /* implies *b == 0 */
            return 0;
        a++;
        b++;
        len--;
    }
}

static bool
validate_device_path(struct efi_variable *var, int match, u8 *buffer,
             unsigned long len)
@ -268,7 +224,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
    u16 filepathlength;
    int i, desclength = 0, namelen;

    namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
    namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));

    /* Either "Boot" or "Driver" followed by four digits of hex */
    for (i = match; i < match+4; i++) {
@ -291,7 +247,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
     * There's no stored length for the description, so it has to be
     * found by hand
     */
    desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
    desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;

    /* Each boot entry must have a descriptor */
    if (!desclength)
@ -436,24 +392,12 @@ static efi_status_t
check_var_size_locked(struct efivars *efivars, u32 attributes,
              unsigned long size)
{
    u64 storage_size, remaining_size, max_size;
    efi_status_t status;
    const struct efivar_operations *fops = efivars->ops;

    if (!efivars->ops->query_variable_info)
    if (!efivars->ops->query_variable_store)
        return EFI_UNSUPPORTED;

    status = fops->query_variable_info(attributes, &storage_size,
                       &remaining_size, &max_size);

    if (status != EFI_SUCCESS)
        return status;

    if (!storage_size || size > remaining_size || size > max_size ||
        (remaining_size - size) < (storage_size / 2))
        return EFI_OUT_OF_RESOURCES;

    return status;
    return fops->query_variable_store(attributes, size);
}

@ -593,7 +537,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
    spin_lock_irq(&efivars->lock);

    status = check_var_size_locked(efivars, new_var->Attributes,
        new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
        new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));

    if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
        status = efivars->ops->set_variable(new_var->VariableName,
@ -771,7 +715,7 @@ static ssize_t efivarfs_file_write(struct file *file,
     * QueryVariableInfo() isn't supported by the firmware.
     */

    varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
    varsize = datasize + ucs2_strsize(var->var.VariableName, 1024);
    status = check_var_size(efivars, attributes, varsize);

    if (status != EFI_SUCCESS) {
@ -1223,7 +1167,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)

        inode = NULL;

        len = utf16_strlen(entry->var.VariableName);
        len = ucs2_strlen(entry->var.VariableName);

        /* name, plus '-', plus GUID, plus NUL*/
        name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
@ -1481,8 +1425,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,

        if (efi_guidcmp(entry->var.VendorGuid, vendor))
            continue;
        if (utf16_strncmp(entry->var.VariableName, efi_name,
                  utf16_strlen(efi_name))) {
        if (ucs2_strncmp(entry->var.VariableName, efi_name,
                 ucs2_strlen(efi_name))) {
            /*
             * Check if an old format,
             * which doesn't support holding
@ -1494,8 +1438,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
            for (i = 0; i < DUMP_NAME_LEN; i++)
                efi_name_old[i] = name_old[i];

            if (utf16_strncmp(entry->var.VariableName, efi_name_old,
                      utf16_strlen(efi_name_old)))
            if (ucs2_strncmp(entry->var.VariableName, efi_name_old,
                     ucs2_strlen(efi_name_old)))
                continue;
        }

@ -1573,8 +1517,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
     * Does this variable already exist?
     */
    list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
        strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
        strsize2 = utf16_strsize(new_var->VariableName, 1024);
        strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
        strsize2 = ucs2_strsize(new_var->VariableName, 1024);
        if (strsize1 == strsize2 &&
            !memcmp(&(search_efivar->var.VariableName),
                new_var->VariableName, strsize1) &&
@ -1590,7 +1534,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
    }

    status = check_var_size_locked(efivars, new_var->Attributes,
        new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
        new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));

    if (status && status != EFI_UNSUPPORTED) {
        spin_unlock_irq(&efivars->lock);
@ -1614,7 +1558,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,

    /* Create the entry in sysfs. Locking is not required here */
    status = efivar_create_sysfs_entry(efivars,
                       utf16_strsize(new_var->VariableName,
                       ucs2_strsize(new_var->VariableName,
                                 1024),
                       new_var->VariableName,
                       &new_var->VendorGuid);
@ -1644,8 +1588,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
     * Does this variable already exist?
     */
    list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
        strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
        strsize2 = utf16_strsize(del_var->VariableName, 1024);
        strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
        strsize2 = ucs2_strsize(del_var->VariableName, 1024);
        if (strsize1 == strsize2 &&
            !memcmp(&(search_efivar->var.VariableName),
                del_var->VariableName, strsize1) &&
@ -1691,9 +1635,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
    unsigned long strsize1, strsize2;
    bool found = false;

    strsize1 = utf16_strsize(variable_name, 1024);
    strsize1 = ucs2_strsize(variable_name, 1024);
    list_for_each_entry_safe(entry, n, &efivars->list, list) {
        strsize2 = utf16_strsize(entry->var.VariableName, 1024);
        strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
        if (strsize1 == strsize2 &&
            !memcmp(variable_name, &(entry->var.VariableName),
                strsize2) &&
@ -2131,7 +2075,7 @@ efivars_init(void)
    ops.get_variable = efi.get_variable;
    ops.set_variable = efi.set_variable;
    ops.get_next_variable = efi.get_next_variable;
    ops.query_variable_info = efi.query_variable_info;
    ops.query_variable_store = efi_query_variable_store;

    error = register_efivars(&__efivars, &ops, efi_kobj);
    if (error)

@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
    ICPU(0x3c, idle_cpu_hsw),
    ICPU(0x3f, idle_cpu_hsw),
    ICPU(0x45, idle_cpu_hsw),
    ICPU(0x46, idle_cpu_hsw),
    {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);

@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
    case 0x802: /* Intuos4 General Pen */
    case 0x804: /* Intuos4 Marker Pen */
    case 0x40802: /* Intuos4 Classic Pen */
    case 0x18803: /* DTH2242 Grip Pen */
    case 0x18802: /* DTH2242 Grip Pen */
    case 0x022:
        wacom->tool[idx] = BTN_TOOL_PEN;
        break;
@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
    { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047,
      63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xBC =
    { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
    { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047,
      63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x26 =
    { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = {
    { USB_DEVICE_WACOM(0x44) },
    { USB_DEVICE_WACOM(0x45) },
    { USB_DEVICE_WACOM(0x59) },
    { USB_DEVICE_WACOM(0x5D) },
    { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
    { USB_DEVICE_WACOM(0xB0) },
    { USB_DEVICE_WACOM(0xB1) },
    { USB_DEVICE_WACOM(0xB2) },
@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
    { USB_DEVICE_WACOM(0x47) },
    { USB_DEVICE_WACOM(0xF4) },
    { USB_DEVICE_WACOM(0xF8) },
    { USB_DEVICE_WACOM(0xF6) },
    { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
    { USB_DEVICE_WACOM(0xFA) },
    { USB_DEVICE_LENOVO(0x6004) },
    { }

@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
    if (gic_arch_extn.irq_retrigger)
        return gic_arch_extn.irq_retrigger(d);

    return -ENXIO;
    /* the genirq layer expects 0 if we can't retrigger in hardware */
    return 0;
}

#ifdef CONFIG_SMP

@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error)
            queue_io(md, bio);
        } else {
            /* done with normal IO or empty flush */
            trace_block_bio_complete(md->queue, bio, io_error);
            bio_endio(bio, io_error);
        }
    }

@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi)
        return_bi = bi->bi_next;
        bi->bi_next = NULL;
        bi->bi_size = 0;
        trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                     bi, 0);
        bio_endio(bi, 0);
        bi = return_bi;
    }
@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error)
    rdev_dec_pending(rdev, conf->mddev);

    if (!error && uptodate) {
        trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
                     raid_bi, 0);
        bio_endio(raid_bi, 0);
        if (atomic_dec_and_test(&conf->active_aligned_reads))
            wake_up(&conf->wait_for_stripe);
@ -4382,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        if ( rw == WRITE )
            md_write_end(mddev);

        trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
                     bi, 0);
        bio_endio(bi, 0);
    }
}
@ -4758,8 +4764,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
        handled++;
    }
    remaining = raid5_dec_bi_active_stripes(raid_bio);
    if (remaining == 0)
    if (remaining == 0) {
        trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
                     raid_bio, 0);
        bio_endio(raid_bio, 0);
    }
    if (atomic_dec_and_test(&conf->active_aligned_reads))
        wake_up(&conf->wait_for_stripe);
    return handled;

@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
}
#endif

static inline unsigned long get_vm_size(struct vm_area_struct *vma)
{
    return vma->vm_end - vma->vm_start;
}

static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
{
    return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
}

/*
 * Set a new vm offset.
 *
 * Verify that the incoming offset really works as a page offset,
 * and that the offset and size fit in a resource_size_t.
 */
static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
{
    pgoff_t pgoff = off >> PAGE_SHIFT;
    if (off != (resource_size_t) pgoff << PAGE_SHIFT)
        return -EINVAL;
    if (off + get_vm_size(vma) - 1 < off)
        return -EINVAL;
    vma->vm_pgoff = pgoff;
    return 0;
}

/*
 * set up a mapping for shared memory segments
 */
@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;
    struct map_info *map = mtd->priv;
    resource_size_t start, off;
    unsigned long len, vma_len;

    /* This is broken because it assumes the MTD device is map-based
       and that mtd->priv is a valid struct map_info. It should be
       replaced with something that uses the mtd_get_unmapped_area()
       operation properly. */
    if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
        off = get_vm_offset(vma);
        start = map->phys;
        len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
        start &= PAGE_MASK;
        vma_len = get_vm_size(vma);

        /* Overflow in off+len? */
        if (vma_len + off < off)
            return -EINVAL;
        /* Does it fit in the mapping? */
        if (vma_len + off > len)
            return -EINVAL;

        off += start;
        /* Did that overflow? */
        if (off < start)
            return -EINVAL;
        if (set_vm_offset(vma, off) < 0)
            return -EINVAL;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

#ifdef pgprot_noncached
        if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
        if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
        if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
                       vma->vm_end - vma->vm_start,
                       vma->vm_page_prot))
            return -EAGAIN;

        return 0;
        return vm_iomap_memory(vma, map->phys, map->size);
    }
    return -ENOSYS;
#else

|
||||
if (bond->dev->flags & IFF_ALLMULTI)
|
||||
dev_set_allmulti(old_active->dev, -1);
|
||||
|
||||
netif_addr_lock_bh(bond->dev);
|
||||
netdev_for_each_mc_addr(ha, bond->dev)
|
||||
dev_mc_del(old_active->dev, ha->addr);
|
||||
netif_addr_unlock_bh(bond->dev);
|
||||
}
|
||||
|
||||
if (new_active) {
|
||||
@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
|
||||
if (bond->dev->flags & IFF_ALLMULTI)
|
||||
dev_set_allmulti(new_active->dev, 1);
|
||||
|
||||
netif_addr_lock_bh(bond->dev);
|
||||
netdev_for_each_mc_addr(ha, bond->dev)
|
||||
dev_mc_add(new_active->dev, ha->addr);
|
||||
netif_addr_unlock_bh(bond->dev);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1901,11 +1905,29 @@ err_dest_symlinks:
|
||||
bond_destroy_slave_symlinks(bond_dev, slave_dev);
|
||||
|
||||
err_detach:
|
||||
if (!USES_PRIMARY(bond->params.mode)) {
|
||||
netif_addr_lock_bh(bond_dev);
|
||||
bond_mc_list_flush(bond_dev, slave_dev);
|
||||
netif_addr_unlock_bh(bond_dev);
|
||||
}
|
||||
bond_del_vlans_from_slave(bond, slave_dev);
|
||||
write_lock_bh(&bond->lock);
|
||||
bond_detach_slave(bond, new_slave);
|
||||
if (bond->primary_slave == new_slave)
|
||||
bond->primary_slave = NULL;
|
||||
write_unlock_bh(&bond->lock);
|
||||
if (bond->curr_active_slave == new_slave) {
|
||||
read_lock(&bond->lock);
|
||||
write_lock_bh(&bond->curr_slave_lock);
|
||||
bond_change_active_slave(bond, NULL);
|
||||
bond_select_active_slave(bond);
|
||||
write_unlock_bh(&bond->curr_slave_lock);
|
||||
read_unlock(&bond->lock);
|
||||
}
|
||||
slave_disable_netpoll(new_slave);
|
||||
|
||||
err_close:
|
||||
slave_dev->priv_flags &= ~IFF_BONDING;
|
||||
dev_close(slave_dev);
|
||||
|
||||
err_unset_master:
|
||||
@ -3168,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event,
|
||||
struct net_device *slave_dev)
|
||||
{
|
||||
struct slave *slave = bond_slave_get_rtnl(slave_dev);
|
||||
struct bonding *bond = slave->bond;
|
||||
struct net_device *bond_dev = slave->bond->dev;
|
||||
struct bonding *bond;
|
||||
struct net_device *bond_dev;
|
||||
u32 old_speed;
|
||||
u8 old_duplex;
|
||||
|
||||
/* A netdev event can be generated while enslaving a device
|
||||
* before netdev_rx_handler_register is called in which case
|
||||
* slave will be NULL
|
||||
*/
|
||||
if (!slave)
|
||||
return NOTIFY_DONE;
|
||||
bond_dev = slave->bond->dev;
|
||||
bond = slave->bond;
|
||||
|
||||
switch (event) {
|
||||
case NETDEV_UNREGISTER:
|
||||
if (bond->setup_by_slave)
|
||||
@ -3286,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
|
||||
*/
|
||||
static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
|
||||
{
|
||||
struct ethhdr *data = (struct ethhdr *)skb->data;
|
||||
struct iphdr *iph;
|
||||
struct ipv6hdr *ipv6h;
|
||||
const struct ethhdr *data;
|
||||
const struct iphdr *iph;
|
||||
const struct ipv6hdr *ipv6h;
|
||||
u32 v6hash;
|
||||
__be32 *s, *d;
|
||||
const __be32 *s, *d;
|
||||
|
||||
if (skb->protocol == htons(ETH_P_IP) &&
|
||||
skb_network_header_len(skb) >= sizeof(*iph)) {
|
||||
pskb_network_may_pull(skb, sizeof(*iph))) {
|
||||
iph = ip_hdr(skb);
|
||||
data = (struct ethhdr *)skb->data;
|
||||
return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
|
||||
(data->h_dest[5] ^ data->h_source[5])) % count;
|
||||
} else if (skb->protocol == htons(ETH_P_IPV6) &&
|
||||
skb_network_header_len(skb) >= sizeof(*ipv6h)) {
|
||||
pskb_network_may_pull(skb, sizeof(*ipv6h))) {
|
||||
ipv6h = ipv6_hdr(skb);
|
||||
data = (struct ethhdr *)skb->data;
|
||||
s = &ipv6h->saddr.s6_addr32[0];
|
||||
d = &ipv6h->daddr.s6_addr32[0];
|
||||
v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
|
||||
@ -3318,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
|
||||
static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
|
||||
{
|
||||
u32 layer4_xor = 0;
|
||||
struct iphdr *iph;
|
||||
struct ipv6hdr *ipv6h;
|
||||
__be32 *s, *d;
|
||||
__be16 *layer4hdr;
|
||||
const struct iphdr *iph;
|
||||
const struct ipv6hdr *ipv6h;
|
||||
const __be32 *s, *d;
|
||||
const __be16 *l4 = NULL;
|
||||
__be16 _l4[2];
|
||||
int noff = skb_network_offset(skb);
|
||||
int poff;
|
||||
|
||||
if (skb->protocol == htons(ETH_P_IP) &&
|
||||
skb_network_header_len(skb) >= sizeof(*iph)) {
|
||||
pskb_may_pull(skb, noff + sizeof(*iph))) {
|
||||
iph = ip_hdr(skb);
|
||||
if (!ip_is_fragment(iph) &&
|
||||
(iph->protocol == IPPROTO_TCP ||
|
||||
iph->protocol == IPPROTO_UDP) &&
|
||||
(skb_headlen(skb) - skb_network_offset(skb) >=
|
||||
iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
|
||||
layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
|
||||
layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
|
||||
poff = proto_ports_offset(iph->protocol);
|
||||
|
||||
if (!ip_is_fragment(iph) && poff >= 0) {
|
||||
l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
|
||||
sizeof(_l4), &_l4);
|
||||
if (l4)
|
||||
layer4_xor = ntohs(l4[0] ^ l4[1]);
|
||||
}
|
||||
return (layer4_xor ^
|
||||
((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
|
||||
} else if (skb->protocol == htons(ETH_P_IPV6) &&
|
||||
skb_network_header_len(skb) >= sizeof(*ipv6h)) {
|
||||
pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
|
||||
ipv6h = ipv6_hdr(skb);
|
||||
if ((ipv6h->nexthdr == IPPROTO_TCP ||
|
||||
ipv6h->nexthdr == IPPROTO_UDP) &&
|
||||
(skb_headlen(skb) - skb_network_offset(skb) >=
|
||||
sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
|
||||
layer4hdr = (__be16 *)(ipv6h + 1);
|
||||
layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
|
||||
poff = proto_ports_offset(ipv6h->nexthdr);
|
||||
if (poff >= 0) {
|
||||
l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
|
||||
sizeof(_l4), &_l4);
|
||||
if (l4)
|
||||
layer4_xor = ntohs(l4[0] ^ l4[1]);
|
||||
}
|
||||
s = &ipv6h->saddr.s6_addr32[0];
|
||||
d = &ipv6h->daddr.s6_addr32[0];
|
|
||||
struct mcp251x_priv *priv = netdev_priv(net);
|
||||
struct spi_device *spi = priv->spi;
|
||||
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
ret = open_candev(net);
|
||||
@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net)
|
||||
priv->tx_skb = NULL;
|
||||
priv->tx_len = 0;
|
||||
|
||||
flags = IRQF_ONESHOT;
|
||||
if (pdata->irq_flags)
|
||||
flags |= pdata->irq_flags;
|
||||
else
|
||||
flags |= IRQF_TRIGGER_FALLING;
|
||||
|
||||
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
|
||||
pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
|
||||
DEVICE_NAME, priv);
|
||||
flags, DEVICE_NAME, priv);
|
||||
if (ret) {
|
||||
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
|
||||
if (pdata->transceiver_enable)
|
|
||||
struct net_device *dev;
|
||||
struct sja1000_priv *priv;
|
||||
struct resource res;
|
||||
const u32 *prop;
|
||||
int err, irq, res_size, prop_size;
|
||||
u32 prop;
|
||||
int err, irq, res_size;
|
||||
void __iomem *base;
|
||||
|
||||
err = of_address_to_resource(np, 0, &res);
|
||||
@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
|
||||
priv->read_reg = sja1000_ofp_read_reg;
|
||||
priv->write_reg = sja1000_ofp_write_reg;
|
||||
|
||||
prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
|
||||
if (prop && (prop_size == sizeof(u32)))
|
||||
priv->can.clock.freq = *prop / 2;
|
||||
err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
|
||||
if (!err)
|
||||
priv->can.clock.freq = prop / 2;
|
||||
else
|
||||
priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
|
||||
|
||||
prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
|
||||
if (prop && (prop_size == sizeof(u32)))
|
||||
priv->ocr |= *prop & OCR_MODE_MASK;
|
||||
err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
|
||||
if (!err)
|
||||
priv->ocr |= prop & OCR_MODE_MASK;
|
||||
else
|
||||
priv->ocr |= OCR_MODE_NORMAL; /* default */
|
||||
|
||||
prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
|
||||
if (prop && (prop_size == sizeof(u32)))
|
||||
priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
|
||||
err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
|
||||
if (!err)
|
||||
priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
|
||||
else
|
||||
priv->ocr |= OCR_TX0_PULLDOWN; /* default */
|
||||
|
||||
prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
|
||||
if (prop && (prop_size == sizeof(u32)) && *prop) {
|
||||
u32 divider = priv->can.clock.freq * 2 / *prop;
|
||||
err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
|
||||
if (!err && prop) {
|
||||
u32 divider = priv->can.clock.freq * 2 / prop;
|
||||
|
||||
if (divider > 1)
|
||||
priv->cdr |= divider / 2 - 1;
|
||||
@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
|
||||
priv->cdr |= CDR_CLK_OFF; /* default */
|
||||
}
|
||||
|
||||
prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
|
||||
if (!prop)
|
||||
if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
|
||||
priv->cdr |= CDR_CBP; /* default */
|
||||
|
||||
priv->irq_flags = IRQF_SHARED;
|
|
||||
struct ei_device *ei_local;
|
||||
struct ax_device *ax;
|
||||
struct resource *irq, *mem, *mem2;
|
||||
resource_size_t mem_size, mem2_size = 0;
|
||||
unsigned long mem_size, mem2_size = 0;
|
||||
int ret = 0;
|
||||
|
||||
dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
|
||||
|
@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize FW coalescing state machines in RAM */
|
||||
bnx2x_update_coalesce(bp);
|
||||
|
||||
/* setup the leading queue */
|
||||
rc = bnx2x_setup_leading(bp);
|
||||
if (rc) {
|
||||
@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
|
||||
u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
|
||||
u32 addr = BAR_CSTRORM_INTMEM +
|
||||
CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
|
||||
u16 flags = REG_RD16(bp, addr);
|
||||
u8 flags = REG_RD8(bp, addr);
|
||||
/* clear and set */
|
||||
flags &= ~HC_INDEX_DATA_HC_ENABLED;
|
||||
flags |= enable_flag;
|
||||
REG_WR16(bp, addr, flags);
|
||||
REG_WR8(bp, addr, flags);
|
||||
DP(NETIF_MSG_IFUP,
|
||||
"port %x fw_sb_id %d sb_index %d disable %d\n",
|
||||
port, fw_sb_id, sb_index, disable);
|
||||
|
@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
|
||||
REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
|
||||
}
|
||||
}
|
||||
if (!CHIP_IS_E1x(bp))
|
||||
/* block FW from writing to host */
|
||||
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
|
||||
|
||||
/* wait until BRB is empty */
|
||||
tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
|
||||
while (timer_count) {
|
||||
|
@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
|
||||
|
||||
if (vlan_tx_tag_present(skb)) {
|
||||
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
|
||||
__vlan_put_tag(skb, vlan_tag);
|
||||
skb->vlan_tci = 0;
|
||||
skb = __vlan_put_tag(skb, vlan_tag);
|
||||
if (skb)
|
||||
skb->vlan_tci = 0;
|
||||
}
|
||||
|
||||
return skb;
|
||||
|
@ -1002,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
|
||||
} else {
|
||||
if (fep->link) {
|
||||
fec_stop(ndev);
|
||||
fep->link = phy_dev->link;
|
||||
status_change = 1;
|
||||
}
|
||||
}
|
||||
|
@ -284,18 +284,10 @@ struct igb_q_vector {
|
||||
enum e1000_ring_flags_t {
|
||||
IGB_RING_FLAG_RX_SCTP_CSUM,
|
||||
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
|
||||
IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
|
||||
IGB_RING_FLAG_TX_CTX_IDX,
|
||||
IGB_RING_FLAG_TX_DETECT_HANG
|
||||
};
|
||||
|
||||
#define ring_uses_build_skb(ring) \
|
||||
test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
|
||||
#define set_ring_build_skb_enabled(ring) \
|
||||
set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
|
||||
#define clear_ring_build_skb_enabled(ring) \
|
||||
clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
|
||||
|
||||
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
|
||||
|
||||
#define IGB_RX_DESC(R, i) \
|
||||
|
@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
|
||||
wr32(E1000_RXDCTL(reg_idx), rxdctl);
|
||||
}
|
||||
|
||||
static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
|
||||
struct igb_ring *rx_ring)
|
||||
{
|
||||
#define IGB_MAX_BUILD_SKB_SIZE \
|
||||
(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
|
||||
(NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
|
||||
|
||||
/* set build_skb flag */
|
||||
if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
|
||||
set_ring_build_skb_enabled(rx_ring);
|
||||
else
|
||||
clear_ring_build_skb_enabled(rx_ring);
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_configure_rx - Configure receive Unit after Reset
|
||||
* @adapter: board private structure
|
||||
@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
|
||||
|
||||
/* Setup the HW Rx Head and Tail Descriptor Pointers and
|
||||
* the Base and Length of the Rx Descriptor Ring */
|
||||
for (i = 0; i < adapter->num_rx_queues; i++) {
|
||||
struct igb_ring *rx_ring = adapter->rx_ring[i];
|
||||
igb_set_rx_buffer_len(adapter, rx_ring);
|
||||
igb_configure_rx_ring(adapter, rx_ring);
|
||||
}
|
||||
for (i = 0; i < adapter->num_rx_queues; i++)
|
||||
igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
|
||||
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
|
||||
}
|
||||
|
||||
static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
|
||||
union e1000_adv_rx_desc *rx_desc)
|
||||
{
|
||||
struct igb_rx_buffer *rx_buffer;
|
||||
struct sk_buff *skb;
|
||||
struct page *page;
|
||||
void *page_addr;
|
||||
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
|
||||
#if (PAGE_SIZE < 8192)
|
||||
unsigned int truesize = IGB_RX_BUFSZ;
|
||||
#else
|
||||
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
|
||||
SKB_DATA_ALIGN(NET_SKB_PAD +
|
||||
NET_IP_ALIGN +
|
||||
size);
|
||||
#endif
|
||||
|
||||
/* If we spanned a buffer we have a huge mess so test for it */
|
||||
BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
|
||||
|
||||
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
|
||||
page = rx_buffer->page;
|
||||
prefetchw(page);
|
||||
|
||||
page_addr = page_address(page) + rx_buffer->page_offset;
|
||||
|
||||
/* prefetch first cache line of first page */
|
||||
prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
|
||||
#if L1_CACHE_BYTES < 128
|
||||
prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
|
||||
#endif
|
||||
|
||||
/* build an skb to around the page buffer */
|
||||
skb = build_skb(page_addr, truesize);
|
||||
if (unlikely(!skb)) {
|
||||
rx_ring->rx_stats.alloc_failed++;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* we are reusing so sync this buffer for CPU use */
|
||||
dma_sync_single_range_for_cpu(rx_ring->dev,
|
||||
rx_buffer->dma,
|
||||
rx_buffer->page_offset,
|
||||
IGB_RX_BUFSZ,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
/* update pointers within the skb to store the data */
|
||||
skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
|
||||
__skb_put(skb, size);
|
||||
|
||||
/* pull timestamp out of packet data */
|
||||
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
|
||||
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
|
||||
__skb_pull(skb, IGB_TS_HDR_LEN);
|
||||
}
|
||||
|
||||
if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
|
||||
/* hand second half of page back to the ring */
|
||||
igb_reuse_rx_page(rx_ring, rx_buffer);
|
||||
} else {
|
||||
/* we are not reusing the buffer so unmap it */
|
||||
dma_unmap_page(rx_ring->dev, rx_buffer->dma,
|
||||
PAGE_SIZE, DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
/* clear contents of buffer_info */
|
||||
rx_buffer->dma = 0;
|
||||
rx_buffer->page = NULL;
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
|
||||
union e1000_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb)
|
||||
@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
|
||||
rmb();
|
||||
|
||||
/* retrieve a buffer from the ring */
|
||||
if (ring_uses_build_skb(rx_ring))
|
||||
skb = igb_build_rx_buffer(rx_ring, rx_desc);
|
||||
else
|
||||
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
|
||||
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
|
||||
|
||||
/* exit if we failed to retrieve a buffer */
|
||||
if (!skb)
|
||||
@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
|
||||
{
|
||||
if (ring_uses_build_skb(rx_ring))
|
||||
return NET_SKB_PAD + NET_IP_ALIGN;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
|
||||
* @adapter: address of board private structure
|
||||
@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
|
||||
* Refresh the desc even if buffer_addrs didn't change
|
||||
* because each write-back erases this info.
|
||||
*/
|
||||
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
|
||||
bi->page_offset +
|
||||
igb_rx_offset(rx_ring));
|
||||
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
|
||||
|
||||
rx_desc++;
|
||||
bi++;
|
||||
|
@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
|
||||
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
|
||||
return -EINVAL;
|
||||
if (vlan || qos) {
|
||||
if (adapter->vfinfo[vf].pf_vlan)
|
||||
err = ixgbe_set_vf_vlan(adapter, false,
|
||||
adapter->vfinfo[vf].pf_vlan,
|
||||
vf);
|
||||
if (err)
|
||||
goto out;
|
||||
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
|
||||
if (err)
|
||||
goto out;
|
||||
|
@ -33,6 +33,7 @@ config MV643XX_ETH
|
||||
|
||||
config MVMDIO
|
||||
tristate "Marvell MDIO interface support"
|
||||
select PHYLIB
|
||||
---help---
|
||||
This driver supports the MDIO interface found in the network
|
||||
interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
|
||||
@ -45,7 +46,6 @@ config MVMDIO
|
||||
config MVNETA
|
||||
tristate "Marvell Armada 370/XP network interface support"
|
||||
depends on MACH_ARMADA_370_XP
|
||||
select PHYLIB
|
||||
select MVMDIO
|
||||
---help---
|
||||
This driver supports the network interface units in the
|
||||
|
@ -374,7 +374,6 @@ static int rxq_number = 8;
|
||||
static int txq_number = 8;
|
||||
|
||||
static int rxq_def;
|
||||
static int txq_def;
|
||||
|
||||
#define MVNETA_DRIVER_NAME "mvneta"
|
||||
#define MVNETA_DRIVER_VERSION "1.0"
|
||||
@ -1475,7 +1474,8 @@ error:
|
||||
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct mvneta_port *pp = netdev_priv(dev);
|
||||
struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
|
||||
u16 txq_id = skb_get_queue_mapping(skb);
|
||||
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
|
||||
struct mvneta_tx_desc *tx_desc;
|
||||
struct netdev_queue *nq;
|
||||
int frags = 0;
|
||||
@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
|
||||
goto out;
|
||||
|
||||
frags = skb_shinfo(skb)->nr_frags + 1;
|
||||
nq = netdev_get_tx_queue(dev, txq_def);
|
||||
nq = netdev_get_tx_queue(dev, txq_id);
|
||||
|
||||
/* Get a descriptor for the first part of the packet */
|
||||
tx_desc = mvneta_txq_next_desc_get(txq);
|
||||
@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
|
||||
dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
|
||||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -2844,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO);
|
||||
module_param(txq_number, int, S_IRUGO);
|
||||
|
||||
module_param(rxq_def, int, S_IRUGO);
|
||||
module_param(txq_def, int, S_IRUGO);
|
||||
|
@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
|
||||
}
|
||||
} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
|
||||
|
||||
/* Make sure carrier is off and queue is stopped during loopback */
|
||||
if (netif_running(netdev)) {
|
||||
netif_carrier_off(netdev);
|
||||
netif_stop_queue(netdev);
|
||||
}
|
||||
|
||||
ret = qlcnic_do_lb_test(adapter, mode);
|
||||
|
||||
qlcnic_83xx_clear_lb_mode(adapter, mode);
|
||||
@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
|
||||
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
|
||||
{
|
||||
struct qlcnic_cmd_args cmd;
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
int ret = 0;
|
||||
|
||||
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
|
||||
@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
|
||||
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
|
||||
QLC_83XX_STAT_TX, &ret);
|
||||
if (ret) {
|
||||
dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
|
||||
netdev_err(netdev, "Error getting Tx stats\n");
|
||||
goto out;
|
||||
}
|
||||
/* Get MAC stats */
|
||||
@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
|
||||
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
|
||||
QLC_83XX_STAT_MAC, &ret);
|
||||
if (ret) {
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Error getting Rx stats\n");
|
||||
netdev_err(netdev, "Error getting MAC stats\n");
|
||||
goto out;
|
||||
}
|
||||
/* Get Rx stats */
|
||||
@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
	data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
				      QLC_83XX_STAT_RX, &ret);
	if (ret)
		dev_info(&adapter->pdev->dev,
			 "Error getting Tx stats\n");
		netdev_err(netdev, "Error getting Rx stats\n");
out:
	qlcnic_free_mbx_args(&cmd);
}
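The three qlcnic_83xx_get_stats hunks fix messages that were attached to the wrong statistics block (the Tx failure logged "MAC stats", and so on) and switch from dev_info() on the PCI device to netdev_err(), so the log names the failing interface at error severity. A table-driven variant makes the type/label pairing hard to get wrong; this is an illustrative refactor built only from identifiers visible in the hunks, not the actual patch:

static const struct {
	int type;
	const char *name;
} demo_stat_blocks[] = {
	{ QLC_83XX_STAT_TX,  "Tx"  },
	{ QLC_83XX_STAT_MAC, "MAC" },
	{ QLC_83XX_STAT_RX,  "Rx"  },
};

static void demo_get_stats(struct qlcnic_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_cmd_args cmd;
	int i, ret = 0;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
	for (i = 0; i < ARRAY_SIZE(demo_stat_blocks); i++) {
		data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
					      demo_stat_blocks[i].type, &ret);
		if (ret) {
			netdev_err(netdev, "Error getting %s stats\n",
				   demo_stat_blocks[i].name);
			break;
		}
	}
	qlcnic_free_mbx_args(&cmd);
}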
@ -358,8 +358,7 @@ set_flags:
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
	    skb_shinfo(skb)->gso_size > 0) {
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
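The open-coded TSO test above becomes skb_is_gso(). The two are equivalent in practice: skb_is_gso() tests gso_size, and the stack only hands a driver GSO packets when the matching NETIF_F_TSO* features are enabled, so the feature check was redundant. A sketch of the idiom inside a transmit handler (descriptor programming elided):

	if (skb_is_gso(skb)) {
		/* GSO packet: record MSS and header length for the HW. */
		unsigned int hdr_len = skb_transport_offset(skb) +
				       tcp_hdrlen(skb);
		u16 mss = skb_shinfo(skb)->gso_size;

		/* ... program hdr_len and mss into the tx descriptor ... */
	}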
@ -200,10 +200,10 @@ beacon_err:
	}

	err = qlcnic_config_led(adapter, b_state, b_rate);
	if (!err)
	if (!err) {
		err = len;
	else
		ahw->beacon_state = b_state;
	}

	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
		qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
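The sysfs beacon hunk fixes inverted logic: ahw->beacon_state was only updated when qlcnic_config_led() failed. The rule the fix enforces is to cache driver-side state only after the hardware accepted the change, sketched below with hypothetical demo_* names (demo_config_led() stands in for the firmware LED call):

struct demo_hw {
	u8 beacon_state;
};

static int demo_config_led(struct demo_hw *ahw, u8 state)
{
	return 0;	/* stand-in for the firmware LED call */
}

static ssize_t demo_store_beacon(struct demo_hw *ahw, u8 b_state, size_t len)
{
	int err = demo_config_led(ahw, b_state);

	if (err)
		return err;		/* leave cached state untouched */

	ahw->beacon_state = b_state;	/* hardware accepted the change */
	return len;			/* sysfs convention: bytes consumed */
}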
@ -18,7 +18,7 @@
 */
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
#define DRV_VERSION "v1.00.00.31"
#define DRV_VERSION "v1.00.00.32"

#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->advertising = ADVERTISED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_ENABLE;
	ecmd->transceiver = XCVR_EXTERNAL;
	if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
	    STS_LINK_TYPE_10GBASET) {
		ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
		ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
		ecmd->port = PORT_TP;
		ecmd->autoneg = AUTONEG_ENABLE;
	} else {
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
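The ql_get_settings hunk stops reporting AUTONEG_ENABLE unconditionally: only the 10GBase-T link type autonegotiates, so the flag moves into the twisted-pair branch. A hedged sketch of the resulting ethtool reporting; the link-type test mirrors the hunk, while PORT_FIBRE and AUTONEG_DISABLE in the else branch are the implied defaults rather than lines shown in the diff:

	if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
	    STS_LINK_TYPE_10GBASET) {
		ecmd->port = PORT_TP;
		ecmd->autoneg = AUTONEG_ENABLE;		/* 10GBase-T negotiates */
	} else {
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;	/* fixed-speed optics */
	}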
@ -1434,11 +1434,13 @@ map_error:
}

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
				 struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
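Passing the rx_ring into ql_categorize_rx_err() lets the driver bump a per-ring rx_errors counter alongside the adapter-wide one, so dropped frames show up in the interface statistics. A small sketch of rolling per-ring counts up into netdev stats; the summing helper and demo_* names are illustrative, not qlge code:

struct demo_rx_ring {
	unsigned long rx_errors;
};

static void demo_sum_rx_errors(struct net_device *ndev,
			       const struct demo_rx_ring *rings, int nrings)
{
	int i;

	ndev->stats.rx_errors = 0;
	for (i = 0; i < nrings; i++)
		ndev->stats.rx_errors += rings[i].rx_errors;
}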
@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
		return (unsigned long)length;
	}

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
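The qlge hunks above relocate the frame-error check. It used to sit once in the top-level ql_process_mac_rx_intr(), which returned early without releasing the receive buffer; each handler now performs the check itself and frees its own buffer type: put_page() for page chunks, dev_kfree_skb_any() for skbs. The resulting dispatch shape, sketched with hypothetical demo_* names and fields:

struct demo_rsp {
	bool header_split;
	bool in_large_buf;
	unsigned long length;
};

static unsigned long demo_rx_dispatch(struct demo_adapter *qdev,
				      struct demo_rsp *rsp)
{
	/* No early error return here: each handler below checks
	 * IB_MAC_IOCB_RSP_ERR_MASK and releases its own buffer.
	 */
	if (rsp->header_split)
		demo_rx_split(qdev, rsp);	/* dev_kfree_skb_any() on error */
	else if (rsp->in_large_buf)
		demo_rx_page(qdev, rsp);	/* put_page() on error */
	else
		demo_rx_skb(qdev, rsp);		/* dev_kfree_skb_any() on error */

	return rsp->length;
}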
@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
	writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
}

/* This reads the MAC core counters (if actually supported).
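The stmmac hunk completes dwmac_mmc_intr_all_mask(): the MMC block has a third interrupt source, the RX IPC (checksum-offload) counters, which was left unmasked and could raise interrupts nobody services. Masking all three blocks can also be written as a loop; the register offsets are the macros named in the hunk, assumed relative to the MMC base:

static void demo_mmc_mask_all(void __iomem *ioaddr)
{
	static const unsigned int offsets[] = {
		MMC_RX_INTR_MASK, MMC_TX_INTR_MASK, MMC_RX_IPC_INTR_MASK,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(offsets); i++)
		writel(MMC_DEFAULT_MASK, ioaddr + offsets[i]);
}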