Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 update from Martin Schwidefsky:
 "This is the first batch of s390 patches for the 3.10 merge window.

  Included are some performance enhancements: storage key
  initialization, zero page cache synonyms, system call micro
  optimization and the speedup patches for dasdfmt.  Sebastian managed
  to get rid of the special casing for the console device in the cio
  layer.  And the usual bunch of bug fixes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (59 commits)
  s390/pci: use pci_scan_root_bus
  s390/scm_blk: fix memleak in init function
  s390/scm_blk: allow more cluster size values
  s390/cio: fix irq statistics
  s390/memory hotplug: prevent offline of active memory increments
  s390: remove small stack config option
  s390: system call path micro optimization
  s390: lowcore stack pointer offsets
  s390/uapi: change struct statfs[64] member types to unsigned values
  s390/pci: return correct dma address for offset > PAGE_SIZE
  s390/ptrace: remove empty ifdefs
  s390/compat: remove ptrace compat definitions from uapi header file
  s390/compat: fix compile error for !COMPAT
  s390/compat: fix compat_sys_statfs() memory corruption
  s390/zcore: Fix HSA copy length for last block
  s390/mm,gmap: segment mapping race
  s390/mm,gmap: implement gmap_translate()
  s390/pci: remove disable_device implementation
  s390/pci: disable per default
  s390/pci: return error after failed pci ops
  ...
Linus Torvalds 2013-04-29 08:19:39 -07:00
commit d0b8883800
72 changed files with 1709 additions and 1353 deletions


@ -143,7 +143,8 @@ Parameter: id: handle for debug log
Return Value: none
Description: frees memory for a debug log
Description: frees memory for a debug log and removes all registered debug
views.
Must not be called within an interrupt handler
---------------------------------------------------------------------------
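Note: the entry above documents debug_unregister(); a minimal usage sketch
(log name and sizing parameters are illustrative):

        debug_info_t *id;

        id = debug_register("mylog", 4, 1, 16); /* 4 pages/area, 1 area, 16-byte entries */
        if (id)
                debug_unregister(id);   /* frees the log and drops its registered views */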


@ -375,19 +375,6 @@ config PACK_STACK
Say Y if you are unsure.
config SMALL_STACK
def_bool n
prompt "Use 8kb for kernel stack instead of 16kb"
depends on PACK_STACK && 64BIT && !LOCKDEP
help
If you say Y here and the compiler supports the -mkernel-backchain
option the kernel will use a smaller kernel stack size. The reduced
size is 8kb instead of 16kb. This allows to run more threads on a
system and reduces the pressure on the memory management for higher
order page allocations.
Say N if you are unsure.
config CHECK_STACK
def_bool y
prompt "Detect kernel stack overflow"


@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
ifeq ($(call cc-option-yn,-mkernel-backchain),y)
cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
ifdef CONFIG_SMALL_STACK
STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
endif
endif
# new style option for packed stacks
ifeq ($(call cc-option-yn,-mpacked-stack),y)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
ifdef CONFIG_SMALL_STACK
STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
endif
endif
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)


@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
int hypfs_dbfs_init(void)
{
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
if (IS_ERR(dbfs_dir))
return PTR_ERR(dbfs_dir);
return 0;
return PTR_RET(dbfs_dir);
}
void hypfs_dbfs_exit(void)
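Note: PTR_RET() collapses the IS_ERR()/PTR_ERR()/0 tail removed above; a
minimal sketch of its semantics (the helper was later renamed
PTR_ERR_OR_ZERO()):

        /* open-coded equivalent of PTR_RET(ptr) */
        static inline int ptr_ret_sketch(const void *ptr)
        {
                if (IS_ERR(ptr))
                        return PTR_ERR(ptr);    /* propagate the encoded errno */
                return 0;                       /* valid pointer: success */
        }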


@ -61,8 +61,6 @@ extern const char _sb_findmap[];
#ifndef CONFIG_64BIT
#define __BITOPS_ALIGN 3
#define __BITOPS_WORDSIZE 32
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
@ -81,8 +79,6 @@ extern const char _sb_findmap[];
#else /* CONFIG_64BIT */
#define __BITOPS_ALIGN 7
#define __BITOPS_WORDSIZE 64
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
@ -101,8 +97,7 @@ extern const char _sb_findmap[];
#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#ifdef CONFIG_SMP
/*
@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR mask */
mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND mask */
mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}
@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR mask */
mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}
@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR/test mask */
mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
__BITOPS_BARRIER();
barrier();
return (old & mask) != 0;
}
@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND/test mask */
mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
__BITOPS_BARRIER();
barrier();
return (old ^ new) != 0;
}
@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR/test mask */
mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
__BITOPS_BARRIER();
barrier();
return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" oc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr |= 1 << (nr & 7);
}
@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" nc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr &= ~(1 << (nr & 7));
}
@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" xc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr ^= 1 << (nr & 7);
}
@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" oc %O0(1,%R0),%1"
@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" nc %O0(1,%R0),%1"
@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" xc %O0(1,%R0),%1"
@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(volatile unsigned char *) addr;
return (ch >> (nr & 7)) & 1;
}
@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
return (((volatile char *) addr)
[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
[(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
}
#define test_bit(nr,addr) \
@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr,
if (offset >= size)
return size;
bit = offset & (__BITOPS_WORDSIZE - 1);
bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
p = addr + offset / __BITOPS_WORDSIZE;
p = addr + offset / BITS_PER_LONG;
if (bit) {
set = __flo_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
if (set < BITS_PER_LONG)
return set + offset;
offset += __BITOPS_WORDSIZE;
size -= __BITOPS_WORDSIZE;
offset += BITS_PER_LONG;
size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_left(p, size);
@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr,
if (offset >= size)
return size;
bit = offset & (__BITOPS_WORDSIZE - 1);
bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
p = addr + offset / __BITOPS_WORDSIZE;
p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
* __ffz_word returns __BITOPS_WORDSIZE
* __ffz_word returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, *p >> bit);
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
if (set < BITS_PER_LONG)
return set + offset;
offset += __BITOPS_WORDSIZE;
size -= __BITOPS_WORDSIZE;
offset += BITS_PER_LONG;
size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit(p, size);
@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr,
if (offset >= size)
return size;
bit = offset & (__BITOPS_WORDSIZE - 1);
bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
p = addr + offset / __BITOPS_WORDSIZE;
p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
* __ffs_word returns __BITOPS_WORDSIZE
* __ffs_word returns BITS_PER_LONG
* if no one bit is present in the word.
*/
set = __ffs_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
if (set < BITS_PER_LONG)
return set + offset;
offset += __BITOPS_WORDSIZE;
size -= __BITOPS_WORDSIZE;
offset += BITS_PER_LONG;
size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit(p, size);
@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
bit = offset & (__BITOPS_WORDSIZE - 1);
bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
p = addr + offset / __BITOPS_WORDSIZE;
p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
* s390 version of ffz returns __BITOPS_WORDSIZE
* s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
if (set < BITS_PER_LONG)
return set + offset;
offset += __BITOPS_WORDSIZE;
size -= __BITOPS_WORDSIZE;
offset += BITS_PER_LONG;
size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit_le(p, size);
@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
bit = offset & (__BITOPS_WORDSIZE - 1);
bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
p = addr + offset / __BITOPS_WORDSIZE;
p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
* s390 version of ffz returns __BITOPS_WORDSIZE
* s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
if (set < BITS_PER_LONG)
return set + offset;
offset += __BITOPS_WORDSIZE;
size -= __BITOPS_WORDSIZE;
offset += BITS_PER_LONG;
size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_le(p, size);
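Note: the private __BITOPS_WORDSIZE was simply the machine word size, so the
patch substitutes the generic BITS_PER_LONG and replaces the private
__BITOPS_BARRIER() with the generic barrier(). A stand-alone sketch of the
address/mask arithmetic used throughout this file, assuming a 64-bit build:

        #include <stdio.h>

        #define BITS_PER_LONG 64                /* assumption: 64-bit build */

        int main(void)
        {
                unsigned long nr = 70;
                /* byte offset of the word holding bit nr */
                unsigned long off  = (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
                /* mask selecting bit nr within that word */
                unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));

                printf("off=%lu mask=%#lx\n", off, mask); /* off=8 mask=0x40 */
                return 0;
        }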


@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_probe_console(void);
extern int ccw_device_force_console(void);
extern void ccw_device_wait_idle(struct ccw_device *);
extern int ccw_device_force_console(struct ccw_device *);
int ccw_device_siosl(struct ccw_device *);


@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
return 0;
}
extern void wait_cons_dev(void);
extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);


@ -70,6 +70,22 @@ typedef u32 compat_ulong_t;
typedef u64 compat_u64;
typedef u32 compat_uptr_t;
typedef struct {
u32 mask;
u32 addr;
} __aligned(8) psw_compat_t;
typedef struct {
psw_compat_t psw;
u32 gprs[NUM_GPRS];
u32 acrs[NUM_ACRS];
u32 orig_gpr2;
} s390_compat_regs;
typedef struct {
u32 gprs_high[NUM_GPRS];
} s390_compat_regs_high;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
@ -124,18 +140,33 @@ struct compat_flock64 {
};
struct compat_statfs {
s32 f_type;
s32 f_bsize;
s32 f_blocks;
s32 f_bfree;
s32 f_bavail;
s32 f_files;
s32 f_ffree;
u32 f_type;
u32 f_bsize;
u32 f_blocks;
u32 f_bfree;
u32 f_bavail;
u32 f_files;
u32 f_ffree;
compat_fsid_t f_fsid;
s32 f_namelen;
s32 f_frsize;
s32 f_flags;
s32 f_spare[5];
u32 f_namelen;
u32 f_frsize;
u32 f_flags;
u32 f_spare[4];
};
struct compat_statfs64 {
u32 f_type;
u32 f_bsize;
u64 f_blocks;
u64 f_bfree;
u64 f_bavail;
u64 f_files;
u64 f_ffree;
compat_fsid_t f_fsid;
u32 f_namelen;
u32 f_frsize;
u32 f_flags;
u32 f_spare[4];
};
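Note: the s32 -> u32 switch matters once a counter reaches 2^31: as a signed
field the value reads back negative on the usual two's-complement targets. A
stand-alone illustration:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t blocks = 1ULL << 31;           /* 2^31 blocks */
                int32_t  old_field = (int32_t)blocks;   /* former s32 member */
                uint32_t new_field = (uint32_t)blocks;  /* new u32 member */

                printf("s32: %d  u32: %u\n", old_field, new_field);
                /* prints: s32: -2147483648  u32: 2147483648 */
                return 0;
        }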
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@ -248,8 +279,6 @@ static inline int is_compat_task(void)
return is_32bit_task();
}
#endif
static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len)
return (void __user *) (stack - len);
}
#endif
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;


@ -119,6 +119,8 @@
*/
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/syscall.h>
#include <asm/user.h>
typedef s390_fp_regs elf_fpregset_t;
@ -180,18 +182,31 @@ extern unsigned long elf_hwcap;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
#ifdef CONFIG_64BIT
#ifndef CONFIG_COMPAT
#define SET_PERSONALITY(ex) \
do { \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
current_thread_info()->sys_call_table = \
(unsigned long) &sys_call_table; \
} while (0)
#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & ~PER_MASK)); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
else \
current_thread_info()->sys_call_table = \
(unsigned long) &sys_call_table_emu; \
} else { \
clear_thread_flag(TIF_31BIT); \
current_thread_info()->sys_call_table = \
(unsigned long) &sys_call_table; \
} \
} while (0)
#endif /* CONFIG_64BIT */
#endif /* CONFIG_COMPAT */
#define STACK_RND_MASK 0x7ffUL
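Note: with the table pointer cached in thread_info at exec time, the system
call entry path loads it directly instead of testing TIF_31BIT on every call.
A C-level sketch of the idea (names and types are illustrative; the real
dispatch lives in entry.S/entry64.S):

        typedef long (*syscall_fn)(long, long, long, long, long, long);

        struct thread_info_sketch {
                unsigned long sys_call_table;   /* set once by SET_PERSONALITY */
        };

        static long dispatch(struct thread_info_sketch *ti, unsigned int nr,
                             const long a[6])
        {
                const syscall_fn *table = (const syscall_fn *)ti->sys_call_table;

                return table[nr](a[0], a[1], a[2], a[3], a[4], a[5]);
        }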


@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
struct zpci_dev *zpci_alloc_device(void);
int zpci_create_device(struct zpci_dev *);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
void zpci_stop_device(struct zpci_dev *);
void zpci_free_device(struct zpci_dev *);
int zpci_scan_device(struct zpci_dev *);


@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
#ifdef CONFIG_PCI_DEBUG
#define zpci_dbg(fmt, args...) \
do { \
if (pci_debug_msg_id->level >= 2) \
debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
} while (0)
#define zpci_dbg(imp, fmt, args...) \
debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
#else /* !CONFIG_PCI_DEBUG */
#define zpci_dbg(fmt, args...) do { } while (0)
#define zpci_dbg(imp, fmt, args...) do { } while (0)
#endif
#define zpci_err(text...) \
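Note: zpci_dbg() now takes the importance level as its first argument and
leaves level filtering to the s390 debug facility instead of open-coding a
check against pci_debug_msg_id->level. Illustrative call sites (event texts
made up; a lower level means a more important event):

        zpci_dbg(3, "add fid:%x\n", fid);               /* chatty event */
        zpci_dbg(1, "error fid:%x rc:%d\n", fid, rc);   /* important event */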


@ -1,10 +1,6 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
#include <linux/delay.h>
#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@ -82,199 +78,12 @@ struct zpci_fib {
u64 reserved7;
} __packed;
/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
u8 cc;
asm volatile (
" .insn rxy,0xe300000000d0,%[req],%[fib]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
: : "cc");
*status = req >> 24 & 0xff;
return cc;
}
static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
{
u8 cc, status;
do {
cc = __mpcifc(req, fib, &status);
if (cc == 2)
msleep(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
__func__, cc, status);
return (cc) ? -EIO : 0;
}
/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
register u64 __addr asm("2") = addr;
register u64 __range asm("3") = range;
u8 cc;
asm volatile (
" .insn rre,0xb9d30000,%[fn],%[addr]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn)
: [addr] "d" (__addr), "d" (__range)
: "cc");
*status = fn >> 24 & 0xff;
return cc;
}
static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
{
u8 cc, status;
do {
cc = __rpcit(fn, addr, range, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
__func__, cc, status, addr, range);
return (cc) ? -EIO : 0;
}
/* Store PCI function controls */
static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
{
u64 fn = (u64) handle << 32 | space << 16;
u8 cc;
asm volatile (
" .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
: : "cc");
*status = fn >> 24 & 0xff;
return cc;
}
/* Set Interruption Controls */
static inline void sic_instr(u16 ctl, char *unused, u8 isc)
{
asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
}
/* PCI Load */
static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
u64 __data;
u8 cc;
asm volatile (
" .insn rre,0xb9d20000,%[data],%[req]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
: "d" (__offset)
: "cc");
*status = __req >> 24 & 0xff;
*data = __data;
return cc;
}
static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
{
u8 cc, status;
do {
cc = __pcilg(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc) {
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
/* TODO: on IO errors set data to 0xff...
* here or in users of pcilg (le conversion)?
*/
}
return (cc) ? -EIO : 0;
}
/* PCI Store */
static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
u8 cc;
asm volatile (
" .insn rre,0xb9d00000,%[data],%[req]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [req] "+d" (__req)
: "d" (__offset), [data] "d" (data)
: "cc");
*status = __req >> 24 & 0xff;
return cc;
}
static inline int pcistg_instr(u64 data, u64 req, u64 offset)
{
u8 cc, status;
do {
cc = __pcistg(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
return (cc) ? -EIO : 0;
}
/* PCI Store Block */
static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
u8 cc;
asm volatile (
" .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [req] "+d" (req)
: [offset] "d" (offset), [data] "Q" (*data)
: "cc");
*status = req >> 24 & 0xff;
return cc;
}
static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
{
u8 cc, status;
do {
cc = __pcistb(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
return (cc) ? -EIO : 0;
}
int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
int s390pci_load(u64 *data, u64 req, u64 offset);
int s390pci_store(u64 data, u64 req, u64 offset);
int s390pci_store_block(const u64 *data, u64 req, u64 offset);
void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
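Note: the inline instruction wrappers deleted above move out of line behind
the s390pci_*() helpers, keeping the same busy-retry discipline. A sketch of
that pattern, with __pcilg_sketch standing in for the real instruction
wrapper:

        /* cc == 2 means "busy, retry"; any other nonzero cc becomes -EIO */
        static int pci_load_retry(u64 *data, u64 req, u64 offset)
        {
                u8 cc, status;

                do {
                        cc = __pcilg_sketch(data, req, offset, &status);
                } while (cc == 2);
                return cc ? -EIO : 0;
        }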


@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
u64 data; \
int rc; \
\
rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
s390pci_store(data, req, ZPCI_OFFSET(addr)); \
}
zpci_read(8, u64)
@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
val = 0; /* let FW report error */
break;
}
return pcistg_instr(val, req, offset);
return s390pci_store(val, req, offset);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
u64 data;
u8 cc;
int cc;
cc = s390pci_load(&data, req, offset);
if (cc)
goto out;
cc = pcilg_instr(&data, req, offset);
switch (len) {
case 1:
*((u8 *) dst) = (u8) data;
@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
*((u64 *) dst) = (u64) data;
break;
}
out:
return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
return pcistb_instr(data, req, offset);
return s390pci_store_block(data, req, offset);
}
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)


@ -764,6 +764,8 @@ void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
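Note: gmap_translate() is the non-faulting counterpart of gmap_fault(): it
resolves a guest address to the backing user-space address without faulting
the mapping in. An illustrative caller (the error encoding shown is an
assumption):

        unsigned long uaddr = gmap_translate(gaddr, gmap);

        if (IS_ERR_VALUE(uaddr))
                return -EFAULT;         /* guest segment not mapped */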


@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
extern void show_code(struct pt_regs *regs);
extern void print_fn_code(unsigned char *code, unsigned long len);
extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]);
extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
unsigned int len);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \


@ -9,9 +9,7 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
#ifndef __s390x__
#else /* __s390x__ */
#endif /* __s390x__ */
extern long psw_kernel_bits;
extern long psw_user_bits;
@ -77,8 +75,6 @@ struct per_struct_kernel {
#define PER_CONTROL_SUSPENSION 0x00400000UL
#define PER_CONTROL_ALTERATION 0x00200000UL
#ifdef __s390x__
#endif /* __s390x__ */
/*
* These are defined as per linux/ptrace.h, which see.
*/


@ -23,6 +23,7 @@
* type here is what we want [need] for both 32 bit and 64 bit systems.
*/
extern const unsigned int sys_call_table[];
extern const unsigned int sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)


@ -14,13 +14,8 @@
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#else /* CONFIG_64BIT */
#ifndef __SMALL_STACK
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
#else
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#endif
#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@ -41,6 +36,7 @@ struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;


@ -215,12 +215,6 @@ typedef struct
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
typedef struct
{
__u32 mask;
__u32 addr;
} __attribute__ ((aligned(8))) psw_compat_t;
#ifndef __s390x__
#define PSW_MASK_PER 0x40000000UL
@ -295,20 +289,6 @@ typedef struct
unsigned long orig_gpr2;
} s390_regs;
typedef struct
{
psw_compat_t psw;
__u32 gprs[NUM_GPRS];
__u32 acrs[NUM_ACRS];
__u32 orig_gpr2;
} s390_compat_regs;
typedef struct
{
__u32 gprs_high[NUM_GPRS];
} s390_compat_regs_high;
/*
* Now for the user space program event recording (trace) definitions.
* The following structures are used only for the ptrace interface, don't


@ -7,9 +7,6 @@
#ifndef _S390_STATFS_H
#define _S390_STATFS_H
#ifndef __s390x__
#include <asm-generic/statfs.h>
#else
/*
* We can't use <asm-generic/statfs.h> because in 64-bit mode
* we mix ints of different sizes in our struct statfs.
@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t;
#endif
struct statfs {
int f_type;
int f_bsize;
long f_blocks;
long f_bfree;
long f_bavail;
long f_files;
long f_ffree;
unsigned int f_type;
unsigned int f_bsize;
unsigned long f_blocks;
unsigned long f_bfree;
unsigned long f_bavail;
unsigned long f_files;
unsigned long f_ffree;
__kernel_fsid_t f_fsid;
int f_namelen;
int f_frsize;
int f_flags;
int f_spare[4];
unsigned int f_namelen;
unsigned int f_frsize;
unsigned int f_flags;
unsigned int f_spare[4];
};
struct statfs64 {
int f_type;
int f_bsize;
long f_blocks;
long f_bfree;
long f_bavail;
long f_files;
long f_ffree;
unsigned int f_type;
unsigned int f_bsize;
unsigned long f_blocks;
unsigned long f_bfree;
unsigned long f_bavail;
unsigned long f_files;
unsigned long f_ffree;
__kernel_fsid_t f_fsid;
int f_namelen;
int f_frsize;
int f_flags;
int f_spare[4];
unsigned int f_namelen;
unsigned int f_frsize;
unsigned int f_flags;
unsigned int f_spare[4];
};
struct compat_statfs64 {
__u32 f_type;
__u32 f_bsize;
__u64 f_blocks;
__u64 f_bfree;
__u64 f_bavail;
__u64 f_files;
__u64 f_ffree;
__kernel_fsid_t f_fsid;
__u32 f_namelen;
__u32 f_frsize;
__u32 f_flags;
__u32 f_spare[4];
};
#endif /* __s390x__ */
#endif


@ -13,6 +13,14 @@ endif
#
CFLAGS_smp.o := -Wno-nonnull
#
# Disable tailcall optimizations for stack / callchain walking functions
# since this might generate broken code when accessing register 15 and
# passing its content to other functions.
#
CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
#
# Pass UTS_MACHINE for user_regset definition
#
@ -20,10 +28,11 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += dumpstack.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)


@ -35,6 +35,7 @@ int main(void)
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));


@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
regs->gprs[6] = task_thread_info(current)->last_break;
}
/* Place signal number on stack to allow backtrace from handler. */
@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
return 0;
give_sigsegv:


@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code)
* insn_to_mnemonic - decode an s390 instruction
* @instruction: instruction to decode
* @buf: buffer to fill with mnemonic
* @len: length of buffer
*
* Decode the instruction at @instruction and store the corresponding
* mnemonic into @buf.
* mnemonic into @buf of length @len.
* @buf is left unchanged if the instruction could not be decoded.
* Returns:
* %0 on success, %-ENOENT if the instruction was not found.
*/
int insn_to_mnemonic(unsigned char *instruction, char buf[8])
int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
{
struct insn *insn;
@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8])
if (!insn)
return -ENOENT;
if (insn->name[0] == '\0')
snprintf(buf, 8, "%s",
snprintf(buf, len, "%s",
long_insn_name[(int) insn->name[1]]);
else
snprintf(buf, 8, "%.5s", insn->name);
snprintf(buf, len, "%.5s", insn->name);
return 0;
}
EXPORT_SYMBOL_GPL(insn_to_mnemonic);
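Note: callers now pass their actual buffer size instead of relying on the
fixed char[8] contract. An illustrative caller:

        char mnemonic[16];

        if (!insn_to_mnemonic(code, mnemonic, sizeof(mnemonic)))
                printk("insn: %s\n", mnemonic);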


@ -0,0 +1,236 @@
/*
* Stack dumping functions
*
* Copyright IBM Corp. 1999, 2013
*/
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
/*
* For show_trace we have three different stacks to consider:
* - the panic stack which is used if the kernel stack has overflown
* - the asynchronous interrupt stack (cpu related)
* - the synchronous kernel stack (process related)
* The stack trace can start at any of the three stacks and can potentially
* touch all of them. The order is: panic stack, async stack, sync stack.
*/
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
struct stack_frame *sf;
struct pt_regs *regs;
while (1) {
sp = sp & PSW_ADDR_INSN;
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
/* Follow the backchain. */
while (1) {
low = sp;
sp = sf->back_chain & PSW_ADDR_INSN;
if (!sp)
break;
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
low = sp;
sp = regs->gprs[15];
}
}
static void show_trace(struct task_struct *task, unsigned long *stack)
{
register unsigned long __r15 asm ("15");
unsigned long sp;
sp = (unsigned long) stack;
if (!sp)
sp = task ? task->thread.ksp : __r15;
printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
S390_lowcore.panic_stack);
#endif
sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
S390_lowcore.async_stack);
if (task)
__show_trace(sp, (unsigned long) task_stack_page(task),
(unsigned long) task_stack_page(task) + THREAD_SIZE);
else
__show_trace(sp, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
if (!task)
task = current;
debug_show_held_locks(task);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
register unsigned long *__r15 asm ("15");
unsigned long *stack;
int i;
if (!sp)
stack = task ? (unsigned long *) task->thread.ksp : __r15;
else
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
if ((i * sizeof(long) % 32) == 0)
printk("%s ", i == 0 ? "" : "\n");
printk(LONG, *stack++);
}
printk("\n");
show_trace(task, sp);
}
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
/*
* The architecture-independent dump_stack generator
*/
void dump_stack(void)
{
printk("CPU: %d %s %s %.*s\n",
task_thread_info(current)->cpu, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
current->comm, current->pid, current,
(void *) current->thread.ksp);
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
void show_registers(struct pt_regs *regs)
{
char *mode;
mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %p %p",
mode, (void *) regs->psw.mask,
(void *) regs->psw.addr);
print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
printk("\n%s GPRS: " FOURLONG, mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" " FOURLONG,
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
printk(" " FOURLONG,
regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
printk(" " FOURLONG,
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
show_code(regs);
}
void show_regs(struct pt_regs *regs)
{
printk("CPU: %d %s %s %.*s\n",
task_thread_info(current)->cpu, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
current->comm, current->pid, current,
(void *) current->thread.ksp);
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!user_mode(regs))
show_trace(NULL, (unsigned long *) regs->gprs[15]);
show_last_breaking_event(regs);
}
static DEFINE_SPINLOCK(die_lock);
void die(struct pt_regs *regs, const char *str)
{
static int die_counter;
oops_enter();
lgr_info_log();
debug_stop_all();
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
print_modules();
show_regs(regs);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception: panic_on_oops");
oops_exit();
do_exit(SIGSEGV);
}
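Note: the backchain walk above relies on the s390 stack frame ABI: offset 0
of each frame links to the caller's frame, and the saved %r14 (the return
address) sits at gprs[8]. A 64-bit layout sketch of struct stack_frame
(treat the exact field sizes as an approximation of this era's
asm/processor.h):

        struct stack_frame {
                unsigned long back_chain;   /* 0: link to caller's frame */
                unsigned long empty1[5];
                unsigned long gprs[10];     /* saved %r6..%r15; gprs[8] == %r14 */
                unsigned int  empty2[8];
        };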


@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
#define BASED(name) name-system_call(%r13)
@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT
sra %r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: l %r15,\stack # load target stack
2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro ADD64 high,low,timer
@ -150,7 +151,7 @@ ENTRY(__switch_to)
l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
lr %r15,%r5
ahi %r15,STACK_SIZE # end of kernel stack of next
ahi %r15,STACK_INIT # end of kernel stack of next
st %r3,__LC_CURRENT # store task struct of next
st %r5,__LC_THREAD_INFO # store thread info of next
st %r15,__LC_KERNEL_STACK # store end of kernel stack
@ -178,7 +179,6 @@ sysc_stm:
l %r13,__LC_SVC_NEW_PSW+4
sysc_per:
l %r15,__LC_KERNEL_STACK
ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
@ -188,6 +188,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+3(%r12),_TIF_SYSCALL
l %r10,__TI_sysc_table(%r12) # 31 bit system call table
lh %r8,__PT_INT_CODE+2(%r11)
sla %r8,2 # shift and test for svc0
jnz sysc_nr_ok
@ -198,7 +199,6 @@ sysc_do_svc:
lr %r8,%r1
sla %r8,2
sysc_nr_ok:
l %r10,BASED(.Lsys_call_table) # 31 bit system call table
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
st %r2,__PT_ORIG_GPR2(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15)
@ -359,11 +359,11 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
l %r15,__LC_KERNEL_STACK
2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
stm %r8,%r9,__PT_PSW(%r11)
@ -485,7 +485,6 @@ io_work:
#
io_work_user:
l %r1,__LC_KERNEL_STACK
ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@ -646,7 +645,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
l %r1,__LC_KERNEL_STACK # switch to kernel stack
ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@ -674,6 +672,7 @@ mcck_panic:
sra %r14,PAGE_SHIFT
jz 0f
l %r15,__LC_PANIC_STACK
j mcck_skip
0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip
@ -714,12 +713,10 @@ ENTRY(restart_int_handler)
*/
stack_overflow:
l %r15,__LC_PANIC_STACK # change to panic stack
ahi %r15,-__PT_SIZE # create pt_regs
stm %r0,%r7,__PT_R0(%r15)
stm %r8,%r9,__PT_PSW(%r15)
la %r11,STACK_FRAME_OVERHEAD(%r15)
stm %r0,%r7,__PT_R0(%r11)
stm %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(32,%r11),0(%r14)
lr %r15,%r11
ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,BASED(1f)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
lr %r2,%r11 # pass pointer to pt_regs
@ -799,15 +796,14 @@ cleanup_system_call:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
# set up saved register 11
l %r15,__LC_KERNEL_STACK
ahi %r15,-__PT_SIZE
st %r15,12(%r11) # r11 pt_regs pointer
la %r9,STACK_FRAME_OVERHEAD(%r15)
st %r9,12(%r11) # r11 pt_regs pointer
# fill pt_regs
mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
stm %r0,%r7,__PT_R0(%r15)
mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
stm %r0,%r7,__PT_R0(%r9)
mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register 15
ahi %r15,-STACK_FRAME_OVERHEAD
st %r15,28(%r11) # r15 stack pointer
# set new psw address and exit
l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
@ -910,7 +906,6 @@ cleanup_idle_wait:
.Ltrace_enter: .long do_syscall_trace_enter
.Ltrace_exit: .long do_syscall_trace_exit
.Lschedule_tail: .long schedule_tail
.Lsys_call_table: .long sys_call_table
.Lsysc_per: .long sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
.Lhardirqs_on: .long trace_hardirqs_on_caller


@ -7,6 +7,7 @@
#include <asm/cputime.h>
extern void *restart_stack;
extern unsigned long suspend_zero_pages;
void system_call(void);
void pgm_check_handler(void);


@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP )
@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
srag %r14,%r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: lg %r15,\stack # load target stack
2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro UPDATE_VTIME scratch,enter_timer
@ -177,7 +178,7 @@ ENTRY(__switch_to)
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
lgr %r15,%r5
aghi %r15,STACK_SIZE # end of kernel stack of next
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
@ -203,10 +204,8 @@ sysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
sysc_per:
lg %r15,__LC_KERNEL_STACK
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
@ -217,6 +216,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+7(%r12),_TIF_SYSCALL
lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz sysc_nr_ok
@ -227,13 +227,6 @@ sysc_do_svc:
sth %r1,__PT_INT_CODE+2(%r11)
slag %r8,%r1,2
sysc_nr_ok:
larl %r10,sys_call_table # 64 bit system call table
#ifdef CONFIG_COMPAT
tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
jno sysc_noemu
larl %r10,sys_call_table_emu # 31 bit system call table
sysc_noemu:
#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
@ -389,6 +382,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
LAST_BREAK %r14
@ -398,8 +392,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 2f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@ -526,7 +519,6 @@ io_work:
#
io_work_user:
lg %r1,__LC_KERNEL_STACK
aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@ -688,7 +680,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@ -755,14 +746,12 @@ ENTRY(restart_int_handler)
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
lg %r11,__LC_PANIC_STACK # change to panic stack
aghi %r11,-__PT_SIZE # create pt_regs
lg %r15,__LC_PANIC_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(64,%r11),0(%r14)
stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
lgr %r15,%r11
aghi %r15,-STACK_FRAME_OVERHEAD
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
@ -846,15 +835,14 @@ cleanup_system_call:
mvc __TI_last_break(8,%r12),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
aghi %r15,-__PT_SIZE
stg %r15,24(%r11) # r11 pt_regs pointer
la %r9,STACK_FRAME_OVERHEAD(%r15)
stg %r9,24(%r11) # r11 pt_regs pointer
# fill pt_regs
mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
stmg %r0,%r7,__PT_R0(%r15)
mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
stmg %r0,%r7,__PT_R0(%r9)
mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register r15
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,sysc_do_svc
@ -1011,6 +999,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
#define SYSCALL(esa,esame,emu) .long emu
.globl sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL


@ -13,6 +13,7 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
@ -67,6 +68,35 @@ void setup_regs(void)
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
/*
* PM notifier callback for kdump
*/
static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
void *ptr)
{
switch (action) {
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
if (crashk_res.start)
crash_map_reserved_pages();
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
if (crashk_res.start)
crash_unmap_reserved_pages();
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static int __init machine_kdump_pm_init(void)
{
pm_notifier(machine_kdump_pm_cb, 0);
return 0;
}
arch_initcall(machine_kdump_pm_init);
#endif
/*


@ -377,11 +377,14 @@ static void __init setup_lowcore(void)
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
lc->kernel_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->machine_flags = S390_lowcore.machine_flags;
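Note: all three lowcore stack pointers are now pre-biased by
STACK_FRAME_OVERHEAD + sizeof(struct pt_regs), so the entry code no longer
subtracts that amount on every kernel entry (the removed ahi/aghi lines in
entry.S and entry64.S above). Rough arithmetic, with illustrative 64-bit
sizes:

        /* Assumed sizes: THREAD_SIZE = 16384, STACK_FRAME_OVERHEAD = 160;
         * the exact sizeof(struct pt_regs) is an assumption -- only the
         * pre-bias matters here.
         */
        unsigned long kernel_stack_init(unsigned long base)
        {
                return base + THREAD_SIZE
                            - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        }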


@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
lc->async_stack = pcpu->async_stack + ASYNC_SIZE
- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
struct _lowcore *lc = pcpu->lowcore;
struct thread_info *ti = task_thread_info(tsk);
lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->user_timer = ti->user_timer;
@ -810,8 +813,10 @@ void __init smp_prepare_boot_cpu(void)
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = boot_cpu_address;
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);


@ -41,6 +41,7 @@ struct page_key_data {
static struct page_key_data *page_key_data;
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;
unsigned long suspend_zero_pages;
/*
* For each page in the hibernation image one additional byte is
@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn)
return 0;
}
/*
* PM notifier callback for suspend
*/
static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
void *ptr)
{
switch (action) {
case PM_SUSPEND_PREPARE:
case PM_HIBERNATION_PREPARE:
suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
if (!suspend_zero_pages)
return NOTIFY_BAD;
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
free_pages(suspend_zero_pages, LC_ORDER);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static int __init suspend_pm_init(void)
{
pm_notifier(suspend_pm_cb, 0);
return 0;
}
arch_initcall(suspend_pm_init);
void save_processor_state(void)
{
/* swsusp_arch_suspend() actually saves all cpu register contents.


@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend)
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
/* Save prefix register contents for lowcore */
llgf %r4,__SF_EMPTY(%r15)
/* Save prefix register contents for lowcore copy */
llgf %r10,__SF_EMPTY(%r15)
/* Get pointer to save area */
lghi %r1,0x1000
@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend)
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
/* Save absolute zero pages */
larl %r2,suspend_zero_pages
lg %r2,0(%r2)
lghi %r4,0
lghi %r3,2*PAGE_SIZE
lghi %r5,2*PAGE_SIZE
1: mvcle %r2,%r4,0
jo 1b
/* Copy lowcore to absolute zero lowcore */
lghi %r2,0
lgr %r4,%r10
lghi %r3,2*PAGE_SIZE
lghi %r5,2*PAGE_SIZE
1: mvcle %r2,%r4,0
@ -248,8 +259,20 @@ restore_registers:
/* Load old stack */
lg %r15,0x2f8(%r13)
/* Save prefix register */
mvc __SF_EMPTY(4,%r15),0x318(%r13)
/* Restore absolute zero pages */
lghi %r2,0
larl %r4,suspend_zero_pages
lg %r4,0(%r4)
lghi %r3,2*PAGE_SIZE
lghi %r5,2*PAGE_SIZE
1: mvcle %r2,%r4,0
jo 1b
/* Restore prefix register */
spx 0x318(%r13)
spx __SF_EMPTY(%r15)
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
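Note: a C-level sketch of what the two mvcle loops accomplish: the lowest two
pages (the prefix/lowcore area at absolute address 0) are stashed into
suspend_zero_pages before the hibernation image is handled and copied back on
resume. The raw address-0 access below is conceptual only; the real code runs
in the special suspend/resume environment:

        /* save, before the image is written */
        memcpy((void *)suspend_zero_pages, (void *)0UL, 2 * PAGE_SIZE);
        /* ... hibernation image written / restored ... */
        /* restore, after the image is back */
        memcpy((void *)0UL, (void *)suspend_zero_pages, 2 * PAGE_SIZE);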


@ -12,49 +12,16 @@
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include "entry.h"
int show_unhandled_signals = 1;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
#endif
}
/*
* For show_trace we have three different stacks to consider:
* - the panic stack which is used if the kernel stack has overflown
* - the asynchronous interrupt stack (cpu related)
* - the synchronous kernel stack (process related)
* The stack trace can start at any of the three stacks and can potentially
* touch all of them. The order is: panic stack, async stack, sync stack.
*/
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
struct stack_frame *sf;
struct pt_regs *regs;
while (1) {
sp = sp & PSW_ADDR_INSN;
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
/* Follow the backchain. */
while (1) {
low = sp;
sp = sf->back_chain & PSW_ADDR_INSN;
if (!sp)
break;
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
low = sp;
sp = regs->gprs[15];
}
}
static void show_trace(struct task_struct *task, unsigned long *stack)
{
register unsigned long __r15 asm ("15");
unsigned long sp;
sp = (unsigned long) stack;
if (!sp)
sp = task ? task->thread.ksp : __r15;
printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
S390_lowcore.panic_stack);
#endif
sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
S390_lowcore.async_stack);
if (task)
__show_trace(sp, (unsigned long) task_stack_page(task),
(unsigned long) task_stack_page(task) + THREAD_SIZE);
else
__show_trace(sp, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
if (!task)
task = current;
debug_show_held_locks(task);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
register unsigned long * __r15 asm ("15");
unsigned long *stack;
int i;
if (!sp)
stack = task ? (unsigned long *) task->thread.ksp : __r15;
else
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
if ((i * sizeof(long) % 32) == 0)
printk("%s ", i == 0 ? "" : "\n");
printk(LONG, *stack++);
}
printk("\n");
show_trace(task, sp);
}
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
/*
* The architecture-independent dump_stack generator
*/
void dump_stack(void)
{
printk("CPU: %d %s %s %.*s\n",
task_thread_info(current)->cpu, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
current->comm, current->pid, current,
(void *) current->thread.ksp);
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
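The mask_bits() helper above isolates a PSW bit field by dividing the masked value by the lowest set bit of the mask, which ((~bits + 1) & bits) computes via the two's-complement trick. A minimal userspace sketch of the same idiom, with illustrative values:

#include <stdio.h>

/* (word & bits) / lowest_set_bit(bits) shifts the field down to bit 0 */
static unsigned long extract_field(unsigned long word, unsigned long bits)
{
	return (word & bits) / ((~bits + 1) & bits);
}

int main(void)
{
	/* field value 0x2 sits in bits 8-11 of 0x1234 */
	printf("%lu\n", extract_field(0x1234, 0x0f00));	/* prints 2 */
	return 0;
}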
void show_registers(struct pt_regs *regs)
{
char *mode;
mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %p %p",
mode, (void *) regs->psw.mask,
(void *) regs->psw.addr);
print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
printk("\n%s GPRS: " FOURLONG, mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" " FOURLONG,
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
printk(" " FOURLONG,
regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
printk(" " FOURLONG,
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
show_code(regs);
}
void show_regs(struct pt_regs *regs)
{
printk("CPU: %d %s %s %.*s\n",
task_thread_info(current)->cpu, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
current->comm, current->pid, current,
(void *) current->thread.ksp);
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!user_mode(regs))
show_trace(NULL, (unsigned long *) regs->gprs[15]);
show_last_breaking_event(regs);
}
static DEFINE_SPINLOCK(die_lock);
void die(struct pt_regs *regs, const char *str)
{
static int die_counter;
oops_enter();
lgr_info_log();
debug_stop_all();
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
print_modules();
show_regs(regs);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception: panic_on_oops");
oops_exit();
do_exit(SIGSEGV);
}
static inline void report_user_fault(struct pt_regs *regs, int signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)

View File

@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
__entry->instruction,
insn_to_mnemonic((unsigned char *)
&__entry->instruction,
__entry->insn) ?
__entry->insn, sizeof(__entry->insn)) ?
"unknown" : __entry->insn)
);

View File

@ -458,12 +458,10 @@ static int __init cmm_init(void)
if (rc)
goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
if (rc)
goto out_kthread;
return 0;
if (!IS_ERR(cmm_thread_ptr))
return 0;
out_kthread:
rc = PTR_ERR(cmm_thread_ptr);
unregister_pm_notifier(&cmm_power_notifier);
out_pm:
unregister_oom_notifier(&cmm_oom_nb);
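The reworked error path above leans on the kernel's ERR_PTR convention: kthread_run() returns either a valid task pointer or an errno encoded in the pointer itself, so the success case can return early and the failure case decodes the errno before unwinding. A rough userspace stand-in for the IS_ERR()/PTR_ERR() decoding (helper names here are illustrative, MAX_ERRNO as in the kernel):

#include <stdio.h>

#define MAX_ERRNO 4095

/* pointers in the top 4095 bytes of the address space encode errnos */
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *p)
{
	return (long)p;
}

int main(void)
{
	void *ok = &ok;			/* any real address */
	void *bad = (void *)-12L;	/* encoded -ENOMEM */

	printf("ok: %d, bad: %d (err %ld)\n",
	       is_err(ok), is_err(bad), ptr_err(bad));
	return 0;
}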

View File

@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
int fault;
trans_exc_code = regs->int_parm_long;
/* Protection exception is suppressing, decrement psw address. */
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Protection exceptions are suppressing, decrement psw address.
* The exceptions to this rule are aborted transactions; for these
* the PSW already points to the correct location.
*/
if (!(regs->int_code & 0x200))
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code

View File

@ -63,10 +63,18 @@ static unsigned long __init setup_zero_pages(void)
break;
case 0x2097: /* z10 */
case 0x2098: /* z10 */
default:
case 0x2817: /* z196 */
case 0x2818: /* z196 */
order = 2;
break;
case 0x2827: /* zEC12 */
default:
order = 5;
break;
}
/* Limit number of empty zero pages for small memory sizes */
if (order > 2 && totalram_pages <= 16384)
order = 2;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
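The switch above sizes the pool of zero-page cache synonyms per machine type; __get_free_pages() with a given order returns 2^order contiguous pages. A quick illustration of the resulting pool sizes (PAGE_SIZE assumed to be 4KB here):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* s390 PAGE_SIZE */
	unsigned int orders[] = { 2, 5 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned long pages = 1UL << orders[i];
		printf("order %u -> %lu zero pages (%lu bytes)\n",
		       orders[i], pages, pages * page_size);
	}
	return 0;
}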

View File

@ -9,31 +9,25 @@
#include <asm/pgtable.h>
#include <asm/page.h>
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
: [addr] "+a" (addr) : [skey] "d" (skey));
return addr;
}
void storage_key_init_range(unsigned long start, unsigned long end)
{
unsigned long boundary, function, size;
unsigned long boundary, size;
while (start < end) {
if (MACHINE_HAS_EDAT2) {
/* set storage keys for a 2GB frame */
function = 0x22000 | PAGE_DEFAULT_KEY;
size = 1UL << 31;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
start = pfmf(function, start);
} while (start < boundary);
continue;
}
}
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
function = 0x21000 | PAGE_DEFAULT_KEY;
size = 1UL << 20;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
start = pfmf(function, start);
start = sske_frame(start, PAGE_DEFAULT_KEY);
} while (start < boundary);
continue;
}
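The loop above rounds (start + size) down to a size-aligned boundary to decide whether a whole large frame lies inside [start, end) and can be keyed with the block-sized operation. A small sketch of that boundary arithmetic, with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x12345000UL;
	unsigned long size = 1UL << 20;	/* 1MB frame */

	/* end of the 1MB frame that contains 'start' */
	unsigned long boundary = (start + size) & ~(size - 1);

	printf("start %#lx -> frame boundary %#lx\n", start, boundary);
	return 0;
}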

View File

@ -379,78 +379,186 @@ out_unmap:
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
/*
* this function is assumed to be called with mmap_sem held
*/
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
unsigned long *table, vmaddr, segment;
struct mm_struct *mm;
struct gmap_pgtable *mp;
struct gmap_rmap *rmap;
struct vm_area_struct *vma;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
unsigned long *table;
current->thread.gmap_addr = address;
mm = gmap->mm;
/* Walk the gmap address space page table */
table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
return -EFAULT;
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
return -EFAULT;
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
return -EFAULT;
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
return table;
}
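Each level of the walk above indexes a 2048-entry table, so 11 bits of the guest address are consumed per level, at shifts 53, 42, 31 and 20. A standalone sketch printing the per-level indices for an arbitrary address:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x0000123456789000UL;	/* example guest address */
	int shifts[] = { 53, 42, 31, 20 };	/* region 1..3, then segment */

	for (int i = 0; i < 4; i++)
		printf("level %d index: %#lx\n",
		       i + 1, (addr >> shifts[i]) & 0x7ff);
	return 0;
}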
/**
* __gmap_translate - translate a guest address to a user space address
* @address: guest address
* @gmap: pointer to guest mapping meta data structure
*
* Returns user space address which corresponds to the guest address or
* -EFAULT if no such mapping exists.
* This function does not establish potentially missing page table entries.
* The mmap_sem of the mm that belongs to the address space must be held
* when this function gets called.
*/
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
unsigned long *segment_ptr, vmaddr, segment;
struct gmap_pgtable *mp;
struct page *page;
current->thread.gmap_addr = address;
segment_ptr = gmap_table_walk(address, gmap);
if (IS_ERR(segment_ptr))
return PTR_ERR(segment_ptr);
/* Convert the gmap address to an mm address. */
segment = *table;
if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
segment = *segment_ptr;
if (!(segment & _SEGMENT_ENTRY_INV)) {
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
} else if (segment & _SEGMENT_ENTRY_RO) {
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
vma = find_vma(mm, vmaddr);
if (!vma || vma->vm_start > vmaddr)
return -EFAULT;
/* Walk the parent mm page table */
pgd = pgd_offset(mm, vmaddr);
pud = pud_alloc(mm, pgd, vmaddr);
if (!pud)
return -ENOMEM;
pmd = pmd_alloc(mm, pud, vmaddr);
if (!pmd)
return -ENOMEM;
if (!pmd_present(*pmd) &&
__pte_alloc(mm, vma, pmd, vmaddr))
return -ENOMEM;
/* pmd now points to a valid segment table entry. */
rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
if (!rmap)
return -ENOMEM;
/* Link gmap segment table entry location to page table. */
page = pmd_page(*pmd);
mp = (struct gmap_pgtable *) page->index;
rmap->entry = table;
spin_lock(&mm->page_table_lock);
list_add(&rmap->list, &mp->mapper);
spin_unlock(&mm->page_table_lock);
/* Set gmap segment table entry to page table. */
*table = pmd_val(*pmd) & PAGE_MASK;
return vmaddr | (address & ~PMD_MASK);
}
return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
* gmap_translate - translate a guest address to a user space address
* @address: guest address
* @gmap: pointer to guest mapping meta data structure
*
* Returns user space address which corresponds to the guest address or
* -EFAULT if no such mapping exists.
* This function does not establish potentially missing page table entries.
*/
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
unsigned long rc;
down_read(&gmap->mm->mmap_sem);
rc = __gmap_translate(address, gmap);
up_read(&gmap->mm->mmap_sem);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
static int gmap_connect_pgtable(unsigned long segment,
unsigned long *segment_ptr,
struct gmap *gmap)
{
unsigned long vmaddr;
struct vm_area_struct *vma;
struct gmap_pgtable *mp;
struct gmap_rmap *rmap;
struct mm_struct *mm;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
mm = gmap->mm;
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
vma = find_vma(mm, vmaddr);
if (!vma || vma->vm_start > vmaddr)
return -EFAULT;
/* Walk the parent mm page table */
pgd = pgd_offset(mm, vmaddr);
pud = pud_alloc(mm, pgd, vmaddr);
if (!pud)
return -ENOMEM;
pmd = pmd_alloc(mm, pud, vmaddr);
if (!pmd)
return -ENOMEM;
if (!pmd_present(*pmd) &&
__pte_alloc(mm, vma, pmd, vmaddr))
return -ENOMEM;
/* pmd now points to a valid segment table entry. */
rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
if (!rmap)
return -ENOMEM;
/* Link gmap segment table entry location to page table. */
page = pmd_page(*pmd);
mp = (struct gmap_pgtable *) page->index;
rmap->entry = segment_ptr;
spin_lock(&mm->page_table_lock);
if (*segment_ptr == segment) {
list_add(&rmap->list, &mp->mapper);
/* Set gmap segment table entry to page table. */
*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
rmap = NULL;
}
spin_unlock(&mm->page_table_lock);
kfree(rmap);
return 0;
}
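The lock-protected compare in gmap_connect_pgtable() above is what closes the segment mapping race: the entry is only replaced if it still holds the value sampled before the allocation; otherwise the prepared rmap is freed and the caller's loop re-reads the entry. A toy mutex-based rendition of that pattern (names and values purely illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long segment_entry = 0xdeadUL;	/* stand-in invalid entry */

/* connect only if the entry still matches what the caller observed */
static int connect_entry(unsigned long seen, unsigned long new_val)
{
	void *rmap = malloc(16);	/* stand-in for the rmap allocation */
	int connected = 0;

	if (!rmap)
		return -1;
	pthread_mutex_lock(&table_lock);
	if (segment_entry == seen) {	/* nobody raced us */
		segment_entry = new_val;
		connected = 1;
		rmap = NULL;	/* handed over (kept on mp->mapper in the kernel) */
	}
	pthread_mutex_unlock(&table_lock);
	free(rmap);			/* no-op when we connected */
	return connected;
}

int main(void)
{
	int first = connect_entry(0xdeadUL, 0x1000UL);	/* 1: connects */
	int stale = connect_entry(0xdeadUL, 0x2000UL);	/* 0: entry changed */

	printf("first: %d, stale: %d\n", first, stale);
	return 0;
}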
static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
struct gmap_rmap *rmap, *next;
struct gmap_pgtable *mp;
struct page *page;
int flush;
flush = 0;
spin_lock(&mm->page_table_lock);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
*rmap->entry =
_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
list_del(&rmap->list);
kfree(rmap);
flush = 1;
}
spin_unlock(&mm->page_table_lock);
if (flush)
__tlb_flush_global();
}
/*
* this function is assumed to be called with mmap_sem held
*/
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
unsigned long *segment_ptr, segment;
struct gmap_pgtable *mp;
struct page *page;
int rc;
current->thread.gmap_addr = address;
segment_ptr = gmap_table_walk(address, gmap);
if (IS_ERR(segment_ptr))
return -EFAULT;
/* Convert the gmap address to an mm address. */
while (1) {
segment = *segment_ptr;
if (!(segment & _SEGMENT_ENTRY_INV)) {
/* Page table is present */
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
}
if (!(segment & _SEGMENT_ENTRY_RO))
/* Nothing mapped in the gmap address space. */
break;
rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
if (rc)
return rc;
}
return -EFAULT;
}
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
}
EXPORT_SYMBOL_GPL(gmap_discard);
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
struct gmap_rmap *rmap, *next;
struct gmap_pgtable *mp;
struct page *page;
int flush;
flush = 0;
spin_lock(&mm->page_table_lock);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
*rmap->entry =
_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
list_del(&rmap->list);
kfree(rmap);
flush = 1;
}
spin_unlock(&mm->page_table_lock);
if (flush)
__tlb_flush_global();
}
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
{
}
static inline void gmap_unmap_notifier(struct mm_struct *mm,
unsigned long *table)
static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
unsigned long *table)
{
}
@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
unsigned int bit, mask;
if (mm_has_pgste(mm)) {
gmap_unmap_notifier(mm, table);
gmap_disconnect_pgtable(mm, table);
return page_table_free_pgste(table);
}
/* Free 1K/2K page table fragment of a 4K page */
@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
mm = tlb->mm;
if (mm_has_pgste(mm)) {
gmap_unmap_notifier(mm, table);
gmap_disconnect_pgtable(mm, table);
table = (unsigned long *) (__pa(table) | FRAG_MASK);
tlb_remove_table(tlb, table);
return;

View File

@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
memset(addrs, 0, fp->len * sizeof(*addrs));
memset(&jit, 0, sizeof(cjit));
memset(&cjit, 0, sizeof(cjit));

View File

@ -2,5 +2,5 @@
# Makefile for the s390 PCI subsystem.
#
obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
pci_sysfs.o pci_event.o pci_debug.o
obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o

View File

@ -99,9 +99,6 @@ static int __read_mostly aisb_max;
static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;
debug_info_t *pci_debug_msg_id;
debug_info_t *pci_debug_err_id;
static inline int irq_to_msi_nr(unsigned int irq)
{
return irq & ZPCI_MSI_MASK;
@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
fib->aisb = (u64) bucket->aisb + aisb / 8;
fib->aisbo = aisb & ZPCI_MSI_MASK;
rc = mpcifc_instr(req, fib);
rc = s390pci_mod_fc(req, fib);
pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
free_page((unsigned long) fib);
@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
fib->iota = args->iota;
fib->fmb_addr = args->fmb_addr;
rc = mpcifc_instr(req, fib);
rc = s390pci_mod_fc(req, fib);
free_page((unsigned long) fib);
return rc;
}
@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
if (zdev->fmb)
return -EINVAL;
zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
if (!zdev->fmb)
return -ENOMEM;
memset(zdev->fmb, 0, sizeof(*zdev->fmb));
WARN_ON((u64) zdev->fmb & 0xf);
args.fmb_addr = virt_to_phys(zdev->fmb);
@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
u64 data;
int rc;
rc = pcilg_instr(&data, req, offset);
data = data << ((8 - len) * 8);
data = le64_to_cpu(data);
if (!rc)
rc = s390pci_load(&data, req, offset);
if (!rc) {
data = data << ((8 - len) * 8);
data = le64_to_cpu(data);
*val = (u32) data;
else
} else
*val = 0xffffffff;
return rc;
}
@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
data = cpu_to_le64(data);
data = data >> ((8 - len) * 8);
rc = pcistg_instr(data, req, offset);
rc = s390pci_store(data, req, offset);
return rc;
}
@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
int ret;
if (!zdev || devfn != ZPCI_DEVFN)
return 0;
return zpci_cfg_load(zdev, where, val, size);
ret = -ENODEV;
else
ret = zpci_cfg_load(zdev, where, val, size);
return ret;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
int ret;
if (!zdev || devfn != ZPCI_DEVFN)
return 0;
return zpci_cfg_store(zdev, where, val, size);
ret = -ENODEV;
else
ret = zpci_cfg_store(zdev, where, val, size);
return ret;
}
static struct pci_ops pci_root_ops = {
@ -474,7 +478,7 @@ scan:
}
/* enable interrupts again */
sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
/* check again to not lose initiative */
rmb();
@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
};
static void zpci_unmap_resources(struct pci_dev *pdev)
{
resource_size_t len;
int i;
for (i = 0; i < PCI_BAR_COUNT; i++) {
len = pci_resource_len(pdev, i);
if (!len)
continue;
pci_iounmap(pdev, (void *) pdev->resource[i].start);
}
};
struct zpci_dev *zpci_alloc_device(void)
{
struct zpci_dev *zdev;
@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev)
kfree(zdev);
}
/* Called on removal of pci_dev, leaves zpci and bus device */
static void zpci_remove_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
zdev->state = ZPCI_FN_STATE_CONFIGURED;
zpci_dma_exit_device(zdev);
zpci_fmb_disable_device(zdev);
zpci_sysfs_remove_device(&pdev->dev);
zpci_unmap_resources(pdev);
list_del(&zdev->entry); /* can be called from init */
zdev->pdev = NULL;
}
static void zpci_scan_devices(void)
{
struct zpci_dev *zdev;
mutex_lock(&zpci_list_lock);
list_for_each_entry(zdev, &zpci_list, entry)
if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
zpci_scan_device(zdev);
mutex_unlock(&zpci_list_lock);
}
/*
* Too late for any s390 specific setup, since interrupts must be set up
* already which requires DMA setup too and the pci scan will access the
@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
return 0;
}
void pcibios_disable_device(struct pci_dev *pdev)
{
zpci_remove_device(pdev);
pdev->sysdata = NULL;
}
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
return zpci_sysfs_add_device(&pdev->dev);
@ -789,7 +748,7 @@ static int __init zpci_irq_init(void)
spin_lock_init(&bucket->lock);
/* set summary to 1 to be called every time for the ISC */
*zpci_irq_si = 1;
sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
return 0;
out_ai:
@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
spin_unlock(&zpci_iomap_lock);
}
static int zpci_create_device_bus(struct zpci_dev *zdev)
int pcibios_add_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
zdev->pdev = pdev;
zpci_debug_init_device(zdev);
zpci_fmb_enable_device(zdev);
zpci_map_resources(zdev);
return 0;
}
static int zpci_scan_bus(struct zpci_dev *zdev)
{
struct resource *res;
LIST_HEAD(resources);
@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev)
pci_add_resource(&resources, res);
}
zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
zdev, &resources);
zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
zdev, &resources);
if (!zdev->bus)
return -EIO;
@ -959,6 +930,13 @@ out:
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
int zpci_disable_device(struct zpci_dev *zdev)
{
zpci_dma_exit_device(zdev);
return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc)
goto out;
rc = zpci_create_device_bus(zdev);
if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
rc = zpci_enable_device(zdev);
if (rc)
goto out_free;
zdev->state = ZPCI_FN_STATE_ONLINE;
}
rc = zpci_scan_bus(zdev);
if (rc)
goto out_bus;
goto out_disable;
mutex_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev)
hotplug_ops->create_slot(zdev);
mutex_unlock(&zpci_list_lock);
if (zdev->state == ZPCI_FN_STATE_STANDBY)
return 0;
rc = zpci_enable_device(zdev);
if (rc)
goto out_start;
return 0;
out_start:
mutex_lock(&zpci_list_lock);
list_del(&zdev->entry);
if (hotplug_ops)
hotplug_ops->remove_slot(zdev);
mutex_unlock(&zpci_list_lock);
out_bus:
out_disable:
if (zdev->state == ZPCI_FN_STATE_ONLINE)
zpci_disable_device(zdev);
out_free:
zpci_free_domain(zdev);
out:
return rc;
@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev)
goto out;
}
zpci_debug_init_device(zdev);
zpci_fmb_enable_device(zdev);
zpci_map_resources(zdev);
pci_bus_add_devices(zdev->bus);
/* now that pdev was added to the bus mark it as used */
zdev->state = ZPCI_FN_STATE_ONLINE;
return 0;
out:
zpci_dma_exit_device(zdev);
clp_disable_fh(zdev);
@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void)
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
unsigned int s390_pci_probe = 1;
unsigned int s390_pci_probe;
EXPORT_SYMBOL_GPL(s390_pci_probe);
char * __init pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
s390_pci_probe = 0;
if (!strcmp(str, "on")) {
s390_pci_probe = 1;
return NULL;
}
return str;
@ -1138,7 +1108,6 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;
zpci_scan_devices();
return 0;
out_find:

View File

@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
/*
@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
struct zpci_dev *zdev;
int rc;
zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
zdev = zpci_alloc_device();
if (IS_ERR(zdev))
return PTR_ERR(zdev);
@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
*fh = rrb->response.fh;
else {
pr_err("Set PCI FN failed with response: %x cc: %d\n",
rrb->response.hdr.rsp, rc);
zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
rrb->response.hdr.rsp);
rc = -EIO;
}
clp_free_block(rrb);
@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
if (!rc)
/* Success -> store enabled handle in zdev */
zdev->fh = fh;
zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
if (!rc)
/* Success -> store disabled handle in zdev */
zdev->fh = fh;
else
dev_err(&zdev->pdev->dev,
"Failed to disable fn handle: 0x%x\n", fh);
zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}

View File

@ -11,12 +11,17 @@
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/debug.h>
#include <asm/pci_dma.h>
static struct dentry *debugfs_root;
debug_info_t *pci_debug_msg_id;
EXPORT_SYMBOL_GPL(pci_debug_msg_id);
debug_info_t *pci_debug_err_id;
EXPORT_SYMBOL_GPL(pci_debug_err_id);
static char *pci_perf_names[] = {
/* hardware counters */
@ -168,7 +173,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
debug_set_level(pci_debug_msg_id, 3);
zpci_dbg("Debug view initialized\n");
/* error log */
pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
@ -176,7 +180,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
debug_set_level(pci_debug_err_id, 6);
zpci_err("Debug view initialized\n");
debugfs_root = debugfs_create_dir("pci", NULL);
return 0;

View File

@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
* needs to be redone!
*/
goto no_refresh;
rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
nr_pages * PAGE_SIZE);
rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
nr_pages * PAGE_SIZE);
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
int flags = ZPCI_PTE_VALID;
dma_addr_t dma_addr;
WARN_ON_ONCE(offset > PAGE_SIZE);
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
return dma_addr + offset;
return dma_addr + (offset & ~PAGE_MASK);
}
out_free:
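One plausible reading of the one-line fix above: the iommu mapping already starts at the page containing pa (nr_pages is computed from pa, size and offset), so for offsets beyond PAGE_SIZE only the sub-page remainder may be added back to the page-aligned dma address. Illustrative arithmetic:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long dma_addr = 0x80000000UL;	/* start of the mapped range */
	unsigned long offset = 5000UL;		/* > PAGE_SIZE */

	/* the old code added the full offset, overshooting by a page */
	printf("old: %#lx  fixed: %#lx\n",
	       dma_addr + offset, dma_addr + (offset & ~PAGE_MASK));
	return 0;
}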

arch/s390/pci/pci_insn.c (new file, 202 lines)
View File

@ -0,0 +1,202 @@
/*
* s390 specific pci instructions
*
* Copyright IBM Corp. 2013
*/
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <asm/pci_insn.h>
#include <asm/processor.h>
#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
u8 cc;
asm volatile (
" .insn rxy,0xe300000000d0,%[req],%[fib]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
: : "cc");
*status = req >> 24 & 0xff;
return cc;
}
int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
{
u8 cc, status;
do {
cc = __mpcifc(req, fib, &status);
if (cc == 2)
msleep(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
__func__, cc, status);
return (cc) ? -EIO : 0;
}
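All the wrappers in this new file share the retry discipline visible in s390pci_mod_fc() above: condition code 2 means the function is busy, so the instruction is reissued after a short delay, and any other non-zero cc becomes -EIO. A generic standalone sketch of that loop (the fake issue_insn() below is purely illustrative):

#include <stdio.h>
#include <errno.h>

static int issue_insn(int *attempts)
{
	/* pretend the instruction reports busy twice, then succeeds */
	return (*attempts)++ < 2 ? 2 : 0;
}

static int retry_busy(void)
{
	int attempts = 0;
	int cc;

	do {
		cc = issue_insn(&attempts);
		/* a real caller would udelay()/msleep() here */
	} while (cc == 2);

	return cc ? -EIO : 0;
}

int main(void)
{
	printf("rc=%d\n", retry_busy());	/* rc=0 after two retries */
	return 0;
}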
/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
register u64 __addr asm("2") = addr;
register u64 __range asm("3") = range;
u8 cc;
asm volatile (
" .insn rre,0xb9d30000,%[fn],%[addr]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=d" (cc), [fn] "+d" (fn)
: [addr] "d" (__addr), "d" (__range)
: "cc");
*status = fn >> 24 & 0xff;
return cc;
}
int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
{
u8 cc, status;
do {
cc = __rpcit(fn, addr, range, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
__func__, cc, status, addr, range);
return (cc) ? -EIO : 0;
}
/* Set Interruption Controls */
void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{
asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
}
/* PCI Load */
static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
int cc = -ENXIO;
u64 __data;
asm volatile (
" .insn rre,0xb9d20000,%[data],%[req]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
: "d" (__offset)
: "cc");
*status = __req >> 24 & 0xff;
if (!cc)
*data = __data;
return cc;
}
int s390pci_load(u64 *data, u64 req, u64 offset)
{
u8 status;
int cc;
do {
cc = __pcilg(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(s390pci_load);
/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
register u64 __req asm("2") = req;
register u64 __offset asm("3") = offset;
int cc = -ENXIO;
asm volatile (
" .insn rre,0xb9d00000,%[data],%[req]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [req] "+d" (__req)
: "d" (__offset), [data] "d" (data)
: "cc");
*status = __req >> 24 & 0xff;
return cc;
}
int s390pci_store(u64 data, u64 req, u64 offset)
{
u8 status;
int cc;
do {
cc = __pcistg(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(s390pci_store);
/* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
int cc = -ENXIO;
asm volatile (
" .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+d" (cc), [req] "+d" (req)
: [offset] "d" (offset), [data] "Q" (*data)
: "cc");
*status = req >> 24 & 0xff;
return cc;
}
int s390pci_store_block(const u64 *data, u64 req, u64 offset)
{
u8 status;
int cc;
do {
cc = __pcistb(data, req, offset, &status);
if (cc == 2)
udelay(ZPCI_INSN_BUSY_DELAY);
} while (cc == 2);
if (cc)
printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
__func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(s390pci_store_block);

View File

@ -18,8 +18,9 @@
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
static unsigned int msihash_shift = 6;
#define msi_hashfn(nr) hash_long(nr, msihash_shift)
static const unsigned int msi_hash_bits = 8;
#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
static DEFINE_SPINLOCK(msi_map_lock);
@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
map->irq = nr;
map->msi = msi;
zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
INIT_HLIST_NODE(&map->msi_chain);
pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
__func__, nr, msi_hashfn(nr));
@ -125,11 +127,11 @@ int __init zpci_msihash_init(void)
{
unsigned int i;
msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
if (!msi_hash)
return -ENOMEM;
for (i = 0; i < (1U << msihash_shift); i++)
for (i = 0; i < MSI_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&msi_hash[i]);
return 0;
}
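The fix above derives the allocation size, the init-loop bound and the hash width from one constant; previously the table was allocated with a literal 256 while hash_long() and the loop used 1 << 6 buckets. A compact sketch of keeping all three in sync (the hash below is a stand-in, not the kernel's hash_long(), and assumes a 64-bit unsigned long):

#include <stdio.h>

#define HASH_BITS 8
#define HASH_BUCKETS (1U << HASH_BITS)

/* toy multiplicative hash folded to HASH_BITS bits */
static unsigned int hashfn(unsigned long nr)
{
	return (unsigned int)((nr * 0x9E3779B97F4A7C15UL) >> (64 - HASH_BITS));
}

int main(void)
{
	unsigned int buckets[HASH_BUCKETS] = { 0 };	/* sized by the same constant */

	buckets[hashfn(42)]++;
	printf("irq 42 -> bucket %u of %u\n", hashfn(42), HASH_BUCKETS);
	return 0;
}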

View File

@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/init.h>
#include <asm/pci_debug.h>
#include <asm/sclp.h>
#define SLOT_NAME_SIZE 10
@ -49,6 +50,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return -EIO;
rc = sclp_pci_configure(slot->zdev->fid);
zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, rc);
if (!rc) {
slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
/* automatically scan the device after it was configured */
@ -66,16 +68,16 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
if (!zpci_fn_configured(slot->zdev->state))
return -EIO;
rc = zpci_disable_device(slot->zdev);
if (rc)
return rc;
/* TODO: we rely on the user to unbind/remove the device, is that plausible
* or do we need to trigger that here?
*/
rc = sclp_pci_deconfigure(slot->zdev->fid);
if (!rc) {
/* Fixme: better call List-PCI to find the disabled FH
for the FID since the FH should be opaque... */
slot->zdev->fh &= 0x7fffffff;
zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, rc);
if (!rc)
slot->zdev->state = ZPCI_FN_STATE_STANDBY;
}
return rc;
}

View File

@ -246,7 +246,7 @@ static struct dentry *dasd_debugfs_setup(const char *name,
static int dasd_state_known_to_basic(struct dasd_device *device)
{
struct dasd_block *block = device->block;
int rc;
int rc = 0;
/* Allocate and register gendisk structure. */
if (block) {
@ -273,7 +273,8 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
device->state = DASD_STATE_BASIC;
return 0;
return rc;
}
/*
@ -282,6 +283,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
static int dasd_state_basic_to_known(struct dasd_device *device)
{
int rc;
if (device->block) {
dasd_profile_exit(&device->block->profile);
if (device->block->debugfs_dentry)
@ -332,8 +334,10 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
if (block->base->discipline->do_analysis != NULL)
rc = block->base->discipline->do_analysis(block);
if (rc) {
if (rc != -EAGAIN)
if (rc != -EAGAIN) {
device->state = DASD_STATE_UNFMT;
goto out;
}
return rc;
}
dasd_setup_queue(block);
@ -341,11 +345,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
block->blocks << block->s2b_shift);
device->state = DASD_STATE_READY;
rc = dasd_scan_partitions(block);
if (rc)
if (rc) {
device->state = DASD_STATE_BASIC;
return rc;
}
} else {
device->state = DASD_STATE_READY;
}
out:
if (device->discipline->basic_to_ready)
rc = device->discipline->basic_to_ready(device);
return rc;
}
@ -368,6 +377,11 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
{
int rc;
if (device->discipline->ready_to_basic) {
rc = device->discipline->ready_to_basic(device);
if (rc)
return rc;
}
device->state = DASD_STATE_BASIC;
if (device->block) {
struct dasd_block *block = device->block;
@ -402,16 +416,10 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device)
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
int rc;
struct gendisk *disk;
struct disk_part_iter piter;
struct hd_struct *part;
if (device->discipline->ready_to_online) {
rc = device->discipline->ready_to_online(device);
if (rc)
return rc;
}
device->state = DASD_STATE_ONLINE;
if (device->block) {
dasd_schedule_block_bh(device->block);
@ -444,6 +452,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
if (rc)
return rc;
}
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
disk = device->block->bdev->bd_disk;
@ -2223,6 +2232,77 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
return rc;
}
static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
{
struct dasd_ccw_req *cqr;
list_for_each_entry(cqr, ccw_queue, blocklist) {
if (cqr->callback_data != DASD_SLEEPON_END_TAG)
return 0;
}
return 1;
}
static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
struct dasd_device *device;
int rc;
struct dasd_ccw_req *cqr, *n;
retry:
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
device = cqr->startdev;
if (cqr->status != DASD_CQR_FILLED) /* could be failed */
continue;
if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EPERM;
continue;
}
/* Non-temporary stop condition will trigger fail fast */
if (device->stopped & ~DASD_STOPPED_PENDING &&
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
!dasd_eer_enabled(device)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EAGAIN;
continue;
}
/* Don't try to start requests if device is stopped */
if (interruptible) {
rc = wait_event_interruptible(
generic_waitq, !device->stopped);
if (rc == -ERESTARTSYS) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = rc;
continue;
}
} else
wait_event(generic_waitq, !(device->stopped));
if (!cqr->callback)
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = DASD_SLEEPON_START_TAG;
dasd_add_request_tail(cqr);
}
wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
rc = 0;
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
if (__dasd_sleep_on_erp(cqr))
rc = 1;
}
if (rc)
goto retry;
return 0;
}
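The speedup idea above: start every request on the list, then block once until _wait_for_wakeup_queue() finds the END tag on all of them, instead of sleeping once per request. A minimal model of the all-done predicate:

#include <stdio.h>

enum tag { TAG_START, TAG_END };

static int all_done(const enum tag *tags, int n)
{
	for (int i = 0; i < n; i++)
		if (tags[i] != TAG_END)
			return 0;
	return 1;
}

int main(void)
{
	enum tag tags[3] = { TAG_START, TAG_START, TAG_START };

	printf("done? %d\n", all_done(tags, 3));	/* 0 */
	/* interrupt callbacks would flip the tags in any order */
	tags[2] = tags[0] = tags[1] = TAG_END;
	printf("done? %d\n", all_done(tags, 3));	/* 1 */
	return 0;
}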
/*
* Queue a request to the tail of the device ccw_queue and wait for
* its completion.
@ -2232,6 +2312,15 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
return _dasd_sleep_on(cqr, 0);
}
/*
* Start requests from a ccw_queue and wait for their completion.
*/
int dasd_sleep_on_queue(struct list_head *ccw_queue)
{
return _dasd_sleep_on_queue(ccw_queue, 0);
}
EXPORT_SYMBOL(dasd_sleep_on_queue);
/*
* Queue a request to the tail of the device ccw_queue and wait
* interruptible for its completion.
@ -2662,6 +2751,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
wake_up(&dasd_flush_wq);
}
/*
 * Requeue a request back to the block request queue.
 * This only works for block device requests.
 */
static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
struct dasd_block *block = cqr->block;
struct request *req;
unsigned long flags;
if (!block)
return -EINVAL;
spin_lock_irqsave(&block->queue_lock, flags);
req = (struct request *) cqr->callback_data;
blk_requeue_request(block->request_queue, req);
spin_unlock_irqrestore(&block->queue_lock, flags);
return 0;
}
/*
* Go through all request on the dasd_block request queue, cancel them
* on the respective dasd_device, and return them to the generic
@ -3380,10 +3489,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
struct dasd_ccw_req *cqr, *n;
int rc;
struct list_head freeze_queue;
struct dasd_device *device = dasd_device_from_cdev(cdev);
struct list_head freeze_queue;
struct dasd_ccw_req *cqr, *n;
struct dasd_ccw_req *refers;
int rc;
if (IS_ERR(device))
return PTR_ERR(device);
@ -3396,7 +3506,8 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
/* disallow new I/O */
dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
/* clear active requests */
/* clear active requests and requeue them to block layer if possible */
INIT_LIST_HEAD(&freeze_queue);
spin_lock_irq(get_ccwdev_lock(cdev));
rc = 0;
@ -3416,7 +3527,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
}
list_move_tail(&cqr->devlist, &freeze_queue);
}
spin_unlock_irq(get_ccwdev_lock(cdev));
list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
@ -3424,12 +3534,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
(cqr->status != DASD_CQR_CLEAR_PENDING));
if (cqr->status == DASD_CQR_CLEARED)
cqr->status = DASD_CQR_QUEUED;
}
/* move freeze_queue to start of the ccw_queue */
spin_lock_irq(get_ccwdev_lock(cdev));
list_splice_tail(&freeze_queue, &device->ccw_queue);
spin_unlock_irq(get_ccwdev_lock(cdev));
/* requeueing requests to the block layer only works
   for block device requests */
if (_dasd_requeue_request(cqr))
continue;
/* remove requests from device and block queue */
list_del_init(&cqr->devlist);
while (cqr->refers != NULL) {
refers = cqr->refers;
/* remove the request from the block queue */
list_del(&cqr->blocklist);
/* free the finished erp request */
dasd_free_erp_request(cqr, cqr->memdev);
cqr = refers;
}
if (cqr->block)
list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
cqr, (struct request *) cqr->callback_data);
}
/*
 * if requests remain then they are internal requests
 * and go back to the device queue
 */
if (!list_empty(&freeze_queue)) {
/* move freeze_queue to start of the ccw_queue */
spin_lock_irq(get_ccwdev_lock(cdev));
list_splice_tail(&freeze_queue, &device->ccw_queue);
spin_unlock_irq(get_ccwdev_lock(cdev));
}
dasd_put_device(device);
return rc;
}

View File

@ -410,8 +410,7 @@ dasd_add_busid(const char *bus_id, int features)
struct dasd_devmap *devmap, *new, *tmp;
int hash;
new = (struct dasd_devmap *)
kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
new = kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
spin_lock(&dasd_devmap_lock);

View File

@ -2022,7 +2022,7 @@ static int dasd_eckd_do_analysis(struct dasd_block *block)
return dasd_eckd_end_analysis(block);
}
static int dasd_eckd_ready_to_online(struct dasd_device *device)
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
return dasd_alias_add_device(device);
};
@ -2031,6 +2031,11 @@ static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
cancel_work_sync(&device->reload_device);
cancel_work_sync(&device->kick_validate);
return 0;
};
static int dasd_eckd_ready_to_basic(struct dasd_device *device)
{
return dasd_alias_remove_device(device);
};
@ -2050,45 +2055,34 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
}
static struct dasd_ccw_req *
dasd_eckd_format_device(struct dasd_device * device,
struct format_data_t * fdata)
dasd_eckd_build_format(struct dasd_device *base,
struct format_data_t *fdata)
{
struct dasd_eckd_private *private;
struct dasd_eckd_private *base_priv;
struct dasd_eckd_private *start_priv;
struct dasd_device *startdev;
struct dasd_ccw_req *fcp;
struct eckd_count *ect;
struct ch_t address;
struct ccw1 *ccw;
void *data;
int rpt;
struct ch_t address;
int cplength, datasize;
int i;
int i, j;
int intensity = 0;
int r0_perm;
int nr_tracks;
private = (struct dasd_eckd_private *) device->private;
rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
set_ch_t(&address,
fdata->start_unit / private->rdc_data.trk_per_cyl,
fdata->start_unit % private->rdc_data.trk_per_cyl);
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
/* Sanity checks. */
if (fdata->start_unit >=
(private->real_cyl * private->rdc_data.trk_per_cyl)) {
dev_warn(&device->cdev->dev, "Start track number %d used in "
"formatting is too big\n", fdata->start_unit);
return ERR_PTR(-EINVAL);
}
if (fdata->start_unit > fdata->stop_unit) {
dev_warn(&device->cdev->dev, "Start track %d used in "
"formatting exceeds end track\n", fdata->start_unit);
return ERR_PTR(-EINVAL);
}
if (dasd_check_blocksize(fdata->blksize) != 0) {
dev_warn(&device->cdev->dev,
"The DASD cannot be formatted with block size %d\n",
fdata->blksize);
return ERR_PTR(-EINVAL);
}
start_priv = (struct dasd_eckd_private *) startdev->private;
base_priv = (struct dasd_eckd_private *) base->private;
rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
/*
* fdata->intensity is a bit string that tells us what to do:
@ -2106,149 +2100,282 @@ dasd_eckd_format_device(struct dasd_device * device,
r0_perm = 1;
intensity = fdata->intensity;
}
switch (intensity) {
case 0x00: /* Normal format */
case 0x08: /* Normal format, use cdl. */
cplength = 2 + rpt;
datasize = sizeof(struct DE_eckd_data) +
cplength = 2 + (rpt*nr_tracks);
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
rpt * sizeof(struct eckd_count);
rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x01: /* Write record zero and format track. */
case 0x09: /* Write record zero and format track, use cdl. */
cplength = 3 + rpt;
datasize = sizeof(struct DE_eckd_data) +
cplength = 2 + rpt * nr_tracks;
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count) +
rpt * sizeof(struct eckd_count);
rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x04: /* Invalidate track. */
case 0x0c: /* Invalidate track, use cdl. */
cplength = 3;
datasize = sizeof(struct DE_eckd_data) +
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count);
break;
default:
dev_warn(&device->cdev->dev, "An I/O control call used "
"incorrect flags 0x%x\n", fdata->intensity);
dev_warn(&startdev->cdev->dev,
"An I/O control call used incorrect flags 0x%x\n",
fdata->intensity);
return ERR_PTR(-EINVAL);
}
/* Allocate the format ccw request. */
fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
datasize, startdev);
if (IS_ERR(fcp))
return fcp;
start_priv->count++;
data = fcp->data;
ccw = fcp->cpaddr;
switch (intensity & ~0x08) {
case 0x00: /* Normal format. */
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->start_unit,
DASD_ECKD_CCW_WRITE_CKD, device);
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, base, startdev);
/* grant subsystem permission to format R0 */
if (r0_perm)
((struct DE_eckd_data *)data)->ga_extended |= 0x04;
data += sizeof(struct DE_eckd_data);
((struct PFX_eckd_data *)data)
->define_extent.ga_extended |= 0x04;
data += sizeof(struct PFX_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, rpt,
DASD_ECKD_CCW_WRITE_CKD, device,
fdata->start_unit, 0, rpt*nr_tracks,
DASD_ECKD_CCW_WRITE_CKD, base,
fdata->blksize);
data += sizeof(struct LO_eckd_data);
break;
case 0x01: /* Write record zero + format track. */
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->start_unit,
DASD_ECKD_CCW_WRITE_RECORD_ZERO,
device);
data += sizeof(struct DE_eckd_data);
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_RECORD_ZERO,
base, startdev);
data += sizeof(struct PFX_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, rpt + 1,
DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
device->block->bp_block);
fdata->start_unit, 0, rpt * nr_tracks + 1,
DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
base->block->bp_block);
data += sizeof(struct LO_eckd_data);
break;
case 0x04: /* Invalidate track. */
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->start_unit,
DASD_ECKD_CCW_WRITE_CKD, device);
data += sizeof(struct DE_eckd_data);
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, base, startdev);
data += sizeof(struct PFX_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, 1,
DASD_ECKD_CCW_WRITE_CKD, device, 8);
DASD_ECKD_CCW_WRITE_CKD, base, 8);
data += sizeof(struct LO_eckd_data);
break;
}
if (intensity & 0x01) { /* write record zero */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = 0;
ect->kl = 0;
ect->dl = 8;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)(addr_t) ect;
ccw++;
}
if ((intensity & ~0x08) & 0x04) { /* erase track */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = 1;
ect->kl = 0;
ect->dl = 0;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)(addr_t) ect;
} else { /* write remaining records */
for (i = 0; i < rpt; i++) {
for (j = 0; j < nr_tracks; j++) {
/* calculate cylinder and head for the current track */
set_ch_t(&address,
(fdata->start_unit + j) /
base_priv->rdc_data.trk_per_cyl,
(fdata->start_unit + j) %
base_priv->rdc_data.trk_per_cyl);
if (intensity & 0x01) { /* write record zero */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = i + 1;
ect->record = 0;
ect->kl = 0;
ect->dl = fdata->blksize;
/* Check for special tracks 0-1 when formatting CDL */
if ((intensity & 0x08) &&
fdata->start_unit == 0) {
if (i < 3) {
ect->kl = 4;
ect->dl = sizes_trk0[i] - 4;
}
}
if ((intensity & 0x08) &&
fdata->start_unit == 1) {
ect->kl = 44;
ect->dl = LABEL_SIZE - 44;
}
ect->dl = 8;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)(addr_t) ect;
ccw++;
}
if ((intensity & ~0x08) & 0x04) { /* erase track */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = 1;
ect->kl = 0;
ect->dl = 0;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)(addr_t) ect;
} else { /* write remaining records */
for (i = 0; i < rpt; i++) {
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = i + 1;
ect->kl = 0;
ect->dl = fdata->blksize;
/*
* Check for special tracks 0-1
* when formatting CDL
*/
if ((intensity & 0x08) &&
fdata->start_unit == 0) {
if (i < 3) {
ect->kl = 4;
ect->dl = sizes_trk0[i] - 4;
}
}
if ((intensity & 0x08) &&
fdata->start_unit == 1) {
ect->kl = 44;
ect->dl = LABEL_SIZE - 44;
}
ccw[-1].flags |= CCW_FLAG_CC;
if (i != 0 || j == 0)
ccw->cmd_code =
DASD_ECKD_CCW_WRITE_CKD;
else
ccw->cmd_code =
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)(addr_t) ect;
ccw++;
}
}
}
fcp->startdev = device;
fcp->memdev = device;
fcp->startdev = startdev;
fcp->memdev = startdev;
fcp->retries = 256;
fcp->expires = startdev->default_expires * HZ;
fcp->buildclk = get_tod_clock();
fcp->status = DASD_CQR_FILLED;
return fcp;
}
static int
dasd_eckd_format_device(struct dasd_device *base,
struct format_data_t *fdata)
{
struct dasd_ccw_req *cqr, *n;
struct dasd_block *block;
struct dasd_eckd_private *private;
struct list_head format_queue;
struct dasd_device *device;
int old_stop, format_step;
int step, rc = 0;
block = base->block;
private = (struct dasd_eckd_private *) base->private;
/* Sanity checks. */
if (fdata->start_unit >=
(private->real_cyl * private->rdc_data.trk_per_cyl)) {
dev_warn(&base->cdev->dev,
"Start track number %u used in formatting is too big\n",
fdata->start_unit);
return -EINVAL;
}
if (fdata->stop_unit >=
(private->real_cyl * private->rdc_data.trk_per_cyl)) {
dev_warn(&base->cdev->dev,
"Stop track number %u used in formatting is too big\n",
fdata->stop_unit);
return -EINVAL;
}
if (fdata->start_unit > fdata->stop_unit) {
dev_warn(&base->cdev->dev,
"Start track %u used in formatting exceeds end track\n",
fdata->start_unit);
return -EINVAL;
}
if (dasd_check_blocksize(fdata->blksize) != 0) {
dev_warn(&base->cdev->dev,
"The DASD cannot be formatted with block size %u\n",
fdata->blksize);
return -EINVAL;
}
INIT_LIST_HEAD(&format_queue);
old_stop = fdata->stop_unit;
while (fdata->start_unit <= 1) {
fdata->stop_unit = fdata->start_unit;
cqr = dasd_eckd_build_format(base, fdata);
list_add(&cqr->blocklist, &format_queue);
fdata->stop_unit = old_stop;
fdata->start_unit++;
if (fdata->start_unit > fdata->stop_unit)
goto sleep;
}
retry:
format_step = 255 / recs_per_track(&private->rdc_data, 0,
fdata->blksize);
while (fdata->start_unit <= old_stop) {
step = fdata->stop_unit - fdata->start_unit + 1;
if (step > format_step)
fdata->stop_unit = fdata->start_unit + format_step - 1;
cqr = dasd_eckd_build_format(base, fdata);
if (IS_ERR(cqr)) {
if (PTR_ERR(cqr) == -ENOMEM) {
/*
* not enough memory available
* go to out and start requests
* retry after first requests were finished
*/
fdata->stop_unit = old_stop;
goto sleep;
} else
return PTR_ERR(cqr);
}
list_add(&cqr->blocklist, &format_queue);
fdata->start_unit = fdata->stop_unit + 1;
fdata->stop_unit = old_stop;
}
sleep:
dasd_sleep_on_queue(&format_queue);
list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
device = cqr->startdev;
private = (struct dasd_eckd_private *) device->private;
if (cqr->status == DASD_CQR_FAILED)
rc = -EIO;
list_del_init(&cqr->blocklist);
dasd_sfree_request(cqr, device);
private->count--;
}
/*
* in case of ENOMEM we need to retry after
* first requests are finished
*/
if (fdata->start_unit <= fdata->stop_unit)
goto retry;
return rc;
}
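The retry loop above chops the requested track range into chunks of 255 / recs_per_track tracks, presumably to keep each channel program within the 255-CCW ceiling, while tracks 0 and 1 are still built one at a time for CDL. A sketch of the chunking with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int start = 2, stop = 100;	/* tracks 0/1 handled separately */
	unsigned int recs_per_track = 12;	/* illustrative value */
	unsigned int format_step = 255 / recs_per_track;	/* 21 tracks */

	while (start <= stop) {
		unsigned int end = start + format_step - 1;

		if (end > stop)
			end = stop;
		printf("format request for tracks %u..%u\n", start, end);
		start = end + 1;
	}
	return 0;
}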
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
cqr->status = DASD_CQR_FILLED;
@ -4305,8 +4432,9 @@ static struct dasd_discipline dasd_eckd_discipline = {
.uncheck_device = dasd_eckd_uncheck_device,
.do_analysis = dasd_eckd_do_analysis,
.verify_path = dasd_eckd_verify_path,
.ready_to_online = dasd_eckd_ready_to_online,
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.ready_to_basic = dasd_eckd_ready_to_basic,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,

View File

@ -300,10 +300,11 @@ struct dasd_discipline {
* Last things to do when a device is set online, and first things
* when it is set offline.
*/
int (*ready_to_online) (struct dasd_device *);
int (*basic_to_ready) (struct dasd_device *);
int (*online_to_ready) (struct dasd_device *);
int (*ready_to_basic) (struct dasd_device *);
/*
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
* term_IO cancels it (e.g. in case of a timeout). format_device
@ -317,8 +318,8 @@ struct dasd_discipline {
int (*start_IO) (struct dasd_ccw_req *);
int (*term_IO) (struct dasd_ccw_req *);
void (*handle_terminated_request) (struct dasd_ccw_req *);
struct dasd_ccw_req *(*format_device) (struct dasd_device *,
struct format_data_t *);
int (*format_device) (struct dasd_device *,
struct format_data_t *);
int (*free_cp) (struct dasd_ccw_req *, struct request *);
/*
@ -672,6 +673,7 @@ int dasd_term_IO(struct dasd_ccw_req *);
void dasd_schedule_device_bh(struct dasd_device *);
void dasd_schedule_block_bh(struct dasd_block *);
int dasd_sleep_on(struct dasd_ccw_req *);
int dasd_sleep_on_queue(struct list_head *);
int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
void dasd_device_set_timer(struct dasd_device *, int);

View File

@ -143,12 +143,12 @@ static int dasd_ioctl_resume(struct dasd_block *block)
/*
* performs formatting of _device_ according to _fdata_
* Note: The discipline's format_function is assumed to deliver formatting
* commands to format a single unit of the device. In terms of the ECKD
* devices this means CCWs are generated to format a single track.
* commands to format multiple units of the device. In terms of the ECKD
* devices this means CCWs are generated to format multiple tracks.
*/
static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
static int
dasd_format(struct dasd_block *block, struct format_data_t *fdata)
{
struct dasd_ccw_req *cqr;
struct dasd_device *base;
int rc;
@ -157,8 +157,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
pr_warning("%s: The DASD cannot be formatted while it is "
"enabled\n", dev_name(&base->cdev->dev));
pr_warn("%s: The DASD cannot be formatted while it is enabled\n",
dev_name(&base->cdev->dev));
return -EBUSY;
}
@ -178,21 +178,10 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
bdput(bdev);
}
while (fdata->start_unit <= fdata->stop_unit) {
cqr = base->discipline->format_device(base, fdata);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
rc = dasd_sleep_on_interruptible(cqr);
dasd_sfree_request(cqr, cqr->memdev);
if (rc) {
if (rc != -ERESTARTSYS)
pr_err("%s: Formatting unit %d failed with "
"rc=%d\n", dev_name(&base->cdev->dev),
fdata->start_unit, rc);
return rc;
}
fdata->start_unit++;
}
rc = base->discipline->format_device(base, fdata);
if (rc)
return rc;
return 0;
}
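The format loop moves out of the ioctl path: format_device() now takes the whole start_unit..stop_unit range, builds the requests itself and returns an int. A toy model of the new division of labor (plain C with reduced types, not the driver code):

#include <stdio.h>

struct format_data { int start_unit, stop_unit; };

/* new-style hook: handles the whole range in one call */
static int format_device(struct format_data *fdata)
{
	printf("format units %d..%d as one chain of requests\n",
	       fdata->start_unit, fdata->stop_unit);
	return 0;
}

int main(void)
{
	struct format_data fdata = { .start_unit = 0, .stop_unit = 9 };

	/* old style: loop here, one request + sleep per unit;
	 * new style: one call, the discipline batches the units */
	return format_device(&fdata);
}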

View File

@ -465,7 +465,7 @@ static int __init scm_blk_init(void)
scm_major = ret;
ret = scm_alloc_rqs(nr_requests);
if (ret)
goto out_unreg;
goto out_free;
scm_debug = debug_register("scm_log", 16, 1, 16);
if (!scm_debug) {
@ -486,7 +486,6 @@ out_dbf:
debug_unregister(scm_debug);
out_free:
scm_free_rqs();
out_unreg:
unregister_blkdev(scm_major, "scm");
out:
return ret;
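The error path now follows the usual goto-unwind discipline: a failure jumps to the label that releases everything set up so far, in reverse order, so a partially successful scm_alloc_rqs() no longer leaks its requests. A reduced sketch of the pattern (hypothetical step names, not the driver's):

#include <stdlib.h>

static int init(void)
{
	void *a, *b;
	int ret;

	a = malloc(16);			/* step 1 */
	if (!a) {
		ret = -1;
		goto out;
	}
	b = malloc(16);			/* step 2 */
	if (!b) {
		ret = -2;
		goto out_free_a;	/* undo step 1 only */
	}
	free(b);			/* success path */
	free(a);
	return 0;

out_free_a:
	free(a);
out:
	return ret;
}

int main(void)
{
	return init();
}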

View File

@ -223,6 +223,8 @@ void scm_cluster_request_irq(struct scm_request *scmrq)
bool scm_cluster_size_valid(void)
{
return write_cluster_size == 0 || write_cluster_size == 32 ||
write_cluster_size == 64 || write_cluster_size == 128;
if (write_cluster_size == 1 || write_cluster_size > 128)
return false;
return !(write_cluster_size & (write_cluster_size - 1));
}
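The open-coded whitelist becomes the standard power-of-two test: a nonzero x is a power of two iff (x & (x - 1)) == 0, with guards keeping 0 valid (clustering off) and rejecting 1 and anything above 128. A quick userspace check of the predicate:

#include <stdbool.h>
#include <stdio.h>

static bool cluster_size_valid(unsigned int n)
{
	if (n == 0)			/* clustering disabled */
		return true;
	if (n == 1 || n > 128)
		return false;
	return !(n & (n - 1));		/* power-of-two test */
}

int main(void)
{
	unsigned int tests[] = { 0, 1, 2, 16, 32, 33, 64, 128, 256 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%3u -> %s\n", tests[i],
		       cluster_size_valid(tests[i]) ? "valid" : "invalid");
	return 0;
}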

View File

@ -502,7 +502,7 @@ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
raw3215_try_io(raw);
raw->flags &= ~RAW3215_FLUSHING;
#ifdef CONFIG_TN3215_CONSOLE
wait_cons_dev();
ccw_device_wait_idle(raw->cdev);
#endif
/* Enough room freed up ? */
if (RAW3215_BUFFER_SIZE - raw->count >= length)
@ -858,7 +858,7 @@ static void con3215_flush(void)
raw = raw3215[0]; /* console 3215 is the first one */
if (raw->port.flags & ASYNC_SUSPENDED)
/* The console is still frozen for suspend. */
if (ccw_device_force_console())
if (ccw_device_force_console(raw->cdev))
/* Forcing didn't work, no panic message .. */
return;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);

View File

@ -174,8 +174,7 @@ static void mon_free_mem(struct mon_private *monpriv)
int i;
for (i = 0; i < MON_MSGLIM; i++)
if (monpriv->msg_array[i])
kfree(monpriv->msg_array[i]);
kfree(monpriv->msg_array[i]);
kfree(monpriv);
}

View File

@ -796,7 +796,7 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
do {
__raw3270_reset_device(rp);
while (!raw3270_state_final(rp)) {
wait_cons_dev();
ccw_device_wait_idle(rp->cdev);
barrier();
}
} while (rp->state != RAW3270_STATE_READY);
@ -810,7 +810,7 @@ raw3270_wait_cons_dev(struct raw3270 *rp)
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
wait_cons_dev();
ccw_device_wait_idle(rp->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
@ -1274,7 +1274,7 @@ void raw3270_pm_unfreeze(struct raw3270_view *view)
rp = view->dev;
if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
ccw_device_force_console();
ccw_device_force_console(rp->cdev);
#endif
}

View File

@ -561,6 +561,8 @@ static void __init sclp_add_standby_memory(void)
add_memory_merged(0);
}
#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS)
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
@ -573,7 +575,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
new_incr->rn = rn;
new_incr->standby = standby;
if (!standby)
new_incr->usecount = 1;
new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
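The new usecount appears to count how many memory sections (MEM_SCT_SIZE = 1UL << SECTION_SIZE_BITS) one storage increment of size rzm spans, so the increment is only released once every section in it has gone offline. The arithmetic, with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	/* hypothetical values: 256 MiB increment, 128 MiB sections */
	unsigned long rzm = 256UL << 20;
	unsigned long mem_sct_size = 128UL << 20;
	unsigned long usecount = rzm > mem_sct_size ? rzm / mem_sct_size : 1;

	printf("usecount = %lu\n", usecount);	/* prints 2 */
	return 0;
}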

View File

@ -127,7 +127,7 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
}
if (mode == TO_USER) {
if (copy_to_user((__force __user void*) dest + offs, buf,
PAGE_SIZE))
count - offs))
return -EFAULT;
} else
memcpy(dest + offs, buf, count - offs);
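memcpy_hsa() walks the source in PAGE_SIZE chunks through a bounce buffer; the bug copied a full PAGE_SIZE out of that buffer on the last iteration even when fewer bytes remained, so the tail copy must be count - offs bytes. A reduced userspace model of the loop:

#include <stdio.h>
#include <string.h>

#define CHUNK 4096

/* copy count bytes from src to dest through a CHUNK-sized bounce buffer */
static void chunked_copy(char *dest, const char *src, size_t count)
{
	char buf[CHUNK];
	size_t offs, len;

	for (offs = 0; offs < count; offs += len) {
		len = count - offs < CHUNK ? count - offs : CHUNK;
		memcpy(buf, src + offs, len);
		/* the fix: the tail copy is count - offs bytes, not CHUNK */
		memcpy(dest + offs, buf, len);
	}
}

int main(void)
{
	char src[CHUNK + 100], dst[CHUNK + 100];

	memset(src, 'x', sizeof(src));
	chunked_copy(dst, src, sizeof(src));
	printf("last byte: %c\n", dst[sizeof(dst) - 1]);
	return 0;
}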

View File

@ -376,6 +376,26 @@ static void chp_release(struct device *dev)
kfree(cp);
}
/**
* chp_update_desc - update channel-path description
* @chp - channel-path
*
* Update the channel-path description of the specified channel-path.
* Return zero on success, non-zero otherwise.
*/
int chp_update_desc(struct channel_path *chp)
{
int rc;
rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
if (rc)
return rc;
rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
return rc;
}
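Callers now go through this helper instead of chsc_determine_base_channel_path_desc() directly, so the base and fmt1 descriptors can never drift apart. As the SEI handler further down shows, the call is made under chp->lock; a sketch of the intended usage (kernel context assumed, not compilable stand-alone):

/* refresh both cached descriptors, serialized against other users */
mutex_lock(&chp->lock);
chp_update_desc(chp);
mutex_unlock(&chp->lock);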
/**
* chp_new - register a new channel-path
* @chpid - channel-path ID
@ -403,7 +423,7 @@ int chp_new(struct chp_id chpid)
mutex_init(&chp->lock);
/* Obtain channel path description and fill it in. */
ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
ret = chp_update_desc(chp);
if (ret)
goto out_free;
if ((chp->desc.flags & 0x80) == 0) {

View File

@ -44,6 +44,7 @@ struct channel_path {
struct mutex lock; /* Serialize access to below members. */
int state;
struct channel_path_desc desc;
struct channel_path_desc_fmt1 desc_fmt1;
/* Channel-measurement related stuff: */
int cmg;
int shared;
@ -62,6 +63,7 @@ int chp_is_registered(struct chp_id chpid);
void *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
int chp_update_desc(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);

View File

@ -376,7 +376,7 @@ static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
continue;
}
mutex_lock(&chp->lock);
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
chp_update_desc(chp);
mutex_unlock(&chp->lock);
}
}
@ -631,8 +631,8 @@ int chsc_chp_vary(struct chp_id chpid, int on)
* Redo PathVerification on the devices the chpid connects to
*/
if (on) {
/* Try to update the channel path descritor. */
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
/* Try to update the channel path description. */
chp_update_desc(chp);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
__s390_vary_chpid_on, &chpid);
} else
@ -825,9 +825,10 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
{
struct chsc_response_struct *chsc_resp;
struct chsc_scpd *scpd_area;
unsigned long flags;
int ret;
spin_lock_irq(&chsc_page_lock);
spin_lock_irqsave(&chsc_page_lock, flags);
scpd_area = chsc_page;
ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
if (ret)
@ -835,7 +836,7 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
chsc_resp = (void *)&scpd_area->response;
memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
spin_unlock_irq(&chsc_page_lock);
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}

View File

@ -471,15 +471,6 @@ int cio_disable_subchannel(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
int cio_create_sch_lock(struct subchannel *sch)
{
sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
if (!sch->lock)
return -ENOMEM;
spin_lock_init(sch->lock);
return 0;
}
static int cio_check_devno_blacklisted(struct subchannel *sch)
{
if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
@ -536,32 +527,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
sprintf(dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT(4, dbf_txt);
/* Nuke all fields. */
memset(sch, 0, sizeof(struct subchannel));
sch->schid = schid;
if (cio_is_console(schid)) {
sch->lock = cio_get_console_lock();
} else {
err = cio_create_sch_lock(sch);
if (err)
goto out;
}
mutex_init(&sch->reg_mutex);
/*
* The first subchannel that is not-operational (ccode==3)
* indicates that there aren't any more devices available.
* indicates that there aren't any more devices available.
* If stsch gets an exception, it means the current subchannel set
* is not valid.
* is not valid.
*/
ccode = stsch_err (schid, &sch->schib);
ccode = stsch_err(schid, &sch->schib);
if (ccode) {
err = (ccode == 3) ? -ENXIO : ccode;
goto out;
}
/* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
sch->schid = schid;
switch (sch->st) {
case SUBCHANNEL_TYPE_IO:
@ -578,11 +556,7 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
return 0;
out:
if (!cio_is_console(schid))
kfree(sch->lock);
sch->lock = NULL;
return err;
}
@ -650,15 +624,13 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
}
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;
static struct subchannel *console_sch;
/*
* Use cio_tsch to update the subchannel status and call the interrupt handler
* if status had been pending. Called with the console_subchannel lock.
* if status had been pending. Called with the subchannel's lock held.
*/
static void cio_tsch(struct subchannel *sch)
void cio_tsch(struct subchannel *sch)
{
struct irb *irb;
int irq_context;
@ -675,6 +647,7 @@ static void cio_tsch(struct subchannel *sch)
local_bh_disable();
irq_enter();
}
kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
@ -685,135 +658,90 @@ static void cio_tsch(struct subchannel *sch)
}
}
void *cio_get_console_priv(void)
static int cio_test_for_console(struct subchannel_id schid, void *data)
{
return &console_priv;
}
struct schib schib;
/*
* busy wait for the next interrupt on the console
*/
void wait_cons_dev(void)
{
if (!console_subchannel_in_use)
return;
while (1) {
cio_tsch(&console_subchannel);
if (console_subchannel.schib.scsw.cmd.actl == 0)
break;
udelay_simple(100);
}
}
static int
cio_test_for_console(struct subchannel_id schid, void *data)
{
if (stsch_err(schid, &console_subchannel.schib) != 0)
if (stsch_err(schid, &schib) != 0)
return -ENXIO;
if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
console_subchannel.schib.pmcw.dnv &&
(console_subchannel.schib.pmcw.dev == console_devno)) {
if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
(schib.pmcw.dev == console_devno)) {
console_irq = schid.sch_no;
return 1; /* found */
}
return 0;
}
static int
cio_get_console_sch_no(void)
static int cio_get_console_sch_no(void)
{
struct subchannel_id schid;
struct schib schib;
init_subchannel_id(&schid);
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
schid.sch_no = console_irq;
if (stsch_err(schid, &console_subchannel.schib) != 0 ||
(console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
!console_subchannel.schib.pmcw.dnv)
if (stsch_err(schid, &schib) != 0 ||
(schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
return -1;
console_devno = console_subchannel.schib.pmcw.dev;
console_devno = schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
for_each_subchannel(cio_test_for_console, NULL);
if (console_irq == -1)
return -1;
} else {
/* unlike in 2.4, we cannot autoprobe here, since
* the channel subsystem is not fully initialized.
* With some luck, the HWC console can take over */
return -1;
}
return console_irq;
}
struct subchannel *
cio_probe_console(void)
struct subchannel *cio_probe_console(void)
{
int sch_no, ret;
struct subchannel_id schid;
struct subchannel *sch;
int sch_no, ret;
if (xchg(&console_subchannel_in_use, 1) != 0)
return ERR_PTR(-EBUSY);
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
console_subchannel_in_use = 0;
pr_warning("No CCW console was found\n");
return ERR_PTR(-ENODEV);
}
memset(&console_subchannel, 0, sizeof(struct subchannel));
init_subchannel_id(&schid);
schid.sch_no = sch_no;
ret = cio_validate_subchannel(&console_subchannel, schid);
if (ret) {
console_subchannel_in_use = 0;
return ERR_PTR(-ENODEV);
}
sch = css_alloc_subchannel(schid);
if (IS_ERR(sch))
return sch;
/*
* enable console I/O-interrupt subclass
*/
isc_register(CONSOLE_ISC);
console_subchannel.config.isc = CONSOLE_ISC;
console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
ret = cio_commit_config(&console_subchannel);
sch->config.isc = CONSOLE_ISC;
sch->config.intparm = (u32)(addr_t)sch;
ret = cio_commit_config(sch);
if (ret) {
isc_unregister(CONSOLE_ISC);
console_subchannel_in_use = 0;
put_device(&sch->dev);
return ERR_PTR(ret);
}
return &console_subchannel;
console_sch = sch;
return sch;
}
void
cio_release_console(void)
int cio_is_console(struct subchannel_id schid)
{
console_subchannel.config.intparm = 0;
cio_commit_config(&console_subchannel);
isc_unregister(CONSOLE_ISC);
console_subchannel_in_use = 0;
}
/* Bah... hack to catch console special sausages. */
int
cio_is_console(struct subchannel_id schid)
{
if (!console_subchannel_in_use)
if (!console_sch)
return 0;
return schid_equal(&schid, &console_subchannel.schid);
return schid_equal(&schid, &console_sch->schid);
}
struct subchannel *
cio_get_console_subchannel(void)
void cio_register_early_subchannels(void)
{
if (!console_subchannel_in_use)
return NULL;
return &console_subchannel;
}
int ret;
if (!console_sch)
return;
ret = css_register_subchannel(console_sch);
if (ret)
put_device(&console_sch->dev);
}
#endif /* CONFIG_CCW_CONSOLE */
#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{

View File

@ -121,23 +121,18 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
int cio_create_sch_lock(struct subchannel *);
void do_adapter_IO(u8 isc);
void do_IRQ(struct pt_regs *);
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
extern spinlock_t * cio_get_console_lock(void);
extern void *cio_get_console_priv(void);
extern void cio_register_early_subchannels(void);
extern void cio_tsch(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#define cio_get_console_lock() NULL
#define cio_get_console_priv() NULL
static inline void cio_register_early_subchannels(void) {}
#endif
#endif

View File

@ -137,37 +137,53 @@ out:
static void css_sch_todo(struct work_struct *work);
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
static int css_sch_create_locks(struct subchannel *sch)
{
sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
if (!sch->lock)
return -ENOMEM;
spin_lock_init(sch->lock);
mutex_init(&sch->reg_mutex);
return 0;
}
static void css_subchannel_release(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
int ret;
sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
if (sch == NULL)
sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
if (!sch)
return ERR_PTR(-ENOMEM);
ret = cio_validate_subchannel (sch, schid);
if (ret < 0) {
kfree(sch);
return ERR_PTR(ret);
}
ret = cio_validate_subchannel(sch, schid);
if (ret < 0)
goto err;
ret = css_sch_create_locks(sch);
if (ret)
goto err;
INIT_WORK(&sch->todo_work, css_sch_todo);
sch->dev.release = &css_subchannel_release;
device_initialize(&sch->dev);
return sch;
}
static void
css_subchannel_release(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (!cio_is_console(sch->schid)) {
/* Reset intparm to zeroes. */
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
err:
kfree(sch);
return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
@ -177,7 +193,7 @@ static int css_sch_device_register(struct subchannel *sch)
mutex_lock(&sch->reg_mutex);
dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch->schid.sch_no);
ret = device_register(&sch->dev);
ret = device_add(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
}
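Since css_alloc_subchannel() now ends with device_initialize(), registration shrinks to device_add(), and every failure path can drop the embedded kobject with put_device() rather than an open-coded kfree(). A sketch of the two-step driver-core pattern (kernel context assumed):

/* allocation side */
device_initialize(&sch->dev);		/* refcount 1, release() armed */
/* registration side */
if (device_add(&sch->dev))
	put_device(&sch->dev);		/* css_subchannel_release() frees sch */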
@ -228,16 +244,11 @@ void css_update_ssd_info(struct subchannel *sch)
{
int ret;
if (cio_is_console(sch->schid)) {
/* Console is initialized too early for functions requiring
* memory allocation. */
ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
} else {
ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
ssd_register_chpids(&sch->ssd_info);
}
ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@ -275,14 +286,13 @@ static const struct attribute_group *default_subch_attr_groups[] = {
NULL,
};
static int css_register_subchannel(struct subchannel *sch)
int css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
sch->dev.groups = default_subch_attr_groups;
/*
* We don't want to generate uevents for I/O subchannels that don't
@ -314,23 +324,19 @@ static int css_register_subchannel(struct subchannel *sch)
return ret;
}
int css_probe_device(struct subchannel_id schid)
static int css_probe_device(struct subchannel_id schid)
{
int ret;
struct subchannel *sch;
int ret;
sch = css_alloc_subchannel(schid);
if (IS_ERR(sch))
return PTR_ERR(sch);
if (cio_is_console(schid))
sch = cio_get_console_subchannel();
else {
sch = css_alloc_subchannel(schid);
if (IS_ERR(sch))
return PTR_ERR(sch);
}
ret = css_register_subchannel(sch);
if (ret) {
if (!cio_is_console(schid))
put_device(&sch->dev);
}
if (ret)
put_device(&sch->dev);
return ret;
}
@ -770,7 +776,7 @@ static int __init setup_css(int nr)
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
mutex_init(&css->pseudo_subchannel->reg_mutex);
ret = cio_create_sch_lock(css->pseudo_subchannel);
ret = css_sch_create_locks(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
return ret;
@ -870,8 +876,7 @@ static struct notifier_block css_power_notifier = {
/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing (except for the
* static console subchannel).
* The struct subchannel's are created during probing.
*/
static int __init css_bus_init(void)
{
@ -1050,6 +1055,8 @@ int css_complete_work(void)
*/
static int __init channel_subsystem_init_sync(void)
{
/* Register subchannels which are already in use. */
cio_register_early_subchannels();
/* Start initial subchannel evaluation. */
css_schedule_eval_all();
css_complete_work();
@ -1065,9 +1072,8 @@ void channel_subsystem_reinit(void)
chsc_enable_facility(CHSC_SDA_OC_MSS);
chp_id_for_each(&chpid) {
chp = chpid_to_chp(chpid);
if (!chp)
continue;
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
if (chp)
chp_update_desc(chp);
}
}

View File

@ -101,7 +101,8 @@ extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
extern int css_probe_device(struct subchannel_id);
extern int css_register_subchannel(struct subchannel *);
extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
@ -109,7 +110,6 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data);
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
struct channel_subsystem {

View File

@ -19,6 +19,7 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
@ -43,6 +44,10 @@ static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };
static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;
/******************* bus type handling ***********************/
/* The Linux driver model distinguishes between a bus type and
@ -127,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
return ret;
}
static struct bus_type ccw_bus_type;
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
@ -137,8 +140,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
static void recovery_func(unsigned long data);
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@ -191,10 +192,7 @@ int __init io_subchannel_init(void)
{
int ret;
init_waitqueue_head(&ccw_device_init_wq);
atomic_set(&ccw_device_init_count, 0);
setup_timer(&recovery_timer, recovery_func, 0);
ret = bus_register(&ccw_bus_type);
if (ret)
return ret;
@ -1086,19 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch)
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev = sch_get_cdev(sch);
cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
cdev->private->flags.initialized = 1;
ccw_device_register(cdev);
/*
* Check if the device is already online. If it is
* the reference count needs to be corrected since we
* didn't obtain a reference in ccw_device_set_online.
*/
if (cdev->private->state != DEV_STATE_NOT_OPER &&
cdev->private->state != DEV_STATE_OFFLINE &&
cdev->private->state != DEV_STATE_BOXED)
get_device(&cdev->dev);
rc = ccw_device_register(cdev);
if (rc) {
/* Release online reference. */
put_device(&cdev->dev);
goto out_schedule;
}
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
return 0;
}
io_subchannel_init_fields(sch);
@ -1580,88 +1573,102 @@ out:
}
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;
static DEFINE_SPINLOCK(ccw_console_lock);
spinlock_t * cio_get_console_lock(void)
{
return &ccw_console_lock;
}
static int ccw_device_console_enable(struct ccw_device *cdev,
struct subchannel *sch)
{
struct io_subchannel_private *io_priv = cio_get_console_priv();
int rc;
/* Attach subchannel private data. */
memset(io_priv, 0, sizeof(*io_priv));
set_io_private(sch, io_priv);
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
return rc;
sch->driver = &io_subchannel_driver;
/* Initialize the ccw_device structure. */
cdev->dev.parent= &sch->dev;
sch_set_cdev(sch, cdev);
io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
wait_cons_dev();
rc = -EIO;
if (cdev->private->state != DEV_STATE_OFFLINE)
ccw_device_wait_idle(cdev);
/* Hold on to an extra reference while device is online. */
get_device(&cdev->dev);
rc = ccw_device_online(cdev);
if (rc)
goto out_unlock;
ccw_device_online(cdev);
while (!dev_fsm_final_state(cdev))
wait_cons_dev();
if (cdev->private->state != DEV_STATE_ONLINE)
goto out_unlock;
rc = 0;
ccw_device_wait_idle(cdev);
if (cdev->private->state == DEV_STATE_ONLINE)
cdev->online = 1;
else
rc = -EIO;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
if (rc) /* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return rc;
}
struct ccw_device *
ccw_device_probe_console(void)
struct ccw_device *ccw_device_probe_console(void)
{
struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
if (xchg(&console_cdev_in_use, 1) != 0)
return ERR_PTR(-EBUSY);
sch = cio_probe_console();
if (IS_ERR(sch)) {
console_cdev_in_use = 0;
return (void *) sch;
if (IS_ERR(sch))
return ERR_CAST(sch);
io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
if (!io_priv) {
put_device(&sch->dev);
return ERR_PTR(-ENOMEM);
}
memset(&console_cdev, 0, sizeof(struct ccw_device));
memset(&console_private, 0, sizeof(struct ccw_device_private));
console_cdev.private = &console_private;
console_private.cdev = &console_cdev;
console_private.int_class = IRQIO_CIO;
ret = ccw_device_console_enable(&console_cdev, sch);
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
put_device(&sch->dev);
kfree(io_priv);
return cdev;
}
set_io_private(sch, io_priv);
ret = ccw_device_console_enable(cdev, sch);
if (ret) {
cio_release_console();
console_cdev_in_use = 0;
set_io_private(sch, NULL);
put_device(&sch->dev);
put_device(&cdev->dev);
kfree(io_priv);
return ERR_PTR(ret);
}
console_cdev.online = 1;
return &console_cdev;
return cdev;
}
/**
* ccw_device_wait_idle() - busy wait for device to become idle
* @cdev: ccw device
*
* Poll until activity control is zero, that is, no function or data
* transfer is pending/active.
* Called with device lock being held.
*/
void ccw_device_wait_idle(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
while (1) {
cio_tsch(sch);
if (sch->schib.scsw.cmd.actl == 0)
break;
udelay_simple(100);
}
}
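ccw_device_wait_idle() replaces the old global wait_cons_dev(): it polls one specific subchannel instead of the static console subchannel. As in ccw_device_console_enable() above, a sketch of the calling pattern (kernel context assumed, not compilable stand-alone):

spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
	ccw_device_wait_idle(cdev);
spin_unlock_irq(cdev->ccwlock);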
static int ccw_device_pm_restore(struct device *dev);
int ccw_device_force_console(void)
int ccw_device_force_console(struct ccw_device *cdev)
{
if (!console_cdev_in_use)
return -ENODEV;
return ccw_device_pm_restore(&console_cdev.dev);
return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

View File

@ -81,8 +81,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_BOXED);
}
extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
int __init io_subchannel_init(void);
void io_subchannel_recog_done(struct ccw_device *cdev);

View File

@ -704,9 +704,9 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct channel_path_desc_fmt1 desc;
struct channel_path *chp;
struct chp_id chpid;
int mdc = 0, ret, i;
int mdc = 0, i;
/* Adjust requested path mask to excluded varied off paths. */
if (mask)
@ -719,14 +719,20 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
if (!(mask & (0x80 >> i)))
continue;
chpid.id = sch->schib.pmcw.chpid[i];
ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
if (ret)
return ret;
if (!desc.f)
chp = chpid_to_chp(chpid);
if (!chp)
continue;
mutex_lock(&chp->lock);
if (!chp->desc_fmt1.f) {
mutex_unlock(&chp->lock);
return 0;
if (!desc.r)
}
if (!chp->desc_fmt1.r)
mdc = 1;
mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
chp->desc_fmt1.mdc;
mutex_unlock(&chp->lock);
}
return mdc;
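The mdc accumulator treats 0 as "unset": the first usable path seeds it, each further path lowers it via min, and a path with the r bit clear pins it to 1 before the min. The min aggregation alone, modeled in userspace with hypothetical per-path values:

#include <stdio.h>

int main(void)
{
	/* hypothetical max-data-count per usable path; mdc 0 = unset */
	int per_path[] = { 64, 32, 128 };
	int mdc = 0;

	for (int i = 0; i < 3; i++)
		mdc = mdc ? (per_path[i] < mdc ? per_path[i] : mdc)
			  : per_path[i];
	printf("mdc = %d\n", mdc);	/* prints 32 */
	return 0;
}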

View File

@ -17,7 +17,7 @@ struct idset {
static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
}
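BITS_TO_LONGS(n) is the generic kernel helper for this: the number of unsigned longs needed to hold n bits, i.e. a round-up division by BITS_PER_LONG, replacing the s390-specific __BITOPS_WORDS(). The arithmetic in plain C (64-bit long assumed):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long bits = 4UL * 65536;	/* e.g. 4 ssids x 65536 ids */

	printf("%lu bits -> %lu longs -> %lu bytes\n", bits,
	       (unsigned long)BITS_TO_LONGS(bits),
	       (unsigned long)(BITS_TO_LONGS(bits) * sizeof(unsigned long)));
	return 0;
}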
static struct idset *idset_new(int num_ssid, int num_id)