Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (71 commits)
  [S390] sclp_tty: Fix scheduling while atomic bug.
  [S390] sclp_tty: remove ioctl interface.
  [S390] Remove P390 support.
  [S390] Cleanup vmcp printk messages.
  [S390] Cleanup lcs printk messages.
  [S390] Cleanup kprobes printk messages.
  [S390] Cleanup vmwatch printk messages.
  [S390] Cleanup dcssblk printk messages.
  [S390] Cleanup zfcp dumper printk messages.
  [S390] Cleanup vmlogrdr printk messages.
  [S390] Cleanup s390 debug feature print messages.
  [S390] Cleanup monreader printk messages.
  [S390] Cleanup appldata printk messages.
  [S390] Cleanup smsgiucv printk messages.
  [S390] Cleanup cpacf printk messages.
  [S390] Cleanup qeth print messages.
  [S390] Cleanup netiucv printk messages.
  [S390] Cleanup iucv printk messages.
  [S390] Cleanup sclp printk messages.
  [S390] Cleanup zcrypt printk messages.
  ...
Linus Torvalds 2008-07-14 13:25:01 -07:00
commit b7f80afa28
133 changed files with 6867 additions and 3087 deletions


@@ -0,0 +1,35 @@
What:		/sys/bus/css/devices/.../type
Date:		March 2008
Contact:	Cornelia Huck <cornelia.huck@de.ibm.com>
		linux-s390@vger.kernel.org
Description:	Contains the subchannel type, as reported by the hardware.
		This attribute is present for all subchannel types.

What:		/sys/bus/css/devices/.../modalias
Date:		March 2008
Contact:	Cornelia Huck <cornelia.huck@de.ibm.com>
		linux-s390@vger.kernel.org
Description:	Contains the module alias as reported with uevents.
		It is of the format css:t<type> and present for all
		subchannel types.

What:		/sys/bus/css/drivers/io_subchannel/.../chpids
Date:		December 2002
Contact:	Cornelia Huck <cornelia.huck@de.ibm.com>
		linux-s390@vger.kernel.org
Description:	Contains the ids of the channel paths used by this
		subchannel, as reported by the channel subsystem
		during subchannel recognition.
Note:		This is an I/O-subchannel specific attribute.
Users:		s390-tools, HAL

What:		/sys/bus/css/drivers/io_subchannel/.../pimpampom
Date:		December 2002
Contact:	Cornelia Huck <cornelia.huck@de.ibm.com>
		linux-s390@vger.kernel.org
Description:	Contains the PIM/PAM/POM values, as reported by the
		channel subsystem when last queried by the common I/O
		layer (this implies that this attribute is not necessarily
		in sync with the values current in the channel subsystem).
Note:		This is an I/O-subchannel specific attribute.
Users:		s390-tools, HAL
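
A hedged illustration (not part of the commit): user space reads these
attributes like any other sysfs file. The subchannel bus id 0.0.0000 below
is a made-up example; list /sys/bus/css/devices/ for real ids.

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		/* hypothetical device path */
		FILE *f = fopen("/sys/bus/css/devices/0.0.0000/modalias", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			/* prints e.g. "css:t0", per the css:t<type> format above */
			printf("%s", buf);
		fclose(f);
		return 0;
	}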


@@ -117,6 +117,7 @@ Code Seq# Include File Comments
<mailto:natalia@nikhefk.nikhef.nl>
'c' 00-7F linux/comstats.h conflict!
'c' 00-7F linux/coda.h conflict!
'c' 80-9F asm-s390/chsc.h
'd' 00-FF linux/char/drm/drm.h conflict!
'd' 00-DF linux/video_decoder.h conflict!
'd' F0-FF linux/digi1.h


@@ -146,6 +146,7 @@ config MATHEMU
config COMPAT
bool "Kernel support for 31 bit emulation"
depends on 64BIT
select COMPAT_BINFMT_ELF
help
Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option
@@ -312,6 +313,10 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL
def_bool y
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
depends on SPARSEMEM
source "mm/Kconfig"
comment "I/O subsystem configuration"
@@ -344,6 +349,22 @@ config QDIO_DEBUG
If unsure, say N.
config CHSC_SCH
tristate "Support for CHSC subchannels"
help
This driver allows usage of CHSC subchannels. A CHSC subchannel
is usually present on LPAR only.
The driver creates a device /dev/chsc, which may be used to
obtain I/O configuration information about the machine and
to issue asynchronous chsc commands (DANGEROUS).
You will usually only want to use this interface on a special
LPAR designated for system management.
To compile this driver as a module, choose M here: the
module will be called chsc_sch.
If unsure, say N.
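
As a hedged illustration of the interface this help text describes (nothing
below is from the commit itself; the actual request structures and ioctl
numbers live in asm-s390/chsc.h, registered as code 'c', range 80-9F in the
ioctl table above):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* device node created by the chsc_sch driver */
		int fd = open("/dev/chsc", O_RDWR);

		if (fd < 0) {
			perror("open /dev/chsc"); /* driver missing or not an LPAR */
			return 1;
		}
		/*
		 * The asynchronous chsc ioctls (code 'c', range 0x80-0x9F,
		 * see asm-s390/chsc.h) would be issued here via ioctl(fd, ...).
		 */
		close(fd);
		return 0;
	}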
comment "Misc"
config IPL


@@ -3,13 +3,11 @@
*
* Definitions and interface for Linux - z/VM Monitor Stream.
*
* Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
* Copyright IBM Corp. 2003, 2008
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
//#define APPLDATA_DEBUG /* Debug messages on/off */
#define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */
/* data buffer */
#define APPLDATA_MAX_PROCS 100
@@ -32,12 +30,6 @@
#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
#ifdef APPLDATA_DEBUG
#define P_DEBUG(x...) printk(KERN_DEBUG MY_PRINT_NAME " debug: " x)
#else
#define P_DEBUG(x...) do {} while (0)
#endif
struct appldata_ops {
struct list_head list;
struct ctl_table_header *sysctl_header;


@@ -5,7 +5,7 @@
* Exports appldata_register_ops() and appldata_unregister_ops() for the
* data gathering modules.
*
* Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
* Copyright IBM Corp. 2003, 2008
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
@@ -108,9 +108,6 @@ static LIST_HEAD(appldata_ops_list);
*/
static void appldata_timer_function(unsigned long data)
{
P_DEBUG(" -= Timer =-\n");
P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
atomic_read(&appldata_expire_count));
if (atomic_dec_and_test(&appldata_expire_count)) {
atomic_set(&appldata_expire_count, num_online_cpus());
queue_work(appldata_wq, (struct work_struct *) data);
@@ -128,14 +125,11 @@ static void appldata_work_fn(struct work_struct *work)
struct appldata_ops *ops;
int i;
P_DEBUG(" -= Work Queue =-\n");
i = 0;
get_online_cpus();
spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list);
P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
++i, ops->active, ops->name);
if (ops->active == 1) {
ops->callback(ops->data);
}
@@ -212,7 +206,6 @@ __appldata_vtimer_setup(int cmd)
0, 1);
}
appldata_timer_active = 1;
P_INFO("Monitoring timer started.\n");
break;
case APPLDATA_DEL_TIMER:
for_each_online_cpu(i)
@@ -221,7 +214,6 @@ __appldata_vtimer_setup(int cmd)
break;
appldata_timer_active = 0;
atomic_set(&appldata_expire_count, num_online_cpus());
P_INFO("Monitoring timer stopped.\n");
break;
case APPLDATA_MOD_TIMER:
per_cpu_interval = (u64) (appldata_interval*1000 /
@@ -313,10 +305,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
}
interval = 0;
sscanf(buf, "%i", &interval);
if (interval <= 0) {
P_ERROR("Timer CPU interval has to be > 0!\n");
if (interval <= 0)
return -EINVAL;
}
get_online_cpus();
spin_lock(&appldata_timer_lock);
@@ -324,9 +314,6 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
put_online_cpus();
P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
interval);
out:
*lenp = len;
*ppos += len;
@@ -406,23 +393,16 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
P_ERROR("START DIAG 0xDC for %s failed, "
"return code: %d\n", ops->name, rc);
module_put(ops->owner);
} else {
P_INFO("Monitoring %s data enabled, "
"DIAG 0xDC started.\n", ops->name);
} else
ops->active = 1;
}
} else if ((buf[0] == '0') && (ops->active == 1)) {
ops->active = 0;
rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
(unsigned long) ops->data, ops->size,
ops->mod_lvl);
if (rc != 0) {
if (rc != 0)
P_ERROR("STOP DIAG 0xDC for %s failed, "
"return code: %d\n", ops->name, rc);
} else {
P_INFO("Monitoring %s data disabled, "
"DIAG 0xDC stopped.\n", ops->name);
}
module_put(ops->owner);
}
spin_unlock(&appldata_ops_lock);
@@ -468,7 +448,6 @@ int appldata_register_ops(struct appldata_ops *ops)
ops->sysctl_header = register_sysctl_table(ops->ctl_table);
if (!ops->sysctl_header)
goto out;
P_INFO("%s-ops registered!\n", ops->name);
return 0;
out:
spin_lock(&appldata_ops_lock);
@@ -490,7 +469,6 @@ void appldata_unregister_ops(struct appldata_ops *ops)
spin_unlock(&appldata_ops_lock);
unregister_sysctl_table(ops->sysctl_header);
kfree(ops->ctl_table);
P_INFO("%s-ops unregistered!\n", ops->name);
}
/********************** module-ops management <END> **************************/
@@ -553,14 +531,9 @@ static int __init appldata_init(void)
{
int i;
P_DEBUG("sizeof(parameter_list) = %lu\n",
sizeof(struct appldata_parameter_list));
appldata_wq = create_singlethread_workqueue("appldata");
if (!appldata_wq) {
P_ERROR("Could not create work queue\n");
if (!appldata_wq)
return -ENOMEM;
}
get_online_cpus();
for_each_online_cpu(i)
@@ -571,8 +544,6 @@ static int __init appldata_init(void)
register_hotcpu_notifier(&appldata_nb);
appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
P_DEBUG("Base interface initialized.\n");
return 0;
}
@@ -584,7 +555,9 @@ EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);
#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);


@@ -14,14 +14,13 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <asm/io.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/io.h>
#include "appldata.h"
#define MY_PRINT_NAME "appldata_mem" /* for debug messages, etc. */
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
/*
@@ -70,30 +69,6 @@ static struct appldata_mem_data {
} __attribute__((packed)) appldata_mem_data;
static inline void appldata_debug_print(struct appldata_mem_data *mem_data)
{
P_DEBUG("--- MEM - RECORD ---\n");
P_DEBUG("pgpgin = %8lu KB\n", mem_data->pgpgin);
P_DEBUG("pgpgout = %8lu KB\n", mem_data->pgpgout);
P_DEBUG("pswpin = %8lu Pages\n", mem_data->pswpin);
P_DEBUG("pswpout = %8lu Pages\n", mem_data->pswpout);
P_DEBUG("pgalloc = %8lu \n", mem_data->pgalloc);
P_DEBUG("pgfault = %8lu \n", mem_data->pgfault);
P_DEBUG("pgmajfault = %8lu \n", mem_data->pgmajfault);
P_DEBUG("sharedram = %8lu KB\n", mem_data->sharedram);
P_DEBUG("totalram = %8lu KB\n", mem_data->totalram);
P_DEBUG("freeram = %8lu KB\n", mem_data->freeram);
P_DEBUG("totalhigh = %8lu KB\n", mem_data->totalhigh);
P_DEBUG("freehigh = %8lu KB\n", mem_data->freehigh);
P_DEBUG("bufferram = %8lu KB\n", mem_data->bufferram);
P_DEBUG("cached = %8lu KB\n", mem_data->cached);
P_DEBUG("totalswap = %8lu KB\n", mem_data->totalswap);
P_DEBUG("freeswap = %8lu KB\n", mem_data->freeswap);
P_DEBUG("sync_count_1 = %u\n", mem_data->sync_count_1);
P_DEBUG("sync_count_2 = %u\n", mem_data->sync_count_2);
P_DEBUG("timestamp = %lX\n", mem_data->timestamp);
}
/*
* appldata_get_mem_data()
*
@@ -140,9 +115,6 @@ static void appldata_get_mem_data(void *data)
mem_data->timestamp = get_clock();
mem_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
appldata_debug_print(mem_data);
#endif
}
@@ -164,17 +136,7 @@ static struct appldata_ops ops = {
*/
static int __init appldata_mem_init(void)
{
int rc;
P_DEBUG("sizeof(mem) = %lu\n", sizeof(struct appldata_mem_data));
rc = appldata_register_ops(&ops);
if (rc != 0) {
P_ERROR("Error registering ops, rc = %i\n", rc);
} else {
P_DEBUG("%s-ops registered!\n", ops.name);
}
return rc;
return appldata_register_ops(&ops);
}
/*
@@ -185,7 +147,6 @@ static int __init appldata_mem_init(void)
static void __exit appldata_mem_exit(void)
{
appldata_unregister_ops(&ops);
P_DEBUG("%s-ops unregistered!\n", ops.name);
}


@@ -21,9 +21,6 @@
#include "appldata.h"
#define MY_PRINT_NAME "appldata_net_sum" /* for debug messages, etc. */
/*
* Network data
*
@@ -60,26 +57,6 @@ static struct appldata_net_sum_data {
} __attribute__((packed)) appldata_net_sum_data;
static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)
{
P_DEBUG("--- NET - RECORD ---\n");
P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces);
P_DEBUG("rx_packets = %8lu\n", net_data->rx_packets);
P_DEBUG("tx_packets = %8lu\n", net_data->tx_packets);
P_DEBUG("rx_bytes = %8lu\n", net_data->rx_bytes);
P_DEBUG("tx_bytes = %8lu\n", net_data->tx_bytes);
P_DEBUG("rx_errors = %8lu\n", net_data->rx_errors);
P_DEBUG("tx_errors = %8lu\n", net_data->tx_errors);
P_DEBUG("rx_dropped = %8lu\n", net_data->rx_dropped);
P_DEBUG("tx_dropped = %8lu\n", net_data->tx_dropped);
P_DEBUG("collisions = %8lu\n", net_data->collisions);
P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1);
P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2);
P_DEBUG("timestamp = %lX\n", net_data->timestamp);
}
/*
* appldata_get_net_sum_data()
*
@@ -135,9 +112,6 @@ static void appldata_get_net_sum_data(void *data)
net_data->timestamp = get_clock();
net_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
appldata_print_debug(net_data);
#endif
}
@@ -159,17 +133,7 @@ static struct appldata_ops ops = {
*/
static int __init appldata_net_init(void)
{
int rc;
P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data));
rc = appldata_register_ops(&ops);
if (rc != 0) {
P_ERROR("Error registering ops, rc = %i\n", rc);
} else {
P_DEBUG("%s-ops registered!\n", ops.name);
}
return rc;
return appldata_register_ops(&ops);
}
/*
@@ -180,7 +144,6 @@ static int __init appldata_net_init(void)
static void __exit appldata_net_exit(void)
{
appldata_unregister_ops(&ops);
P_DEBUG("%s-ops unregistered!\n", ops.name);
}


@@ -89,44 +89,6 @@ static struct appldata_ops ops = {
};
static inline void appldata_print_debug(struct appldata_os_data *os_data)
{
int a0, a1, a2, i;
P_DEBUG("--- OS - RECORD ---\n");
P_DEBUG("nr_threads = %u\n", os_data->nr_threads);
P_DEBUG("nr_running = %u\n", os_data->nr_running);
P_DEBUG("nr_iowait = %u\n", os_data->nr_iowait);
P_DEBUG("avenrun(int) = %8x / %8x / %8x\n", os_data->avenrun[0],
os_data->avenrun[1], os_data->avenrun[2]);
a0 = os_data->avenrun[0];
a1 = os_data->avenrun[1];
a2 = os_data->avenrun[2];
P_DEBUG("avenrun(float) = %d.%02d / %d.%02d / %d.%02d\n",
LOAD_INT(a0), LOAD_FRAC(a0), LOAD_INT(a1), LOAD_FRAC(a1),
LOAD_INT(a2), LOAD_FRAC(a2));
P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
for (i = 0; i < os_data->nr_cpus; i++) {
P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
"idle = %u, irq = %u, softirq = %u, iowait = %u, "
"steal = %u\n",
os_data->os_cpu[i].cpu_id,
os_data->os_cpu[i].per_cpu_user,
os_data->os_cpu[i].per_cpu_nice,
os_data->os_cpu[i].per_cpu_system,
os_data->os_cpu[i].per_cpu_idle,
os_data->os_cpu[i].per_cpu_irq,
os_data->os_cpu[i].per_cpu_softirq,
os_data->os_cpu[i].per_cpu_iowait,
os_data->os_cpu[i].per_cpu_steal);
}
P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1);
P_DEBUG("sync_count_2 = %u\n", os_data->sync_count_2);
P_DEBUG("timestamp = %lX\n", os_data->timestamp);
}
/*
* appldata_get_os_data()
*
@@ -180,13 +142,10 @@ static void appldata_get_os_data(void *data)
APPLDATA_START_INTERVAL_REC,
(unsigned long) ops.data, new_size,
ops.mod_lvl);
if (rc != 0) {
if (rc != 0)
P_ERROR("os: START NEW DIAG 0xDC failed, "
"return code: %d, new size = %i\n", rc,
new_size);
P_INFO("os: stopping old record now\n");
} else
P_INFO("os: new record size = %i\n", new_size);
rc = appldata_diag(APPLDATA_RECORD_OS_ID,
APPLDATA_STOP_REC,
@@ -204,9 +163,6 @@ static void appldata_get_os_data(void *data)
}
os_data->timestamp = get_clock();
os_data->sync_count_2++;
#ifdef APPLDATA_DEBUG
appldata_print_debug(os_data);
#endif
}
@@ -227,12 +183,9 @@ static int __init appldata_os_init(void)
rc = -ENOMEM;
goto out;
}
P_DEBUG("max. sizeof(os) = %i, sizeof(os_cpu) = %lu\n", max_size,
sizeof(struct appldata_os_per_cpu));
appldata_os_data = kzalloc(max_size, GFP_DMA);
if (appldata_os_data == NULL) {
P_ERROR("No memory for %s!\n", ops.name);
rc = -ENOMEM;
goto out;
}
@@ -240,17 +193,12 @@ static int __init appldata_os_init(void)
appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
appldata_os_data->cpu_offset = offsetof(struct appldata_os_data,
os_cpu);
P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset);
ops.data = appldata_os_data;
ops.callback = &appldata_get_os_data;
rc = appldata_register_ops(&ops);
if (rc != 0) {
P_ERROR("Error registering ops, rc = %i\n", rc);
if (rc != 0)
kfree(appldata_os_data);
} else {
P_DEBUG("%s-ops registered!\n", ops.name);
}
out:
return rc;
}
@@ -264,7 +212,6 @@ static void __exit appldata_os_exit(void)
{
appldata_unregister_ops(&ops);
kfree(appldata_os_data);
P_DEBUG("%s-ops unregistered!\n", ops.name);
}


@@ -185,11 +185,8 @@ static int __init prng_init(void)
prng_seed(16);
ret = misc_register(&prng_dev);
if (ret) {
printk(KERN_WARNING
"Could not register misc device for PRNG.\n");
if (ret)
goto out_buf;
}
return 0;
out_buf:


@@ -150,33 +150,24 @@ static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t offset)
{
char *data;
size_t len;
ssize_t ret;
struct file *filp = iocb->ki_filp;
/* XXX: temporary */
char __user *buf = iov[0].iov_base;
size_t count = iov[0].iov_len;
if (nr_segs != 1) {
count = -EINVAL;
goto out;
}
if (nr_segs != 1)
return -EINVAL;
data = filp->private_data;
len = strlen(data);
if (offset > len) {
count = 0;
goto out;
}
if (count > len - offset)
count = len - offset;
if (copy_to_user(buf, data + offset, count)) {
count = -EFAULT;
goto out;
}
iocb->ki_pos += count;
ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
if (ret <= 0)
return ret;
iocb->ki_pos += ret;
file_accessed(filp);
out:
return count;
return ret;
}
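/*
 * Note (editorial, not from the commit): simple_read_from_buffer()
 * bounds-checks the requested window against strlen(data), copies at
 * most count bytes to the user buffer starting at offset, advances the
 * position and returns the number of bytes copied (0 at end of data,
 * -EFAULT on a bad user pointer), replacing the open-coded checks above.
 */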
static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t offset)


@@ -7,9 +7,14 @@
#
CFLAGS_smp.o := -Wno-nonnull
#
# Pass UTS_MACHINE for user_regset definition
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := bitmap.o traps.o time.o process.o base.o early.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
s390_ext.o debug.o irq.o ipl.o dis.o diag.o
s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -23,7 +28,7 @@ obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
compat_wrapper.o compat_exec_domain.o \
binfmt_elf32.o $(compat-obj-y)
$(compat-obj-y)
obj-$(CONFIG_VIRT_TIMER) += vtime.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o


@@ -1,214 +0,0 @@
/*
* Support for 32-bit Linux for S390 ELF binaries.
*
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Gerhard Tonn (ton@de.ibm.com)
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define __ASMS390_ELF_H
#include <linux/time.h>
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
&& (x)->e_ident[EI_CLASS] == ELF_CLASS)
/* ELF register definitions */
#define NUM_GPRS 16
#define NUM_FPRS 16
#define NUM_ACRS 16
/* For SVR4/S390 the function pointer to be registered with `atexit` is
passed in R14. */
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->gprs[14] = 0; \
} while(0)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* Wow, the "main" arch needs arch dependent functions too.. :) */
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
now struct_user_regs, they are different) */
#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex, ibcs2) \
do { \
if (ibcs2) \
set_personality(PER_SVR4); \
else if (current->personality != PER_LINUX32) \
set_personality(PER_LINUX); \
set_thread_flag(TIF_31BIT); \
} while (0)
#include "compat_linux.h"
typedef _s390_fp_regs32 elf_fpregset_t;
typedef struct
{
_psw_t32 psw;
__u32 gprs[__NUM_GPRS];
__u32 acrs[__NUM_ACRS];
__u32 orig_gpr2;
} s390_regs32;
typedef s390_regs32 elf_gregset_t;
static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
{
int i;
memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = ptregs->gprs[i];
save_access_regs(regs->acrs);
regs->orig_gpr2 = ptregs->orig_gpr2;
return 1;
}
static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
{
struct pt_regs *ptregs = task_pt_regs(tsk);
int i;
memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = ptregs->gprs[i];
memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
regs->orig_gpr2 = ptregs->orig_gpr2;
return 1;
}
static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
if (tsk == current)
save_fp_regs((s390_fp_regs *) fpregs);
else
memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
return 1;
}
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/binfmts.h>
#include <linux/compat.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
u32 pr_sigpend; /* Set of pending signals */
u32 pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime; /* Cumulative user time */
struct compat_timeval pr_cstime; /* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
u32 pr_flag; /* flags */
u16 pr_uid;
u16 pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#include <linux/highuid.h>
/*
#define init_elf_binfmt init_elf32_binfmt
*/
#undef start_thread
#define start_thread start_thread31
static inline void start_thread31(struct pt_regs *regs, unsigned long new_psw,
unsigned long new_stackp)
{
set_fs(USER_DS);
regs->psw.mask = psw_user32_bits;
regs->psw.addr = new_psw;
regs->gprs[15] = new_stackp;
crst_table_downgrade(current->mm, 1UL << 31);
}
MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
" Copyright 2000 IBM Corporation");
MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static inline void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
value->tv_usec = cputime % 1000000;
value->tv_sec = cputime / 1000000;
}
#include "../../../fs/binfmt_elf.c"


@@ -1,7 +1,7 @@
#ifndef _PTRACE32_H
#define _PTRACE32_H
#include "compat_linux.h" /* needed for _psw_t32 */
#include "compat_linux.h" /* needed for psw_compat_t */
typedef struct {
__u32 cr[3];
@@ -38,7 +38,7 @@ typedef struct {
struct user_regs_struct32
{
_psw_t32 psw;
psw_compat_t psw;
u32 gprs[NUM_GPRS];
u32 acrs[NUM_ACRS];
u32 orig_gpr2;


@@ -1079,7 +1079,6 @@ __init debug_init(void)
s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
mutex_lock(&debug_mutex);
debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
printk(KERN_INFO "debug: Initialization complete\n");
initialized = 1;
mutex_unlock(&debug_mutex);
@@ -1193,7 +1192,6 @@ debug_get_uint(char *buf)
for(; isspace(*buf); buf++);
rc = simple_strtoul(buf, &buf, 10);
if(*buf){
printk("debug: no integer specified!\n");
rc = -EINVAL;
}
return rc;
@@ -1340,19 +1338,12 @@ static void debug_flush(debug_info_t* id, int area)
memset(id->areas[i][j], 0, PAGE_SIZE);
}
}
printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
} else if(area >= 0 && area < id->nr_areas) {
id->active_entries[area] = 0;
id->active_pages[area] = 0;
for(i = 0; i < id->pages_per_area; i++) {
memset(id->areas[area][i],0,PAGE_SIZE);
}
printk(KERN_INFO "debug: %s: area %i has been flushed\n",
id->name, area);
} else {
printk(KERN_INFO
"debug: %s: area %i cannot be flushed (range: %i - %i)\n",
id->name, area, 0, id->nr_areas-1);
}
spin_unlock_irqrestore(&id->lock,flags);
}


@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
@@ -26,12 +27,40 @@
/*
* Create a Kernel NSS if the SAVESYS= parameter is defined
*/
#define DEFSYS_CMD_SIZE 96
#define DEFSYS_CMD_SIZE 128
#define SAVESYS_CMD_SIZE 32
char kernel_nss_name[NSS_NAME_SIZE + 1];
static void __init setup_boot_command_line(void);
#ifdef CONFIG_SHARED_KERNEL
int __init savesys_ipl_nss(char *cmd, const int cmdlen);
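/*
 * Editorial note on the asm stub below: it issues DIAGNOSE X'008'
 * (execute a CP command). The command address is passed in r2 and its
 * length in r3; the length is copied to r14 for the diag, and CP's
 * return value comes back in that register and is moved to r2 as the C
 * return value. On 64 bit, sam31/sam64 temporarily switch to 31-bit
 * addressing mode around the diag.
 */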
asm(
" .section .init.text,\"ax\",@progbits\n"
" .align 4\n"
" .type savesys_ipl_nss, @function\n"
"savesys_ipl_nss:\n"
#ifdef CONFIG_64BIT
" stmg 6,15,48(15)\n"
" lgr 14,3\n"
" sam31\n"
" diag 2,14,0x8\n"
" sam64\n"
" lgr 2,14\n"
" lmg 6,15,48(15)\n"
#else
" stm 6,15,24(15)\n"
" lr 14,3\n"
" diag 2,14,0x8\n"
" lr 2,14\n"
" lm 6,15,24(15)\n"
#endif
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n");
static noinline __init void create_kernel_nss(void)
{
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@@ -39,6 +68,7 @@ static noinline __init void create_kernel_nss(void)
unsigned int sinitrd_pfn, einitrd_pfn;
#endif
int response;
size_t len;
char *savesys_ptr;
char upper_command_line[COMMAND_LINE_SIZE];
char defsys_cmd[DEFSYS_CMD_SIZE];
@@ -49,8 +79,8 @@ static noinline __init void create_kernel_nss(void)
return;
/* Convert COMMAND_LINE to upper case */
for (i = 0; i < strlen(COMMAND_LINE); i++)
upper_command_line[i] = toupper(COMMAND_LINE[i]);
for (i = 0; i < strlen(boot_command_line); i++)
upper_command_line[i] = toupper(boot_command_line[i]);
savesys_ptr = strstr(upper_command_line, "SAVESYS=");
@@ -83,7 +113,8 @@
}
#endif
sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13",
defsys_cmd, min_size);
sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
kernel_nss_name, kernel_nss_name);
@@ -94,13 +125,24 @@
return;
}
__cpcmd(savesys_cmd, NULL, 0, &response);
len = strlen(savesys_cmd);
ASCEBC(savesys_cmd, len);
response = savesys_ipl_nss(savesys_cmd, len);
if (response != strlen(savesys_cmd)) {
/* On success: response is equal to the command size,
 * max SAVESYS_CMD_SIZE.
 * On error: response contains the numeric portion of the CP error
 * message; for SAVESYS it will be >= 263.
 */
if (response > SAVESYS_CMD_SIZE) {
kernel_nss_name[0] = '\0';
return;
}
/* re-setup boot command line with new ipl vm parms */
ipl_update_parameters();
setup_boot_command_line();
ipl_flags = IPL_NSS_VALID;
}
@@ -141,109 +183,11 @@ static noinline __init void detect_machine_type(void)
if (cpuinfo->cpu_id.version == 0xff)
machine_flags |= MACHINE_FLAG_VM;
/* Running on a P/390 ? */
if (cpuinfo->cpu_id.machine == 0x7490)
machine_flags |= MACHINE_FLAG_P390;
/* Running under KVM ? */
if (cpuinfo->cpu_id.version == 0xfe)
machine_flags |= MACHINE_FLAG_KVM;
}
#ifdef CONFIG_64BIT
static noinline __init int memory_fast_detect(void)
{
unsigned long val0 = 0;
unsigned long val1 = 0xc;
int ret = -ENOSYS;
if (ipl_flags & IPL_NSS_VALID)
return -ENOSYS;
asm volatile(
" diag %1,%2,0x260\n"
"0: lhi %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
if (ret || val0 != val1)
return -ENOSYS;
memory_chunk[0].size = val0 + 1;
return 0;
}
#else
static inline int memory_fast_detect(void)
{
return -ENOSYS;
}
#endif
static inline __init unsigned long __tprot(unsigned long addr)
{
int cc = -1;
asm volatile(
" tprot 0(%1),0\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (cc) : "a" (addr) : "cc");
return (unsigned long)cc;
}
/* Checking memory in 128KB increments. */
#define CHUNK_INCR (1UL << 17)
#define ADDR2G (1UL << 31)
static noinline __init void find_memory_chunks(unsigned long memsize)
{
unsigned long addr = 0, old_addr = 0;
unsigned long old_cc = CHUNK_READ_WRITE;
unsigned long cc;
int chunk = 0;
while (chunk < MEMORY_CHUNKS) {
cc = __tprot(addr);
while (cc == old_cc) {
addr += CHUNK_INCR;
if (memsize && addr >= memsize)
break;
#ifndef CONFIG_64BIT
if (addr == ADDR2G)
break;
#endif
cc = __tprot(addr);
}
if (old_addr != addr &&
(old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
memory_chunk[chunk].addr = old_addr;
memory_chunk[chunk].size = addr - old_addr;
memory_chunk[chunk].type = old_cc;
chunk++;
}
old_addr = addr;
old_cc = cc;
#ifndef CONFIG_64BIT
if (addr == ADDR2G)
break;
#endif
/*
* Finish memory detection at the first hole
* if storage size is unknown.
*/
if (cc == -1UL && !memsize)
break;
if (memsize && addr >= memsize)
break;
}
}
static __init void early_pgm_check_handler(void)
{
unsigned long addr;
@@ -380,23 +324,61 @@ static __init void detect_machine_facilities(void)
#endif
}
static __init void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the bss section in case it starts
* within the bss section. So we don't overwrite it when the bss
* section gets cleared.
*/
if (!INITRD_START || !INITRD_SIZE)
return;
if (INITRD_START >= (unsigned long) __bss_stop)
return;
memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = (unsigned long) __bss_stop;
#endif
}
/* Set up boot command line */
static void __init setup_boot_command_line(void)
{
char *parm = NULL;
/* copy arch command line */
strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
/* append IPL PARM data to the boot command line */
if (MACHINE_IS_VM) {
parm = boot_command_line + strlen(boot_command_line);
*parm++ = ' ';
get_ipl_vmparm(parm);
if (parm[0] == '=')
memmove(boot_command_line, parm + 1, strlen(parm));
}
}
/*
* Save ipl parameters, clear bss memory, initialize storage keys
* and create a kernel NSS at startup if the SAVESYS= parm is defined
*/
void __init startup_init(void)
{
unsigned long long memsize;
ipl_save_parameters();
rescue_initrd();
clear_bss_section();
init_kernel_storage_key();
lockdep_init();
lockdep_off();
detect_machine_type();
create_kernel_nss();
sort_main_extable();
setup_lowcore_early();
detect_machine_type();
ipl_update_parameters();
setup_boot_command_line();
create_kernel_nss();
detect_mvpg();
detect_ieee();
detect_csp();
@@ -404,18 +386,7 @@ void __init startup_init(void)
detect_diag44();
detect_machine_facilities();
setup_hpage();
sclp_read_info_early();
sclp_facilities_detect();
memsize = sclp_memory_detect();
#ifndef CONFIG_64BIT
/*
* Can't deal with more than 2G in 31 bit addressing mode, so
* limit the value in order to avoid strange side effects.
*/
if (memsize > ADDR2G)
memsize = ADDR2G;
#endif
if (memory_fast_detect() < 0)
find_memory_chunks((unsigned long) memsize);
detect_memory_layout(memory_chunk);
lockdep_on();
}


@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <asm/ipl.h>
#include <asm/smp.h>
#include <asm/setup.h>
@@ -22,6 +23,7 @@
#include <asm/ebcdic.h>
#include <asm/reset.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#define IPL_PARM_BLOCK_VERSION 0
@@ -121,6 +123,7 @@ enum ipl_method {
REIPL_METHOD_FCP_RO_VM,
REIPL_METHOD_FCP_DUMP,
REIPL_METHOD_NSS,
REIPL_METHOD_NSS_DIAG,
REIPL_METHOD_DEFAULT,
};
@@ -134,14 +137,15 @@ enum dump_method {
static int diag308_set_works = 0;
static struct ipl_parameter_block ipl_block;
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
static struct ipl_parameter_block *reipl_block_fcp;
static struct ipl_parameter_block *reipl_block_ccw;
static char reipl_nss_name[NSS_NAME_SIZE + 1];
static struct ipl_parameter_block *reipl_block_nss;
static int dump_capabilities = DUMP_TYPE_NONE;
static enum dump_type dump_type = DUMP_TYPE_NONE;
@@ -263,6 +267,56 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
/* VM IPL PARM routines */
static void reipl_get_ascii_vmparm(char *dest,
const struct ipl_parameter_block *ipb)
{
int i;
int len = 0;
char has_lowercase = 0;
if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
(ipb->ipl_info.ccw.vm_parm_len > 0)) {
len = ipb->ipl_info.ccw.vm_parm_len;
memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
*/
for (i = 0; i < len; i++)
if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
(dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
(dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
has_lowercase = 1;
break;
}
if (!has_lowercase)
EBC_TOLOWER(dest, len);
EBCASC(dest, len);
}
dest[len] = 0;
}
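/*
 * Editorial note: the hex ranges tested above are EBCDIC code points,
 * hence the gaps between 'i'/'j' and 'r'/'s': a-i = 0x81-0x89,
 * j-r = 0x91-0x99, s-z = 0xa2-0xa9.
 */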
void get_ipl_vmparm(char *dest)
{
if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
reipl_get_ascii_vmparm(dest, &ipl_block);
else
dest[0] = 0;
}
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
get_ipl_vmparm(parm);
return sprintf(page, "%s\n", parm);
}
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
static ssize_t sys_ipl_device_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
@@ -285,14 +339,8 @@ static struct kobj_attribute sys_ipl_device_attr =
static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
unsigned int size = IPL_PARMBLOCK_SIZE;
if (off > size)
return 0;
if (off + count > size)
count = size - off;
memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count);
return count;
return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
IPL_PARMBLOCK_SIZE);
}
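/*
 * Editorial note: memory_read_from_buffer() clamps the (off, count)
 * window against the size of the source buffer, memcpy()s that window
 * into the sysfs-supplied kernel buffer and returns the number of bytes
 * copied, replacing the open-coded bounds checks removed above.
 */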
static struct bin_attribute ipl_parameter_attr = {
@@ -310,12 +358,7 @@ static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *att
unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
if (off > size)
return 0;
if (off + count > size)
count = size - off;
memcpy(buf, scp_data + off, count);
return count;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
static struct bin_attribute ipl_scp_data_attr = {
@@ -370,15 +413,27 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
static struct attribute *ipl_ccw_attrs[] = {
static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
NULL,
};
static struct attribute *ipl_ccw_attrs_lpar[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
NULL,
};
static struct attribute_group ipl_ccw_attr_group = {
.attrs = ipl_ccw_attrs,
static struct attribute_group ipl_ccw_attr_group_vm = {
.attrs = ipl_ccw_attrs_vm,
};
static struct attribute_group ipl_ccw_attr_group_lpar = {
.attrs = ipl_ccw_attrs_lpar
};
/* NSS ipl device attributes */
@@ -388,6 +443,8 @@ DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
static struct attribute *ipl_nss_attrs[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_nss_name_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
NULL,
};
@@ -450,7 +507,12 @@ static int __init ipl_init(void)
}
switch (ipl_info.type) {
case IPL_TYPE_CCW:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
if (MACHINE_IS_VM)
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_ccw_attr_group_vm);
else
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_ccw_attr_group_lpar);
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
@@ -481,6 +543,83 @@ static struct shutdown_action __refdata ipl_action = {
* reipl shutdown action: Reboot Linux on shutdown.
*/
/* VM IPL PARM attributes */
static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
char *page)
{
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
reipl_get_ascii_vmparm(vmparm, ipb);
return sprintf(page, "%s\n", vmparm);
}
static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
size_t vmparm_max,
const char *buf, size_t len)
{
int i, ip_len;
/* ignore trailing newline */
ip_len = len;
if ((len > 0) && (buf[len - 1] == '\n'))
ip_len--;
if (ip_len > vmparm_max)
return -EINVAL;
/* parm is used to store kernel options, check for common chars */
for (i = 0; i < ip_len; i++)
if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
return -EINVAL;
memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
ipb->ipl_info.ccw.vm_parm_len = ip_len;
if (ip_len > 0) {
ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
} else {
ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
}
return len;
}
/* NSS wrapper */
static ssize_t reipl_nss_vmparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return reipl_generic_vmparm_show(reipl_block_nss, page);
}
static ssize_t reipl_nss_vmparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return reipl_generic_vmparm_store(reipl_block_nss, 56, buf, len);
}
/* CCW wrapper */
static ssize_t reipl_ccw_vmparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return reipl_generic_vmparm_show(reipl_block_ccw, page);
}
static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return reipl_generic_vmparm_store(reipl_block_ccw, 64, buf, len);
}
static struct kobj_attribute sys_reipl_nss_vmparm_attr =
__ATTR(parm, S_IRUGO | S_IWUSR, reipl_nss_vmparm_show,
reipl_nss_vmparm_store);
static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
__ATTR(parm, S_IRUGO | S_IWUSR, reipl_ccw_vmparm_show,
reipl_ccw_vmparm_store);
/* FCP reipl device attributes */
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
@@ -513,27 +652,26 @@ static struct attribute_group reipl_fcp_attr_group = {
DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_ccw->ipl_info.ccw.devno);
static void reipl_get_ascii_loadparm(char *loadparm)
static void reipl_get_ascii_loadparm(char *loadparm,
struct ipl_parameter_block *ibp)
{
memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param,
LOADPARM_LEN);
memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
strstrip(loadparm);
}
static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
char *page)
{
char buf[LOADPARM_LEN + 1];
reipl_get_ascii_loadparm(buf);
reipl_get_ascii_loadparm(buf, ipb);
return sprintf(page, "%s\n", buf);
}
static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
const char *buf, size_t len)
{
int i, lp_len;
@@ -552,35 +690,128 @@ static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
return -EINVAL;
}
/* initialize loadparm with blanks */
memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN);
memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
/* copy and convert to ebcdic */
memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len);
ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN);
memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
return len;
}
static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
reipl_ccw_loadparm_store);
/* NSS wrapper */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return reipl_generic_loadparm_show(reipl_block_nss, page);
}
static struct attribute *reipl_ccw_attrs[] = {
static ssize_t reipl_nss_loadparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return reipl_generic_loadparm_store(reipl_block_nss, buf, len);
}
/* CCW wrapper */
static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return reipl_generic_loadparm_show(reipl_block_ccw, page);
}
static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
return reipl_generic_loadparm_store(reipl_block_ccw, buf, len);
}
static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show,
reipl_ccw_loadparm_store);
static struct attribute *reipl_ccw_attrs_vm[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
&sys_reipl_ccw_vmparm_attr.attr,
NULL,
};
static struct attribute *reipl_ccw_attrs_lpar[] = {
&sys_reipl_ccw_device_attr.attr,
&sys_reipl_ccw_loadparm_attr.attr,
NULL,
};
static struct attribute_group reipl_ccw_attr_group = {
static struct attribute_group reipl_ccw_attr_group_vm = {
.name = IPL_CCW_STR,
.attrs = reipl_ccw_attrs,
.attrs = reipl_ccw_attrs_vm,
};
static struct attribute_group reipl_ccw_attr_group_lpar = {
.name = IPL_CCW_STR,
.attrs = reipl_ccw_attrs_lpar,
};
/* NSS reipl device attributes */
static void reipl_get_ascii_nss_name(char *dst,
struct ipl_parameter_block *ipb)
{
memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
EBCASC(dst, NSS_NAME_SIZE);
dst[NSS_NAME_SIZE] = 0;
}
DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name);
static ssize_t reipl_nss_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char nss_name[NSS_NAME_SIZE + 1] = {};
reipl_get_ascii_nss_name(nss_name, reipl_block_nss);
return sprintf(page, "%s\n", nss_name);
}
static ssize_t reipl_nss_name_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t len)
{
int nss_len;
/* ignore trailing newline */
nss_len = len;
if ((len > 0) && (buf[len - 1] == '\n'))
nss_len--;
if (nss_len > NSS_NAME_SIZE)
return -EINVAL;
memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
if (nss_len > 0) {
reipl_block_nss->ipl_info.ccw.vm_flags |=
DIAG308_VM_FLAGS_NSS_VALID;
memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len);
ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
} else {
reipl_block_nss->ipl_info.ccw.vm_flags &=
~DIAG308_VM_FLAGS_NSS_VALID;
}
return len;
}
static struct kobj_attribute sys_reipl_nss_name_attr =
__ATTR(name, S_IRUGO | S_IWUSR, reipl_nss_name_show,
reipl_nss_name_store);
static struct kobj_attribute sys_reipl_nss_loadparm_attr =
__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nss_loadparm_show,
reipl_nss_loadparm_store);
static struct attribute *reipl_nss_attrs[] = {
&sys_reipl_nss_name_attr.attr,
&sys_reipl_nss_loadparm_attr.attr,
&sys_reipl_nss_vmparm_attr.attr,
NULL,
};
@@ -617,7 +848,10 @@ static int reipl_set_type(enum ipl_type type)
reipl_method = REIPL_METHOD_FCP_DUMP;
break;
case IPL_TYPE_NSS:
reipl_method = REIPL_METHOD_NSS;
if (diag308_set_works)
reipl_method = REIPL_METHOD_NSS_DIAG;
else
reipl_method = REIPL_METHOD_NSS;
break;
case IPL_TYPE_UNKNOWN:
reipl_method = REIPL_METHOD_DEFAULT;
@@ -655,11 +889,38 @@ static struct kobj_attribute reipl_type_attr =
static struct kset *reipl_kset;
static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
const enum ipl_method m)
{
char loadparm[LOADPARM_LEN + 1] = {};
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
char nss_name[NSS_NAME_SIZE + 1] = {};
size_t pos = 0;
reipl_get_ascii_loadparm(loadparm, ipb);
reipl_get_ascii_nss_name(nss_name, ipb);
reipl_get_ascii_vmparm(vmparm, ipb);
switch (m) {
case REIPL_METHOD_CCW_VM:
pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno);
break;
case REIPL_METHOD_NSS:
pos = sprintf(dst, "IPL %s", nss_name);
break;
default:
break;
}
if (strlen(loadparm) > 0)
pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
if (strlen(vmparm) > 0)
sprintf(dst + pos, " PARM %s", vmparm);
}
static void reipl_run(struct shutdown_trigger *trigger)
{
struct ccw_dev_id devid;
static char buf[100];
char loadparm[LOADPARM_LEN + 1];
static char buf[128];
switch (reipl_method) {
case REIPL_METHOD_CCW_CIO:
@@ -668,13 +929,7 @@ static void reipl_run(struct shutdown_trigger *trigger)
reipl_ccw_dev(&devid);
break;
case REIPL_METHOD_CCW_VM:
reipl_get_ascii_loadparm(loadparm);
if (strlen(loadparm) == 0)
sprintf(buf, "IPL %X CLEAR",
reipl_block_ccw->ipl_info.ccw.devno);
else
sprintf(buf, "IPL %X CLEAR LOADPARM '%s'",
reipl_block_ccw->ipl_info.ccw.devno, loadparm);
get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM);
__cpcmd(buf, NULL, 0, NULL);
break;
case REIPL_METHOD_CCW_DIAG:
@@ -691,8 +946,12 @@ static void reipl_run(struct shutdown_trigger *trigger)
case REIPL_METHOD_FCP_RO_VM:
__cpcmd("IPL", NULL, 0, NULL);
break;
case REIPL_METHOD_NSS_DIAG:
diag308(DIAG308_SET, reipl_block_nss);
diag308(DIAG308_IPL, NULL);
break;
case REIPL_METHOD_NSS:
sprintf(buf, "IPL %s", reipl_nss_name);
get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS);
__cpcmd(buf, NULL, 0, NULL);
break;
case REIPL_METHOD_DEFAULT:
@@ -707,16 +966,36 @@ static void reipl_run(struct shutdown_trigger *trigger)
disabled_wait((unsigned long) __builtin_return_address(0));
}
static void __init reipl_probe(void)
static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
{
void *buffer;
ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
}
buffer = (void *) get_zeroed_page(GFP_KERNEL);
if (!buffer)
return;
if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK)
diag308_set_works = 1;
free_page((unsigned long)buffer);
static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
{
/* LOADPARM */
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
memcpy(ipb->ipl_info.ccw.load_parm,
&sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
/* VM PARM */
if (MACHINE_IS_VM && diag308_set_works &&
(ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
ipb->ipl_info.ccw.vm_parm_len =
ipl_block.ipl_info.ccw.vm_parm_len;
memcpy(ipb->ipl_info.ccw.vm_parm,
ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
}
}
static int __init reipl_nss_init(void)
@@ -725,10 +1004,31 @@ static int __init reipl_nss_init(void)
if (!MACHINE_IS_VM)
return 0;
reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_nss)
return -ENOMEM;
if (!diag308_set_works)
sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO;
rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
if (rc)
return rc;
strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
reipl_block_ccw_init(reipl_block_nss);
if (ipl_info.type == IPL_TYPE_NSS) {
memset(reipl_block_nss->ipl_info.ccw.nss_name,
' ', NSS_NAME_SIZE);
memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
kernel_nss_name, strlen(kernel_nss_name));
ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
reipl_block_nss->ipl_info.ccw.vm_flags |=
DIAG308_VM_FLAGS_NSS_VALID;
reipl_block_ccw_fill_parms(reipl_block_nss);
}
reipl_capabilities |= IPL_TYPE_NSS;
return 0;
}
@@ -740,28 +1040,27 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_ccw)
return -ENOMEM;
rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group);
if (rc) {
free_page((unsigned long)reipl_block_ccw);
return rc;
if (MACHINE_IS_VM) {
if (!diag308_set_works)
sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO;
rc = sysfs_create_group(&reipl_kset->kobj,
&reipl_ccw_attr_group_vm);
} else {
if (!diag308_set_works)
sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
rc = sysfs_create_group(&reipl_kset->kobj,
&reipl_ccw_attr_group_lpar);
}
reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
&sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
LOADPARM_LEN);
if (!MACHINE_IS_VM && !diag308_set_works)
sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
if (ipl_info.type == IPL_TYPE_CCW)
if (rc)
return rc;
reipl_block_ccw_init(reipl_block_ccw);
if (ipl_info.type == IPL_TYPE_CCW) {
reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
reipl_block_ccw_fill_parms(reipl_block_ccw);
}
reipl_capabilities |= IPL_TYPE_CCW;
return 0;
}
@@ -1298,7 +1597,6 @@ static void __init shutdown_actions_init(void)
static int __init s390_ipl_init(void)
{
reipl_probe();
sclp_get_ipl_info(&sclp_ipl_info);
shutdown_actions_init();
shutdown_triggers_init();
@@ -1405,6 +1703,12 @@ void __init setup_ipl(void)
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
}
void __init ipl_update_parameters(void)
{
if (diag308(DIAG308_STORE, &ipl_block) == DIAG308_RC_OK)
diag308_set_works = 1;
}
void __init ipl_save_parameters(void)
{
struct cio_iplinfo iplinfo;


@@ -41,10 +41,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
return -EINVAL;
if ((unsigned long)p->addr & 0x01) {
printk("Attempt to register kprobe at an unaligned address\n");
if ((unsigned long)p->addr & 0x01)
return -EINVAL;
}
/* Use the get_insn_slot() facility for correctness */
if (!(p->ainsn.insn = get_insn_slot()))


@@ -52,7 +52,6 @@ void machine_kexec_cleanup(struct kimage *image)
void machine_shutdown(void)
{
printk(KERN_INFO "kexec: machine_shutdown called\n");
}
void machine_kexec(struct kimage *image)


@@ -0,0 +1,100 @@
/*
* Copyright IBM Corp. 2008
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
static int memory_fast_detect(struct mem_chunk *chunk)
{
unsigned long val0 = 0;
unsigned long val1 = 0xc;
int rc = -EOPNOTSUPP;
if (ipl_flags & IPL_NSS_VALID)
return -EOPNOTSUPP;
asm volatile(
" diag %1,%2,0x260\n"
"0: lhi %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc), "+d" (val0), "+d" (val1) : : "cc");
if (rc || val0 != val1)
return -EOPNOTSUPP;
chunk->size = val0 + 1;
return 0;
}
static inline int tprot(unsigned long addr)
{
int rc = -EFAULT;
asm volatile(
" tprot 0(%1),0\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "+d" (rc) : "a" (addr) : "cc");
return rc;
}
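/*
 * Editorial note: tprot returns the TPROT condition code: 0 = fetch and
 * store permitted, 1 = fetch permitted but store protected, 2 = neither
 * permitted, 3 = translation not available. If the instruction itself
 * faults, the exception table entry leaves rc at -EFAULT.
 * find_memory_chunks() below keeps only chunks of type CHUNK_READ_WRITE
 * (0) or CHUNK_READ_ONLY (1).
 */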
#define ADDR2G (1ULL << 31)
static void find_memory_chunks(struct mem_chunk chunk[])
{
unsigned long long memsize, rnmax, rzm;
unsigned long addr = 0, size;
int i = 0, type;
rzm = sclp_get_rzm();
rnmax = sclp_get_rnmax();
memsize = rzm * rnmax;
if (!rzm)
rzm = 1ULL << 17;
if (sizeof(long) == 4) {
rzm = min(ADDR2G, rzm);
memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
}
do {
size = 0;
type = tprot(addr);
do {
size += rzm;
if (memsize && addr + size >= memsize)
break;
} while (type == tprot(addr + size));
if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
chunk[i].addr = addr;
chunk[i].size = size;
chunk[i].type = type;
i++;
}
addr += size;
} while (addr < memsize && i < MEMORY_CHUNKS);
}
void detect_memory_layout(struct mem_chunk chunk[])
{
unsigned long flags, cr0;
memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
if (memory_fast_detect(&chunk[0]) == 0)
return;
/* Disable IRQs, DAT and low address protection so tprot does the
* right thing and we don't get scheduled away with low address
* protection disabled.
*/
flags = __raw_local_irq_stnsm(0xf8);
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28);
find_memory_chunks(chunk);
__ctl_load(cr0, 0, 0);
__raw_local_irq_ssm(flags);
}
EXPORT_SYMBOL(detect_memory_layout);


@@ -75,46 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}
/*
* Need to know about CPUs going idle?
*/
static ATOMIC_NOTIFIER_HEAD(idle_chain);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
int register_idle_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&idle_chain, nb);
}
EXPORT_SYMBOL(register_idle_notifier);
int unregister_idle_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&idle_chain, nb);
}
EXPORT_SYMBOL(unregister_idle_notifier);
static int s390_idle_enter(void)
{
struct s390_idle_data *idle;
int nr_calls = 0;
void *hcpu;
int rc;
hcpu = (void *)(long)smp_processor_id();
rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
&nr_calls);
if (rc == NOTIFY_BAD) {
nr_calls--;
__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
hcpu, nr_calls, NULL);
return rc;
}
idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_count++;
idle->in_idle = 1;
idle->idle_enter = get_clock();
spin_unlock(&idle->lock);
vtime_stop_cpu_timer();
return NOTIFY_OK;
}
@@ -122,13 +95,12 @@ void s390_idle_leave(void)
{
struct s390_idle_data *idle;
vtime_start_cpu_timer();
idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_time += get_clock() - idle->idle_enter;
idle->in_idle = 0;
spin_unlock(&idle->lock);
atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
(void *)(long) smp_processor_id());
}
extern void s390_handle_mcck(void);


@@ -33,6 +33,8 @@
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <asm/segment.h>
#include <asm/page.h>
@@ -47,6 +49,11 @@
#include "compat_ptrace.h"
#endif
enum s390_regset {
REGSET_GENERAL,
REGSET_FP,
};
static void
FixPerRegisters(struct task_struct *task)
{
@@ -126,24 +133,10 @@ ptrace_disable(struct task_struct *child)
* struct user contain pad bytes that should be read as zeroes.
* Lovely...
*/
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
struct user *dummy = NULL;
addr_t offset, tmp, mask;
/*
* Stupid gdb peeks/pokes the access registers in 64 bit with
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
if (addr >= (addr_t) &dummy->regs.acrs &&
addr < (addr_t) &dummy->regs.orig_gpr2)
mask = 3;
#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
addr_t offset, tmp;
if (addr < (addr_t) &dummy->regs.acrs) {
/*
@@ -197,24 +190,18 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
} else
tmp = 0;
return put_user(tmp, (addr_t __user *) data);
return tmp;
}
/*
* Write a word to the user area of a process at location addr. This
* operation does have an additional problem compared to peek_user.
* Stores to the program status word and to the floating point
* control register need to be checked for validity.
*/
static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
struct user *dummy = NULL;
addr_t offset, mask;
addr_t tmp, mask;
/*
* Stupid gdb peeks/pokes the access registers in 64 bit with
* an alignment of 4. Programmers from hell indeed...
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
@ -225,6 +212,21 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
tmp = __peek_user(child, addr);
return put_user(tmp, (addr_t __user *) data);
}
/*
* Write a word to the user area of a process at location addr. This
* operation does have an additional problem compared to peek_user.
* Stores to the program status word and to the floating point
* control register need to be checked for validity.
*/
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
struct user *dummy = NULL;
addr_t offset;
if (addr < (addr_t) &dummy->regs.acrs) {
/*
* psw and gprs are stored on the stack
@ -292,6 +294,28 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
return 0;
}
static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
struct user *dummy = NULL;
addr_t mask;
/*
* Stupid gdb peeks/pokes the access registers in 64 bit with
* an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
if (addr >= (addr_t) &dummy->regs.acrs &&
addr < (addr_t) &dummy->regs.orig_gpr2)
mask = 3;
#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
return __poke_user(child, addr, data);
}
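
The refactoring above follows one pattern throughout this ptrace code: the bounds and alignment check stays in a thin peek_user/poke_user wrapper, while an unchecked __peek_user/__poke_user worker becomes callable from the new regset code, which produces valid offsets by construction. A minimal userspace sketch of that validate-then-delegate split (the offsets, the user-area size, and the access-register window are made-up stand-ins, not the real struct user layout):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define USER_AREA_SIZE 512
#define ADDR_MASK (sizeof(long) - 1)
#define ACRS_START 256	/* hypothetical offset of the access registers */
#define ACRS_END 320

static long __peek(uintptr_t addr)
{
	/* unchecked worker: caller guarantees addr is valid and aligned */
	return (long)addr * 2;	/* placeholder for the real lookup */
}

static int peek(uintptr_t addr, long *out)
{
	uintptr_t mask = ADDR_MASK;

	/* the access registers may be accessed with 4-byte alignment */
	if (addr >= ACRS_START && addr < ACRS_END)
		mask = 3;
	if ((addr & mask) || addr > USER_AREA_SIZE - sizeof(long))
		return -EIO;
	*out = __peek(addr);
	return 0;
}

int main(void)
{
	long v;

	printf("aligned: %d\n", peek(8, &v));	/* 0: accepted */
	printf("unaligned: %d\n", peek(9, &v));	/* -EIO: rejected */
	return 0;
}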
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
ptrace_area parea;
@ -367,18 +391,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/*
* Same as peek_user but for a 31 bit program.
*/
static int
peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
struct user32 *dummy32 = NULL;
per_struct32 *dummy_per32 = NULL;
addr_t offset;
__u32 tmp;
if (!test_thread_flag(TIF_31BIT) ||
(addr & 3) || addr > sizeof(struct user) - 3)
return -EIO;
if (addr < (addr_t) &dummy32->regs.acrs) {
/*
* psw and gprs are stored on the stack
@ -435,25 +454,32 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
} else
tmp = 0;
return tmp;
}
static int peek_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
__u32 tmp;
if (!test_thread_flag(TIF_31BIT) ||
(addr & 3) || addr > sizeof(struct user) - 3)
return -EIO;
tmp = __peek_user_compat(child, addr);
return put_user(tmp, (__u32 __user *) data);
}
/*
* Same as poke_user but for a 31 bit program.
*/
static int
poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
static int __poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
struct user32 *dummy32 = NULL;
per_struct32 *dummy_per32 = NULL;
__u32 tmp = (__u32) data;
addr_t offset;
__u32 tmp;
if (!test_thread_flag(TIF_31BIT) ||
(addr & 3) || addr > sizeof(struct user32) - 3)
return -EIO;
tmp = (__u32) data;
if (addr < (addr_t) &dummy32->regs.acrs) {
/*
@ -528,6 +554,16 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
return 0;
}
static int poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
if (!test_thread_flag(TIF_31BIT) ||
(addr & 3) || addr > sizeof(struct user32) - 3)
return -EIO;
return __poke_user_compat(child, addr, data);
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
@ -539,11 +575,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
switch (request) {
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
return peek_user_emu31(child, addr, data);
return peek_user_compat(child, addr, data);
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
return poke_user_emu31(child, addr, data);
return poke_user_compat(child, addr, data);
case PTRACE_PEEKUSR_AREA:
case PTRACE_POKEUSR_AREA:
@ -555,13 +591,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
copied = 0;
while (copied < parea.len) {
if (request == PTRACE_PEEKUSR_AREA)
ret = peek_user_emu31(child, addr, data);
ret = peek_user_compat(child, addr, data);
else {
__u32 utmp;
if (get_user(utmp,
(__u32 __force __user *) data))
return -EFAULT;
ret = poke_user_emu31(child, addr, utmp);
ret = poke_user_compat(child, addr, utmp);
}
if (ret)
return ret;
@ -610,3 +646,240 @@ syscall_trace(struct pt_regs *regs, int entryexit)
regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
regs->gprs[4], regs->gprs[5]);
}
/*
* user_regset definitions.
*/
static int s390_regs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (target == current)
save_access_regs(target->thread.acrs);
if (kbuf) {
unsigned long *k = kbuf;
while (count > 0) {
*k++ = __peek_user(target, pos);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
unsigned long __user *u = ubuf;
while (count > 0) {
if (__put_user(__peek_user(target, pos), u++))
return -EFAULT;
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return 0;
}
static int s390_regs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int rc = 0;
if (target == current)
save_access_regs(target->thread.acrs);
if (kbuf) {
const unsigned long *k = kbuf;
while (count > 0 && !rc) {
rc = __poke_user(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const unsigned long __user *u = ubuf;
while (count > 0 && !rc) {
unsigned long word;
rc = __get_user(word, u++);
if (rc)
break;
rc = __poke_user(target, pos, word);
count -= sizeof(*u);
pos += sizeof(*u);
}
}
if (rc == 0 && target == current)
restore_access_regs(target->thread.acrs);
return rc;
}
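
s390_regs_get and s390_regs_set show the standard regset calling convention: exactly one of kbuf/ubuf is non-NULL, and the area is walked one word at a time through the unchecked helpers. A small userspace sketch of that dual-path loop, with peek_word standing in for __peek_user and memcpy standing in for __put_user:

#include <stdio.h>
#include <string.h>

static unsigned long peek_word(unsigned int pos)
{
	return 0x1000 + pos;	/* placeholder register contents */
}

static int regs_get(unsigned int pos, unsigned int count,
		    void *kbuf, void *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = peek_word(pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long *u = ubuf;	/* __put_user in the kernel */
		while (count > 0) {
			unsigned long w = peek_word(pos);
			memcpy(u++, &w, sizeof(w));
			count -= sizeof(w);
			pos += sizeof(w);
		}
	}
	return 0;
}

int main(void)
{
	unsigned long buf[4];

	regs_get(0, sizeof(buf), buf, NULL);
	printf("%lx %lx\n", buf[0], buf[3]);
	return 0;
}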
static int s390_fpregs_get(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, void *kbuf, void __user *ubuf)
{
if (target == current)
save_fp_regs(&target->thread.fp_regs);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
}
static int s390_fpregs_set(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, const void *kbuf,
const void __user *ubuf)
{
int rc = 0;
if (target == current)
save_fp_regs(&target->thread.fp_regs);
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
0, offsetof(s390_fp_regs, fprs));
if (rc)
return rc;
if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
return -EINVAL;
target->thread.fp_regs.fpc = fpc[0];
}
if (rc == 0 && count > 0)
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.fp_regs.fprs,
offsetof(s390_fp_regs, fprs), -1);
if (rc == 0 && target == current)
restore_fp_regs(&target->thread.fp_regs);
return rc;
}
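
s390_fpregs_set copies in and validates the floating point control word before committing anything, so a bad FPC can be rejected with -EINVAL without the target having been modified. A userspace sketch of that validate-before-commit shape (the mask value is illustrative, not the real FPC_VALID_MASK):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FPC_VALID_MASK 0xfcfff0ffu	/* illustrative, not the real mask */

struct fp_regs {
	uint32_t fpc;
	uint32_t pad;
	uint64_t fprs[16];
};

static int fpregs_set(struct fp_regs *tgt, const struct fp_regs *in)
{
	uint32_t fpc[2] = { in->fpc, in->pad };

	/* reject reserved FPC bits and a nonzero pad word up front */
	if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
		return -EINVAL;
	tgt->fpc = fpc[0];
	memcpy(tgt->fprs, in->fprs, sizeof(tgt->fprs));
	return 0;
}

int main(void)
{
	struct fp_regs regs = { 0 };
	struct fp_regs good = { .fpc = 0 };
	struct fp_regs bad = { .pad = 1 };

	printf("good: %d\n", fpregs_set(&regs, &good));	/* 0 */
	printf("bad: %d\n", fpregs_set(&regs, &bad));	/* -EINVAL */
	return 0;
}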
static const struct user_regset s390_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(s390_regs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = s390_regs_get,
.set = s390_regs_set,
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(s390_fp_regs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = s390_fpregs_get,
.set = s390_fpregs_set,
},
};
static const struct user_regset_view user_s390_view = {
.name = UTS_MACHINE,
.e_machine = EM_S390,
.regsets = s390_regsets,
.n = ARRAY_SIZE(s390_regsets)
};
#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (target == current)
save_access_regs(target->thread.acrs);
if (kbuf) {
compat_ulong_t *k = kbuf;
while (count > 0) {
*k++ = __peek_user_compat(target, pos);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
compat_ulong_t __user *u = ubuf;
while (count > 0) {
if (__put_user(__peek_user_compat(target, pos), u++))
return -EFAULT;
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return 0;
}
static int s390_compat_regs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int rc = 0;
if (target == current)
save_access_regs(target->thread.acrs);
if (kbuf) {
const compat_ulong_t *k = kbuf;
while (count > 0 && !rc) {
rc = __poke_user_compat(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const compat_ulong_t __user *u = ubuf;
while (count > 0 && !rc) {
compat_ulong_t word;
rc = __get_user(word, u++);
if (rc)
break;
rc = __poke_user_compat(target, pos, word);
count -= sizeof(*u);
pos += sizeof(*u);
}
}
if (rc == 0 && target == current)
restore_access_regs(target->thread.acrs);
return rc;
}
static const struct user_regset s390_compat_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
.align = sizeof(compat_long_t),
.get = s390_compat_regs_get,
.set = s390_compat_regs_set,
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
.align = sizeof(compat_long_t),
.get = s390_fpregs_get,
.set = s390_fpregs_set,
},
};
static const struct user_regset_view user_s390_compat_view = {
.name = "s390",
.e_machine = EM_S390,
.regsets = s390_compat_regsets,
.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
return &user_s390_compat_view;
#endif
return &user_s390_view;
}

View File

@ -77,7 +77,7 @@ unsigned long machine_flags;
unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];
struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS];
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end;
@ -205,12 +205,6 @@ static void __init conmode_default(void)
SET_CONSOLE_SCLP;
#endif
}
} else if (MACHINE_IS_P390) {
#if defined(CONFIG_TN3215_CONSOLE)
SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
SET_CONSOLE_3270;
#endif
} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
@ -221,18 +215,17 @@ static void __init conmode_default(void)
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
static void __init setup_zfcpdump(unsigned int console_devno)
{
static char str[64];
static char str[41];
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return;
if (console_devno != -1)
sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
ipl_info.data.fcp.dev_id.devno, console_devno);
else
sprintf(str, "cio_ignore=all,!0.0.%04x",
sprintf(str, " cio_ignore=all,!0.0.%04x",
ipl_info.data.fcp.dev_id.devno);
strcat(COMMAND_LINE, " ");
strcat(COMMAND_LINE, str);
strcat(boot_command_line, str);
console_loglevel = 2;
}
#else
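
The setup_zfcpdump hunk drops the manual strcat of a separating blank by baking a leading space into the format string and appending straight to boot_command_line; the static buffer shrinks to 41 bytes, enough for the longest variant (two !0.0.xxxx entries, 35 characters) plus the terminator. A userspace sketch of the sizing, with made-up device numbers:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char str[41];	/* 35 chars + NUL for the two-device form, with slack */
	unsigned int dump_devno = 0x4711, console_devno = 0x001a;

	snprintf(str, sizeof(str), " cio_ignore=all,!0.0.%04x,!0.0.%04x",
		 dump_devno, console_devno);
	printf("append:%s (%zu of %zu bytes)\n", str, strlen(str) + 1,
	       sizeof(str));
	return 0;
}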
@ -289,32 +282,6 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
/*
* "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
*/
static int __init early_parse_ipldelay(char *p)
{
unsigned long delay = 0;
delay = simple_strtoul(p, &p, 0);
switch (*p) {
case 's':
case 'S':
delay *= 1000000;
break;
case 'm':
case 'M':
delay *= 60 * 1000000;
}
/* now wait for the requested amount of time */
udelay(delay);
return 0;
}
early_param("ipldelay", early_parse_ipldelay);
#ifdef CONFIG_S390_SWITCH_AMODE
#ifdef CONFIG_PGSTE
unsigned int switch_amode = 1;
@ -804,11 +771,9 @@ setup_arch(char **cmdline_p)
printk("We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */
/* Save unparsed command line copy for /proc/cmdline */
strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
*cmdline_p = COMMAND_LINE;
*(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has already been set up in early.c */
*cmdline_p = boot_command_line;
ROOT_DEV = Root_RAM0;

View File

@ -3,7 +3,7 @@
* Time of day based timer functions.
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Copyright IBM Corp. 1999, 2008
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
@ -31,6 +31,7 @@
#include <linux/notifier.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
@ -162,7 +163,7 @@ void init_cpu_timer(void)
/* Enable clock comparator timer interrupt. */
__ctl_set_bit(0,11);
/* Always allow ETR external interrupts, even without an ETR. */
/* Always allow the timing alert external interrupt. */
__ctl_set_bit(0, 4);
}
@ -170,8 +171,21 @@ static void clock_comparator_interrupt(__u16 code)
{
}
static void etr_timing_alert(struct etr_irq_parm *);
static void stp_timing_alert(struct stp_irq_parm *);
static void timing_alert_interrupt(__u16 code)
{
if (S390_lowcore.ext_params & 0x00c40000)
etr_timing_alert((struct etr_irq_parm *)
&S390_lowcore.ext_params);
if (S390_lowcore.ext_params & 0x00038000)
stp_timing_alert((struct stp_irq_parm *)
&S390_lowcore.ext_params);
}
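
The new timing_alert_interrupt demultiplexes a single external interrupt into ETR and STP handling by testing disjoint bit ranges of the interruption parameter word (0x00c40000 for ETR, 0x00038000 for STP), then casting the same word to the matching parameter structure. A sketch of that dispatch on plain masks:

#include <stdint.h>
#include <stdio.h>

#define ETR_PARM_MASK 0x00c40000u	/* port-change / port-alert bits */
#define STP_PARM_MASK 0x00038000u	/* tsc / lac / tcpc bits */

static void timing_alert(uint32_t ext_params)
{
	if (ext_params & ETR_PARM_MASK)
		printf("ETR timing alert (parm %08x)\n", (unsigned)ext_params);
	if (ext_params & STP_PARM_MASK)
		printf("STP timing alert (parm %08x)\n", (unsigned)ext_params);
}

int main(void)
{
	timing_alert(0x00400000);	/* ETR only */
	timing_alert(0x00008000);	/* STP only */
	return 0;
}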
static void etr_reset(void);
static void etr_ext_handler(__u16);
static void stp_reset(void);
/*
* Get the TOD clock running.
@ -181,6 +195,7 @@ static u64 __init reset_tod_clock(void)
u64 time;
etr_reset();
stp_reset();
if (store_clock(&time) == 0)
return time;
/* TOD clock not running. Set the clock to Unix Epoch. */
@ -231,8 +246,9 @@ void __init time_init(void)
if (clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
/* request the etr external interrupt */
if (register_early_external_interrupt(0x1406, etr_ext_handler,
/* request the timing alert external interrupt */
if (register_early_external_interrupt(0x1406,
timing_alert_interrupt,
&ext_int_etr_cc) != 0)
panic("Couldn't request external interrupt 0x1406");
@ -244,11 +260,113 @@ void __init time_init(void)
#endif
}
/*
* The time is "clock". old is what we think the time is.
* Adjust the value by a multiple of jiffies and add the delta to ntp.
* "delay" is an approximation how long the synchronization took. If
* the time correction is positive, then "delay" is subtracted from
* the time difference and only the remaining part is passed to ntp.
*/
static unsigned long long adjust_time(unsigned long long old,
unsigned long long clock,
unsigned long long delay)
{
unsigned long long delta, ticks;
struct timex adjust;
if (clock > old) {
/* It is later than we thought. */
delta = ticks = clock - old;
delta = ticks = (delta < delay) ? 0 : delta - delay;
delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
adjust.offset = ticks * (1000000 / HZ);
} else {
/* It is earlier than we thought. */
delta = ticks = old - clock;
delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
delta = -delta;
adjust.offset = -ticks * (1000000 / HZ);
}
jiffies_timer_cc += delta;
if (adjust.offset != 0) {
printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
adjust.offset);
adjust.modes = ADJ_OFFSET_SINGLESHOT;
do_adjtimex(&adjust);
}
return delta;
}
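
adjust_time splits the raw TOD delta into a whole-jiffies part, handed to ntp as a microsecond offset via do_adjtimex and also added to jiffies_timer_cc in clock units, while the sub-jiffy remainder is dropped. A userspace sketch of the arithmetic, assuming HZ=100 and the TOD format of 4096 clock units per microsecond:

#include <stdio.h>

#define HZ 100
#define CLK_TICKS_PER_JIFFY (4096ULL * 1000000 / HZ)	/* TOD units */

int main(void)
{
	unsigned long long old = 1000;
	unsigned long long clock = old + 3 * CLK_TICKS_PER_JIFFY + 5;
	unsigned long long delta, ticks, rem;
	long offset;

	delta = ticks = clock - old;
	rem = ticks % CLK_TICKS_PER_JIFFY;	/* do_div() remainder */
	ticks /= CLK_TICKS_PER_JIFFY;		/* do_div() quotient */
	delta -= rem;			/* whole-jiffy part, in TOD units */
	offset = ticks * (1000000 / HZ);	/* same amount, in microseconds */
	printf("ntp offset %ld us, jiffies_timer_cc += %llu\n", offset, delta);
	return 0;
}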
static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static unsigned long clock_sync_flags;
#define CLOCK_SYNC_HAS_ETR 0
#define CLOCK_SYNC_HAS_STP 1
#define CLOCK_SYNC_ETR 2
#define CLOCK_SYNC_STP 3
/*
* The synchronous get_clock function. It will write the current clock
* value to the clock pointer and return 0 if the clock is in sync with
* the external time source. It will return -ENOSYS if the clock mode
* is local, and -EAGAIN if the clock is not in sync with the external
* reference.
*/
int get_sync_clock(unsigned long long *clock)
{
atomic_t *sw_ptr;
unsigned int sw0, sw1;
sw_ptr = &get_cpu_var(clock_sync_word);
sw0 = atomic_read(sw_ptr);
*clock = get_clock();
sw1 = atomic_read(sw_ptr);
put_cpu_var(clock_sync_word);
if (sw0 == sw1 && (sw0 & 0x80000000U))
/* Success: time is in sync. */
return 0;
if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -ENOSYS;
if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return -EACCES;
return -EAGAIN;
}
EXPORT_SYMBOL(get_sync_clock);
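
The per-cpu clock_sync_word combines an in-sync flag (bit 2^31) with a sequence counter in the low bits: get_sync_clock reads the word before and after sampling the TOD clock and accepts the sample only if the word was stable and in sync, while disable_sync_clock clears the flag and bumps the counter so a racing reader cannot see a stale in-sync value. A single-threaded userspace sketch of that seqlock-like protocol (the kernel does this per cpu with atomics):

#include <stdint.h>
#include <stdio.h>

static uint32_t sync_word = 0x80000000u;	/* in sync, sequence 0 */

static uint32_t sample_clock(void) { return 42; }	/* get_clock stand-in */

static void desync(void)
{
	sync_word &= ~0x80000000u;	/* clear the in-sync bit */
	sync_word++;			/* bump sequence to foil racing readers */
}

static int get_sync_clock(uint32_t *clock)
{
	uint32_t sw0 = sync_word;

	*clock = sample_clock();
	if (sync_word == sw0 && (sw0 & 0x80000000u))
		return 0;	/* word stable and in sync: sample accepted */
	return -1;		/* -EAGAIN in the kernel */
}

int main(void)
{
	uint32_t c;

	printf("in sync: rc=%d clock=%u\n", get_sync_clock(&c), c);
	desync();
	printf("desynced: rc=%d\n", get_sync_clock(&c));
	return 0;
}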
/*
* Make get_sync_clock return -EAGAIN.
*/
static void disable_sync_clock(void *dummy)
{
atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
/*
* Clear the in-sync bit 2^31. All get_sync_clock calls will
* fail until the sync bit is turned back on. In addition,
* increase the "sequence" counter so that a concurrent
* get_sync_clock cannot race with an etr event and the
* subsequent recovery.
*/
atomic_clear_mask(0x80000000, sw_ptr);
atomic_inc(sw_ptr);
}
/*
* Make get_sync_clock return 0 again.
* Must be called with preemption disabled.
*/
static void enable_sync_clock(void)
{
atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
atomic_set_mask(0x80000000, sw_ptr);
}
/*
* External Time Reference (ETR) code.
*/
static int etr_port0_online;
static int etr_port1_online;
static int etr_steai_available;
static int __init early_parse_etr(char *p)
{
@ -273,12 +391,6 @@ enum etr_event {
ETR_EVENT_UPDATE,
};
enum etr_flags {
ETR_FLAG_ENOSYS,
ETR_FLAG_EACCES,
ETR_FLAG_STEAI,
};
/*
* Valid bit combinations of the eacr register are (x = don't care):
* e0 e1 dp p0 p1 ea es sl
@ -305,73 +417,17 @@ enum etr_flags {
*/
static struct etr_eacr etr_eacr;
static u64 etr_tolec; /* time of last eacr update */
static unsigned long etr_flags;
static struct etr_aib etr_port0;
static int etr_port0_uptodate;
static struct etr_aib etr_port1;
static int etr_port1_uptodate;
static unsigned long etr_events;
static struct timer_list etr_timer;
static DEFINE_PER_CPU(atomic_t, etr_sync_word);
static void etr_timeout(unsigned long dummy);
static void etr_work_fn(struct work_struct *work);
static DECLARE_WORK(etr_work, etr_work_fn);
/*
* The etr get_clock function. It will write the current clock value
* to the clock pointer and return 0 if the clock is in sync with the
* external time source. It will return -ENOSYS if the clock mode is
* local, and -EAGAIN if the clock is not in sync with the external
* reference. This function is what ETR is all about...
*/
int get_sync_clock(unsigned long long *clock)
{
atomic_t *sw_ptr;
unsigned int sw0, sw1;
sw_ptr = &get_cpu_var(etr_sync_word);
sw0 = atomic_read(sw_ptr);
*clock = get_clock();
sw1 = atomic_read(sw_ptr);
put_cpu_var(etr_sync_word);
if (sw0 == sw1 && (sw0 & 0x80000000U))
/* Success: time is in sync. */
return 0;
if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
return -ENOSYS;
if (test_bit(ETR_FLAG_EACCES, &etr_flags))
return -EACCES;
return -EAGAIN;
}
EXPORT_SYMBOL(get_sync_clock);
/*
* Make get_sync_clock return -EAGAIN.
*/
static void etr_disable_sync_clock(void *dummy)
{
atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
/*
* Clear the in-sync bit 2^31. All get_sync_clock calls will
* fail until the sync bit is turned back on. In addition,
* increase the "sequence" counter so that a concurrent
* get_sync_clock cannot race with an etr event and the
* subsequent recovery.
*/
atomic_clear_mask(0x80000000, sw_ptr);
atomic_inc(sw_ptr);
}
/*
* Make get_sync_clock return 0 again.
* Must be called with preemption disabled.
*/
static void etr_enable_sync_clock(void)
{
atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
atomic_set_mask(0x80000000, sw_ptr);
}
/*
* Reset ETR attachment.
*/
@ -381,15 +437,13 @@ static void etr_reset(void)
.e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
.p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
.es = 0, .sl = 0 };
if (etr_setr(&etr_eacr) == 0)
if (etr_setr(&etr_eacr) == 0) {
etr_tolec = get_clock();
else {
set_bit(ETR_FLAG_ENOSYS, &etr_flags);
if (etr_port0_online || etr_port1_online) {
printk(KERN_WARNING "Running on non ETR capable "
"machine, only local mode available.\n");
etr_port0_online = etr_port1_online = 0;
}
set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
} else if (etr_port0_online || etr_port1_online) {
printk(KERN_WARNING "Running on non ETR capable "
"machine, only local mode available.\n");
etr_port0_online = etr_port1_online = 0;
}
}
@ -397,14 +451,12 @@ static int __init etr_init(void)
{
struct etr_aib aib;
if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
return 0;
/* Check if this machine has the steai instruction. */
if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
set_bit(ETR_FLAG_STEAI, &etr_flags);
etr_steai_available = 1;
setup_timer(&etr_timer, etr_timeout, 0UL);
if (!etr_port0_online && !etr_port1_online)
set_bit(ETR_FLAG_EACCES, &etr_flags);
if (etr_port0_online) {
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
schedule_work(&etr_work);
@ -435,7 +487,8 @@ void etr_switch_to_local(void)
{
if (!etr_eacr.sl)
return;
etr_disable_sync_clock(NULL);
if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
disable_sync_clock(NULL);
set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
schedule_work(&etr_work);
}
@ -450,23 +503,21 @@ void etr_sync_check(void)
{
if (!etr_eacr.es)
return;
etr_disable_sync_clock(NULL);
if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
disable_sync_clock(NULL);
set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
schedule_work(&etr_work);
}
/*
* ETR external interrupt. There are two causes:
* ETR timing alert. There are two causes:
* 1) port state change, check the usability of the port
* 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
* sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
* or ETR-data word 4 (edf4) has changed.
*/
static void etr_ext_handler(__u16 code)
static void etr_timing_alert(struct etr_irq_parm *intparm)
{
struct etr_interruption_parameter *intparm =
(struct etr_interruption_parameter *) &S390_lowcore.ext_params;
if (intparm->pc0)
/* ETR port 0 state change. */
set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
@ -591,58 +642,23 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
return 1;
}
/*
* The time is "clock". old is what we think the time is.
* Adjust the value by a multiple of jiffies and add the delta to ntp.
* "delay" is an approximation how long the synchronization took. If
* the time correction is positive, then "delay" is subtracted from
* the time difference and only the remaining part is passed to ntp.
*/
static unsigned long long etr_adjust_time(unsigned long long old,
unsigned long long clock,
unsigned long long delay)
{
unsigned long long delta, ticks;
struct timex adjust;
if (clock > old) {
/* It is later than we thought. */
delta = ticks = clock - old;
delta = ticks = (delta < delay) ? 0 : delta - delay;
delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
adjust.offset = ticks * (1000000 / HZ);
} else {
/* It is earlier than we thought. */
delta = ticks = old - clock;
delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
delta = -delta;
adjust.offset = -ticks * (1000000 / HZ);
}
jiffies_timer_cc += delta;
if (adjust.offset != 0) {
printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
adjust.offset);
adjust.modes = ADJ_OFFSET_SINGLESHOT;
do_adjtimex(&adjust);
}
return delta;
}
static struct {
struct clock_sync_data {
int in_sync;
unsigned long long fixup_cc;
} etr_sync;
};
static void etr_sync_cpu_start(void *dummy)
static void clock_sync_cpu_start(void *dummy)
{
etr_enable_sync_clock();
struct clock_sync_data *sync = dummy;
enable_sync_clock();
/*
* This looks like a busy wait loop but it isn't. clock_sync_cpu_start
* is called on all other cpus while the TOD clock is stopped.
* __udelay will stop the cpu on an enabled wait psw until the
* TOD is running again.
*/
while (etr_sync.in_sync == 0) {
while (sync->in_sync == 0) {
__udelay(1);
/*
* A different cpu changes *in_sync. Therefore use
@ -650,17 +666,17 @@ static void etr_sync_cpu_start(void *dummy)
*/
barrier();
}
if (etr_sync.in_sync != 1)
if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
etr_disable_sync_clock(NULL);
disable_sync_clock(NULL);
/*
* This round of TOD syncing is done. Set the clock comparator
* to the next tick and let the processor continue.
*/
fixup_clock_comparator(etr_sync.fixup_cc);
fixup_clock_comparator(sync->fixup_cc);
}
static void etr_sync_cpu_end(void *dummy)
static void clock_sync_cpu_end(void *dummy)
{
}
@ -672,6 +688,7 @@ static void etr_sync_cpu_end(void *dummy)
static int etr_sync_clock(struct etr_aib *aib, int port)
{
struct etr_aib *sync_port;
struct clock_sync_data etr_sync;
unsigned long long clock, old_clock, delay, delta;
int follows;
int rc;
@ -690,9 +707,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
*/
memset(&etr_sync, 0, sizeof(etr_sync));
preempt_disable();
smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0);
local_irq_disable();
etr_enable_sync_clock();
enable_sync_clock();
/* Set clock to next OTE. */
__ctl_set_bit(14, 21);
@ -707,13 +724,13 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
/* Adjust Linux timing variables. */
delay = (unsigned long long)
(aib->edf2.etv - sync_port->edf2.etv) << 32;
delta = etr_adjust_time(old_clock, clock, delay);
delta = adjust_time(old_clock, clock, delay);
etr_sync.fixup_cc = delta;
fixup_clock_comparator(delta);
/* Verify that the clock is properly set. */
if (!etr_aib_follows(sync_port, aib, port)) {
/* Didn't work. */
etr_disable_sync_clock(NULL);
disable_sync_clock(NULL);
etr_sync.in_sync = -EAGAIN;
rc = -EAGAIN;
} else {
@ -724,12 +741,12 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
/* Could not set the clock ?!? */
__ctl_clear_bit(0, 29);
__ctl_clear_bit(14, 21);
etr_disable_sync_clock(NULL);
disable_sync_clock(NULL);
etr_sync.in_sync = -EAGAIN;
rc = -EAGAIN;
}
local_irq_enable();
smp_call_function(etr_sync_cpu_end,NULL,0,0);
smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
preempt_enable();
return rc;
}
@ -832,7 +849,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
* Do not try to get the alternate port aib if the clock
* is not in sync yet.
*/
if (!eacr.es)
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es)
return eacr;
/*
@ -840,7 +857,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
* the other port immediately. If only stetr is available the
* data-port bit toggle has to be used.
*/
if (test_bit(ETR_FLAG_STEAI, &etr_flags)) {
if (etr_steai_available) {
if (eacr.p0 && !etr_port0_uptodate) {
etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
etr_port0_uptodate = 1;
@ -909,10 +926,10 @@ static void etr_work_fn(struct work_struct *work)
if (!eacr.ea) {
/* Both ports offline. Reset everything. */
eacr.dp = eacr.es = eacr.sl = 0;
on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
on_each_cpu(disable_sync_clock, NULL, 0, 1);
del_timer_sync(&etr_timer);
etr_update_eacr(eacr);
set_bit(ETR_FLAG_EACCES, &etr_flags);
clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
return;
}
@ -953,7 +970,6 @@ static void etr_work_fn(struct work_struct *work)
eacr.e1 = 1;
sync_port = (etr_port0_uptodate &&
etr_port_valid(&etr_port0, 0)) ? 0 : -1;
clear_bit(ETR_FLAG_EACCES, &etr_flags);
} else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
eacr.sl = 0;
eacr.e0 = 0;
@ -962,7 +978,6 @@ static void etr_work_fn(struct work_struct *work)
eacr.es = 0;
sync_port = (etr_port1_uptodate &&
etr_port_valid(&etr_port1, 1)) ? 1 : -1;
clear_bit(ETR_FLAG_EACCES, &etr_flags);
} else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
eacr.sl = 1;
eacr.e0 = 1;
@ -976,7 +991,6 @@ static void etr_work_fn(struct work_struct *work)
eacr.e1 = 1;
sync_port = (etr_port0_uptodate &&
etr_port_valid(&etr_port0, 0)) ? 0 : -1;
clear_bit(ETR_FLAG_EACCES, &etr_flags);
} else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
eacr.sl = 1;
eacr.e0 = 0;
@ -985,19 +999,22 @@ static void etr_work_fn(struct work_struct *work)
eacr.es = 0;
sync_port = (etr_port1_uptodate &&
etr_port_valid(&etr_port1, 1)) ? 1 : -1;
clear_bit(ETR_FLAG_EACCES, &etr_flags);
} else {
/* Both ports not usable. */
eacr.es = eacr.sl = 0;
sync_port = -1;
set_bit(ETR_FLAG_EACCES, &etr_flags);
clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
}
if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
eacr.es = 0;
/*
* If the clock is in sync just update the eacr and return.
* If there is no valid sync port wait for a port update.
*/
if (eacr.es || sync_port < 0) {
if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) ||
eacr.es || sync_port < 0) {
etr_update_eacr(eacr);
etr_set_tolec_timeout(now);
return;
@ -1018,11 +1035,13 @@ static void etr_work_fn(struct work_struct *work)
* and set up a timer to try again after 0.5 seconds
*/
etr_update_eacr(eacr);
set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
if (now < etr_tolec + (1600000 << 12) ||
etr_sync_clock(&aib, sync_port) != 0) {
/* Sync failed. Try again in 1/2 second. */
eacr.es = 0;
etr_update_eacr(eacr);
clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
etr_set_sync_timeout();
} else
etr_set_tolec_timeout(now);
@ -1097,8 +1116,8 @@ static ssize_t etr_online_store(struct sys_device *dev,
value = simple_strtoul(buf, NULL, 0);
if (value != 0 && value != 1)
return -EINVAL;
if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
return -ENOSYS;
if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
return -EOPNOTSUPP;
if (dev == &etr_port0_dev) {
if (etr_port0_online == value)
return count; /* Nothing to do. */
@ -1292,3 +1311,318 @@ out:
}
device_initcall(etr_init_sysfs);
/*
* Server Time Protocol (STP) code.
*/
static int stp_online;
static struct stp_sstpi stp_info;
static void *stp_page;
static void stp_work_fn(struct work_struct *work);
static DECLARE_WORK(stp_work, stp_work_fn);
static int __init early_parse_stp(char *p)
{
if (strncmp(p, "off", 3) == 0)
stp_online = 0;
else if (strncmp(p, "on", 2) == 0)
stp_online = 1;
return 0;
}
early_param("stp", early_parse_stp);
/*
* Reset STP attachment.
*/
static void stp_reset(void)
{
int rc;
stp_page = alloc_bootmem_pages(PAGE_SIZE);
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
if (rc == 1)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
printk(KERN_WARNING "Running on non STP capable machine.\n");
free_bootmem((unsigned long) stp_page, PAGE_SIZE);
stp_page = NULL;
stp_online = 0;
}
}
static int __init stp_init(void)
{
if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
schedule_work(&stp_work);
return 0;
}
arch_initcall(stp_init);
/*
* STP timing alert. There are three causes:
* 1) timing status change
* 2) link availability change
* 3) time control parameter change
* In all three cases we are only interested in the clock source state.
* If a STP clock source is now available use it.
*/
static void stp_timing_alert(struct stp_irq_parm *intparm)
{
if (intparm->tsc || intparm->lac || intparm->tcpc)
schedule_work(&stp_work);
}
/*
* STP sync check machine check. This is called when the timing state
* changes from the synchronized state to the unsynchronized state.
* After an STP sync check the clock is not in sync. The machine check
* is broadcast to all cpus at the same time.
*/
void stp_sync_check(void)
{
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return;
disable_sync_clock(NULL);
schedule_work(&stp_work);
}
/*
* STP island condition machine check. This is called when an attached
* server attempts to communicate over an STP link, the servers
* have matching CTN ids and each has a valid stratum-1 configuration,
* but the configurations do not match.
*/
void stp_island_check(void)
{
if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return;
disable_sync_clock(NULL);
schedule_work(&stp_work);
}
/*
* STP work function. Check the STP state and take over the clock
* synchronization if the STP clock source is usable.
*/
static void stp_work_fn(struct work_struct *work)
{
struct clock_sync_data stp_sync;
unsigned long long old_clock, delta;
int rc;
if (!stp_online) {
chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
return;
}
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
if (rc)
return;
rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
if (rc || stp_info.c == 0)
return;
/*
* Catch all other cpus and make them wait until we have
* successfully synced the clock. smp_call_function will
* return after all other cpus are in clock_sync_cpu_start.
*/
memset(&stp_sync, 0, sizeof(stp_sync));
preempt_disable();
smp_call_function(clock_sync_cpu_start, &stp_sync, 0, 0);
local_irq_disable();
enable_sync_clock();
set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
schedule_work(&etr_work);
rc = 0;
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
old_clock = get_clock();
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
if (rc == 0) {
delta = adjust_time(old_clock, get_clock(), 0);
fixup_clock_comparator(delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
}
}
if (rc) {
disable_sync_clock(NULL);
stp_sync.in_sync = -EAGAIN;
clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
if (etr_port0_online || etr_port1_online)
schedule_work(&etr_work);
} else
stp_sync.in_sync = 1;
local_irq_enable();
smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
preempt_enable();
}
/*
* STP class sysfs interface functions
*/
static struct sysdev_class stp_sysclass = {
.name = "stp",
};
static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%016llx\n",
*(unsigned long long *) stp_info.ctnid);
}
static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.ctn);
}
static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x2000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
}
static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x8000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
}
static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
}
static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x0800))
return -ENODATA;
return sprintf(buf, "%i\n", (int) stp_info.tto);
}
static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x4000))
return -ENODATA;
return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
}
static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
stp_time_zone_offset_show, NULL);
static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.tmd);
}
static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf)
{
if (!stp_online)
return -ENODATA;
return sprintf(buf, "%i\n", stp_info.tst);
}
static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
static ssize_t stp_online_show(struct sysdev_class *class, char *buf)
{
return sprintf(buf, "%i\n", stp_online);
}
static ssize_t stp_online_store(struct sysdev_class *class,
const char *buf, size_t count)
{
unsigned int value;
value = simple_strtoul(buf, NULL, 0);
if (value != 0 && value != 1)
return -EINVAL;
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP;
stp_online = value;
schedule_work(&stp_work);
return count;
}
/*
* Can't use SYSDEV_CLASS_ATTR because the attribute should be named
* stp/online, but attr_online already exists in this file.
*/
static struct sysdev_class_attribute attr_stp_online = {
.attr = { .name = "online", .mode = 0600 },
.show = stp_online_show,
.store = stp_online_store,
};
static struct sysdev_class_attribute *stp_attributes[] = {
&attr_ctn_id,
&attr_ctn_type,
&attr_dst_offset,
&attr_leap_seconds,
&attr_stp_online,
&attr_stratum,
&attr_time_offset,
&attr_time_zone_offset,
&attr_timing_mode,
&attr_timing_state,
NULL
};
static int __init stp_init_sysfs(void)
{
struct sysdev_class_attribute **attr;
int rc;
rc = sysdev_class_register(&stp_sysclass);
if (rc)
goto out;
for (attr = stp_attributes; *attr; attr++) {
rc = sysdev_class_create_file(&stp_sysclass, *attr);
if (rc)
goto out_unreg;
}
return 0;
out_unreg:
for (; attr >= stp_attributes; attr--)
sysdev_class_remove_file(&stp_sysclass, *attr);
sysdev_class_unregister(&stp_sysclass);
out:
return rc;
}
device_initcall(stp_init_sysfs);
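
stp_init_sysfs uses the usual create-or-unwind idiom: on a mid-loop failure the same cursor is walked back down, removing attributes in reverse order from the point of failure. A userspace sketch with stub create/remove functions, deliberately failing on the third entry:

#include <stdio.h>

static const char *attrs[] = { "ctn_id", "ctn_type", "online", NULL };

static int create_file(const char *name)
{
	printf("create %s\n", name);
	return name[0] == 'o' ? -1 : 0;	/* fail on "online" to show unwind */
}

static void remove_file(const char *name)
{
	printf("remove %s\n", name);
}

int main(void)
{
	int i, rc = 0;

	for (i = 0; attrs[i]; i++) {
		rc = create_file(attrs[i]);
		if (rc)
			goto out_unreg;
	}
	return 0;
out_unreg:
	/* walk back in reverse from the point of failure; removing the
	 * entry that failed to register mirrors the kernel loop and is
	 * harmless there */
	do {
		remove_file(attrs[i]);
	} while (i--);
	return rc;
}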

View File

@ -313,8 +313,6 @@ void __init s390_init_cpu_topology(void)
machine_has_topology_irq = 1;
tl_info = alloc_bootmem_pages(PAGE_SIZE);
if (!tl_info)
goto error;
info = tl_info;
stsi(info, 15, 1, 2);

View File

@ -136,7 +136,7 @@ static inline void set_vtimer(__u64 expires)
}
#endif
static void start_cpu_timer(void)
void vtime_start_cpu_timer(void)
{
struct vtimer_queue *vt_list;
@ -150,7 +150,7 @@ static void start_cpu_timer(void)
set_vtimer(vt_list->idle);
}
static void stop_cpu_timer(void)
void vtime_stop_cpu_timer(void)
{
struct vtimer_queue *vt_list;
@ -318,8 +318,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags);
if (timer->cpu != smp_processor_id())
printk("internal_add_vtimer: BUG, running on wrong CPU");
BUG_ON(timer->cpu != smp_processor_id());
/* if list is empty we only have to set the timer */
if (list_empty(&vt_list->list)) {
@ -353,25 +352,12 @@ static void internal_add_vtimer(struct vtimer_list *timer)
put_cpu();
}
static inline int prepare_vtimer(struct vtimer_list *timer)
static inline void prepare_vtimer(struct vtimer_list *timer)
{
if (!timer->function) {
printk("add_virt_timer: uninitialized timer\n");
return -EINVAL;
}
if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
printk("add_virt_timer: invalid timer expire value!\n");
return -EINVAL;
}
if (vtimer_pending(timer)) {
printk("add_virt_timer: timer pending\n");
return -EBUSY;
}
BUG_ON(!timer->function);
BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
BUG_ON(vtimer_pending(timer));
timer->cpu = get_cpu();
return 0;
}
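
The vtimer hunks replace recoverable -EINVAL/-EBUSY returns with BUG_ON assertions: passing an uninitialized, out-of-range, or already-pending timer is a caller bug, not a runtime condition, so prepare_vtimer can become void. A userspace sketch of the same precondition style using assert, with an illustrative VTIMER_MAX_SLICE:

#include <assert.h>
#include <stdio.h>

#define VTIMER_MAX_SLICE 0x7fffffffffffffffULL	/* illustrative limit */

struct vtimer {
	void (*function)(void *);
	unsigned long long expires;
	int pending;
};

static void prepare_vtimer(struct vtimer *t)
{
	assert(t->function);		/* BUG_ON(!timer->function) */
	assert(t->expires && t->expires <= VTIMER_MAX_SLICE);
	assert(!t->pending);		/* BUG_ON(vtimer_pending(timer)) */
}

static void cb(void *p) { (void)p; }

int main(void)
{
	struct vtimer t = { .function = cb, .expires = 100 };

	prepare_vtimer(&t);
	printf("preconditions hold\n");
	return 0;
}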
/*
@ -382,10 +368,7 @@ void add_virt_timer(void *new)
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
if (prepare_vtimer(timer) < 0)
return;
prepare_vtimer(timer);
timer->interval = 0;
internal_add_vtimer(timer);
}
@ -399,10 +382,7 @@ void add_virt_timer_periodic(void *new)
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
if (prepare_vtimer(timer) < 0)
return;
prepare_vtimer(timer);
timer->interval = timer->expires;
internal_add_vtimer(timer);
}
@ -423,15 +403,8 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
unsigned long flags;
int cpu;
if (!timer->function) {
printk("mod_virt_timer: uninitialized timer\n");
return -EINVAL;
}
if (!expires || expires > VTIMER_MAX_SLICE) {
printk("mod_virt_timer: invalid expire range\n");
return -EINVAL;
}
BUG_ON(!timer->function);
BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
/*
* This is a common optimization triggered by the
@ -444,6 +417,9 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
cpu = get_cpu();
vt_list = &per_cpu(virt_cpu_timer, cpu);
/* check if we run on the right CPU */
BUG_ON(timer->cpu != cpu);
/* disable interrupts before test if timer is pending */
spin_lock_irqsave(&vt_list->lock, flags);
@ -458,14 +434,6 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
return 0;
}
/* check if we run on the right CPU */
if (timer->cpu != cpu) {
printk("mod_virt_timer: running on wrong CPU, check your code\n");
spin_unlock_irqrestore(&vt_list->lock, flags);
put_cpu();
return -EINVAL;
}
list_del_init(&timer->entry);
timer->expires = expires;
@ -536,24 +504,6 @@ void init_cpu_vtimer(void)
}
static int vtimer_idle_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
switch (action) {
case S390_CPU_IDLE:
stop_cpu_timer();
break;
case S390_CPU_NOT_IDLE:
start_cpu_timer();
break;
}
return NOTIFY_OK;
}
static struct notifier_block vtimer_idle_nb = {
.notifier_call = vtimer_idle_notify,
};
void __init vtime_init(void)
{
/* request the cpu timer external interrupt */
@ -561,9 +511,6 @@ void __init vtime_init(void)
&ext_int_info_timer) != 0)
panic("Couldn't request external interrupt 0x1005");
if (register_idle_notifier(&vtimer_idle_nb))
panic("Couldn't register idle notifier");
/* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
}

View File

@ -202,3 +202,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
}
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdat;
struct zone *zone;
int rc;
pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_NORMAL;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size));
if (rc)
vmem_remove_mapping(start, size);
return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
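
arch_add_memory is a two-step transaction: create the identity mapping, then hand the page range to the memory-hotplug core, undoing the mapping if the second step fails so no half-added section is left behind. A userspace sketch of that add-with-rollback idiom, with stub steps:

#include <stdio.h>

static int map_range(int fail)	{ printf("map\n");	return fail == 1 ? -1 : 0; }
static void unmap_range(void)	{ printf("unmap\n"); }
static int add_pages(int fail)	{ printf("add\n");	return fail == 2 ? -1 : 0; }

static int add_memory(int fail)
{
	int rc = map_range(fail);

	if (rc)
		return rc;
	rc = add_pages(fail);
	if (rc)
		unmap_range();	/* roll back step one on failure */
	return rc;
}

int main(void)
{
	printf("ok: %d\n", add_memory(0));
	printf("step2 fails: %d\n", add_memory(2));
	return 0;
}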

View File

@ -995,14 +995,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
now = get_clock();
DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
(unsigned int) intparm);
cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
irb->scsw.cmd.dstat), (unsigned int) intparm);
/* check for unsolicited interrupts */
cqr = (struct dasd_ccw_req *) intparm;
if (!cqr || ((irb->scsw.cc == 1) &&
(irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
if (!cqr || ((irb->scsw.cmd.cc == 1) &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
if (cqr && cqr->status == DASD_CQR_IN_IO)
cqr->status = DASD_CQR_QUEUED;
device = dasd_device_from_cdev_locked(cdev);
@ -1025,7 +1025,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
/* Check for clear pending */
if (cqr->status == DASD_CQR_CLEAR_PENDING &&
irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq);
@ -1041,11 +1041,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
next = NULL;
expires = 0;
if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
/* request was completed successfully */
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;

View File

@ -1572,7 +1572,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
/* determine the address of the CCW to be restarted */
/* Imprecise ending is not set -> addr from IRB-SCSW */
cpa = default_erp->refers->irb.scsw.cpa;
cpa = default_erp->refers->irb.scsw.cmd.cpa;
if (cpa == 0) {
@ -1725,7 +1725,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
/* determine the address of the CCW to be restarted */
/* Imprecise ending is not set -> addr from IRB-SCSW */
cpa = previous_erp->irb.scsw.cpa;
cpa = previous_erp->irb.scsw.cmd.cpa;
if (cpa == 0) {
@ -2171,7 +2171,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp->startdev;
if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK
if (erp->refers->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK
| SCHN_STAT_CHN_CTRL_CHK)) {
DEV_MESSAGE(KERN_DEBUG, device, "%s",
"channel or interface control check");
@ -2352,9 +2352,9 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
(cqr2->irb.esw.esw0.erw.cons == 0)) {
if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
if ((cqr1->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_CTRL_CHK)) ==
(cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
(cqr2->irb.scsw.cmd.cstat & (SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_CTRL_CHK)))
return 1; /* match with ifcc*/
}
@ -2622,8 +2622,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
}
/* double-check if current erp/cqr was successful */
if ((cqr->irb.scsw.cstat == 0x00) &&
(cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
(cqr->irb.scsw.cmd.dstat ==
(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
DEV_MESSAGE(KERN_DEBUG, device,
"ERP called for successful request %p"

View File

@ -1404,13 +1404,14 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.dstat & mask) == mask) {
if ((irb->scsw.cmd.dstat & mask) == mask) {
dasd_generic_handle_state_change(device);
return;
}
/* summary unit check */
if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) {
if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[7] == 0x0D)) {
dasd_alias_handle_summary_unit_check(device, irb);
return;
}
@ -2068,11 +2069,11 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
device->cdev->dev.bus_id);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cstat, irb->scsw.dstat);
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
device->cdev->dev.bus_id,
(void *) (addr_t) irb->scsw.cpa);
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@ -2122,7 +2123,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
/* scsw->cda is either valid or zero */
len = 0;
from = ++to;
fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
fail = (struct ccw1 *)(addr_t)
irb->scsw.cmd.cpa; /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");

View File

@ -222,7 +222,7 @@ static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.dstat & mask) == mask) {
if ((irb->scsw.cmd.dstat & mask) == mask) {
dasd_generic_handle_state_change(device);
return;
}
@ -449,11 +449,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
device->cdev->dev.bus_id);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cstat, irb->scsw.dstat);
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
device->cdev->dev.bus_id,
(void *) (addr_t) irb->scsw.cpa);
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
@ -498,11 +498,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print failing CCW area */
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" CCW %p: %08X %08X DAT:",

View File

@ -167,10 +167,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
struct dcssblk_dev_info *dev_info;
int rc;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
PRINT_WARN("Invalid value, must be 0 or 1\n");
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
return -EINVAL;
}
down_write(&dcssblk_devices_sem);
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
if (atomic_read(&dev_info->use_count)) {
@ -215,7 +213,6 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
set_disk_ro(dev_info->gd, 0);
}
} else {
PRINT_WARN("Invalid value, must be 0 or 1\n");
rc = -EINVAL;
goto out;
}
@ -258,10 +255,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
{
struct dcssblk_dev_info *dev_info;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
PRINT_WARN("Invalid value, must be 0 or 1\n");
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
return -EINVAL;
}
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
down_write(&dcssblk_devices_sem);
@ -289,7 +284,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
}
} else {
up_write(&dcssblk_devices_sem);
PRINT_WARN("Invalid value, must be 0 or 1\n");
return -EINVAL;
}
up_write(&dcssblk_devices_sem);
@ -441,7 +435,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
goto out;
unregister_dev:
PRINT_ERR("device_create_file() failed!\n");
list_del(&dev_info->lh);
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
@ -702,10 +695,8 @@ dcssblk_check_params(void)
static void __exit
dcssblk_exit(void)
{
PRINT_DEBUG("DCSSBLOCK EXIT...\n");
s390_root_dev_unregister(dcssblk_root_dev);
unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
PRINT_DEBUG("...finished!\n");
}
static int __init
@ -713,27 +704,21 @@ dcssblk_init(void)
{
int rc;
PRINT_DEBUG("DCSSBLOCK INIT...\n");
dcssblk_root_dev = s390_root_dev_register("dcssblk");
if (IS_ERR(dcssblk_root_dev)) {
PRINT_ERR("device_register() failed!\n");
if (IS_ERR(dcssblk_root_dev))
return PTR_ERR(dcssblk_root_dev);
}
rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
if (rc) {
PRINT_ERR("device_create_file(add) failed!\n");
s390_root_dev_unregister(dcssblk_root_dev);
return rc;
}
rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
if (rc) {
PRINT_ERR("device_create_file(remove) failed!\n");
s390_root_dev_unregister(dcssblk_root_dev);
return rc;
}
rc = register_blkdev(0, DCSSBLK_NAME);
if (rc < 0) {
PRINT_ERR("Can't get dynamic major!\n");
s390_root_dev_unregister(dcssblk_root_dev);
return rc;
}
@ -742,7 +727,6 @@ dcssblk_init(void)
dcssblk_check_params();
PRINT_DEBUG("...finished!\n");
return 0;
}

View File

@ -100,15 +100,10 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2) {
PRINT_ERR("expanded storage lost!\n");
if (cc == 2)
return -ENXIO;
}
if (cc == 1) {
PRINT_ERR("page in failed for page index %u.\n",
xpage_index);
if (cc == 1)
return -EIO;
}
return 0;
}
@ -135,15 +130,10 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
if (cc == 3)
return -ENXIO;
if (cc == 2) {
PRINT_ERR("expanded storage lost!\n");
if (cc == 2)
return -ENXIO;
}
if (cc == 1) {
PRINT_ERR("page out failed for page index %u.\n",
xpage_index);
if (cc == 1)
return -EIO;
}
return 0;
}
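
With the printks gone, the xpram helpers reduce to a plain mapping from the instruction's condition code to an errno: cc 1 (transfer failed) becomes -EIO, while cc 2 (expanded storage lost) and cc 3 (not configured) both become -ENXIO. A sketch of that mapping as a switch:

#include <errno.h>
#include <stdio.h>

static int cc_to_errno(int cc)
{
	switch (cc) {
	case 0:
		return 0;	/* success */
	case 1:
		return -EIO;	/* transfer failed */
	case 2:
		return -ENXIO;	/* expanded storage lost */
	case 3:
		return -ENXIO;	/* not configured */
	default:
		return -EINVAL;
	}
}

int main(void)
{
	int cc;

	for (cc = 0; cc <= 3; cc++)
		printf("cc=%d -> %d\n", cc, cc_to_errno(cc));
	return 0;
}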

View File

@ -93,9 +93,6 @@ struct raw3215_info {
struct raw3215_req *queued_write;/* pointer to queued write requests */
wait_queue_head_t empty_wait; /* wait queue for flushing */
struct timer_list timer; /* timer for delayed output */
char *message; /* pending message from raw3215_irq */
int msg_dstat; /* dstat for pending message */
int msg_cstat; /* cstat for pending message */
int line_pos; /* position on the line (for tabs) */
char ubuffer[80]; /* copy_from_user buffer */
};
@ -359,11 +356,6 @@ raw3215_tasklet(void *data)
raw3215_mk_write_req(raw);
raw3215_try_io(raw);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
/* Check for pending message from raw3215_irq */
if (raw->message != NULL) {
printk(raw->message, raw->msg_dstat, raw->msg_cstat);
raw->message = NULL;
}
tty = raw->tty;
if (tty != NULL &&
RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
@ -381,20 +373,14 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3215_req *req;
struct tty_struct *tty;
int cstat, dstat;
int count, slen;
int count;
raw = cdev->dev.driver_data;
req = (struct raw3215_req *) intparm;
cstat = irb->scsw.cstat;
dstat = irb->scsw.dstat;
if (cstat != 0) {
raw->message = KERN_WARNING
"Got nonzero channel status in raw3215_irq "
"(dev sts 0x%2x, sch sts 0x%2x)";
raw->msg_dstat = dstat;
raw->msg_cstat = cstat;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if (cstat != 0)
tasklet_schedule(&raw->tasklet);
}
if (dstat & 0x01) { /* we got a unit exception */
dstat &= ~0x01; /* we can ignore it */
}
@ -404,8 +390,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
break;
/* Attention interrupt, someone hit the enter key */
raw3215_mk_read_req(raw);
if (MACHINE_IS_P390)
memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
tasklet_schedule(&raw->tasklet);
break;
case 0x08:
@ -415,7 +399,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
return; /* That shouldn't happen ... */
if (req->type == RAW3215_READ) {
/* store residual count, then wait for device end */
req->residual = irb->scsw.count;
req->residual = irb->scsw.cmd.count;
}
if (dstat == 0x08)
break;
@ -428,11 +412,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
tty = raw->tty;
count = 160 - req->residual;
if (MACHINE_IS_P390) {
slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
if (count > slen)
count = slen;
} else
EBCASC(raw->inbuf, count);
cchar = ctrlchar_handle(raw->inbuf, count, tty);
switch (cchar & CTRLCHAR_MASK) {
@ -481,11 +460,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
raw->flags &= ~RAW3215_WORKING;
raw3215_free_req(req);
}
raw->message = KERN_WARNING
"Spurious interrupt in in raw3215_irq "
"(dev sts 0x%2x, sch sts 0x%2x)";
raw->msg_dstat = dstat;
raw->msg_cstat = cstat;
tasklet_schedule(&raw->tasklet);
}
return;
@ -883,7 +857,6 @@ con3215_init(void)
free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
raw3215[0] = NULL;
printk("Couldn't find a 3215 console device\n");
return -ENODEV;
}
register_console(&con3215);
@ -1157,7 +1130,6 @@ tty3215_init(void)
tty_set_operations(driver, &tty3215_ops);
ret = tty_register_driver(driver);
if (ret) {
printk("Couldn't register tty3215 driver\n");
put_tty_driver(driver);
return ret;
}

View File

@ -411,15 +411,15 @@ static int
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.dstat & DEV_STAT_ATTENTION)
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
con3270_issue_read(cp);
if (rq) {
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.count;
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}

View File

@ -216,17 +216,17 @@ static int
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Set indication and wake waiters for attention. */
if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
fp->attention = 1;
wake_up(&fp->wait);
}
if (rq) {
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.count;
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
@ -512,11 +512,8 @@ fs3270_init(void)
int rc;
rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops);
if (rc) {
printk(KERN_ERR "fs3270 can't get major number %d: errno %d\n",
IBM_FS3270_MAJOR, rc);
if (rc)
return rc;
}
return 0;
}


@ -3,9 +3,8 @@
*
* Character device driver for reading z/VM *MONITOR service records.
*
* Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
*
* Author: Gerald Schaefer <geraldsc@de.ibm.com>
* Copyright IBM Corp. 2004, 2008
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/module.h>
@ -18,12 +17,11 @@
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <net/iucv/iucv.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/extmem.h>
#include <linux/poll.h>
#include <net/iucv/iucv.h>
//#define MON_DEBUG /* Debug messages on/off */
@ -152,10 +150,7 @@ static int mon_check_mca(struct mon_msg *monmsg)
(mon_mca_end(monmsg) > mon_dcss_end) ||
(mon_mca_start(monmsg) < mon_dcss_start) ||
((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
{
P_DEBUG("READ, IGNORED INVALID MCA\n\n");
return -EINVAL;
}
return 0;
}
@ -164,10 +159,6 @@ static int mon_send_reply(struct mon_msg *monmsg,
{
int rc;
P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
"0x%08X\n\n",
monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
rc = iucv_message_reply(monpriv->path, &monmsg->msg,
IUCV_IPRMDATA, NULL, 0);
atomic_dec(&monpriv->msglim_count);
@ -202,15 +193,12 @@ static struct mon_private *mon_alloc_mem(void)
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
if (!monpriv) {
P_ERROR("no memory for monpriv\n");
if (!monpriv)
return NULL;
}
for (i = 0; i < MON_MSGLIM; i++) {
monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
GFP_KERNEL);
if (!monpriv->msg_array[i]) {
P_ERROR("open, no memory for msg_array\n");
mon_free_mem(monpriv);
return NULL;
}
@ -218,41 +206,10 @@ static struct mon_private *mon_alloc_mem(void)
return monpriv;
}
static inline void mon_read_debug(struct mon_msg *monmsg,
struct mon_private *monpriv)
{
#ifdef MON_DEBUG
u8 msg_type[2], mca_type;
unsigned long records_len;
records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
memcpy(msg_type, &monmsg->msg.class, 2);
EBCASC(msg_type, 2);
mca_type = mon_mca_type(monmsg, 0);
EBCASC(&mca_type, 1);
P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
monpriv->read_index, monpriv->write_index);
P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
mon_mca_start(monmsg), mon_mca_end(monmsg));
P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);
if (mon_mca_size(monmsg) > 12)
P_DEBUG("READ, MORE THAN ONE MCA\n\n");
#endif
}
static inline void mon_next_mca(struct mon_msg *monmsg)
{
if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
return;
P_DEBUG("READ, NEXT MCA\n\n");
monmsg->mca_offset += 12;
monmsg->pos = 0;
}
@ -269,7 +226,6 @@ static struct mon_msg *mon_next_message(struct mon_private *monpriv)
monmsg->msglim_reached = 0;
monmsg->pos = 0;
monmsg->mca_offset = 0;
P_WARNING("read, message limit reached\n");
monpriv->read_index = (monpriv->read_index + 1) %
MON_MSGLIM;
atomic_dec(&monpriv->read_ready);
@ -286,10 +242,6 @@ static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
struct mon_private *monpriv = path->private;
P_DEBUG("IUCV connection completed\n");
P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
"0x%02X, Sample = 0x%02X\n",
ipuser[0], ipuser[1], ipuser[2]);
atomic_set(&monpriv->iucv_connected, 1);
wake_up(&mon_conn_wait_queue);
}
@ -310,7 +262,6 @@ static void mon_iucv_message_pending(struct iucv_path *path,
{
struct mon_private *monpriv = path->private;
P_DEBUG("IUCV message pending\n");
memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
@ -375,7 +326,6 @@ static int mon_open(struct inode *inode, struct file *filp)
rc = -EIO;
goto out_path;
}
P_INFO("open, established connection to *MONITOR service\n\n");
filp->private_data = monpriv;
return nonseekable_open(inode, filp);
@ -400,8 +350,6 @@ static int mon_close(struct inode *inode, struct file *filp)
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
else
P_INFO("close, terminated connection to *MONITOR service\n");
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
@ -442,10 +390,8 @@ static ssize_t mon_read(struct file *filp, char __user *data,
monmsg = monpriv->msg_array[monpriv->read_index];
}
if (!monmsg->pos) {
if (!monmsg->pos)
monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
mon_read_debug(monmsg, monpriv);
}
if (mon_check_mca(monmsg))
goto reply;
@ -531,7 +477,6 @@ static int __init mon_init(void)
P_ERROR("failed to register with iucv driver\n");
return rc;
}
P_INFO("open, registered with IUCV\n");
rc = segment_type(mon_dcss_name);
if (rc < 0) {
@ -555,13 +500,8 @@ static int __init mon_init(void)
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
rc = misc_register(&mon_dev);
if (rc < 0 ) {
P_ERROR("misc_register failed, rc = %i\n", rc);
if (rc < 0 )
goto out;
}
P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
mon_dcss_end - mon_dcss_start + 1);
return 0;
out:


@ -153,19 +153,10 @@ struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
struct raw3270_request *rq;
rq = alloc_bootmem_low(sizeof(struct raw3270));
if (!rq)
return ERR_PTR(-ENOMEM);
memset(rq, 0, sizeof(struct raw3270_request));
/* alloc output buffer. */
if (size > 0) {
if (size > 0)
rq->buffer = alloc_bootmem_low(size);
if (!rq->buffer) {
free_bootmem((unsigned long) rq,
sizeof(struct raw3270));
return ERR_PTR(-ENOMEM);
}
}
rq->size = size;
INIT_LIST_HEAD(&rq->list);
@ -372,17 +363,17 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if (IS_ERR(irb))
rc = RAW3270_IO_RETRY;
else if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
rq->rc = -EIO;
rc = RAW3270_IO_DONE;
} else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP)) {
} else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
DEV_STAT_UNIT_EXCEP)) {
/* Handle CE-DE-UE and subsequent UDE */
set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
rc = RAW3270_IO_BUSY;
} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
/* Wait for UDE if busy flag is set. */
if (irb->scsw.dstat & DEV_STAT_DEV_END) {
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
/* Got it, now retry. */
rc = RAW3270_IO_RETRY;
@ -497,7 +488,7 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
* Unit-Check Processing:
* Expect Command Reject or Intervention Required.
*/
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
/* Request finished abnormally. */
if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
@ -505,16 +496,16 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
}
}
if (rq) {
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
rq->rc = -EOPNOTSUPP;
else
rq->rc = -EIO;
} else
/* Request finished normally. Copy residual count. */
rq->rescnt = irb->scsw.count;
rq->rescnt = irb->scsw.cmd.count;
}
if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
wake_up(&raw3270_wait_queue);
}
@ -619,7 +610,6 @@ __raw3270_size_device_vm(struct raw3270 *rp)
rp->cols = 132;
break;
default:
printk(KERN_WARNING "vrdccrmd is 0x%.8x\n", model);
rc = -EOPNOTSUPP;
break;
}


@ -506,6 +506,8 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
if (scbuf->validity_sclp_send_mask)
sclp_send_mask = scbuf->sclp_send_mask;
spin_unlock_irqrestore(&sclp_lock, flags);
if (scbuf->validity_sclp_active_facility_mask)
sclp_facilities = scbuf->sclp_active_facility_mask;
sclp_dispatch_state_change();
}
@ -782,11 +784,9 @@ sclp_check_handler(__u16 code)
/* Is this the interrupt we are waiting for? */
if (finished_sccb == 0)
return;
if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
"for buffer at 0x%x\n", finished_sccb);
return;
}
if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
finished_sccb);
spin_lock(&sclp_lock);
if (sclp_running_state == sclp_running_state_running) {
sclp_init_req.status = SCLP_REQ_DONE;
@ -883,8 +883,6 @@ sclp_init(void)
unsigned long flags;
int rc;
if (!MACHINE_HAS_SCLP)
return -ENODEV;
spin_lock_irqsave(&sclp_lock, flags);
/* Check for previous or running initialization */
if (sclp_init_state != sclp_init_state_uninitialized) {


@ -11,6 +11,9 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include "sclp.h"
@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
@ -62,7 +67,7 @@ out:
return rc;
}
void __init sclp_read_info_early(void)
static void __init sclp_read_info_early(void)
{
int rc;
int i;
@ -92,34 +97,33 @@ void __init sclp_read_info_early(void)
void __init sclp_facilities_detect(void)
{
if (!early_read_info_sccb_valid)
return;
sclp_facilities = early_read_info_sccb.facilities;
sclp_fac84 = early_read_info_sccb.fac84;
}
unsigned long long __init sclp_memory_detect(void)
{
unsigned long long memsize;
struct read_info_sccb *sccb;
sclp_read_info_early();
if (!early_read_info_sccb_valid)
return 0;
return;
sccb = &early_read_info_sccb;
if (sccb->rnsize)
memsize = sccb->rnsize << 20;
else
memsize = sccb->rnsize2 << 20;
if (sccb->rnmax)
memsize *= sccb->rnmax;
else
memsize *= sccb->rnmax2;
return memsize;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
}
unsigned long long sclp_get_rnmax(void)
{
return rnmax;
}
unsigned long long sclp_get_rzm(void)
{
return rzm;
}
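/*
 * Editor's note (relation inferred from the removed sclp_memory_detect()
 * above): the old single memsize computation survives as the product of
 * the two new getters, i.e. memory size == sclp_get_rnmax() *
 * sclp_get_rzm(), with rzm already shifted to bytes (rnsize << 20).
 */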
/*
* This function will be called after sclp_memory_detect(), which gets called
* early from early.c code. Therefore the sccb should have valid contents.
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. Therefore the sccb should have valid contents.
*/
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
@ -278,6 +282,305 @@ int sclp_cpu_deconfigure(u8 cpu)
return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
struct memory_increment {
struct list_head list;
u16 rn;
int standby;
int usecount;
};
struct assign_storage_sccb {
struct sccb_header header;
u16 rn;
} __packed;
static unsigned long long rn2addr(u16 rn)
{
return (unsigned long long) (rn - 1) * rzm;
}
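/*
 * Editor's worked example (values assumed for illustration): increment
 * numbers are 1-based, so with rzm = 256 MiB, rn2addr(1) == 0 and
 * rn2addr(3) == 2 * 256 MiB == 0x20000000.
 */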
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
struct assign_storage_sccb *sccb;
int rc;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
sccb->rn = rn;
rc = do_sync_request(cmd, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
case 0x0120:
break;
default:
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_assign_storage(u16 rn)
{
return do_assign_storage(0x000d0001, rn);
}
static int sclp_unassign_storage(u16 rn)
{
return do_assign_storage(0x000c0001, rn);
}
struct attach_storage_sccb {
struct sccb_header header;
u16 :16;
u16 assigned;
u32 :32;
u32 entries[0];
} __packed;
static int sclp_attach_storage(u8 id)
{
struct attach_storage_sccb *sccb;
int rc;
int i;
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
rc = do_sync_request(0x00080001 | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0020:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++)
sclp_unassign_storage(sccb->entries[i] >> 16);
break;
default:
rc = -EIO;
break;
}
out:
free_page((unsigned long) sccb);
return rc;
}
static int sclp_mem_change_state(unsigned long start, unsigned long size,
int online)
{
struct memory_increment *incr;
unsigned long long istart;
int rc = 0;
list_for_each_entry(incr, &sclp_mem_list, list) {
istart = rn2addr(incr->rn);
if (start + size - 1 < istart)
break;
if (start > istart + rzm - 1)
continue;
if (online) {
if (incr->usecount++)
continue;
/*
* Don't break the loop if one assign fails. Loop may
* be walked again on CANCEL and we can't save
* information if state changed before or not.
* So continue and increase usecount for all increments.
*/
rc |= sclp_assign_storage(incr->rn);
} else {
if (--incr->usecount)
continue;
sclp_unassign_storage(incr->rn);
}
}
return rc ? -EIO : 0;
}
static int sclp_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
unsigned long start, size;
struct memory_notify *arg;
unsigned char id;
int rc = 0;
arg = data;
start = arg->start_pfn << PAGE_SHIFT;
size = arg->nr_pages << PAGE_SHIFT;
mutex_lock(&sclp_mem_mutex);
for (id = 0; id <= sclp_max_storage_id; id++)
if (!test_bit(id, sclp_storage_ids))
sclp_attach_storage(id);
switch (action) {
case MEM_ONLINE:
break;
case MEM_GOING_ONLINE:
rc = sclp_mem_change_state(start, size, 1);
break;
case MEM_CANCEL_ONLINE:
sclp_mem_change_state(start, size, 0);
break;
default:
rc = -EINVAL;
break;
}
mutex_unlock(&sclp_mem_mutex);
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
.notifier_call = sclp_mem_notifier,
};
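/*
 * Editor's note on the contract assumed above: register_memory_notifier()
 * (called further down in sclp_detect_standby_memory()) hooks this block
 * into the memory hotplug chain; returning NOTIFY_BAD from MEM_GOING_ONLINE
 * vetoes the online operation, after which the core sends MEM_CANCEL_ONLINE
 * so the partially assigned increments can be released again.
 */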
static void __init add_memory_merged(u16 rn)
{
static u16 first_rn, num;
unsigned long long start, size;
if (rn && first_rn && (first_rn + num == rn)) {
num++;
return;
}
if (!first_rn)
goto skip_add;
start = rn2addr(first_rn);
size = (unsigned long long ) num * rzm;
if (start >= VMEM_MAX_PHYS)
goto skip_add;
if (start + size > VMEM_MAX_PHYS)
size = VMEM_MAX_PHYS - start;
add_memory(0, start, size);
skip_add:
first_rn = rn;
num = 1;
}
static void __init sclp_add_standby_memory(void)
{
struct memory_increment *incr;
list_for_each_entry(incr, &sclp_mem_list, list)
if (incr->standby)
add_memory_merged(incr->rn);
add_memory_merged(0);
}
static void __init insert_increment(u16 rn, int standby, int assigned)
{
struct memory_increment *incr, *new_incr;
struct list_head *prev;
u16 last_rn;
new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
if (!new_incr)
return;
new_incr->rn = rn;
new_incr->standby = standby;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
if (assigned && incr->rn > rn)
break;
if (!assigned && incr->rn - last_rn > 1)
break;
last_rn = incr->rn;
prev = &incr->list;
}
if (!assigned)
new_incr->rn = last_rn + 1;
if (new_incr->rn > rnmax) {
kfree(new_incr);
return;
}
list_add(&new_incr->list, prev);
}
struct read_storage_sccb {
struct sccb_header header;
u16 max_id;
u16 assigned;
u16 standby;
u16 :16;
u32 entries[0];
} __packed;
static int __init sclp_detect_standby_memory(void)
{
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
if (!early_read_info_sccb_valid)
return 0;
if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
rc = -ENOMEM;
sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
goto out;
assigned = 0;
for (id = 0; id <= sclp_max_storage_id; id++) {
memset(sccb, 0, PAGE_SIZE);
sccb->header.length = PAGE_SIZE;
rc = do_sync_request(0x00040001 | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
case 0x0010:
set_bit(id, sclp_storage_ids);
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 0, 1);
}
break;
case 0x0310:
break;
case 0x0410:
for (i = 0; i < sccb->assigned; i++) {
if (!sccb->entries[i])
continue;
assigned++;
insert_increment(sccb->entries[i] >> 16, 1, 1);
}
break;
default:
rc = -EIO;
break;
}
if (!rc)
sclp_max_storage_id = sccb->max_id;
}
if (rc || list_empty(&sclp_mem_list))
goto out;
for (i = 1; i <= rnmax - assigned; i++)
insert_increment(0, 1, 0);
rc = register_memory_notifier(&sclp_mem_nb);
if (rc)
goto out;
sclp_add_standby_memory();
out:
free_page((unsigned long) sccb);
return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Channel path configuration related functions.
*/


@ -14,14 +14,13 @@
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/termios.h>
#include <linux/err.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
#define SCLP_CON_PRINT_HEADER "sclp console driver: "
#define sclp_console_major 4 /* TTYAUX_MAJOR */
#define sclp_console_minor 64
#define sclp_console_name "ttyS"
@ -222,8 +221,6 @@ sclp_console_init(void)
INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
page = alloc_bootmem_low_pages(PAGE_SIZE);
if (page == NULL)
return -ENOMEM;
list_add_tail((struct list_head *) page, &sclp_con_pages);
}
INIT_LIST_HEAD(&sclp_con_outqueue);


@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/sysdev.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
@ -40,9 +41,19 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
put_online_cpus();
}
static void __ref sclp_cpu_change_notify(struct work_struct *work)
static int sclp_cpu_kthread(void *data)
{
smp_rescan_cpus();
return 0;
}
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
/* Can't call smp_rescan_cpus() from workqueue context since it may
* deadlock in case of cpu hotplug. So we have to create a kernel
* thread in order to call it.
*/
kthread_run(sclp_cpu_kthread, NULL, "cpu_rescan");
}
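/*
 * Editor's note on the workaround above: smp_rescan_cpus() may itself wait
 * on cpu hotplug, so calling it from the shared workqueue could deadlock
 * against a hotplug operation flushing that workqueue. kthread_run() both
 * creates and wakes the one-shot thread, so the rescan proceeds in its own
 * context and the work function can return immediately.
 */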
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
@ -74,10 +85,8 @@ static int __init sclp_conf_init(void)
INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
rc = sclp_register(&sclp_conf_register);
if (rc) {
printk(KERN_ERR TAG "failed to register (%d).\n", rc);
if (rc)
return rc;
}
if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
printk(KERN_WARNING TAG "no configuration management.\n");


@ -27,6 +27,8 @@
#define CPI_LENGTH_NAME 8
#define CPI_LENGTH_LEVEL 16
static DEFINE_MUTEX(sclp_cpi_mutex);
struct cpi_evbuf {
struct evbuf_header header;
u8 id_format;
@ -124,21 +126,15 @@ static int cpi_req(void)
int response;
rc = sclp_register(&sclp_cpi_event);
if (rc) {
printk(KERN_WARNING "cpi: could not register "
"to hardware console.\n");
if (rc)
goto out;
}
if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
printk(KERN_WARNING "cpi: no control program "
"identification support\n");
rc = -EOPNOTSUPP;
goto out_unregister;
}
req = cpi_prepare_req();
if (IS_ERR(req)) {
printk(KERN_WARNING "cpi: could not allocate request\n");
rc = PTR_ERR(req);
goto out_unregister;
}
@ -148,10 +144,8 @@ static int cpi_req(void)
/* Add request to sclp queue */
rc = sclp_add_request(req);
if (rc) {
printk(KERN_WARNING "cpi: could not start request\n");
if (rc)
goto out_free_req;
}
wait_for_completion(&completion);
@ -223,7 +217,12 @@ static void set_string(char *attr, const char *value)
static ssize_t system_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", system_name);
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
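/*
 * Editor's note: the sclp_cpi_mutex added around each show/store pair and
 * around cpi_req() serializes readers against a concurrent set_string(),
 * so a show can never return a half-updated name buffer.
 */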
static ssize_t system_name_store(struct kobject *kobj,
@ -237,7 +236,9 @@ static ssize_t system_name_store(struct kobject *kobj,
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_name, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
@ -248,7 +249,12 @@ static struct kobj_attribute system_name_attr =
static ssize_t sysplex_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t sysplex_name_store(struct kobject *kobj,
@ -262,7 +268,9 @@ static ssize_t sysplex_name_store(struct kobject *kobj,
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(sysplex_name, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
@ -273,7 +281,12 @@ static struct kobj_attribute sysplex_name_attr =
static ssize_t system_type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", system_type);
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
static ssize_t system_type_store(struct kobject *kobj,
@ -287,7 +300,9 @@ static ssize_t system_type_store(struct kobject *kobj,
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_type, buf);
mutex_unlock(&sclp_cpi_mutex);
return len;
}
@ -298,8 +313,11 @@ static struct kobj_attribute system_type_attr =
static ssize_t system_level_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
unsigned long long level = system_level;
unsigned long long level;
mutex_lock(&sclp_cpi_mutex);
level = system_level;
mutex_unlock(&sclp_cpi_mutex);
return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
}
@ -320,8 +338,9 @@ static ssize_t system_level_store(struct kobject *kobj,
if (*endp)
return -EINVAL;
mutex_lock(&sclp_cpi_mutex);
system_level = level;
mutex_unlock(&sclp_cpi_mutex);
return len;
}
@ -334,7 +353,9 @@ static ssize_t set_store(struct kobject *kobj,
{
int rc;
mutex_lock(&sclp_cpi_mutex);
rc = cpi_req();
mutex_unlock(&sclp_cpi_mutex);
if (rc)
return rc;
@ -373,12 +394,16 @@ int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
if (rc)
return rc;
mutex_lock(&sclp_cpi_mutex);
set_string(system_name, system);
set_string(sysplex_name, sysplex);
set_string(system_type, type);
system_level = level;
return cpi_req();
rc = cpi_req();
mutex_unlock(&sclp_cpi_mutex);
return rc;
}
EXPORT_SYMBOL(sclp_cpi_set_data);


@ -51,13 +51,7 @@ static struct sclp_register sclp_quiesce_event = {
static int __init
sclp_quiesce_init(void)
{
int rc;
rc = sclp_register(&sclp_quiesce_event);
if (rc)
printk(KERN_WARNING "sclp: could not register quiesce handler "
"(rc=%d)\n", rc);
return rc;
return sclp_register(&sclp_quiesce_event);
}
module_init(sclp_quiesce_init);


@ -19,8 +19,6 @@
#include "sclp.h"
#include "sclp_rw.h"
#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
/*
* The room for the SCCB (only for writing) is not equal to a pages size
* (as it is specified as the maximum size in the SCLP documentation)


@ -239,10 +239,8 @@ int __init sclp_sdias_init(void)
debug_register_view(sdias_dbf, &debug_sprintf_view);
debug_set_level(sdias_dbf, 6);
rc = sclp_register(&sclp_sdias_register);
if (rc) {
ERROR_MSG("sclp register failed\n");
if (rc)
return rc;
}
init_waitqueue_head(&sdias_wq);
TRACE("init done\n");
return 0;


@ -13,7 +13,6 @@
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
@ -25,8 +24,6 @@
#include "sclp_rw.h"
#include "sclp_tty.h"
#define SCLP_TTY_PRINT_HEADER "sclp tty driver: "
/*
* size of a buffer that collects single characters coming in
* via sclp_tty_put_char()
@ -50,8 +47,6 @@ static int sclp_tty_buffer_count;
static struct sclp_buffer *sclp_ttybuf;
/* Timer for delayed output of console messages. */
static struct timer_list sclp_tty_timer;
/* Waitqueue to wait for buffers to get empty. */
static wait_queue_head_t sclp_tty_waitq;
static struct tty_struct *sclp_tty;
static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
@ -59,19 +54,11 @@ static unsigned short int sclp_tty_chars_count;
struct tty_driver *sclp_tty_driver;
static struct sclp_ioctls sclp_ioctls;
static struct sclp_ioctls sclp_ioctls_init =
{
8, /* 1 hor. tab. = 8 spaces */
0, /* no echo of input by this driver */
80, /* 80 characters/line */
1, /* write after 1/10 s without final new line */
MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */
MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */
0, /* do not convert to lower case */
0x6c /* to seprate upper and lower case */
/* ('%' in EBCDIC) */
};
static int sclp_tty_tolower;
static int sclp_tty_columns = 80;
#define SPACES_PER_TAB 8
#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
/* This routine is called whenever we try to open a SCLP terminal. */
static int
@ -92,136 +79,6 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp)
sclp_tty = NULL;
}
/* execute commands to control the i/o behaviour of the SCLP tty at runtime */
static int
sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
unsigned long flags;
unsigned int obuf;
int check;
int rc;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
rc = 0;
check = 0;
switch (cmd) {
case TIOCSCLPSHTAB:
/* set width of horizontal tab */
if (get_user(sclp_ioctls.htab, (unsigned short __user *) arg))
rc = -EFAULT;
else
check = 1;
break;
case TIOCSCLPGHTAB:
/* get width of horizontal tab */
if (put_user(sclp_ioctls.htab, (unsigned short __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPSECHO:
/* enable/disable echo of input */
if (get_user(sclp_ioctls.echo, (unsigned char __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPGECHO:
/* Is echo of input enabled ? */
if (put_user(sclp_ioctls.echo, (unsigned char __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPSCOLS:
/* set number of columns for output */
if (get_user(sclp_ioctls.columns, (unsigned short __user *) arg))
rc = -EFAULT;
else
check = 1;
break;
case TIOCSCLPGCOLS:
/* get number of columns for output */
if (put_user(sclp_ioctls.columns, (unsigned short __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPSNL:
/* enable/disable writing without final new line character */
if (get_user(sclp_ioctls.final_nl, (signed char __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPGNL:
/* Is writing without final new line character enabled ? */
if (put_user(sclp_ioctls.final_nl, (signed char __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPSOBUF:
/*
* set the maximum buffers size for output, will be rounded
* up to next 4kB boundary and stored as number of SCCBs
* (4kB Buffers) limitation: 256 x 4kB
*/
if (get_user(obuf, (unsigned int __user *) arg) == 0) {
if (obuf & 0xFFF)
sclp_ioctls.max_sccb = (obuf >> 12) + 1;
else
sclp_ioctls.max_sccb = (obuf >> 12);
} else
rc = -EFAULT;
break;
case TIOCSCLPGOBUF:
/* get the maximum buffers size for output */
obuf = sclp_ioctls.max_sccb << 12;
if (put_user(obuf, (unsigned int __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPGKBUF:
/* get the number of buffers got from kernel at startup */
if (put_user(sclp_ioctls.kmem_sccb, (unsigned short __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPSCASE:
/* enable/disable conversion from upper to lower case */
if (get_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPGCASE:
/* Is conversion from upper to lower case of input enabled? */
if (put_user(sclp_ioctls.tolower, (unsigned char __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPSDELIM:
/*
* set special character used for separating upper and
* lower case, 0x00 disables this feature
*/
if (get_user(sclp_ioctls.delim, (unsigned char __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPGDELIM:
/*
* get special character used for separating upper and
* lower case, 0x00 disables this feature
*/
if (put_user(sclp_ioctls.delim, (unsigned char __user *) arg))
rc = -EFAULT;
break;
case TIOCSCLPSINIT:
/* set initial (default) sclp ioctls */
sclp_ioctls = sclp_ioctls_init;
check = 1;
break;
default:
rc = -ENOIOCTLCMD;
break;
}
if (check) {
spin_lock_irqsave(&sclp_tty_lock, flags);
if (sclp_ttybuf != NULL) {
sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab);
sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
}
return rc;
}
/*
* This routine returns the numbers of characters the tty driver
* will accept for queuing to be written. This number is subject
@ -268,7 +125,6 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
struct sclp_buffer, list);
spin_unlock_irqrestore(&sclp_tty_lock, flags);
} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
wake_up(&sclp_tty_waitq);
/* check if the tty needs a wake up call */
if (sclp_tty != NULL) {
tty_wakeup(sclp_tty);
@ -316,37 +172,37 @@ sclp_tty_timeout(unsigned long data)
/*
* Write a string to the sclp tty.
*/
static void
sclp_tty_write_string(const unsigned char *str, int count)
static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
{
unsigned long flags;
void *page;
int written;
int overall_written;
struct sclp_buffer *buf;
if (count <= 0)
return;
return 0;
overall_written = 0;
spin_lock_irqsave(&sclp_tty_lock, flags);
do {
/* Create a sclp output buffer if none exists yet */
if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (in_interrupt())
sclp_sync_wait();
if (may_fail)
goto out;
else
wait_event(sclp_tty_waitq,
!list_empty(&sclp_tty_pages));
sclp_sync_wait();
spin_lock_irqsave(&sclp_tty_lock, flags);
}
page = sclp_tty_pages.next;
list_del((struct list_head *) page);
sclp_ttybuf = sclp_make_buffer(page,
sclp_ioctls.columns,
sclp_ioctls.htab);
sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
SPACES_PER_TAB);
}
/* try to write the string to the current output buffer */
written = sclp_write(sclp_ttybuf, str, count);
overall_written += written;
if (written == count)
break;
/*
@ -363,27 +219,17 @@ sclp_tty_write_string(const unsigned char *str, int count)
count -= written;
} while (count > 0);
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_ioctls.final_nl) {
if (sclp_ttybuf != NULL &&
sclp_chars_in_buffer(sclp_ttybuf) != 0 &&
!timer_pending(&sclp_tty_timer)) {
init_timer(&sclp_tty_timer);
sclp_tty_timer.function = sclp_tty_timeout;
sclp_tty_timer.data = 0UL;
sclp_tty_timer.expires = jiffies + HZ/10;
add_timer(&sclp_tty_timer);
}
} else {
if (sclp_ttybuf != NULL &&
sclp_chars_in_buffer(sclp_ttybuf) != 0) {
buf = sclp_ttybuf;
sclp_ttybuf = NULL;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
__sclp_ttybuf_emit(buf);
spin_lock_irqsave(&sclp_tty_lock, flags);
}
if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
!timer_pending(&sclp_tty_timer)) {
init_timer(&sclp_tty_timer);
sclp_tty_timer.function = sclp_tty_timeout;
sclp_tty_timer.data = 0UL;
sclp_tty_timer.expires = jiffies + HZ/10;
add_timer(&sclp_tty_timer);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
return overall_written;
}
/*
@ -395,11 +241,10 @@ static int
sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
sclp_tty_write_string(buf, count);
return count;
return sclp_tty_write_string(buf, count, 1);
}
/*
@ -417,9 +262,10 @@ sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
sclp_tty_chars[sclp_tty_chars_count++] = ch;
if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
} return 1;
}
return 1;
}
/*
@ -430,7 +276,7 @@ static void
sclp_tty_flush_chars(struct tty_struct *tty)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
}
@ -469,7 +315,7 @@ static void
sclp_tty_flush_buffer(struct tty_struct *tty)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
sclp_tty_chars_count = 0;
}
}
@ -517,9 +363,7 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
* modify original string,
* returns length of resulting string
*/
static int
sclp_switch_cases(unsigned char *buf, int count,
unsigned char delim, int tolower)
static int sclp_switch_cases(unsigned char *buf, int count)
{
unsigned char *ip, *op;
int toggle;
@ -529,9 +373,9 @@ sclp_switch_cases(unsigned char *buf, int count,
ip = op = buf;
while (count-- > 0) {
/* compare with special character */
if (*ip == delim) {
if (*ip == CASE_DELIMITER) {
/* followed by another special character? */
if (count && ip[1] == delim) {
if (count && ip[1] == CASE_DELIMITER) {
/*
* ... then put a single copy of the special
* character to the output string
@ -550,7 +394,7 @@ sclp_switch_cases(unsigned char *buf, int count,
/* not the special character */
if (toggle)
/* but case switching is on */
if (tolower)
if (sclp_tty_tolower)
/* switch to uppercase */
*op++ = _ebc_toupper[(int) *ip++];
else
@ -570,30 +414,12 @@ sclp_get_input(unsigned char *start, unsigned char *end)
int count;
count = end - start;
/*
* if set in ioctl convert EBCDIC to lower case
* (modify original input in SCCB)
*/
if (sclp_ioctls.tolower)
if (sclp_tty_tolower)
EBC_TOLOWER(start, count);
/*
* if set in ioctl find out characters in lower or upper case
* (depends on current case) separated by a special character,
* works on EBCDIC
*/
if (sclp_ioctls.delim)
count = sclp_switch_cases(start, count,
sclp_ioctls.delim,
sclp_ioctls.tolower);
count = sclp_switch_cases(start, count);
/* convert EBCDIC to ASCII (modify original input in SCCB) */
sclp_ebcasc_str(start, count);
/* if set in ioctl write operators input to console */
if (sclp_ioctls.echo)
sclp_tty_write(sclp_tty, start, count);
/* transfer input to high level driver */
sclp_tty_input(start, count);
}
@ -717,7 +543,6 @@ static const struct tty_operations sclp_ops = {
.write_room = sclp_tty_write_room,
.chars_in_buffer = sclp_tty_chars_in_buffer,
.flush_buffer = sclp_tty_flush_buffer,
.ioctl = sclp_tty_ioctl,
};
static int __init
@ -736,9 +561,6 @@ sclp_tty_init(void)
rc = sclp_rw_init();
if (rc) {
printk(KERN_ERR SCLP_TTY_PRINT_HEADER
"could not register tty - "
"sclp_rw_init returned %d\n", rc);
put_tty_driver(driver);
return rc;
}
@ -754,7 +576,6 @@ sclp_tty_init(void)
}
INIT_LIST_HEAD(&sclp_tty_outqueue);
spin_lock_init(&sclp_tty_lock);
init_waitqueue_head(&sclp_tty_waitq);
init_timer(&sclp_tty_timer);
sclp_ttybuf = NULL;
sclp_tty_buffer_count = 0;
@ -763,11 +584,10 @@ sclp_tty_init(void)
* save 4 characters for the CPU number
* written at start of each line by VM/CP
*/
sclp_ioctls_init.columns = 76;
sclp_tty_columns = 76;
/* case input lines to lowercase */
sclp_ioctls_init.tolower = 1;
sclp_tty_tolower = 1;
}
sclp_ioctls = sclp_ioctls_init;
sclp_tty_chars_count = 0;
sclp_tty = NULL;
@ -792,9 +612,6 @@ sclp_tty_init(void)
tty_set_operations(driver, &sclp_ops);
rc = tty_register_driver(driver);
if (rc) {
printk(KERN_ERR SCLP_TTY_PRINT_HEADER
"could not register tty - "
"tty_register_driver returned %d\n", rc);
put_tty_driver(driver);
return rc;
}


@ -11,61 +11,8 @@
#ifndef __SCLP_TTY_H__
#define __SCLP_TTY_H__
#include <linux/ioctl.h>
#include <linux/termios.h>
#include <linux/tty_driver.h>
/* This is the type of data structures storing sclp ioctl setting. */
struct sclp_ioctls {
unsigned short htab;
unsigned char echo;
unsigned short columns;
unsigned char final_nl;
unsigned short max_sccb;
unsigned short kmem_sccb; /* can't be modified at run time */
unsigned char tolower;
unsigned char delim;
};
/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
#define SCLP_IOCTL_LETTER 'B'
/* set width of horizontal tabulator */
#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
/* enable/disable echo of input (independent from line discipline) */
#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
/* set number of colums for output */
#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
/* enable/disable writing without final new line character */
#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char)
/* set the maximum buffers size for output, rounded up to next 4kB boundary */
#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
/* set initial (default) sclp ioctls */
#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6)
/* enable/disable conversion from upper to lower case of input */
#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
/* set special character used for separating upper and lower case, */
/* 0x00 disables this feature */
#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char)
/* get width of horizontal tabulator */
#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
/* Is echo of input enabled ? (independent from line discipline) */
#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
/* get number of colums for output */
#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
/* Is writing without final new line character enabled ? */
#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char)
/* get the maximum buffers size for output */
#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
/* Is conversion from upper to lower case of input enabled ? */
#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
/* get special character used for separating upper and lower case, */
/* 0x00 disables this feature */
#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
/* get the number of buffers/pages got from kernel at startup */
#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short)
extern struct tty_driver *sclp_tty_driver;
#endif /* __SCLP_TTY_H__ */


@ -27,7 +27,6 @@
#include <asm/uaccess.h>
#include "sclp.h"
#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
#define SCLP_VT220_MAJOR TTY_MAJOR
#define SCLP_VT220_MINOR 65
#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
@ -82,8 +81,8 @@ static struct sclp_vt220_request *sclp_vt220_current_request;
/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;
/* Flag indicating whether this driver has already been initialized */
static int sclp_vt220_initialized = 0;
/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;
/* Flag indicating that sclp_vt220_current_request should really
* have been already queued but wasn't because the SCLP was processing
@ -609,10 +608,8 @@ sclp_vt220_flush_buffer(struct tty_struct *tty)
sclp_vt220_emit_current();
}
/*
* Initialize all relevant components and register driver with system.
*/
static void __init __sclp_vt220_cleanup(void)
/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
struct list_head *page, *p;
@ -623,21 +620,30 @@ static void __init __sclp_vt220_cleanup(void)
else
free_bootmem((unsigned long) page, PAGE_SIZE);
}
if (!list_empty(&sclp_vt220_register.list))
sclp_unregister(&sclp_vt220_register);
sclp_vt220_initialized = 0;
}
static int __init __sclp_vt220_init(void)
/* Release memory and unregister from sclp core. Controlled by init counting -
* only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
sclp_vt220_init_count--;
if (sclp_vt220_init_count != 0)
return;
sclp_unregister(&sclp_vt220_register);
__sclp_vt220_free_pages();
}
/* Allocate buffer pages and register with sclp core. Controlled by init
* counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
void *page;
int i;
int num_pages;
int rc;
if (sclp_vt220_initialized)
sclp_vt220_init_count++;
if (sclp_vt220_init_count != 1)
return 0;
sclp_vt220_initialized = 1;
spin_lock_init(&sclp_vt220_lock);
INIT_LIST_HEAD(&sclp_vt220_empty);
INIT_LIST_HEAD(&sclp_vt220_outqueue);
@ -649,24 +655,22 @@ static int __init __sclp_vt220_init(void)
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES;
for (i = 0; i < num_pages; i++) {
if (slab_is_available())
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
else
page = alloc_bootmem_low_pages(PAGE_SIZE);
if (!page) {
__sclp_vt220_cleanup();
return -ENOMEM;
rc = -ENOMEM;
goto out;
}
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
}
rc = sclp_register(&sclp_vt220_register);
out:
if (rc) {
printk(KERN_ERR SCLP_VT220_PRINT_HEADER
"could not register vt220 - "
"sclp_register returned %d\n", rc);
__sclp_vt220_cleanup();
__sclp_vt220_free_pages();
sclp_vt220_init_count--;
}
return rc;
}
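/*
 * Editor's note (inferred from the counting above): both entry points,
 * sclp_vt220_con_init() for the console and sclp_vt220_tty_init() for the
 * tty, funnel into this pair; the first __sclp_vt220_init() call allocates
 * and registers, later calls only bump the counter, and only the last
 * __sclp_vt220_cleanup() invoker really unregisters and frees.
 */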
@ -689,15 +693,13 @@ static int __init sclp_vt220_tty_init(void)
{
struct tty_driver *driver;
int rc;
int cleanup;
/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
* symmetry between VM and LPAR systems regarding ttyS1. */
driver = alloc_tty_driver(1);
if (!driver)
return -ENOMEM;
cleanup = !sclp_vt220_initialized;
rc = __sclp_vt220_init();
rc = __sclp_vt220_init(MAX_KMEM_PAGES);
if (rc)
goto out_driver;
@ -713,18 +715,13 @@ static int __init sclp_vt220_tty_init(void)
tty_set_operations(driver, &sclp_vt220_ops);
rc = tty_register_driver(driver);
if (rc) {
printk(KERN_ERR SCLP_VT220_PRINT_HEADER
"could not register tty - "
"tty_register_driver returned %d\n", rc);
if (rc)
goto out_init;
}
sclp_vt220_driver = driver;
return 0;
out_init:
if (cleanup)
__sclp_vt220_cleanup();
__sclp_vt220_cleanup();
out_driver:
put_tty_driver(driver);
return rc;
@ -773,10 +770,9 @@ sclp_vt220_con_init(void)
{
int rc;
INIT_LIST_HEAD(&sclp_vt220_register.list);
if (!CONSOLE_IS_SCLP)
return 0;
rc = __sclp_vt220_init();
rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
if (rc)
return rc;
/* Attach linux console */


@ -196,7 +196,7 @@ tape_34xx_erp_retry(struct tape_request *request)
static int
tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
{
if (irb->scsw.dstat == 0x85 /* READY */) {
if (irb->scsw.cmd.dstat == 0x85) { /* READY */
/* A medium was inserted in the drive. */
DBF_EVENT(6, "xuud med\n");
tape_34xx_delete_sbid_from(device, 0);
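/*
 * Editor's note (standard S/390 device-status bit assignments assumed):
 * 0x85 == DEV_STAT_ATTENTION (0x80) | DEV_STAT_DEV_END (0x04) |
 * DEV_STAT_UNIT_EXCEP (0x01) -- the unsolicited "drive became ready"
 * pattern the READY comment above refers to.
 */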
@ -844,22 +844,22 @@ tape_34xx_irq(struct tape_device *device, struct tape_request *request,
if (request == NULL)
return tape_34xx_unsolicited_irq(device, irb);
if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) &&
(irb->scsw.dstat & DEV_STAT_DEV_END) &&
if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
(irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
(request->op == TO_WRI)) {
/* Write at end of volume */
PRINT_INFO("End of volume\n"); /* XXX */
return tape_34xx_erp_failed(request, -ENOSPC);
}
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
return tape_34xx_unit_check(device, request, irb);
if (irb->scsw.dstat & DEV_STAT_DEV_END) {
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
/*
* A unit exception occurs on skipping over a tapemark block.
*/
if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
if (request->op == TO_BSB || request->op == TO_FSB)
request->rescnt++;
else


@ -837,13 +837,13 @@ tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
static int
tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
{
if (irb->scsw.dstat == DEV_STAT_CHN_END)
if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
/* Probably result of halt ssch */
return TAPE_IO_PENDING;
else if (irb->scsw.dstat == 0x85)
else if (irb->scsw.cmd.dstat == 0x85)
/* Device Ready */
DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
else if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
tape_3590_schedule_work(device, TO_READ_ATTMSG);
} else {
DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
@ -1515,18 +1515,19 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
if (request == NULL)
return tape_3590_unsolicited_irq(device, irb);
if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) &&
(irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) {
if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
(irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
(request->op == TO_WRI)) {
/* Write at end of volume */
DBF_EVENT(2, "End of volume\n");
return tape_3590_erp_failed(device, request, irb, -ENOSPC);
}
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
return tape_3590_unit_check(device, request, irb);
if (irb->scsw.dstat & DEV_STAT_DEV_END) {
if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) {
if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
if (request->op == TO_FSB || request->op == TO_BSB)
request->rescnt++;
else
@ -1536,12 +1537,12 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
return tape_3590_done(device, request);
}
if (irb->scsw.dstat & DEV_STAT_CHN_END) {
if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
DBF_EVENT(2, "cannel end\n");
return TAPE_IO_PENDING;
}
if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
DBF_EVENT(2, "Unit Attention when busy..\n");
return TAPE_IO_PENDING;
}


@ -839,7 +839,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request,
PRINT_INFO("-------------------------------------------------\n");
PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa);
PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
if (request != NULL)
PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
@ -867,7 +867,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
else
op = "---";
DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
irb->scsw.dstat,irb->scsw.cstat);
irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
sptr = (unsigned int *) irb->ecw;
DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
@ -1083,10 +1083,11 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
* error might still apply. So we just schedule the request to be
* started later.
*/
if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
if (irb->scsw.cmd.cc != 0 &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(request->status == TAPE_REQUEST_IN_IO)) {
DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
device->cdev_id, irb->scsw.cc, irb->scsw.fctl);
device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
request->status = TAPE_REQUEST_QUEUED;
schedule_delayed_work(&device->tape_dnr, HZ);
return;
@ -1094,8 +1095,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
/* May be an unsolicited irq */
if(request != NULL)
request->rescnt = irb->scsw.count;
else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) &&
request->rescnt = irb->scsw.cmd.count;
else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
!list_empty(&device->req_queue)) {
/* Not Ready to Ready after long busy ? */
struct tape_request *req;
@ -1111,7 +1112,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
return;
}
}
if (irb->scsw.dstat != 0x0c) {
if (irb->scsw.cmd.dstat != 0x0c) {
/* Set the 'ONLINE' flag depending on sense byte 1 */
if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
device->tape_generic_status |= GMT_ONLINE(~0);


@ -663,7 +663,7 @@ static int
tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
if (!tp->throttle)
tty3270_issue_read(tp, 0);
else
@ -671,11 +671,11 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
}
if (rq) {
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.count;
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
@ -1792,15 +1792,12 @@ static int __init tty3270_init(void)
tty_set_operations(driver, &tty3270_ops);
ret = tty_register_driver(driver);
if (ret) {
printk(KERN_ERR "tty3270 registration failed with %d\n", ret);
put_tty_driver(driver);
return ret;
}
tty3270_driver = driver;
ret = raw3270_register_notifier(tty3270_notifier);
if (ret) {
printk(KERN_ERR "tty3270 notifier registration failed "
"with %d\n", ret);
put_tty_driver(driver);
return ret;


@ -61,30 +61,24 @@ static int vmcp_release(struct inode *inode, struct file *file)
static ssize_t
vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
{
size_t tocopy;
ssize_t ret;
size_t size;
struct vmcp_session *session;
session = (struct vmcp_session *)file->private_data;
session = file->private_data;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
if (!session->response) {
mutex_unlock(&session->mutex);
return 0;
}
if (*ppos > session->resp_size) {
mutex_unlock(&session->mutex);
return 0;
}
tocopy = min(session->resp_size - (size_t) (*ppos), count);
tocopy = min(tocopy, session->bufsize - (size_t) (*ppos));
size = min_t(size_t, session->resp_size, session->bufsize);
ret = simple_read_from_buffer(buff, count, ppos,
session->response, size);
if (copy_to_user(buff, session->response + (*ppos), tocopy)) {
mutex_unlock(&session->mutex);
return -EFAULT;
}
mutex_unlock(&session->mutex);
*ppos += tocopy;
return tocopy;
return ret;
}
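/*
 * Editor's sketch of the helper adopted above (behavioural approximation,
 * not the kernel's exact implementation): simple_read_from_buffer() clamps
 * the read to the 'available' bytes, copies to user space and advances
 * *ppos, replacing the hand-rolled tocopy/copy_to_user bookkeeping removed
 * in this hunk:
 */
static ssize_t simple_read_from_buffer_sketch(void __user *to, size_t count,
					      loff_t *ppos, const void *from,
					      size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	if (copy_to_user(to, from + pos, count))
		return -EFAULT;
	*ppos = pos + count;
	return count;
}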
static ssize_t
@ -198,27 +192,23 @@ static int __init vmcp_init(void)
PRINT_WARN("z/VM CP interface is only available under z/VM\n");
return -ENODEV;
}
vmcp_debug = debug_register("vmcp", 1, 1, 240);
if (!vmcp_debug) {
PRINT_ERR("z/VM CP interface not loaded. Could not register "
"debug feature\n");
if (!vmcp_debug)
return -ENOMEM;
}
ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
if (ret) {
PRINT_ERR("z/VM CP interface not loaded. Could not register "
"debug feature view. Error code: %d\n", ret);
debug_unregister(vmcp_debug);
return ret;
}
ret = misc_register(&vmcp_dev);
if (ret) {
PRINT_ERR("z/VM CP interface not loaded. Could not register "
"misc device. Error code: %d\n", ret);
debug_unregister(vmcp_debug);
return ret;
}
PRINT_INFO("z/VM CP interface loaded\n");
return 0;
}
@ -226,7 +216,6 @@ static void __exit vmcp_exit(void)
{
misc_deregister(&vmcp_dev);
debug_unregister(vmcp_debug);
PRINT_INFO("z/VM CP interface unloaded.\n");
}
module_init(vmcp_init);


@ -216,9 +216,7 @@ static int vmlogrdr_get_recording_class_AB(void)
char *tail;
int len,i;
printk (KERN_DEBUG "vmlogrdr: query command: %s\n", cp_command);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
printk (KERN_DEBUG "vmlogrdr: response: %s", cp_response);
len = strnlen(cp_response,sizeof(cp_response));
// now the parsing
tail=strnchr(cp_response,len,'=');
@ -268,11 +266,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
logptr->recording_name,
qid_string);
printk (KERN_DEBUG "vmlogrdr: recording command: %s\n",
cp_command);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
printk (KERN_DEBUG "vmlogrdr: recording response: %s",
cp_response);
}
memset(cp_command, 0x00, sizeof(cp_command));
@ -282,10 +276,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
onoff,
qid_string);
printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
printk (KERN_DEBUG "vmlogrdr: recording response: %s",
cp_response);
/* The recording command will usually answer with 'Command complete'
* on success, but when the specific service was never connected
* before then there might be an additional informational message
@ -567,10 +558,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
"RECORDING %s PURGE ",
priv->recording_name);
printk (KERN_DEBUG "vmlogrdr: recording command: %s\n", cp_command);
cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
printk (KERN_DEBUG "vmlogrdr: recording response: %s",
cp_response);
return count;
}
@ -682,28 +670,20 @@ static int vmlogrdr_register_driver(void)
/* Register with iucv driver */
ret = iucv_register(&vmlogrdr_iucv_handler, 1);
if (ret) {
printk (KERN_ERR "vmlogrdr: failed to register with "
"iucv driver\n");
if (ret)
goto out;
}
ret = driver_register(&vmlogrdr_driver);
if (ret) {
printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
if (ret)
goto out_iucv;
}
ret = driver_create_file(&vmlogrdr_driver,
&driver_attr_recording_status);
if (ret) {
printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
if (ret)
goto out_driver;
}
vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
if (IS_ERR(vmlogrdr_class)) {
printk(KERN_ERR "vmlogrdr: failed to create class.\n");
ret = PTR_ERR(vmlogrdr_class);
vmlogrdr_class = NULL;
goto out_attr;
@ -871,12 +851,10 @@ static int __init vmlogrdr_init(void)
rc = vmlogrdr_register_cdev(dev);
if (rc)
goto cleanup;
printk (KERN_INFO "vmlogrdr: driver loaded\n");
return 0;
cleanup:
vmlogrdr_cleanup();
printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
return rc;
}
@ -884,7 +862,6 @@ cleanup:
static void __exit vmlogrdr_exit(void)
{
vmlogrdr_cleanup();
printk (KERN_INFO "vmlogrdr: driver unloaded\n");
return;
}


@ -277,7 +277,8 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct urdev *urd;
TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);
intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
irb->scsw.cmd.count);
if (!intparm) {
TRACE("ur_int_handler: unsolicited interrupt\n");
@ -288,7 +289,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
/* On special conditions irb is an error pointer */
if (IS_ERR(irb))
urd->io_request_rc = PTR_ERR(irb);
else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
urd->io_request_rc = 0;
else
urd->io_request_rc = -EIO;


@ -92,23 +92,15 @@ static int vmwdt_keepalive(void)
func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
WARN_ON(ret != 0);
kfree(ebc_cmd);
if (ret) {
printk(KERN_WARNING "%s: problem setting interval %d, "
"cmd %s\n", __func__, vmwdt_interval,
vmwdt_cmd);
}
return ret;
}
static int vmwdt_disable(void)
{
int ret = __diag288(wdt_cancel, 0, "", 0);
if (ret) {
printk(KERN_WARNING "%s: problem disabling watchdog\n",
__func__);
}
WARN_ON(ret != 0);
return ret;
}
@ -121,10 +113,8 @@ static int __init vmwdt_probe(void)
static char __initdata ebc_begin[] = {
194, 197, 199, 201, 213
};
if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0) {
printk(KERN_INFO "z/VM watchdog not available\n");
if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
return -EINVAL;
}
return vmwdt_disable();
}


@ -223,12 +223,10 @@ static int __init init_cpu_info(enum arch_id arch)
/* get info for boot cpu from lowcore, stored in the HSA */
sa = kmalloc(sizeof(*sa), GFP_KERNEL);
if (!sa) {
ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
if (!sa)
return -ENOMEM;
}
if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
ERROR_MSG("could not copy from HSA\n");
TRACE("could not copy from HSA\n");
kfree(sa);
return -EIO;
}
@ -511,6 +509,8 @@ static void __init set_s390x_lc_mask(union save_area *map)
*/
static int __init sys_info_init(enum arch_id arch)
{
int rc;
switch (arch) {
case ARCH_S390X:
MSG("DETECTED 'S390X (64 bit) OS'\n");
@ -529,10 +529,9 @@ static int __init sys_info_init(enum arch_id arch)
return -EINVAL;
}
sys_info.arch = arch;
if (init_cpu_info(arch)) {
ERROR_MSG("get cpu info failed\n");
return -ENOMEM;
}
rc = init_cpu_info(arch);
if (rc)
return rc;
sys_info.mem_size = real_memory_size;
return 0;
@ -544,12 +543,12 @@ static int __init check_sdias(void)
rc = sclp_sdias_blk_count();
if (rc < 0) {
ERROR_MSG("Could not determine HSA size\n");
TRACE("Could not determine HSA size\n");
return rc;
}
act_hsa_size = (rc - 1) * PAGE_SIZE;
if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
TRACE("HSA size too small: %i\n", act_hsa_size);
return -EINVAL;
}
return 0;
@ -590,16 +589,12 @@ static int __init zcore_init(void)
goto fail;
rc = check_sdias();
if (rc) {
ERROR_MSG("Dump initialization failed\n");
if (rc)
goto fail;
}
rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
if (rc) {
ERROR_MSG("sdial memcpy for arch id failed\n");
if (rc)
goto fail;
}
#ifndef __s390x__
if (arch == ARCH_S390X) {
@ -610,10 +605,8 @@ static int __init zcore_init(void)
#endif
rc = sys_info_init(arch);
if (rc) {
ERROR_MSG("arch init failed\n");
if (rc)
goto fail;
}
zcore_header_init(arch, &zcore_header);

View File

@ -2,9 +2,11 @@
# Makefile for the S/390 common i/o drivers
#
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
fcx.o itcw.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
obj-$(CONFIG_QDIO) += qdio.o

View File

@ -15,6 +15,7 @@
#include <linux/rcupdate.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "cio_debug.h"
@ -33,15 +34,15 @@ struct airq_t {
void *drv_data;
};
static union indicator_t indicators;
static struct airq_t *airqs[NR_AIRQS];
static union indicator_t indicators[MAX_ISC + 1];	/* valid iscs are 0..MAX_ISC */
static struct airq_t *airqs[MAX_ISC + 1][NR_AIRQS];
static int register_airq(struct airq_t *airq)
static int register_airq(struct airq_t *airq, u8 isc)
{
int i;
for (i = 0; i < NR_AIRQS; i++)
if (!cmpxchg(&airqs[i], NULL, airq))
if (!cmpxchg(&airqs[isc][i], NULL, airq))
return i;
return -ENOMEM;
}
@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq)
* s390_register_adapter_interrupt() - register adapter interrupt handler
* @handler: adapter handler to be registered
* @drv_data: driver data passed with each call to the handler
* @isc: isc for which the handler should be called
*
* Returns:
* Pointer to the indicator to be used on success
* ERR_PTR() if registration failed
*/
void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
void *drv_data)
void *drv_data, u8 isc)
{
struct airq_t *airq;
char dbf_txt[16];
int ret;
if (isc > MAX_ISC)
return ERR_PTR(-EINVAL);
airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
if (!airq) {
ret = -ENOMEM;
@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
}
airq->handler = handler;
airq->drv_data = drv_data;
ret = register_airq(airq);
if (ret < 0)
kfree(airq);
ret = register_airq(airq, isc);
out:
snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
CIO_TRACE_EVENT(4, dbf_txt);
if (ret < 0)
if (ret < 0) {
kfree(airq);
return ERR_PTR(ret);
else
return &indicators.byte[ret];
} else
return &indicators[isc].byte[ret];
}
EXPORT_SYMBOL(s390_register_adapter_interrupt);
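A hedged usage sketch of the per-ISC registration interface; my_handler() and MY_ISC are illustrative names, not part of this patch:

static void my_handler(void *indicator, void *drv_data)
{
	/* consume *indicator, then clear it for the next interrupt */
}

static int my_setup(void)
{
	void *ind;

	ind = s390_register_adapter_interrupt(my_handler, NULL, MY_ISC);
	if (IS_ERR(ind))
		return PTR_ERR(ind);
	/* ... on teardown, pass the same isc back ... */
	s390_unregister_adapter_interrupt(ind, MY_ISC);
	return 0;
}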
/**
* s390_unregister_adapter_interrupt - unregister adapter interrupt handler
* @ind: indicator for which the handler is to be unregistered
* @isc: interruption subclass
*/
void s390_unregister_adapter_interrupt(void *ind)
void s390_unregister_adapter_interrupt(void *ind, u8 isc)
{
struct airq_t *airq;
char dbf_txt[16];
int i;
i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]);
i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
CIO_TRACE_EVENT(4, dbf_txt);
indicators.byte[i] = 0;
airq = xchg(&airqs[i], NULL);
indicators[isc].byte[i] = 0;
airq = xchg(&airqs[isc][i], NULL);
/*
* Allow interrupts to complete. This will ensure that the airq handle
* is no longer referenced by any interrupt handler.
@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
void do_adapter_IO(void)
void do_adapter_IO(u8 isc)
{
int w;
int i;
@ -120,22 +125,22 @@ void do_adapter_IO(void)
* fetch operations.
*/
for (w = 0; w < NR_AIRQ_WORDS; w++) {
word = indicators.word[w];
word = indicators[isc].word[w];
i = w * NR_AIRQS_PER_WORD;
/*
* Check bytes within word for active indicators.
*/
while (word) {
if (word & INDICATOR_MASK) {
airq = airqs[i];
airq = airqs[isc][i];
if (likely(airq))
airq->handler(&indicators.byte[i],
airq->handler(&indicators[isc].byte[i],
airq->drv_data);
else
/*
* Reset ill-behaved indicator.
*/
indicators.byte[i] = 0;
indicators[isc].byte[i] = 0;
}
word <<= 8;
i++;

View File

@ -18,6 +18,7 @@
#include <asm/chpid.h>
#include <asm/sclp.h>
#include "../s390mach.h"
#include "cio.h"
#include "css.h"
#include "ioasm.h"
@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
}
return opm;
}
EXPORT_SYMBOL_GPL(chp_get_sch_opm);
/**
* chp_is_registered - check if a channel-path is registered
@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
CIO_TRACE_EVENT(2, dbf_text);
status = chp_get_status(chpid);
if (!on && !status) {
printk(KERN_ERR "cio: chpid %x.%02x is already offline\n",
chpid.cssid, chpid.id);
return -EINVAL;
}
if (!on && !status)
return 0;
set_chp_logically_online(chpid, on);
chsc_chp_vary(chpid, on);
@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
{
struct channel_path *chp;
struct device *device;
unsigned int size;
device = container_of(kobj, struct device, kobj);
chp = to_channelpath(device);
if (!chp->cmg_chars)
return 0;
size = sizeof(struct cmg_chars);
if (off > size)
return 0;
if (off + count > size)
count = size - off;
memcpy(buf, chp->cmg_chars + off, count);
return count;
return memory_read_from_buffer(buf, count, &off,
chp->cmg_chars, sizeof(struct cmg_chars));
}
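The open-coded offset/length clamping above is replaced by memory_read_from_buffer(). Roughly, the helper behaves like this sketch (an approximation, not the kernel source):

static ssize_t read_from_buffer_sketch(void *to, size_t count, loff_t *ppos,
				       const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	memcpy(to, from + pos, count);
	*ppos = pos + count;
	return count;
}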
static struct bin_attribute chp_measurement_chars_attr = {
@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid)
chpid.id);
/* Obtain channel path description and fill it in. */
ret = chsc_determine_channel_path_description(chpid, &chp->desc);
ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
if (ret)
goto out_free;
if ((chp->desc.flags & 0x80) == 0) {
@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid)
goto out_free;
}
/* Get channel-measurement characteristics. */
if (css_characteristics_avail && css_chsc_characteristics.scmc
&& css_chsc_characteristics.secm) {
if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
ret = chsc_get_channel_measurement_chars(chp);
if (ret)
goto out_free;
@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid)
/**
* chp_process_crw - process channel-path status change
* @id: channel-path ID number
* @status: non-zero if channel-path has become available, zero otherwise
* @crw0: channel report-word to handler
* @crw1: second channel-report word (always NULL)
* @overflow: crw overflow indication
*
* Handle channel-report-words indicating that the status of a channel-path
* has changed.
*/
void chp_process_crw(int id, int status)
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
int overflow)
{
struct chp_id chpid;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
/*
* Check for solicited machine checks. These are
* created by reset channel path and need not be
* handled here.
*/
if (crw0->slct) {
CIO_CRW_EVENT(2, "solicited machine check for "
"channel path %02X\n", crw0->rsid);
return;
}
chp_id_init(&chpid);
chpid.id = id;
if (status) {
chpid.id = crw0->rsid;
switch (crw0->erc) {
case CRW_ERC_IPARM: /* Path has come. */
if (!chp_is_registered(chpid))
chp_new(chpid);
chsc_chp_online(chpid);
} else
break;
case CRW_ERC_PERRI: /* Path has gone. */
case CRW_ERC_PERRN:
chsc_chp_offline(chpid);
break;
default:
CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
crw0->erc);
}
}
int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
int i;
int mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (!(ssd->path_mask & mask))
continue;
if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
continue;
if ((ssd->fla_valid_mask & mask) &&
((ssd->fla[i] & link->fla_mask) != link->fla))
continue;
return mask;
}
return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
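Worked example (illustrative): with ssd->path_mask 0xc0, the matching CHPID in slot 1, and no FLA restriction, the function returns 0x80 >> 1 == 0x40. A hypothetical caller:

struct chp_link link = { .chpid = chpid };	/* fla/fla_mask zero: no filter */
int mask = chp_ssd_get_mask(&sch->ssd_info, &link);

if (mask) {
	/* the event applies to exactly these path bits of the subchannel */
}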
static inline int info_bit_num(struct chp_id id)
{
return id.id + id.cssid * (__MAX_CHPID + 1);
@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work)
{
struct chp_id chpid;
enum cfg_task_t t;
int rc;
mutex_lock(&cfg_lock);
t = cfg_none;
@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work)
switch (t) {
case cfg_configure:
sclp_chp_configure(chpid);
info_expire();
chsc_chp_online(chpid);
rc = sclp_chp_configure(chpid);
if (rc)
CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
"%d\n", chpid.cssid, chpid.id, rc);
else {
info_expire();
chsc_chp_online(chpid);
}
break;
case cfg_deconfigure:
sclp_chp_deconfigure(chpid);
info_expire();
chsc_chp_offline(chpid);
rc = sclp_chp_deconfigure(chpid);
if (rc)
CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
"%d\n", chpid.cssid, chpid.id, rc);
else {
info_expire();
chsc_chp_offline(chpid);
}
break;
case cfg_none:
/* Get updated information after last change. */
@ -654,10 +704,16 @@ static int cfg_wait_idle(void)
static int __init chp_init(void)
{
struct chp_id chpid;
int ret;
ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
return ret;
chp_wq = create_singlethread_workqueue("cio_chp");
if (!chp_wq)
if (!chp_wq) {
s390_unregister_crw_handler(CRW_RSC_CPATH);
return -ENOMEM;
}
INIT_WORK(&cfg_work, cfg_func);
init_waitqueue_head(&cfg_wait_queue);
if (info_update())

View File

@ -12,12 +12,24 @@
#include <linux/device.h>
#include <asm/chpid.h>
#include "chsc.h"
#include "css.h"
#define CHP_STATUS_STANDBY 0
#define CHP_STATUS_CONFIGURED 1
#define CHP_STATUS_RESERVED 2
#define CHP_STATUS_NOT_RECOGNIZED 3
#define CHP_ONLINE 0
#define CHP_OFFLINE 1
#define CHP_VARY_ON 2
#define CHP_VARY_OFF 3
struct chp_link {
struct chp_id chpid;
u32 fla_mask;
u16 fla;
};
static inline int chp_test_bit(u8 *bitmap, int num)
{
int byte = num >> 3;
@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid);
u8 chp_get_sch_opm(struct subchannel *sch);
int chp_is_registered(struct chp_id chpid);
void *chp_get_chp_desc(struct chp_id chpid);
void chp_process_crw(int id, int available);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);
int chp_info_get_status(struct chp_id chpid);
int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
#endif /* S390_CHP_H */

View File

@ -2,8 +2,7 @@
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Copyright IBM Corp. 1999,2008
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@ -16,7 +15,9 @@
#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
@ -127,77 +128,12 @@ out_free:
return ret;
}
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
int cc;
cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
return 1;
return 0;
}
static void terminate_internal_io(struct subchannel *sch)
{
if (cio_clear(sch)) {
/* Recheck device in case clear failed. */
sch->lpm = 0;
if (device_trigger_verify(sch) != 0)
css_schedule_eval(sch->schid);
return;
}
/* Request retry of internal operation. */
device_set_intretry(sch);
/* Call handler. */
if (sch->driver && sch->driver->termination)
sch->driver->termination(sch);
}
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
int j;
int mask;
struct chp_id *chpid = data;
struct schib schib;
for (j = 0; j < 8; j++) {
mask = 0x80 >> j;
if ((sch->schib.pmcw.pim & mask) &&
(sch->schib.pmcw.chpid[j] == chpid->id))
break;
}
if (j >= 8)
return 0;
spin_lock_irq(sch->lock);
stsch(sch->schid, &schib);
if (!css_sch_is_valid(&schib))
goto out_unreg;
memcpy(&sch->schib, &schib, sizeof(struct schib));
/* Check for single path devices. */
if (sch->schib.pmcw.pim == 0x80)
goto out_unreg;
if (check_for_io_on_path(sch, mask)) {
if (device_is_online(sch))
device_kill_io(sch);
else {
terminate_internal_io(sch);
/* Re-start path verification. */
if (sch->driver && sch->driver->verify)
sch->driver->verify(sch);
}
} else {
/* trigger path verification. */
if (sch->driver && sch->driver->verify)
sch->driver->verify(sch);
else if (sch->lpm == mask)
if (sch->driver && sch->driver->chp_event)
if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
goto out_unreg;
}
spin_unlock_irq(sch->lock);
return 0;
@ -211,15 +147,18 @@ out_unreg:
void chsc_chp_offline(struct chp_id chpid)
{
char dbf_txt[15];
struct chp_link link;
sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
if (chp_get_status(chpid) <= 0)
return;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
@ -242,67 +181,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
return 0;
}
struct res_acc_data {
struct chp_id chpid;
u32 fla_mask;
u16 fla;
};
static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
struct res_acc_data *data)
{
int i;
int mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (!(ssd->path_mask & mask))
continue;
if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
continue;
if ((ssd->fla_valid_mask & mask) &&
((ssd->fla[i] & data->fla_mask) != data->fla))
continue;
return mask;
}
return 0;
}
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
int chp_mask, old_lpm;
struct res_acc_data *res_data = data;
spin_lock_irq(sch->lock);
chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
if (chp_mask == 0)
goto out;
if (stsch(sch->schid, &sch->schib))
goto out;
old_lpm = sch->lpm;
sch->lpm = ((sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
| chp_mask) & sch->opm;
if (!old_lpm && sch->lpm)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(sch);
out:
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, data, CHP_ONLINE);
spin_unlock_irq(sch->lock);
return 0;
}
static void s390_process_res_acc (struct res_acc_data *res_data)
static void s390_process_res_acc(struct chp_link *link)
{
char dbf_txt[15];
sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
res_data->chpid.id);
sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
link->chpid.id);
CIO_TRACE_EVENT( 2, dbf_txt);
if (res_data->fla != 0) {
sprintf(dbf_txt, "fla%x", res_data->fla);
if (link->fla != 0) {
sprintf(dbf_txt, "fla%x", link->fla);
CIO_TRACE_EVENT( 2, dbf_txt);
}
/* Wait until previous actions have settled. */
@ -315,7 +212,7 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
* will we have to do.
*/
for_each_subchannel_staged(__s390_process_res_acc,
s390_process_res_acc_new_sch, res_data);
s390_process_res_acc_new_sch, link);
}
static int
@ -388,7 +285,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
struct res_acc_data res_data;
struct chp_link link;
struct chp_id chpid;
int status;
@ -404,18 +301,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
chp_new(chpid);
else if (!status)
return;
memset(&res_data, 0, sizeof(struct res_acc_data));
res_data.chpid = chpid;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
res_data.fla = sei_area->fla;
link.fla = sei_area->fla;
if ((sei_area->vf & 0xc0) == 0xc0)
/* full link address */
res_data.fla_mask = 0xffff;
link.fla_mask = 0xffff;
else
/* link address */
res_data.fla_mask = 0xff00;
link.fla_mask = 0xff00;
}
s390_process_res_acc(&res_data);
s390_process_res_acc(&link);
}
struct chp_config_data {
@ -480,17 +377,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
}
}
void chsc_process_crw(void)
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct chsc_sei_area *sei_area;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
if (!sei_page)
return;
/* Access to sei_page is serialized through machine check handler
* thread, so no need for locking. */
sei_area = sei_page;
CIO_TRACE_EVENT( 2, "prcss");
CIO_TRACE_EVENT(2, "prcss");
do {
memset(sei_area, 0, sizeof(*sei_area));
sei_area->request.length = 0x0010;
@ -509,114 +414,36 @@ void chsc_process_crw(void)
} while (sei_area->flags & 0x80);
}
static int __chp_add_new_sch(struct subchannel_id schid, void *data)
{
struct schib schib;
if (stsch_err(schid, &schib))
/* We're through */
return -ENXIO;
/* Put it on the slow path. */
css_schedule_eval(schid);
return 0;
}
static int __chp_add(struct subchannel *sch, void *data)
{
int i, mask;
struct chp_id *chpid = data;
spin_lock_irq(sch->lock);
for (i=0; i<8; i++) {
mask = 0x80 >> i;
if ((sch->schib.pmcw.pim & mask) &&
(sch->schib.pmcw.chpid[i] == chpid->id))
break;
}
if (i==8) {
spin_unlock_irq(sch->lock);
return 0;
}
if (stsch(sch->schid, &sch->schib)) {
spin_unlock_irq(sch->lock);
css_schedule_eval(sch->schid);
return 0;
}
sch->lpm = ((sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
| mask) & sch->opm;
if (sch->driver && sch->driver->verify)
sch->driver->verify(sch);
spin_unlock_irq(sch->lock);
return 0;
}
void chsc_chp_online(struct chp_id chpid)
{
char dbf_txt[15];
struct chp_link link;
sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
if (chp_get_status(chpid) != 0) {
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
&chpid);
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
}
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
struct chp_id chpid, int on)
{
int chp, old_lpm;
int mask;
unsigned long flags;
struct chp_link link;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
spin_lock_irqsave(sch->lock, flags);
old_lpm = sch->lpm;
for (chp = 0; chp < 8; chp++) {
mask = 0x80 >> chp;
if (!(sch->ssd_info.path_mask & mask))
continue;
if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
continue;
if (on) {
sch->opm |= mask;
sch->lpm |= mask;
if (!old_lpm)
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(sch);
break;
}
sch->opm &= ~mask;
sch->lpm &= ~mask;
if (check_for_io_on_path(sch, mask)) {
if (device_is_online(sch))
/* Path verification is done after killing. */
device_kill_io(sch);
else {
/* Kill and retry internal I/O. */
terminate_internal_io(sch);
/* Re-start path verification. */
if (sch->driver && sch->driver->verify)
sch->driver->verify(sch);
}
} else if (!sch->lpm) {
if (device_trigger_verify(sch) != 0)
css_schedule_eval(sch->schid);
} else if (sch->driver && sch->driver->verify)
sch->driver->verify(sch);
break;
}
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, &link,
on ? CHP_VARY_ON : CHP_VARY_OFF);
spin_unlock_irqrestore(sch->lock, flags);
}
@ -656,6 +483,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
*/
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct chp_link link;
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
@ -664,10 +495,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
if (on)
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
__s390_vary_chpid_on, &chpid);
__s390_vary_chpid_on, &link);
else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
NULL, &chpid);
NULL, &link);
return 0;
}
@ -797,23 +628,33 @@ chsc_secm(struct channel_subsystem *css, int enable)
return ret;
}
int chsc_determine_channel_path_description(struct chp_id chpid,
struct channel_path_desc *desc)
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
int c, int m,
struct chsc_response_struct *resp)
{
int ccode, ret;
struct {
struct chsc_header request;
u32 : 24;
u32 : 2;
u32 m : 1;
u32 c : 1;
u32 fmt : 4;
u32 cssid : 8;
u32 : 4;
u32 rfmt : 4;
u32 first_chpid : 8;
u32 : 24;
u32 last_chpid : 8;
u32 zeroes1;
struct chsc_header response;
u32 zeroes2;
struct channel_path_desc desc;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scpd_area;
if ((rfmt == 1) && !css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scpd_area)
return -ENOMEM;
@ -821,8 +662,13 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
scpd_area->request.length = 0x0010;
scpd_area->request.code = 0x0002;
scpd_area->cssid = chpid.cssid;
scpd_area->first_chpid = chpid.id;
scpd_area->last_chpid = chpid.id;
scpd_area->m = m;
scpd_area->c = c;
scpd_area->fmt = fmt;
scpd_area->rfmt = rfmt;
ccode = chsc(scpd_area);
if (ccode > 0) {
@ -833,8 +679,7 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
ret = chsc_error_from_response(scpd_area->response.code);
if (ret == 0)
/* Success. */
memcpy(desc, &scpd_area->desc,
sizeof(struct channel_path_desc));
memcpy(resp, &scpd_area->response, scpd_area->response.length);
else
CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
@ -842,6 +687,25 @@ out:
free_page((unsigned long)scpd_area);
return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc)
{
struct chsc_response_struct *chsc_resp;
int ret;
chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
if (!chsc_resp)
return -ENOMEM;
ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
if (ret)
goto out_free;
memcpy(desc, &chsc_resp->data, chsc_resp->length);
out_free:
kfree(chsc_resp);
return ret;
}
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
@ -937,15 +801,23 @@ out:
int __init chsc_alloc_sei_area(void)
{
int ret;
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sei_page)
if (!sei_page) {
CIO_MSG_EVENT(0, "Can't allocate page for processing of "
"chsc machine checks!\n");
return (sei_page ? 0 : -ENOMEM);
return -ENOMEM;
}
ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
if (ret)
free_page((unsigned long)sei_page);
return ret;
}
void __init chsc_free_sei_area(void)
{
s390_unregister_crw_handler(CRW_RSC_CSS);
free_page((unsigned long)sei_page);
}
@ -1043,3 +915,52 @@ exit:
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
struct {
struct chsc_header request;
unsigned int rsvd0;
unsigned int op : 8;
unsigned int rsvd1 : 8;
unsigned int ctrl : 16;
unsigned int rsvd2[5];
struct chsc_header response;
unsigned int rsvd3[7];
} __attribute__ ((packed)) *rr;
int rc;
memset(page, 0, PAGE_SIZE);
rr = page;
rr->request.length = 0x0020;
rr->request.code = 0x0033;
rr->op = op;
rr->ctrl = ctrl;
rc = chsc(rr);
if (rc)
return -EIO;
rc = (rr->response.code == 0x0001) ? 0 : -EIO;
return rc;
}
int chsc_sstpi(void *page, void *result, size_t size)
{
struct {
struct chsc_header request;
unsigned int rsvd0[3];
struct chsc_header response;
char data[size];
} __attribute__ ((packed)) *rr;
int rc;
memset(page, 0, PAGE_SIZE);
rr = page;
rr->request.length = 0x0010;
rr->request.code = 0x0038;
rc = chsc(rr);
if (rc)
return -EIO;
memcpy(result, &rr->data, size);
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
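Both STP helpers follow the synchronous CHSC pattern used throughout this file: fill a fixed request header (length and command code) in a DMA-capable page, issue the chsc instruction, and map response code 0x0001 to success. A hedged caller sketch (the op and ctrl values are illustrative and would come from the STP code):

void *page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

if (!page)
	return -ENOMEM;
rc = chsc_sstpc(page, op, ctrl);
free_page((unsigned long)page);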

View File

@ -4,7 +4,8 @@
#include <linux/types.h>
#include <linux/device.h>
#include <asm/chpid.h>
#include "schid.h"
#include <asm/chsc.h>
#include <asm/schid.h>
#define CHSC_SDA_OC_MSS 0x2
@ -36,14 +37,15 @@ struct channel_path_desc {
struct channel_path;
extern void chsc_process_crw(void);
struct css_general_char {
u64 : 41;
u64 : 12;
u32 dynio : 1; /* bit 12 */
u32 : 28;
u32 aif : 1; /* bit 41 */
u32 : 3;
u32 mcss : 1; /* bit 45 */
u32 : 2;
u32 fcs : 1; /* bit 46 */
u32 : 1;
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
@ -51,7 +53,11 @@ struct css_general_char {
u32 qebsm : 1; /* bit 58 */
u32 : 8;
u32 aif_osa : 1; /* bit 67 */
u32 : 28;
u32 : 14;
u32 cib : 1; /* bit 82 */
u32 : 5;
u32 fcx : 1; /* bit 88 */
u32 : 7;
}__attribute__((packed));
struct css_chsc_char {
@ -78,7 +84,6 @@ struct chsc_ssd_info {
extern int chsc_get_ssd_info(struct subchannel_id schid,
struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
extern int css_characteristics_avail;
extern int chsc_alloc_sei_area(void);
extern void chsc_free_sei_area(void);
@ -87,8 +92,11 @@ struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
int chsc_chp_vary(struct chp_id chpid, int on);
int chsc_determine_channel_path_description(struct chp_id chpid,
struct channel_path_desc *desc);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
int c, int m,
struct chsc_response_struct *resp);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc);
void chsc_chp_online(struct chp_id chpid);
void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);

820 drivers/s390/cio/chsc_sch.c Normal file
View File

@ -0,0 +1,820 @@
/*
* Driver for s390 chsc subchannels
*
* Copyright IBM Corp. 2008
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"
static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;
#define CHSC_MSG(imp, args...) do { \
debug_sprintf_event(chsc_debug_msg_id, imp , ##args); \
} while (0)
#define CHSC_LOG(imp, txt) do { \
debug_text_event(chsc_debug_log_id, imp , txt); \
} while (0)
static void CHSC_LOG_HEX(int level, void *data, int length)
{
while (length > 0) {
debug_event(chsc_debug_log_id, level, data, length);
length -= chsc_debug_log_id->buf_size;
data += chsc_debug_log_id->buf_size;
}
}
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");
static void chsc_subchannel_irq(struct subchannel *sch)
{
struct chsc_private *private = sch->private;
struct chsc_request *request = private->request;
struct irb *irb = (struct irb *)__LC_IRB;
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
/* Copy irb to provided request and set done. */
if (!request) {
CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
sch->schid.ssid, sch->schid.sch_no);
return;
}
private->request = NULL;
memcpy(&request->irb, irb, sizeof(*irb));
stsch(sch->schid, &sch->schib);
complete(&request->completion);
put_device(&sch->dev);
}
static int chsc_subchannel_probe(struct subchannel *sch)
{
struct chsc_private *private;
int ret;
CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
sch->schid.ssid, sch->schid.sch_no);
sch->isc = CHSC_SCH_ISC;
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret) {
CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
kfree(private);
} else {
sch->private = private;
if (sch->dev.uevent_suppress) {
sch->dev.uevent_suppress = 0;
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
}
return ret;
}
static int chsc_subchannel_remove(struct subchannel *sch)
{
struct chsc_private *private;
cio_disable_subchannel(sch);
private = sch->private;
sch->private = NULL;
if (private->request) {
complete(&private->request->completion);
put_device(&sch->dev);
}
kfree(private);
return 0;
}
static void chsc_subchannel_shutdown(struct subchannel *sch)
{
cio_disable_subchannel(sch);
}
static struct css_device_id chsc_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
static struct css_driver chsc_subchannel_driver = {
.owner = THIS_MODULE,
.subchannel_type = chsc_subchannel_ids,
.irq = chsc_subchannel_irq,
.probe = chsc_subchannel_probe,
.remove = chsc_subchannel_remove,
.shutdown = chsc_subchannel_shutdown,
.name = "chsc_subchannel",
};
static int __init chsc_init_dbfs(void)
{
chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
16 * sizeof(long));
if (!chsc_debug_msg_id)
goto out;
debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
debug_set_level(chsc_debug_msg_id, 2);
chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
if (!chsc_debug_log_id)
goto out;
debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
debug_set_level(chsc_debug_log_id, 2);
return 0;
out:
if (chsc_debug_msg_id)
debug_unregister(chsc_debug_msg_id);
return -ENOMEM;
}
static void chsc_remove_dbfs(void)
{
debug_unregister(chsc_debug_log_id);
debug_unregister(chsc_debug_msg_id);
}
static int __init chsc_init_sch_driver(void)
{
return css_driver_register(&chsc_subchannel_driver);
}
static void chsc_cleanup_sch_driver(void)
{
css_driver_unregister(&chsc_subchannel_driver);
}
static DEFINE_SPINLOCK(chsc_lock);
static int chsc_subchannel_match_next_free(struct device *dev, void *data)
{
struct subchannel *sch = to_subchannel(dev);
return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}
static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
struct device *dev;
dev = driver_find_device(&chsc_subchannel_driver.drv,
sch ? &sch->dev : NULL, NULL,
chsc_subchannel_match_next_free);
return dev ? to_subchannel(dev) : NULL;
}
/**
* chsc_async() - try to start a chsc request asynchronously
* @chsc_area: request to be started
* @request: request structure to associate
*
* Tries to start a chsc request on one of the existing chsc subchannels.
* Returns:
* %0 if the request was performed synchronously
* %-EINPROGRESS if the request was successfully started
* %-EBUSY if all chsc subchannels are busy
* %-ENODEV if no chsc subchannels are available
* Context:
* interrupts disabled, chsc_lock held
*/
static int chsc_async(struct chsc_async_area *chsc_area,
struct chsc_request *request)
{
int cc;
struct chsc_private *private;
struct subchannel *sch = NULL;
int ret = -ENODEV;
char dbf[10];
chsc_area->header.key = PAGE_DEFAULT_KEY;
while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock);
private = sch->private;
if (private->request) {
spin_unlock(sch->lock);
ret = -EBUSY;
continue;
}
chsc_area->header.sid = sch->schid;
CHSC_LOG(2, "schid");
CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
cc = chsc(chsc_area);
sprintf(dbf, "cc:%d", cc);
CHSC_LOG(2, dbf);
switch (cc) {
case 0:
ret = 0;
break;
case 1:
sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
ret = -EINPROGRESS;
private->request = request;
break;
case 2:
ret = -EBUSY;
break;
default:
ret = -ENODEV;
}
spin_unlock(sch->lock);
CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
sch->schid.ssid, sch->schid.sch_no, cc);
if (ret == -EINPROGRESS)
return -EINPROGRESS;
put_device(&sch->dev);
if (ret == 0)
return 0;
}
return ret;
}
static void chsc_log_command(struct chsc_async_area *chsc_area)
{
char dbf[10];
sprintf(dbf, "CHSC:%x", chsc_area->header.code);
CHSC_LOG(0, dbf);
CHSC_LOG_HEX(0, chsc_area, 32);
}
static int chsc_examine_irb(struct chsc_request *request)
{
int backed_up;
if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
return -EIO;
backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
if (scsw_cstat(&request->irb.scsw) == 0)
return 0;
if (!backed_up)
return 0;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
return -EIO;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
return -EPERM;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
return -EAGAIN;
if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
return -EAGAIN;
return -EIO;
}
static int chsc_ioctl_start(void __user *user_area)
{
struct chsc_request *request;
struct chsc_async_area *chsc_area;
int ret;
char dbf[10];
if (!css_general_characteristics.dynio)
/* It makes no sense to try. */
return -EOPNOTSUPP;
chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request) {
ret = -ENOMEM;
goto out_free;
}
init_completion(&request->completion);
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
ret = -EFAULT;
goto out_free;
}
chsc_log_command(chsc_area);
spin_lock_irq(&chsc_lock);
ret = chsc_async(chsc_area, request);
spin_unlock_irq(&chsc_lock);
if (ret == -EINPROGRESS) {
wait_for_completion(&request->completion);
ret = chsc_examine_irb(request);
}
/* copy area back to user */
if (!ret)
if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
ret = -EFAULT;
out_free:
sprintf(dbf, "ret:%d", ret);
CHSC_LOG(0, dbf);
kfree(request);
free_page((unsigned long)chsc_area);
return ret;
}
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
struct chsc_chp_cd *cd;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 8;
u32 first_chpid : 8;
u32 : 24;
u32 last_chpid : 8;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scpcd_area;
scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scpcd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cd, user_cd, sizeof(*cd))) {
ret = -EFAULT;
goto out_free;
}
scpcd_area->request.length = 0x0010;
scpcd_area->request.code = 0x0028;
scpcd_area->m = cd->m;
scpcd_area->fmt1 = cd->fmt;
scpcd_area->cssid = cd->chpid.cssid;
scpcd_area->first_chpid = cd->chpid.id;
scpcd_area->last_chpid = cd->chpid.id;
ccode = chsc(scpcd_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (scpcd_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "scpcd: response code=%x\n",
scpcd_area->response.code);
goto out_free;
}
memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
if (copy_to_user(user_cd, cd, sizeof(*cd)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cd);
free_page((unsigned long)scpcd_area);
return ret;
}
static int chsc_ioctl_info_cu(void __user *user_cd)
{
struct chsc_cu_cd *cd;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 8;
u32 first_cun : 8;
u32 : 24;
u32 last_cun : 8;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scucd_area;
scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scucd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cd, user_cd, sizeof(*cd))) {
ret = -EFAULT;
goto out_free;
}
scucd_area->request.length = 0x0010;
scucd_area->request.code = 0x0028;
scucd_area->m = cd->m;
scucd_area->fmt1 = cd->fmt;
scucd_area->cssid = cd->cssid;
scucd_area->first_cun = cd->cun;
scucd_area->last_cun = cd->cun;
ccode = chsc(scucd_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (scucd_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "scucd: response code=%x\n",
scucd_area->response.code);
goto out_free;
}
memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
if (copy_to_user(user_cd, cd, sizeof(*cd)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cd);
free_page((unsigned long)scucd_area);
return ret;
}
static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
struct chsc_sch_cud *cud;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 5;
u32 fmt1 : 4;
u32 : 2;
u32 ssid : 2;
u32 first_sch : 16;
u32 : 8;
u32 cssid : 8;
u32 last_sch : 16;
u32 : 32;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sscud_area;
sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sscud_area)
return -ENOMEM;
cud = kzalloc(sizeof(*cud), GFP_KERNEL);
if (!cud) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(cud, user_cud, sizeof(*cud))) {
ret = -EFAULT;
goto out_free;
}
sscud_area->request.length = 0x0010;
sscud_area->request.code = 0x0006;
sscud_area->m = cud->schid.m;
sscud_area->fmt1 = cud->fmt;
sscud_area->ssid = cud->schid.ssid;
sscud_area->first_sch = cud->schid.sch_no;
sscud_area->cssid = cud->schid.cssid;
sscud_area->last_sch = cud->schid.sch_no;
ccode = chsc(sscud_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sscud_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sscud: response code=%x\n",
sscud_area->response.code);
goto out_free;
}
memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
if (copy_to_user(user_cud, cud, sizeof(*cud)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(cud);
free_page((unsigned long)sscud_area);
return ret;
}
static int chsc_ioctl_conf_info(void __user *user_ci)
{
struct chsc_conf_info *ci;
int ret, ccode;
struct {
struct chsc_header request;
u32 : 2;
u32 m : 1;
u32 : 1;
u32 fmt1 : 4;
u32 cssid : 8;
u32 : 6;
u32 ssid : 2;
u32 : 8;
u64 : 64;
struct chsc_header response;
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sci_area;
sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sci_area)
return -ENOMEM;
ci = kzalloc(sizeof(*ci), GFP_KERNEL);
if (!ci) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(ci, user_ci, sizeof(*ci))) {
ret = -EFAULT;
goto out_free;
}
sci_area->request.length = 0x0010;
sci_area->request.code = 0x0012;
sci_area->m = ci->id.m;
sci_area->fmt1 = ci->fmt;
sci_area->cssid = ci->id.cssid;
sci_area->ssid = ci->id.ssid;
ccode = chsc(sci_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sci_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sci: response code=%x\n",
sci_area->response.code);
goto out_free;
}
memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
if (copy_to_user(user_ci, ci, sizeof(*ci)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(ci);
free_page((unsigned long)sci_area);
return ret;
}
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
struct chsc_comp_list *ccl;
int ret, ccode;
struct {
struct chsc_header request;
u32 ctype : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
u64 : 64;
u32 list_parm[2];
u64 : 64;
struct chsc_header response;
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sccl_area;
struct {
u32 m : 1;
u32 : 31;
u32 cssid : 8;
u32 : 16;
u32 chpid : 8;
} __attribute__ ((packed)) *chpid_parm;
struct {
u32 f_cssid : 8;
u32 l_cssid : 8;
u32 : 16;
u32 res;
} __attribute__ ((packed)) *cssids_parm;
sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccl_area)
return -ENOMEM;
ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
if (!ccl) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
ret = -EFAULT;
goto out_free;
}
sccl_area->request.length = 0x0020;
sccl_area->request.code = 0x0030;
sccl_area->fmt = ccl->req.fmt;
sccl_area->ctype = ccl->req.ctype;
switch (sccl_area->ctype) {
case CCL_CU_ON_CHP:
case CCL_IOP_CHP:
chpid_parm = (void *)&sccl_area->list_parm;
chpid_parm->m = ccl->req.chpid.m;
chpid_parm->cssid = ccl->req.chpid.chp.cssid;
chpid_parm->chpid = ccl->req.chpid.chp.id;
break;
case CCL_CSS_IMG:
case CCL_CSS_IMG_CONF_CHAR:
cssids_parm = (void *)&sccl_area->list_parm;
cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
break;
}
ccode = chsc(sccl_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sccl_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sccl: response code=%x\n",
sccl_area->response.code);
goto out_free;
}
memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(ccl);
free_page((unsigned long)sccl_area);
return ret;
}
static int chsc_ioctl_chpd(void __user *user_chpd)
{
struct chsc_cpd_info *chpd;
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
if (!chpd)
return -ENOMEM;
if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
ret = -EFAULT;
goto out_free;
}
ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
chpd->rfmt, chpd->c, chpd->m,
&chpd->chpdb);
if (ret)
goto out_free;
if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
ret = -EFAULT;
out_free:
kfree(chpd);
return ret;
}
static int chsc_ioctl_dcal(void __user *user_dcal)
{
struct chsc_dcal *dcal;
int ret, ccode;
struct {
struct chsc_header request;
u32 atype : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
u32 res0[2];
u32 list_parm[2];
u32 res1[2];
struct chsc_header response;
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sdcal_area;
sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sdcal_area)
return -ENOMEM;
dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
if (!dcal) {
ret = -ENOMEM;
goto out_free;
}
if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
ret = -EFAULT;
goto out_free;
}
sdcal_area->request.length = 0x0020;
sdcal_area->request.code = 0x0034;
sdcal_area->atype = dcal->req.atype;
sdcal_area->fmt = dcal->req.fmt;
memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
sizeof(sdcal_area->list_parm));
ccode = chsc(sdcal_area);
if (ccode != 0) {
ret = -EIO;
goto out_free;
}
if (sdcal_area->response.code != 0x0001) {
ret = -EIO;
CHSC_MSG(0, "sdcal: response code=%x\n",
sdcal_area->response.code);
goto out_free;
}
memcpy(&dcal->sdcal, &sdcal_area->response,
sdcal_area->response.length);
if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
ret = -EFAULT;
else
ret = 0;
out_free:
kfree(dcal);
free_page((unsigned long)sdcal_area);
return ret;
}
static long chsc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
switch (cmd) {
case CHSC_START:
return chsc_ioctl_start((void __user *)arg);
case CHSC_INFO_CHANNEL_PATH:
return chsc_ioctl_info_channel_path((void __user *)arg);
case CHSC_INFO_CU:
return chsc_ioctl_info_cu((void __user *)arg);
case CHSC_INFO_SCH_CU:
return chsc_ioctl_info_sch_cu((void __user *)arg);
case CHSC_INFO_CI:
return chsc_ioctl_conf_info((void __user *)arg);
case CHSC_INFO_CCL:
return chsc_ioctl_conf_comp_list((void __user *)arg);
case CHSC_INFO_CPD:
return chsc_ioctl_chpd((void __user *)arg);
case CHSC_INFO_DCAL:
return chsc_ioctl_dcal((void __user *)arg);
default: /* unknown ioctl number */
return -ENOIOCTLCMD;
}
}
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
};
static struct miscdevice chsc_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "chsc",
.fops = &chsc_fops,
};
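User space reaches these handlers through /dev/chsc. A hedged sketch of a caller, assuming CHSC_START and struct chsc_async_area are exported via <asm/chsc.h> and that the request buffer must be one page, page-aligned:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <asm/chsc.h>

int main(void)
{
	struct chsc_async_area *area;
	int fd = open("/dev/chsc", O_RDWR);

	if (fd < 0 || posix_memalign((void **)&area, 4096, 4096))
		return 1;
	memset(area, 0, 4096);
	/* fill area->header with the desired CHSC command code and length */
	if (ioctl(fd, CHSC_START, area) == -1)
		perror("CHSC_START");
	return 0;
}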
static int __init chsc_misc_init(void)
{
return misc_register(&chsc_misc_device);
}
static void chsc_misc_cleanup(void)
{
misc_deregister(&chsc_misc_device);
}
static int __init chsc_sch_init(void)
{
int ret;
ret = chsc_init_dbfs();
if (ret)
return ret;
isc_register(CHSC_SCH_ISC);
ret = chsc_init_sch_driver();
if (ret)
goto out_dbf;
ret = chsc_misc_init();
if (ret)
goto out_driver;
return ret;
out_driver:
chsc_cleanup_sch_driver();
out_dbf:
isc_unregister(CHSC_SCH_ISC);
chsc_remove_dbfs();
return ret;
}
static void __exit chsc_sch_exit(void)
{
chsc_misc_cleanup();
chsc_cleanup_sch_driver();
isc_unregister(CHSC_SCH_ISC);
chsc_remove_dbfs();
}
module_init(chsc_sch_init);
module_exit(chsc_sch_exit);

View File

@ -0,0 +1,13 @@
#ifndef _CHSC_SCH_H
#define _CHSC_SCH_H
struct chsc_request {
struct completion completion;
struct irb irb;
};
struct chsc_private {
struct chsc_request *request;
};
#endif

View File

@ -2,7 +2,7 @@
* drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
*
* Copyright (C) IBM Corp. 1999,2006
* Copyright IBM Corp. 1999,2008
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@ -24,7 +24,9 @@
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <asm/cpu.h>
#include <asm/fcx.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
@ -72,7 +74,6 @@ out_unregister:
debug_unregister(cio_debug_trace_id);
if (cio_debug_crw_id)
debug_unregister(cio_debug_crw_id);
printk(KERN_WARNING"cio: could not initialize debugging\n");
return -1;
}
@ -128,7 +129,7 @@ cio_tpi(void)
local_bh_disable();
irq_enter ();
spin_lock(sch->lock);
memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
spin_unlock(sch->lock);
@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
{
char dbf_txt[15];
int ccode;
struct orb *orb;
union orb *orb;
CIO_TRACE_EVENT(4, "stIO");
CIO_TRACE_EVENT(4, sch->dev.bus_id);
orb = &to_io_private(sch)->orb;
/* sch is always under 2G. */
orb->intparm = (u32)(addr_t)sch;
orb->fmt = 1;
orb->cmd.intparm = (u32)(addr_t)sch;
orb->cmd.fmt = 1;
orb->pfch = sch->options.prefetch == 0;
orb->spnd = sch->options.suspend;
orb->ssic = sch->options.suspend && sch->options.inter;
orb->lpm = (lpm != 0) ? lpm : sch->lpm;
orb->cmd.pfch = sch->options.prefetch == 0;
orb->cmd.spnd = sch->options.suspend;
orb->cmd.ssic = sch->options.suspend && sch->options.inter;
orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
/*
* for 64 bit we always support 64 bit IDAWs with 4k page size only
*/
orb->c64 = 1;
orb->i2k = 0;
orb->cmd.c64 = 1;
orb->cmd.i2k = 0;
#endif
orb->key = key >> 4;
orb->cmd.key = key >> 4;
/* issue "Start Subchannel" */
orb->cpa = (__u32) __pa(cpa);
orb->cmd.cpa = (__u32) __pa(cpa);
ccode = ssch(sch->schid, orb);
/* process condition code */
@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
/*
* initialize device status information
*/
sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
return 0;
case 1: /* status pending */
case 2: /* busy */
@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch)
switch (ccode) {
case 0:
sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
return 0;
case 1:
return -EBUSY;
@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch)
switch (ccode) {
case 0:
sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
return 0;
case 1: /* status pending */
case 2: /* busy */
@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch)
switch (ccode) {
case 0:
sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
return 0;
default: /* device not operational */
return -ENODEV;
@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch)
return ret;
}
/*
* Enable subchannel.
/**
* cio_enable_subchannel - enable a subchannel.
* @sch: subchannel to be enabled
* @intparm: interruption parameter to set
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
CIO_TRACE_EVENT (2, dbf_txt);
return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
/*
* Disable subchannel.
/**
* cio_disable_subchannel - disable a subchannel.
* @sch: subchannel to disable
*/
int
cio_disable_subchannel (struct subchannel *sch)
int cio_disable_subchannel(struct subchannel *sch)
{
char dbf_txt[15];
int ccode;
@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch)
if (ccode == 3) /* Not operational. */
return -ENODEV;
if (sch->schib.scsw.actl != 0)
if (scsw_actl(&sch->schib.scsw) != 0)
/*
* the disable function must not be called while there are
* requests pending for completion !
@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch)
CIO_TRACE_EVENT (2, dbf_txt);
return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
int cio_create_sch_lock(struct subchannel *sch)
{
@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch)
return 0;
}
/*
* cio_validate_subchannel()
static int cio_check_devno_blacklisted(struct subchannel *sch)
{
if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
/*
* This device must not be known to Linux. So we simply
* say that there is no device and return ENODEV.
*/
CIO_MSG_EVENT(6, "Blacklisted device detected "
"at devno %04X, subchannel set %x\n",
sch->schib.pmcw.dev, sch->schid.ssid);
return -ENODEV;
}
return 0;
}
static int cio_validate_io_subchannel(struct subchannel *sch)
{
/* Initialization for io subchannels. */
if (!css_sch_is_valid(&sch->schib))
return -ENODEV;
/* Devno is valid. */
return cio_check_devno_blacklisted(sch);
}
static int cio_validate_msg_subchannel(struct subchannel *sch)
{
/* Initialization for message subchannels. */
if (!css_sch_is_valid(&sch->schib))
return -ENODEV;
/* Devno is valid. */
return cio_check_devno_blacklisted(sch);
}
/**
* cio_validate_subchannel - basic validation of subchannel
* @sch: subchannel structure to be filled out
* @schid: subchannel id
*
* Find out subchannel type and initialize struct subchannel.
* Return codes:
* SUBCHANNEL_TYPE_IO for a normal io subchannel
* SUBCHANNEL_TYPE_CHSC for a chsc subchannel
* SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
* SUBCHANNEL_TYPE_ADM for an adm(?) subchannel
* 0 on success
* -ENXIO for non-defined subchannels
* -ENODEV for subchannels with invalid device number or blacklisted devices
* -ENODEV for invalid subchannels or blacklisted devices
* -EIO for subchannels in an invalid subchannel set
*/
int
cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
char dbf_txt[15];
int ccode;
int err;
sprintf (dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT (4, dbf_txt);
sprintf(dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT(4, dbf_txt);
/* Nuke all fields. */
memset(sch, 0, sizeof(struct subchannel));
@ -546,67 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
/* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
/*
* ... just being curious we check for non I/O subchannels
*/
if (sch->st != 0) {
CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports "
"non-I/O subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
/* We stop here for non-io subchannels. */
err = sch->st;
switch (sch->st) {
case SUBCHANNEL_TYPE_IO:
err = cio_validate_io_subchannel(sch);
break;
case SUBCHANNEL_TYPE_MSG:
err = cio_validate_msg_subchannel(sch);
break;
default:
err = 0;
}
if (err)
goto out;
}
/* Initialization for io subchannels. */
if (!css_sch_is_valid(&sch->schib)) {
err = -ENODEV;
goto out;
}
/* Devno is valid. */
if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
/*
* This device must not be known to Linux. So we simply
* say that there is no device and return ENODEV.
*/
CIO_MSG_EVENT(6, "Blacklisted device detected "
"at devno %04X, subchannel set %x\n",
sch->schib.pmcw.dev, sch->schid.ssid);
err = -ENODEV;
goto out;
}
if (cio_is_console(sch->schid)) {
sch->opm = 0xff;
sch->isc = 1;
} else {
sch->opm = chp_get_sch_opm(sch);
sch->isc = 3;
}
sch->lpm = sch->schib.pmcw.pam & sch->opm;
CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
"- PIM = %02X, PAM = %02X, POM = %02X\n",
sch->schib.pmcw.dev, sch->schid.ssid,
sch->schid.sch_no, sch->schib.pmcw.pim,
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
/*
* We now have to initially ...
* ... enable "concurrent sense"
* ... enable "multipath mode" if more than one
* CHPID is available. This is done regardless
* whether multiple paths are available for us.
*/
sch->schib.pmcw.csense = 1; /* concurrent sense */
sch->schib.pmcw.ena = 0;
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1; /* multipath mode */
/* clean up possible residual cmf stuff */
sch->schib.pmcw.mme = 0;
sch->schib.pmcw.mbfc = 0;
sch->schib.pmcw.mbi = 0;
sch->schib.mba = 0;
CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
return 0;
out:
if (!cio_is_console(schid))
@ -647,7 +640,7 @@ do_IRQ (struct pt_regs *regs)
*/
if (tpi_info->adapter_IO == 1 &&
tpi_info->int_type == IO_INTERRUPT_TYPE) {
do_adapter_IO();
do_adapter_IO(tpi_info->isc);
continue;
}
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@ -706,9 +699,9 @@ void wait_cons_dev(void)
if (!console_subchannel_in_use)
return;
/* disable all but isc 1 (console device) */
/* disable all but the console isc */
__ctl_store (save_cr6, 6, 6);
cr6 = 0x40000000;
cr6 = 1UL << (31 - CONSOLE_ISC);
__ctl_load (cr6, 6, 6);
do {
@ -716,7 +709,7 @@ void wait_cons_dev(void)
if (!cio_tpi())
cpu_relax();
spin_lock(console_subchannel.lock);
} while (console_subchannel.schib.scsw.actl != 0);
} while (console_subchannel.schib.scsw.cmd.actl != 0);
/*
* restore previous isc value
*/
@ -761,7 +754,6 @@ cio_get_console_sch_no(void)
/* unlike in 2.4, we cannot autoprobe here, since
* the channel subsystem is not fully initialized.
* With some luck, the HWC console can take over */
printk(KERN_WARNING "cio: No ccw console found!\n");
return -1;
}
return console_irq;
@ -778,6 +770,7 @@ cio_probe_console(void)
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
console_subchannel_in_use = 0;
printk(KERN_WARNING "cio: No ccw console found!\n");
return ERR_PTR(-ENODEV);
}
memset(&console_subchannel, 0, sizeof(struct subchannel));
@ -790,15 +783,15 @@ cio_probe_console(void)
}
/*
* enable console I/O-interrupt subclass 1
* enable console I/O-interrupt subclass
*/
ctl_set_bit(6, 30);
console_subchannel.isc = 1;
console_subchannel.schib.pmcw.isc = 1;
isc_register(CONSOLE_ISC);
console_subchannel.schib.pmcw.isc = CONSOLE_ISC;
console_subchannel.schib.pmcw.intparm =
(u32)(addr_t)&console_subchannel;
ret = cio_modify(&console_subchannel);
if (ret) {
isc_unregister(CONSOLE_ISC);
console_subchannel_in_use = 0;
return ERR_PTR(ret);
}
@ -810,7 +803,7 @@ cio_release_console(void)
{
console_subchannel.schib.pmcw.intparm = 0;
cio_modify(&console_subchannel);
ctl_clear_bit(6, 24);
isc_unregister(CONSOLE_ISC);
console_subchannel_in_use = 0;
}
@ -864,7 +857,7 @@ static void udelay_reset(unsigned long usecs)
}
static int
__clear_subchannel_easy(struct subchannel_id schid)
__clear_io_subchannel_easy(struct subchannel_id schid)
{
int retry;
@ -883,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
return -EBUSY;
}
static void __clear_chsc_subchannel_easy(void)
{
/* It seems we can only wait for a bit here :/ */
udelay_reset(100);
}
static int pgm_check_occured;
static void cio_reset_pgm_check_handler(void)
@ -921,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
case -ENODEV:
break;
default: /* -EBUSY */
if (__clear_subchannel_easy(schid))
break; /* give up... */
switch (schib.pmcw.st) {
case SUBCHANNEL_TYPE_IO:
if (__clear_io_subchannel_easy(schid))
goto out; /* give up... */
break;
case SUBCHANNEL_TYPE_CHSC:
__clear_chsc_subchannel_easy();
break;
default:
/* No default clear strategy */
break;
}
stsch(schid, &schib);
__disable_subchannel_easy(schid, &schib);
}
out:
return 0;
}
@ -1068,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
iplinfo->is_qdio = schib.pmcw.qf;
return 0;
}
/**
* cio_tm_start_key - perform start function
* @sch: subchannel on which to perform the start function
* @tcw: transport-command word to be started
* @lpm: mask of paths to use
* @key: storage key to use for storage access
*
* Start the tcw on the given subchannel. Return zero on success, non-zero
* otherwise.
*/
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
int cc;
union orb *orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
orb->tm.intparm = (u32) (addr_t) sch;
orb->tm.key = key >> 4;
orb->tm.b = 1;
orb->tm.lpm = lpm ? lpm : sch->lpm;
orb->tm.tcw = (u32) (addr_t) tcw;
cc = ssch(sch->schid, orb);
switch (cc) {
case 0:
return 0;
case 1:
case 2:
return -EBUSY;
default:
return cio_start_handle_notoper(sch, lpm);
}
}
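A hedged caller sketch; the TCW is assumed to have been built elsewhere (for instance with the itcw helpers this merge introduces), and an lpm of 0 means "use sch->lpm":

rc = cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
if (rc == -EBUSY) {
	/* subchannel busy or status pending: back off and retry */
}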
/**
* cio_tm_intrg - perform interrogate function
* @sch: subchannel on which to perform the interrogate function
*
* If the specified subchannel is running in transport-mode, perform the
* interrogate function. Return zero on success, non-zero otherwie.
*/
int cio_tm_intrg(struct subchannel *sch)
{
int cc;
if (!to_io_private(sch)->orb.tm.b)
return -EINVAL;
cc = xsch(sch->schid);
switch (cc) {
case 0:
case 2:
return 0;
case 1:
return -EBUSY;
default:
return -ENODEV;
}
}
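For orientation, a hypothetical caller of the two new transport-mode primitives might look as follows. This is a sketch only, not part of the patch, assuming the subchannel lock is held and that tcw points to a fully initialized transport-command word (the tcw setup itself is elided).

/* Sketch: start a transport-mode request on a subchannel and
 * interrogate it if no interrupt arrives. */
static int example_tm_io(struct subchannel *sch, struct tcw *tcw)
{
	int rc;

	rc = cio_tm_start_key(sch, tcw, 0 /* 0: use sch->lpm */,
			      PAGE_DEFAULT_KEY);
	if (rc)
		return rc;	/* -EBUSY or channel not operational */
	/* ... later, if the request appears to hang: */
	return cio_tm_intrg(sch);
}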

View File

@ -3,9 +3,12 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <asm/chpid.h>
#include <asm/cio.h>
#include <asm/fcx.h>
#include <asm/schid.h>
#include "chsc.h"
#include "schid.h"
/*
* path management control word
@ -13,7 +16,7 @@
struct pmcw {
u32 intparm; /* interruption parameter */
u32 qf : 1; /* qdio facility */
u32 res0 : 1; /* reserved zeros */
u32 w : 1;
u32 isc : 3; /* interruption subclass */
u32 res5 : 3; /* reserved zeros */
u32 ena : 1; /* enabled */
@ -47,7 +50,7 @@ struct pmcw {
*/
struct schib {
struct pmcw pmcw; /* path management control word */
struct scsw scsw; /* subchannel status word */
union scsw scsw; /* subchannel status word */
__u64 mba; /* measurement block address */
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
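The schib change above swaps the fixed command-mode SCSW for a union covering both command and transport mode; the rest of the series reads it through the scsw_*() accessors. A simplified sketch of that shape, using hypothetical sketch_* names rather than the verbatim asm-s390 definitions:

/* Sketch only: the real definitions (union scsw, scsw_is_tm(),
 * scsw_actl(), ...) live in the asm-s390 headers. */
union scsw_sketch {
	struct cmd_scsw cmd;	/* command-mode SCSW */
	struct tm_scsw tm;	/* transport-mode SCSW */
};

static inline int sketch_scsw_is_tm(union scsw_sketch *scsw)
{
	/* assumed: a format/extension bit distinguishes the two modes */
	return scsw->tm.x == 1;
}

static inline u32 sketch_scsw_actl(union scsw_sketch *scsw)
{
	return sketch_scsw_is_tm(scsw) ? scsw->tm.actl : scsw->cmd.actl;
}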
@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
int cio_create_sch_lock(struct subchannel *);
void do_adapter_IO(void);
void do_adapter_IO(u8 isc);
void do_IRQ(struct pt_regs *);
/* Use with care. */

View File

@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev)
if (stsch(sch->schid, &sch->schib))
return -ENODEV;
if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) {
if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
/* Don't copy if a start function is in progress. */
if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) &&
(sch->schib.scsw.actl &
if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
(scsw_actl(&sch->schib.scsw) &
(SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
(!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)))
(!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
return -EBUSY;
}
cmb_data = cdev->private->cmb;
@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev)
free_pages((unsigned long)mem, get_order(size));
} else if (!mem) {
/* no luck */
printk(KERN_WARNING "cio: failed to allocate area "
"for measuring %d subchannels\n",
cmb_area.num_channels);
ret = -ENOMEM;
goto out;
} else {
@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev,
switch (val) {
case 0:
ret = disable_cmf(cdev);
if (ret)
dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
break;
case 1:
ret = enable_cmf(cdev);
if (ret && ret != -EBUSY)
dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
break;
}
@ -1344,8 +1337,7 @@ static int __init init_cmf(void)
* to basic mode.
*/
if (format == CMF_AUTODETECT) {
if (!css_characteristics_avail ||
!css_general_characteristics.ext_mb) {
if (!css_general_characteristics.ext_mb) {
format = CMF_BASIC;
} else {
format = CMF_EXTENDED;
@ -1365,8 +1357,6 @@ static int __init init_cmf(void)
cmbops = &cmbops_extended;
break;
default:
printk(KERN_ERR "cio: Invalid format %d for channel "
"measurement facility\n", format);
return 1;
}

View File

@ -2,8 +2,7 @@
* drivers/s390/cio/css.c
* driver for channel subsystem
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Copyright IBM Corp. 2002,2008
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
@ -14,7 +13,9 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <asm/isc.h>
#include "../s390mach.h"
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
@ -30,8 +31,6 @@ static int max_ssid = 0;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
int css_characteristics_avail = 0;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid)
kfree(sch);
return ERR_PTR(ret);
}
if (sch->st != SUBCHANNEL_TYPE_IO) {
/* For now we ignore all non-io subchannels. */
kfree(sch);
return ERR_PTR(-EINVAL);
}
/*
* Set intparm to subchannel address.
* This is fine even on 64bit since the subchannel is always located
* under 2G.
*/
sch->schib.pmcw.intparm = (u32)(addr_t)sch;
ret = cio_modify(sch);
if (ret) {
kfree(sch->lock);
kfree(sch);
return ERR_PTR(ret);
}
return sch;
}
@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch)
return ret;
}
/**
* css_sch_device_unregister - unregister a subchannel
* @sch: subchannel to be unregistered
*/
void css_sch_device_unregister(struct subchannel *sch)
{
mutex_lock(&sch->reg_mutex);
device_unregister(&sch->dev);
if (device_is_registered(&sch->dev))
device_unregister(&sch->dev);
mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch)
}
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "%01x\n", sch->st);
}
static DEVICE_ATTR(type, 0444, type_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "css:t%01X\n", sch->st);
}
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
NULL,
};
static struct attribute_group subch_attr_group = {
.attrs = subch_attrs,
};
static struct attribute_group *default_subch_attr_groups[] = {
&subch_attr_group,
NULL,
};
static int css_register_subchannel(struct subchannel *sch)
{
int ret;
@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch)
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
sch->dev.groups = subch_attr_groups;
sch->dev.groups = default_subch_attr_groups;
/*
* We don't want to generate uevents for I/O subchannels that don't
* have a working ccw device behind them since they will be
* unregistered before they can be used anyway, so we delay the add
* uevent until after device recognition was successful.
* Note that we suppress the uevent for all subchannel types;
* the subchannel driver can decide itself when it wants to inform
* userspace of its existence.
*/
if (!cio_is_console(sch->schid))
/* Console is special, no need to suppress. */
sch->dev.uevent_suppress = 1;
sch->dev.uevent_suppress = 1;
css_update_ssd_info(sch);
/* make it known to the system */
ret = css_sch_device_register(sch);
@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch)
sch->schid.ssid, sch->schid.sch_no, ret);
return ret;
}
if (!sch->driver) {
/*
* No driver matched. Generate the uevent now so that
* a fitting driver module may be loaded based on the
* modalias.
*/
sch->dev.uevent_suppress = 0;
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
return ret;
}
static int css_probe_device(struct subchannel_id schid)
int css_probe_device(struct subchannel_id schid)
{
int ret;
struct subchannel *sch;
@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib)
{
if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
return 0;
if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_get_subchannel_status(struct subchannel *sch)
{
struct schib schib;
if (stsch(sch->schid, &schib))
return CIO_GONE;
if (!css_sch_is_valid(&schib))
return CIO_GONE;
if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
return CIO_REVALIDATE;
if (!sch->lpm)
return CIO_NO_PATH;
return CIO_OPER;
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
int event, ret, disc;
unsigned long flags;
enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
spin_lock_irqsave(sch->lock, flags);
disc = device_is_disconnected(sch);
if (disc && slow) {
/* Disconnected devices are evaluated directly only.*/
spin_unlock_irqrestore(sch->lock, flags);
return 0;
}
/* No interrupt after machine check - kill pending timers. */
device_kill_pending_timer(sch);
if (!disc && !slow) {
/* Non-disconnected devices are evaluated on the slow path. */
spin_unlock_irqrestore(sch->lock, flags);
return -EAGAIN;
}
event = css_get_subchannel_status(sch);
CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
sch->schid.ssid, sch->schid.sch_no, event,
disc ? "disconnected" : "normal",
slow ? "slow" : "fast");
/* Analyze subchannel status. */
action = NONE;
switch (event) {
case CIO_NO_PATH:
if (disc) {
/* Check if paths have become available. */
action = REPROBE;
break;
}
/* fall through */
case CIO_GONE:
/* Prevent unwanted effects when opening lock. */
cio_disable_subchannel(sch);
device_set_disconnected(sch);
/* Ask driver what to do with device. */
action = UNREGISTER;
if (sch->driver && sch->driver->notify) {
spin_unlock_irqrestore(sch->lock, flags);
ret = sch->driver->notify(sch, event);
spin_lock_irqsave(sch->lock, flags);
if (ret)
action = NONE;
}
break;
case CIO_REVALIDATE:
/* Device will be removed, so no notify necessary. */
if (disc)
/* Reprobe because immediate unregister might block. */
action = REPROBE;
else
action = UNREGISTER_PROBE;
break;
case CIO_OPER:
if (disc)
/* Get device operational again. */
action = REPROBE;
break;
}
/* Perform action. */
ret = 0;
switch (action) {
case UNREGISTER:
case UNREGISTER_PROBE:
/* Unregister device (will use subchannel lock). */
spin_unlock_irqrestore(sch->lock, flags);
css_sch_device_unregister(sch);
spin_lock_irqsave(sch->lock, flags);
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
break;
case REPROBE:
device_trigger_reprobe(sch);
break;
default:
break;
}
spin_unlock_irqrestore(sch->lock, flags);
/* Probe if necessary. */
if (action == UNREGISTER_PROBE)
ret = css_probe_device(sch->schid);
return ret;
}
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
struct schib schib;
@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
return css_probe_device(schid);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
int ret = 0;
if (sch->driver) {
if (sch->driver->sch_event)
ret = sch->driver->sch_event(sch, slow);
else
dev_dbg(&sch->dev,
"Got subchannel machine check but "
"no sch_event handler provided.\n");
}
return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
struct subchannel *sch;
@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
* Called from the machine check handler for subchannel report words.
*/
void css_process_crw(int rsid1, int rsid2)
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct subchannel_id mchk_schid;
CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
rsid1, rsid2);
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
if (crw1)
CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
crw1->anc, crw1->erc, crw1->rsid);
init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = rsid1;
if (rsid2 != 0)
mchk_schid.ssid = (rsid2 >> 8) & 3;
mchk_schid.sch_no = crw0->rsid;
if (crw1)
mchk_schid.ssid = (crw1->rsid >> 8) & 3;
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
* or gone.
@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
if (css_characteristics_avail && css_general_characteristics.mcss) {
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
} else {
@ -795,8 +748,6 @@ init_channel_subsystem (void)
ret = chsc_determine_css_characteristics();
if (ret == -ENOMEM)
goto out; /* No need to continue. */
if (ret == 0)
css_characteristics_avail = 1;
ret = chsc_alloc_sei_area();
if (ret)
@ -806,6 +757,10 @@ init_channel_subsystem (void)
if (ret)
goto out;
ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
if (ret)
goto out;
if ((ret = bus_register(&css_bus_type)))
goto out;
@ -836,8 +791,7 @@ init_channel_subsystem (void)
ret = device_register(&css->device);
if (ret)
goto out_free_all;
if (css_characteristics_avail &&
css_chsc_characteristics.secm) {
if (css_chsc_characteristics.secm) {
ret = device_create_file(&css->device,
&dev_attr_cm_enable);
if (ret)
@ -852,7 +806,8 @@ init_channel_subsystem (void)
goto out_pseudo;
css_init_done = 1;
ctl_set_bit(6, 28);
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
@ -875,7 +830,7 @@ out_unregister:
i--;
css = channel_subsystems[i];
device_unregister(&css->pseudo_subchannel->dev);
if (css_characteristics_avail && css_chsc_characteristics.secm)
if (css_chsc_characteristics.secm)
device_remove_file(&css->device,
&dev_attr_cm_enable);
device_unregister(&css->device);
@ -883,6 +838,7 @@ out_unregister:
out_bus:
bus_unregister(&css_bus_type);
out:
s390_unregister_crw_handler(CRW_RSC_CSS);
chsc_free_sei_area();
kfree(slow_subchannel_set);
printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch)
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
/*
* find a driver for a subchannel. They identify by the subchannel
* type with the exception that the console subchannel driver has its own
* subchannel type although the device is an i/o subchannel
*/
static int
css_bus_match (struct device *dev, struct device_driver *drv)
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
if (sch->st == driver->subchannel_type)
return 1;
for (id = driver->subchannel_type; id->match_flags; id++) {
if (sch->st == id->type)
return 1;
}
return 0;
}
@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev)
sch->driver->shutdown(sch);
}
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct subchannel *sch = to_subchannel(dev);
int ret;
ret = add_uevent_var(env, "ST=%01X", sch->st);
if (ret)
return ret;
ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
return ret;
}
struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
.remove = css_remove,
.shutdown = css_shutdown,
.uevent = css_uevent,
};
/**
@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);

View File

@ -9,8 +9,7 @@
#include <asm/cio.h>
#include <asm/chpid.h>
#include "schid.h"
#include <asm/schid.h>
/*
* path grouping stuff
@ -58,20 +57,28 @@ struct pgid {
__u32 tod_high; /* high word TOD clock */
} __attribute__ ((packed));
/*
* A css driver handles all subchannels of one type.
* Currently, we only care about I/O subchannels (type 0), these
* have a ccw_device connected to them.
*/
struct subchannel;
struct chp_link;
/**
* struct css_driver - device driver for subchannels
* @owner: owning module
* @subchannel_type: subchannel type supported by this driver
* @drv: embedded device driver structure
* @irq: called on interrupts
* @chp_event: called for events affecting a channel path
* @sch_event: called for events affecting the subchannel
* @probe: function called on probe
* @remove: function called on remove
* @shutdown: called at device shutdown
* @name: name of the device driver
*/
struct css_driver {
struct module *owner;
unsigned int subchannel_type;
struct css_device_id *subchannel_type;
struct device_driver drv;
void (*irq)(struct subchannel *);
int (*notify)(struct subchannel *, int);
void (*verify)(struct subchannel *);
void (*termination)(struct subchannel *);
int (*chp_event)(struct subchannel *, struct chp_link *, int);
int (*sch_event)(struct subchannel *, int);
int (*probe)(struct subchannel *);
int (*remove)(struct subchannel *);
void (*shutdown)(struct subchannel *);
@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
extern void css_sch_device_unregister(struct subchannel *);
extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
extern int css_probe_device(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data);
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
extern void css_process_crw(int, int);
extern void css_reiterate_subchannels(void);
void css_update_ssd_info(struct subchannel *sch);
@ -121,20 +128,6 @@ struct channel_subsystem {
extern struct bus_type css_bus_type;
extern struct channel_subsystem *channel_subsystems[];
/* Some helper functions for disconnected state. */
int device_is_disconnected(struct subchannel *);
void device_set_disconnected(struct subchannel *);
void device_trigger_reprobe(struct subchannel *);
/* Helper functions for vary on/off. */
int device_is_online(struct subchannel *);
void device_kill_io(struct subchannel *);
void device_set_intretry(struct subchannel *sch);
int device_trigger_verify(struct subchannel *sch);
/* Machine check helper function. */
void device_kill_pending_timer(struct subchannel *);
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *);
extern struct workqueue_struct *slow_path_wq;
void css_wait_for_slow_path(void);
extern struct attribute_group *subch_attr_groups[];
#endif
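To illustrate the reworked css_driver interface documented above, here is a hypothetical minimal driver for an imaginary subchannel type 0x3. It is not part of the patch; it exists only to show the new css_device_id matching and the sch_event callback.

static struct css_device_id example_ids[] = {
	{ .match_flags = 0x1, .type = 0x3, },
	{ /* end of list */ },
};

static int example_sch_event(struct subchannel *sch, int slow)
{
	/* Re-evaluate the subchannel after a machine check. */
	return 0;
}

static struct css_driver example_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = example_ids,
	.name = "example_subchannel",
	.sch_event = example_sch_event,
};

/* Registered e.g. from module init: css_driver_register(&example_driver); */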

View File

@ -2,8 +2,7 @@
* drivers/s390/cio/device.c
* bus driver for ccw devices
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Copyright IBM Corp. 2002,2008
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
@ -23,7 +22,9 @@
#include <asm/cio.h>
#include <asm/param.h> /* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>
#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
@ -125,19 +126,24 @@ struct bus_type ccw_bus_type;
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static int io_subchannel_notify(struct subchannel *, int);
static void io_subchannel_verify(struct subchannel *);
static void io_subchannel_ioterm(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);
static struct css_driver io_subchannel_driver = {
.owner = THIS_MODULE,
.subchannel_type = SUBCHANNEL_TYPE_IO,
.subchannel_type = io_subchannel_ids,
.name = "io_subchannel",
.irq = io_subchannel_irq,
.notify = io_subchannel_notify,
.verify = io_subchannel_verify,
.termination = io_subchannel_ioterm,
.sch_event = io_subchannel_sch_event,
.chp_event = io_subchannel_chp_event,
.probe = io_subchannel_probe,
.remove = io_subchannel_remove,
.shutdown = io_subchannel_shutdown,
@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
ccw_device_set_online(cdev);
return 0;
}
static void online_store_handle_online(struct ccw_device *cdev, int force)
static int online_store_handle_online(struct ccw_device *cdev, int force)
{
int ret;
ret = online_store_recog_and_online(cdev);
if (ret)
return;
return ret;
if (force && cdev->private->state == DEV_STATE_BOXED) {
ret = ccw_device_stlck(cdev);
if (ret) {
dev_warn(&cdev->dev,
"ccw_device_stlck returned %d!\n", ret);
return;
}
if (ret)
return ret;
if (cdev->id.cu_type == 0)
cdev->private->state = DEV_STATE_NOT_OPER;
online_store_recog_and_online(cdev);
}
return 0;
}
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
ret = count;
break;
case 1:
online_store_handle_online(cdev, force);
ret = count;
ret = online_store_handle_online(cdev, force);
if (!ret)
ret = count;
break;
default:
ret = -EINVAL;
@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static struct attribute * subch_attrs[] = {
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
NULL,
};
static struct attribute_group subch_attr_group = {
.attrs = subch_attrs,
};
struct attribute_group *subch_attr_groups[] = {
&subch_attr_group,
NULL,
static struct attribute_group io_subchannel_attr_group = {
.attrs = io_subchannel_attrs,
};
static struct attribute * ccwdev_attrs[] = {
@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch,
sch_set_cdev(sch, cdev);
cdev->private->schid = sch->schid;
cdev->ccwlock = sch->lock;
device_trigger_reprobe(sch);
ccw_device_trigger_reprobe(cdev);
spin_unlock_irq(sch->lock);
}
@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
struct ccw_device_private *priv;
sch_set_cdev(sch, cdev);
sch->driver = &io_subchannel_driver;
cdev->ccwlock = sch->lock;
/* Init private data. */
@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
static int
io_subchannel_probe (struct subchannel *sch)
static void io_subchannel_init_fields(struct subchannel *sch)
{
if (cio_is_console(sch->schid))
sch->opm = 0xff;
else
sch->opm = chp_get_sch_opm(sch);
sch->lpm = sch->schib.pmcw.pam & sch->opm;
sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
" - PIM = %02X, PAM = %02X, POM = %02X\n",
sch->schib.pmcw.dev, sch->schid.ssid,
sch->schid.sch_no, sch->schib.pmcw.pim,
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
/* Initially set up some fields in the pmcw. */
sch->schib.pmcw.ena = 0;
sch->schib.pmcw.csense = 1; /* concurrent sense */
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1; /* multipath mode */
/* clean up possible residual cmf stuff */
sch->schib.pmcw.mme = 0;
sch->schib.pmcw.mbfc = 0;
sch->schib.pmcw.mbi = 0;
sch->schib.mba = 0;
}
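The multipath check above uses the classic power-of-two test: x & (x - 1) clears the lowest set bit, so the expression is non-zero exactly when more than one path bit is set in lpm. For example:

/* lpm = 0x80 (one path):   0x80 & 0x7f == 0x00 -> single path    */
/* lpm = 0xc0 (two paths):  0xc0 & 0xbf == 0x80 -> multipath mode */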
static int io_subchannel_probe(struct subchannel *sch)
{
struct ccw_device *cdev;
int rc;
@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch)
cdev = sch_get_cdev(sch);
if (cdev) {
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group);
if (rc)
CIO_MSG_EVENT(0, "Failed to create io subchannel "
"attributes for subchannel "
"0.%x.%04x (rc=%d)\n",
sch->schid.ssid, sch->schid.sch_no, rc);
/*
* This subchannel already has an associated ccw_device.
* Register it and exit. This happens for all early
* device, e.g. the console.
* Throw the delayed uevent for the subchannel, register
* the ccw_device and exit. This happens for all early
* devices, e.g. the console.
*/
sch->dev.uevent_suppress = 0;
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
ccw_device_register(cdev);
@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch)
get_device(&cdev->dev);
return 0;
}
io_subchannel_init_fields(sch);
/*
* First check if a fitting device may be found amongst the
* disconnected devices or in the orphanage.
*/
dev_id.devno = sch->schib.pmcw.dev;
dev_id.ssid = sch->schid.ssid;
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group);
if (rc)
return rc;
/* Allocate I/O subchannel private data. */
sch->private = kzalloc(sizeof(struct io_subchannel_private),
GFP_KERNEL | GFP_DMA);
if (!sch->private)
return -ENOMEM;
if (!sch->private) {
rc = -ENOMEM;
goto out_err;
}
cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
if (!cdev)
cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch)
}
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
kfree(sch->private);
return PTR_ERR(cdev);
rc = PTR_ERR(cdev);
goto out_err;
}
rc = io_subchannel_recog(cdev, sch);
if (rc) {
@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch)
spin_unlock_irqrestore(sch->lock, flags);
if (cdev->dev.release)
cdev->dev.release(&cdev->dev);
kfree(sch->private);
goto out_err;
}
return 0;
out_err:
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return rc;
}
@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch)
ccw_device_unregister(cdev);
put_device(&cdev->dev);
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
}
@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event)
cdev = sch_get_cdev(sch);
if (!cdev)
return 0;
if (!cdev->drv)
return 0;
if (!cdev->online)
return 0;
return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
return ccw_device_notify(cdev, event);
}
static void io_subchannel_verify(struct subchannel *sch)
@ -1240,20 +1280,94 @@ static void io_subchannel_verify(struct subchannel *sch)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}
static void io_subchannel_ioterm(struct subchannel *sch)
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
int cc;
cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
return 1;
return 0;
}
static void terminate_internal_io(struct subchannel *sch,
struct ccw_device *cdev)
{
if (cio_clear(sch)) {
/* Recheck device in case clear failed. */
sch->lpm = 0;
if (cdev->online)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
else
css_schedule_eval(sch->schid);
return;
}
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
/* Request retry of internal operation. */
cdev->private->flags.intretry = 1;
/* Call handler. */
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
}
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev)
return;
/* Internal I/O will be retried by the interrupt handler. */
if (cdev->private->flags.intretry)
return;
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
if (check_for_io_on_path(sch, mask)) {
if (cdev->private->state == DEV_STATE_ONLINE)
ccw_device_kill_io(cdev);
else {
terminate_internal_io(sch, cdev);
/* Re-start path verification. */
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}
} else
/* trigger path verification. */
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}
static int io_subchannel_chp_event(struct subchannel *sch,
struct chp_link *link, int event)
{
int mask;
mask = chp_ssd_get_mask(&sch->ssd_info, link);
if (!mask)
return 0;
switch (event) {
case CHP_VARY_OFF:
sch->opm &= ~mask;
sch->lpm &= ~mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_VARY_ON:
sch->opm |= mask;
sch->lpm |= mask;
io_subchannel_verify(sch);
break;
case CHP_OFFLINE:
if (stsch(sch->schid, &sch->schib))
return -ENXIO;
if (!css_sch_is_valid(&sch->schib))
return -ENODEV;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_ONLINE:
if (stsch(sch->schid, &sch->schib))
return -ENXIO;
sch->lpm |= mask & sch->opm;
io_subchannel_verify(sch);
break;
}
return 0;
}
static void
@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch)
cio_disable_subchannel(sch);
}
static int io_subchannel_get_status(struct subchannel *sch)
{
struct schib schib;
if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
return CIO_GONE;
if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
return CIO_REVALIDATE;
if (!sch->lpm)
return CIO_NO_PATH;
return CIO_OPER;
}
static int device_is_disconnected(struct ccw_device *cdev)
{
if (!cdev)
return 0;
return (cdev->private->state == DEV_STATE_DISCONNECTED ||
cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}
static int recovery_check(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
int *redo = data;
spin_lock_irq(cdev->ccwlock);
switch (cdev->private->state) {
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
*redo = 1;
break;
case DEV_STATE_DISCONNECTED_SENSE_ID:
*redo = 1;
break;
}
spin_unlock_irq(cdev->ccwlock);
return 0;
}
static void recovery_work_func(struct work_struct *unused)
{
int redo = 0;
bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
if (redo) {
spin_lock_irq(&recovery_lock);
if (!timer_pending(&recovery_timer)) {
if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
recovery_phase++;
mod_timer(&recovery_timer, jiffies +
recovery_delay[recovery_phase] * HZ);
}
spin_unlock_irq(&recovery_lock);
} else
CIO_MSG_EVENT(4, "recovery: end\n");
}
static DECLARE_WORK(recovery_work, recovery_work_func);
static void recovery_func(unsigned long data)
{
/*
* We can't do our recovery in softirq context and it's not
* performance critical, so we schedule it.
*/
schedule_work(&recovery_work);
}
static void ccw_device_schedule_recovery(void)
{
unsigned long flags;
CIO_MSG_EVENT(4, "recovery: schedule\n");
spin_lock_irqsave(&recovery_lock, flags);
if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
recovery_phase = 0;
mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
}
spin_unlock_irqrestore(&recovery_lock, flags);
}
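The recovery machinery above relies on a lock, a timer, a phase counter and a delay table defined elsewhere in device.c; their assumed shape is roughly the following (values illustrative only):

static DEFINE_SPINLOCK(recovery_lock);
static struct timer_list recovery_timer;	/* runs recovery_func() */
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };	/* seconds */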
static void device_set_disconnected(struct ccw_device *cdev)
{
if (!cdev)
return;
ccw_device_set_timeout(cdev, 0);
cdev->private->flags.fake_irb = 0;
cdev->private->state = DEV_STATE_DISCONNECTED;
if (cdev->online)
ccw_device_schedule_recovery();
}
static int io_subchannel_sch_event(struct subchannel *sch, int slow)
{
int event, ret, disc;
unsigned long flags;
enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
struct ccw_device *cdev;
spin_lock_irqsave(sch->lock, flags);
cdev = sch_get_cdev(sch);
disc = device_is_disconnected(cdev);
if (disc && slow) {
/* Disconnected devices are evaluated directly only.*/
spin_unlock_irqrestore(sch->lock, flags);
return 0;
}
/* No interrupt after machine check - kill pending timers. */
if (cdev)
ccw_device_set_timeout(cdev, 0);
if (!disc && !slow) {
/* Non-disconnected devices are evaluated on the slow path. */
spin_unlock_irqrestore(sch->lock, flags);
return -EAGAIN;
}
event = io_subchannel_get_status(sch);
CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
sch->schid.ssid, sch->schid.sch_no, event,
disc ? "disconnected" : "normal",
slow ? "slow" : "fast");
/* Analyze subchannel status. */
action = NONE;
switch (event) {
case CIO_NO_PATH:
if (disc) {
/* Check if paths have become available. */
action = REPROBE;
break;
}
/* fall through */
case CIO_GONE:
/* Prevent unwanted effects when opening lock. */
cio_disable_subchannel(sch);
device_set_disconnected(cdev);
/* Ask driver what to do with device. */
action = UNREGISTER;
spin_unlock_irqrestore(sch->lock, flags);
ret = io_subchannel_notify(sch, event);
spin_lock_irqsave(sch->lock, flags);
if (ret)
action = NONE;
break;
case CIO_REVALIDATE:
/* Device will be removed, so no notify necessary. */
if (disc)
/* Reprobe because immediate unregister might block. */
action = REPROBE;
else
action = UNREGISTER_PROBE;
break;
case CIO_OPER:
if (disc)
/* Get device operational again. */
action = REPROBE;
break;
}
/* Perform action. */
ret = 0;
switch (action) {
case UNREGISTER:
case UNREGISTER_PROBE:
/* Unregister device (will use subchannel lock). */
spin_unlock_irqrestore(sch->lock, flags);
css_sch_device_unregister(sch);
spin_lock_irqsave(sch->lock, flags);
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
break;
case REPROBE:
ccw_device_trigger_reprobe(cdev);
break;
default:
break;
}
spin_unlock_irqrestore(sch->lock, flags);
/* Probe if necessary. */
if (action == UNREGISTER_PROBE)
ret = css_probe_device(sch->schid);
return ret;
}
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void)
return &ccw_console_lock;
}
static int
ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
static int ccw_device_console_enable(struct ccw_device *cdev,
struct subchannel *sch)
{
int rc;
/* Attach subchannel private data. */
sch->private = cio_get_console_priv();
memset(sch->private, 0, sizeof(struct io_subchannel_private));
io_subchannel_init_fields(sch);
sch->driver = &io_subchannel_driver;
/* Initialize the ccw_device structure. */
cdev->dev.parent= &sch->dev;
rc = io_subchannel_recog(cdev, sch);
@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
return sch->schid;
}
static int recovery_check(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
int *redo = data;
spin_lock_irq(cdev->ccwlock);
switch (cdev->private->state) {
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
*redo = 1;
break;
case DEV_STATE_DISCONNECTED_SENSE_ID:
*redo = 1;
break;
}
spin_unlock_irq(cdev->ccwlock);
return 0;
}
static void recovery_work_func(struct work_struct *unused)
{
int redo = 0;
bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
if (redo) {
spin_lock_irq(&recovery_lock);
if (!timer_pending(&recovery_timer)) {
if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
recovery_phase++;
mod_timer(&recovery_timer, jiffies +
recovery_delay[recovery_phase] * HZ);
}
spin_unlock_irq(&recovery_lock);
} else
CIO_MSG_EVENT(4, "recovery: end\n");
}
static DECLARE_WORK(recovery_work, recovery_work_func);
static void recovery_func(unsigned long data)
{
/*
* We can't do our recovery in softirq context and it's not
* performance critical, so we schedule it.
*/
schedule_work(&recovery_work);
}
void ccw_device_schedule_recovery(void)
{
unsigned long flags;
CIO_MSG_EVENT(4, "recovery: schedule\n");
spin_lock_irqsave(&recovery_lock, flags);
if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
recovery_phase = 0;
mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
}
spin_unlock_irqrestore(&recovery_lock, flags);
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);

View File

@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_schedule_recovery(void);
/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *);
int ccw_device_stlck(struct ccw_device *);
/* Helper function for machine check handling. */
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);

View File

@ -2,8 +2,7 @@
* drivers/s390/cio/device_fsm.c
* finite state machine for device handling
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
* Copyright IBM Corp. 2002,2008
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
@ -27,65 +26,6 @@
static int timeout_log_enabled;
int
device_is_online(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev)
return 0;
return (cdev->private->state == DEV_STATE_ONLINE);
}
int
device_is_disconnected(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev)
return 0;
return (cdev->private->state == DEV_STATE_DISCONNECTED ||
cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}
void
device_set_disconnected(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev)
return;
ccw_device_set_timeout(cdev, 0);
cdev->private->flags.fake_irb = 0;
cdev->private->state = DEV_STATE_DISCONNECTED;
if (cdev->online)
ccw_device_schedule_recovery();
}
void device_set_intretry(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev)
return;
cdev->private->flags.intretry = 1;
}
int device_trigger_verify(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev || !cdev->online)
return -EINVAL;
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
return 0;
}
static int __init ccw_timeout_log_setup(char *unused)
{
timeout_log_enabled = 1;
@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev)
struct schib schib;
struct subchannel *sch;
struct io_subchannel_private *private;
union orb *orb;
int cc;
sch = to_subchannel(cdev->dev.parent);
private = to_io_private(sch);
orb = &private->orb;
cc = stsch(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
"device information:\n", get_clock());
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
&private->orb, sizeof(private->orb), 0);
orb, sizeof(*orb), 0);
printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
"vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
(void *)(addr_t)private->orb.cpa == cdev->private->iccws)
printk(KERN_WARNING "cio: last channel program (intern):\n");
else
printk(KERN_WARNING "cio: last channel program:\n");
if (orb->tm.b) {
printk(KERN_WARNING "cio: orb indicates transport mode\n");
printk(KERN_WARNING "cio: last tcw:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
(void *)(addr_t)orb->tm.tcw,
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
(void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
else
printk(KERN_WARNING "cio: last channel program:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
(void *)(addr_t)private->orb.cpa,
sizeof(struct ccw1), 0);
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
(void *)(addr_t)orb->cmd.cpa,
sizeof(struct ccw1), 0);
}
printk(KERN_WARNING "cio: ccw device state: %d\n",
cdev->private->state);
printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
add_timer(&cdev->private->timer);
}
/* Kill any pending timers after machine check. */
void
device_kill_pending_timer(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev)
return;
ccw_device_set_timeout(cdev, 0);
}
/*
* Cancel running i/o. This is called repeatedly since halt/clear are
* asynchronous operations. We do one try with cio_cancel, two tries
@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
/* Not operational -> done. */
return 0;
/* Stage 1: cancel io. */
if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
ret = cio_cancel(sch);
if (ret != -EINVAL)
return ret;
/* cancel io unsuccessful. From now on it is asynchronous. */
if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
if (!scsw_is_tm(&sch->schib.scsw)) {
ret = cio_cancel(sch);
if (ret != -EINVAL)
return ret;
}
/* cancel io unsuccessful or not applicable (transport mode).
* Continue with asynchronous instructions. */
cdev->private->iretry = 3; /* 3 halt retries. */
}
if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
/* Stage 2: halt io. */
if (cdev->private->iretry) {
cdev->private->iretry--;
@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
}
}
int ccw_device_notify(struct ccw_device *cdev, int event)
{
if (!cdev->drv)
return 0;
if (!cdev->online)
return 0;
return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
}
static void
ccw_device_oper_notify(struct work_struct *work)
{
struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
unsigned long flags;
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
spin_lock_irqsave(cdev->ccwlock, flags);
sch = to_subchannel(cdev->dev.parent);
if (sch->driver && sch->driver->notify) {
spin_unlock_irqrestore(cdev->ccwlock, flags);
ret = sch->driver->notify(sch, CIO_OPER);
spin_lock_irqsave(cdev->ccwlock, flags);
} else
ret = 0;
ret = ccw_device_notify(cdev, CIO_OPER);
if (ret) {
/* Reenable channel measurements, if needed. */
spin_unlock_irqrestore(cdev->ccwlock, flags);
cmf_reenable(cdev);
spin_lock_irqsave(cdev->ccwlock, flags);
wake_up(&cdev->private->wait_q);
}
spin_unlock_irqrestore(cdev->ccwlock, flags);
if (!ret)
} else
/* Driver doesn't want device back. */
ccw_device_do_unreg_rereg(work);
}
@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
memset(&cdev->private->irb, 0, sizeof(struct irb));
cdev->private->irb.scsw.cc = 1;
cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
cdev->private->irb.scsw.cmd.cc = 1;
cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
cdev->private->irb.scsw.cmd.stctl =
SCSW_STCTL_STATUS_PEND;
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE) {
if (sch->schib.scsw.actl != 0)
return -EBUSY;
return -EINVAL;
}
if (sch->schib.scsw.actl != 0)
if (scsw_actl(&sch->schib.scsw) != 0)
return -EBUSY;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
/* Are we doing path grouping? */
if (!cdev->private->options.pgroup) {
/* No, set state offline immediately. */
@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
*/
stsch(sch->schid, &sch->schib);
if (sch->schib.scsw.actl != 0 ||
(sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
(cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
if (scsw_actl(&sch->schib.scsw) != 0 ||
(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
(scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
* to the device driver. Can't do path verification now,
@ -823,13 +760,13 @@ static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
int is_cmd;
irb = (struct irb *) __LC_IRB;
is_cmd = !scsw_is_tm(&irb->scsw);
/* Check for unsolicited interrupt. */
if ((irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
&& (!irb->scsw.cc)) {
if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
if (!scsw_is_solicited(&irb->scsw)) {
if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
!irb->esw.esw0.erw.cons) {
/* Unit check but no sense data. Need basic sense. */
if (ccw_device_do_sense(cdev, irb) != 0)
@ -848,7 +785,7 @@ call_handler_unsol:
}
/* Accumulate status and find out if a basic sense is needed. */
ccw_device_accumulate_irb(cdev, irb);
if (cdev->private->flags.dosense) {
if (is_cmd && cdev->private->flags.dosense) {
if (ccw_device_do_sense(cdev, irb) == 0) {
cdev->private->state = DEV_STATE_W4SENSE;
}
@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
irb = (struct irb *) __LC_IRB;
/* Check for unsolicited interrupt. */
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (irb->scsw.cc == 1)
if (scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (scsw_cc(&irb->scsw) == 1)
/* Basic sense hasn't started. Try again. */
ccw_device_do_sense(cdev, irb);
else {
@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
* only deliver the halt/clear interrupt to the device driver as if it
* had killed the original request.
*/
if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
/* Retry Basic Sense if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
ERR_PTR(-EIO));
}
void device_kill_io(struct subchannel *sch)
void ccw_device_kill_io(struct ccw_device *cdev)
{
int ret;
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
case DEV_EVENT_INTERRUPT:
irb = (struct irb *) __LC_IRB;
/* Check for unsolicited interrupt. */
if ((irb->scsw.stctl ==
if ((scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
(!irb->scsw.cc))
(!scsw_cc(&irb->scsw)))
/* FIXME: we should restart stlck here, but this
* is extremely unlikely ... */
goto out_wakeup;
@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
ccw_device_sense_id_start(cdev);
}
void
device_trigger_reprobe(struct subchannel *sch)
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
struct ccw_device *cdev;
struct subchannel *sch;
cdev = sch_get_cdev(sch);
if (!cdev)
return;
if (cdev->private->state != DEV_STATE_DISCONNECTED)
return;
sch = to_subchannel(cdev->dev.parent);
/* Update some values. */
if (stsch(sch->schid, &sch->schib))
return;
@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch)
sch->schib.pmcw.ena = 0;
if ((sch->lpm & (sch->lpm - 1)) != 0)
sch->schib.pmcw.mp = 1;
sch->schib.pmcw.intparm = (u32)(addr_t)sch;
/* We should also update ssd info, but this has to wait. */
/* Check if this is another device which appeared on the same sch. */
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {

View File

@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
irb = &cdev->private->irb;
/* Check the error cases. */
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry Sense ID if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
if (irb->scsw.cmd.cc == 3) {
u8 lpm;
lpm = to_io_private(sch)->orb.lpm;
lpm = to_io_private(sch)->orb.cmd.lpm;
if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
"on subchannel 0.%x.%04x is "
@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
}
/* Did we get a proper answer ? */
if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
cdev->private->senseid.reserved == 0xFF) {
if (irb->scsw.count < sizeof(struct senseid) - 8)
if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
cdev->private->flags.esid = 1;
return 0; /* Success */
}
@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
"subchannel 0.%x.%04x returns status %02X%02X\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no,
irb->scsw.dstat, irb->scsw.cstat);
irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
return -EAGAIN;
}
@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
irb = (struct irb *) __LC_IRB;
/* Retry sense id, if needed. */
if (irb->scsw.stctl ==
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if ((irb->scsw.cc == 1) || !irb->scsw.actl) {
if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
ret = __ccw_device_sense_id_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);

View File

@ -17,6 +17,7 @@
#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>
#include <asm/fcx.h>
#include "cio.h"
#include "cio_debug.h"
@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EBUSY;
}
if (cdev->private->state != DEV_STATE_ONLINE ||
((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
!(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
cdev->private->flags.doverify)
return -EBUSY;
ret = cio_set_options (sch, flags);
@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev)
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
!(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
return -EINVAL;
return cio_resume(sch);
}
@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev)
* - fast notification was requested (primary status)
* - unsolicited interrupts
*/
stctl = cdev->private->irb.scsw.stctl;
stctl = scsw_stctl(&cdev->private->irb.scsw);
ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
(stctl == SCSW_STCTL_STATUS_PEND);
@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev)
cio_disable_subchannel(sch); //FIXME: return code?
goto out_unlock;
}
cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
spin_unlock_irqrestore(sch->lock, flags);
wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
wait_event(cdev->private->wait_q,
cdev->private->irb.scsw.cmd.actl == 0);
spin_lock_irqsave(sch->lock, flags);
cio_disable_subchannel(sch); //FIXME: return code?
if ((cdev->private->irb.scsw.dstat !=
if ((cdev->private->irb.scsw.cmd.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
(cdev->private->irb.scsw.cstat != 0))
(cdev->private->irb.scsw.cmd.cstat != 0))
ret = -EIO;
/* Clear irb. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
}
EXPORT_SYMBOL(ccw_device_get_id);
/**
* ccw_device_tm_start_key - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @key: storage key to use for storage access
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, u8 key)
{
struct subchannel *sch;
int rc;
sch = to_subchannel(cdev->dev.parent);
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
/* Adjust requested path mask to excluded varied off paths. */
if (lpm) {
lpm &= sch->opm;
if (lpm == 0)
return -EACCES;
}
rc = cio_tm_start_key(sch, tcw, lpm, key);
if (rc == 0)
cdev->private->intparm = intparm;
return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_key);
/**
* ccw_device_tm_start_timeout_key - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @key: storage key to use for storage access
* @expires: time span in jiffies after which to abort request
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, u8 key,
int expires)
{
int ret;
ccw_device_set_timeout(cdev, expires);
ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
if (ret != 0)
ccw_device_set_timeout(cdev, 0);
return ret;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
/**
* ccw_device_tm_start - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm)
{
return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL(ccw_device_tm_start);
/**
* ccw_device_tm_start_timeout - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @expires: time span in jiffies after which to abort request
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, int expires)
{
return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
PAGE_DEFAULT_KEY, expires);
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
/**
* ccw_device_tm_intrg - perform interrogate function
* @cdev: ccw device on which to perform the interrogate function
*
* Perform an interrogate function on the given ccw device. Return zero on
* success, non-zero otherwise.
*/
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
if (!scsw_is_tm(&sch->schib.scsw) ||
!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
return -EINVAL;
return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
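A hypothetical user of the new ccw_device_tm_* interface might look like this; a sketch only, assuming tcw points to a fully built transport-command word and that the caller holds cdev->ccwlock, as for ccw_device_start().

static int example_start_tm(struct ccw_device *cdev, struct tcw *tcw)
{
	/*
	 * 0xcafe is an arbitrary intparm cookie handed back to the
	 * interrupt handler; lpm 0 selects all operational paths;
	 * the request is aborted after ten seconds.
	 */
	return ccw_device_tm_start_timeout(cdev, tcw, 0xcafe, 0, 10 * HZ);
}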
// FIXME: these have to go:
int

View File

@ -28,13 +28,13 @@
* Helper function called from interrupt context to decide whether an
* operation should be tried again.
*/
static int __ccw_device_should_retry(struct scsw *scsw)
static int __ccw_device_should_retry(union scsw *scsw)
{
/* CC is only valid if start function bit is set. */
if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1)
if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
return 1;
/* No more activity. For sense and set PGID we stubbornly try again. */
if (!scsw->actl)
if (!scsw->cmd.actl)
return 1;
return 0;
}
@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry Sense PGID if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
if (irb->scsw.cmd.cc == 3) {
u8 lpm;
lpm = to_io_private(sch)->orb.lpm;
lpm = to_io_private(sch)->orb.cmd.lpm;
CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
irb = (struct irb *) __LC_IRB;
if (irb->scsw.stctl ==
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (__ccw_device_should_retry(&irb->scsw)) {
ret = __ccw_device_sense_pgid_start(cdev);
@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry Set PGID if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
irb->ecw[6], irb->ecw[7]);
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
if (irb->scsw.cmd.cc == 3) {
CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry NOP if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
}
return -ETIME;
}
if (irb->scsw.cc == 3) {
if (irb->scsw.cmd.cc == 3) {
CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
irb = (struct irb *) __LC_IRB;
if (irb->scsw.stctl ==
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (__ccw_device_should_retry(&irb->scsw))
__ccw_device_verify_start(cdev);
@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
irb = (struct irb *) __LC_IRB;
if (irb->scsw.stctl ==
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (__ccw_device_should_retry(&irb->scsw))
__ccw_device_disband_start(cdev);

drivers/s390/cio/device_status.c

@ -29,9 +29,11 @@
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK |
SCHN_STAT_INTF_CTRL_CHK)))
char dbf_text[15];
if (!scsw_is_valid_cstat(&irb->scsw) ||
!(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
return;
CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
"received"
@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
": %02X sch_stat : %02X\n",
cdev->private->dev_id.devno, cdev->private->schid.ssid,
cdev->private->schid.sch_no,
irb->scsw.dstat, irb->scsw.cstat);
if (irb->scsw.cc != 3) {
char dbf_text[15];
sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof (struct irb));
}
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}
/*
@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
* are conditions that have to be met for the extended control
* bit to have meaning. Sick.
*/
cdev->private->irb.scsw.ectl = 0;
if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS))
cdev->private->irb.scsw.ectl = irb->scsw.ectl;
cdev->private->irb.scsw.cmd.ectl = 0;
if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
/* Check if extended control word is valid. */
if (!cdev->private->irb.scsw.ectl)
if (!cdev->private->irb.scsw.cmd.ectl)
return;
/* Copy concurrent sense / model dependent information. */
memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
static int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
if (!irb->scsw.cmd.eswf &&
(irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
return 0;
if (irb->scsw.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
!(irb->scsw.actl & SCSW_ACTL_SUSPENDED))
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
!(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
return 0;
return 1;
}
@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
/* Copy subchannel logout information if esw is of format 0. */
if (irb->scsw.eswf) {
if (irb->scsw.cmd.eswf) {
cdev_sublog = &cdev_irb->esw.esw0.sublog;
sublog = &irb->esw.esw0.sublog;
/* Copy extended status flags. */
@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
* Copy fields that have a meaning for channel data check
* channel control check and interface control check.
*/
if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK |
SCHN_STAT_INTF_CTRL_CHK)) {
/* Copy ancillary report bit. */
@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
/* Copy i/o-error alert. */
cdev_sublog->ioerr = sublog->ioerr;
/* Copy channel path timeout bit. */
if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK)
if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
/* Copy failing storage address validity flag. */
cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
* If not, the remaining bits have no meaning and we must ignore them.
* The esw is not meaningful either...
*/
if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
return;
/* Check for channel checks and interface control checks. */
ccw_device_msg_control_check(cdev, irb);
/* Check for path not operational. */
if (irb->scsw.pno && irb->scsw.fctl != 0 &&
(!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
(irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
ccw_device_path_notoper(cdev);
/* No irb accumulation for transport mode irbs. */
if (scsw_is_tm(&irb->scsw)) {
memcpy(&cdev->private->irb, irb, sizeof(struct irb));
return;
}
/*
* Don't accumulate unsolicited interrupts.
*/
if ((irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
(!irb->scsw.cc))
if (!scsw_is_solicited(&irb->scsw))
return;
cdev_irb = &cdev->private->irb;
@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
* status at the subchannel has been cleared and we must not pass
* intermediate accumulated status to the device driver.
*/
if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC)
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Copy bits which are valid only for the start function. */
if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) {
if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
/* Copy key. */
cdev_irb->scsw.key = irb->scsw.key;
cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
/* Copy suspend control bit. */
cdev_irb->scsw.sctl = irb->scsw.sctl;
cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
/* Accumulate deferred condition code. */
cdev_irb->scsw.cc |= irb->scsw.cc;
cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
/* Copy ccw format bit. */
cdev_irb->scsw.fmt = irb->scsw.fmt;
cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
/* Copy prefetch bit. */
cdev_irb->scsw.pfch = irb->scsw.pfch;
cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
/* Copy initial-status-interruption-control. */
cdev_irb->scsw.isic = irb->scsw.isic;
cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
/* Copy address limit checking control. */
cdev_irb->scsw.alcc = irb->scsw.alcc;
cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
/* Copy suppress suspend bit. */
cdev_irb->scsw.ssi = irb->scsw.ssi;
cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
}
/* Take care of the extended control bit and extended control word. */
ccw_device_accumulate_ecw(cdev, irb);
/* Accumulate function control. */
cdev_irb->scsw.fctl |= irb->scsw.fctl;
cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
/* Copy activity control. */
cdev_irb->scsw.actl= irb->scsw.actl;
cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
/* Accumulate status control. */
cdev_irb->scsw.stctl |= irb->scsw.stctl;
cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
/*
* Copy ccw address if it is valid. This is a bit simplified
* but should be close enough for all practical purposes.
*/
if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) ||
((irb->scsw.stctl ==
if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
((irb->scsw.cmd.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
(irb->scsw.actl & SCSW_ACTL_DEVACT) &&
(irb->scsw.actl & SCSW_ACTL_SCHACT)) ||
(irb->scsw.actl & SCSW_ACTL_SUSPENDED))
cdev_irb->scsw.cpa = irb->scsw.cpa;
(irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
(irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
/* Accumulate device status, but not the device busy flag. */
cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY;
cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
/* dstat is not always valid. */
if (irb->scsw.stctl &
if (irb->scsw.cmd.stctl &
(SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
| SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
cdev_irb->scsw.dstat |= irb->scsw.dstat;
cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
/* Accumulate subchannel status. */
cdev_irb->scsw.cstat |= irb->scsw.cstat;
cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
/* Copy residual count if it is valid. */
if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
(irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0)
cdev_irb->scsw.count = irb->scsw.count;
if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
(irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
== 0)
cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
/* Take care of bits in the extended status word. */
ccw_device_accumulate_esw(cdev, irb);
@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
* sense facility available/supported when enabling the
* concurrent sense facility.
*/
if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
!(cdev_irb->esw.esw0.erw.cons))
cdev->private->flags.dosense = 1;
}
@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sch = to_subchannel(cdev->dev.parent);
/* A sense is required, can we do it now ? */
if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
/*
* we received a Unit Check but we have no final
* status yet, therefore we must delay the SENSE
@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
* If not, the remaining bits have no meaning and we must ignore them.
* The esw is not meaningful either...
*/
if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))
if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
return;
/* Check for channel checks and interface control checks. */
ccw_device_msg_control_check(cdev, irb);
/* Check for path not operational. */
if (irb->scsw.pno && irb->scsw.fctl != 0 &&
(!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
(irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
ccw_device_path_notoper(cdev);
if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.dstat & DEV_STAT_CHN_END)) {
if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
@ -386,11 +383,11 @@ int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
ccw_device_accumulate_irb(cdev, irb);
if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
return -EBUSY;
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) {
!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
return 0;

drivers/s390/cio/fcx.c (new file, 350 lines)

@ -0,0 +1,350 @@
/*
* Functions for assembling fcx enabled I/O control blocks.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/fcx.h>
#include "cio.h"
/**
* tcw_get_intrg - return pointer to associated interrogate tcw
* @tcw: pointer to the original tcw
*
* Return a pointer to the interrogate tcw associated with the specified tcw
* or %NULL if there is no associated interrogate tcw.
*/
struct tcw *tcw_get_intrg(struct tcw *tcw)
{
return (struct tcw *) ((addr_t) tcw->intrg);
}
EXPORT_SYMBOL(tcw_get_intrg);
/**
* tcw_get_data - return pointer to input/output data associated with tcw
* @tcw: pointer to the tcw
*
* Return the input or output data address specified in the tcw depending
* on whether the r-bit or the w-bit is set. If neither bit is set, return
* %NULL.
*/
void *tcw_get_data(struct tcw *tcw)
{
if (tcw->r)
return (void *) ((addr_t) tcw->input);
if (tcw->w)
return (void *) ((addr_t) tcw->output);
return NULL;
}
EXPORT_SYMBOL(tcw_get_data);
/**
* tcw_get_tccb - return pointer to tccb associated with tcw
* @tcw: pointer to the tcw
*
* Return pointer to the tccb associated with this tcw.
*/
struct tccb *tcw_get_tccb(struct tcw *tcw)
{
return (struct tccb *) ((addr_t) tcw->tccb);
}
EXPORT_SYMBOL(tcw_get_tccb);
/**
* tcw_get_tsb - return pointer to tsb associated with tcw
* @tcw: pointer to the tcw
*
* Return pointer to the tsb associated with this tcw.
*/
struct tsb *tcw_get_tsb(struct tcw *tcw)
{
return (struct tsb *) ((addr_t) tcw->tsb);
}
EXPORT_SYMBOL(tcw_get_tsb);
/**
* tcw_init - initialize tcw data structure
* @tcw: pointer to the tcw to be initialized
* @r: initial value of the r-bit
* @w: initial value of the w-bit
*
* Initialize all fields of the specified tcw data structure with zero and
* fill in the format, flags, r and w fields.
*/
void tcw_init(struct tcw *tcw, int r, int w)
{
memset(tcw, 0, sizeof(struct tcw));
tcw->format = TCW_FORMAT_DEFAULT;
tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
if (r)
tcw->r = 1;
if (w)
tcw->w = 1;
}
EXPORT_SYMBOL(tcw_init);
static inline size_t tca_size(struct tccb *tccb)
{
return tccb->tcah.tcal - 12;
}
static u32 calc_dcw_count(struct tccb *tccb)
{
int offset;
struct dcw *dcw;
u32 count = 0;
size_t size;
size = tca_size(tccb);
for (offset = 0; offset < size;) {
dcw = (struct dcw *) &tccb->tca[offset];
count += dcw->count;
if (!(dcw->flags & DCW_FLAGS_CC))
break;
offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
}
return count;
}
static u32 calc_cbc_size(struct tidaw *tidaw, int num)
{
int i;
u32 cbc_data;
u32 cbc_count = 0;
u64 data_count = 0;
for (i = 0; i < num; i++) {
if (tidaw[i].flags & TIDAW_FLAGS_LAST)
break;
/* TODO: find out if padding applies to total of data
* transferred or data transferred by this tidaw. Assumption:
* applies to total. */
data_count += tidaw[i].count;
if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
cbc_data = 4 + ALIGN(data_count, 4) - data_count;
cbc_count += cbc_data;
data_count += cbc_data;
}
}
return cbc_count;
}
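To make the check-byte arithmetic above concrete (a worked example, not part of the source): when a tidaw with TIDAW_FLAGS_INSERT_CBC is reached at a running data_count of 22 bytes, cbc_data = 4 + ALIGN(22, 4) - 22 = 4 + 24 - 22 = 6, i.e. the 4-byte check block plus 2 alignment pad bytes; at an already aligned data_count of 24, cbc_data = 4 + 24 - 24 = 4, the check block alone.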
/**
* tcw_finalize - finalize tcw length fields and tidaw list
* @tcw: pointer to the tcw
* @num_tidaws: the number of tidaws used to address input/output data or zero
* if no tida is used
*
* Calculate the input-/output-count and tccbl fields in the tcw, add a
* tcat to the tccb and terminate the data tidaw list if used.
*
* Note: in case input- or output-tida is used, the tidaw-list must be stored
* in contiguous storage (no ttic). The tcal field in the tccb must be
* up-to-date.
*/
void tcw_finalize(struct tcw *tcw, int num_tidaws)
{
struct tidaw *tidaw;
struct tccb *tccb;
struct tccb_tcat *tcat;
u32 count;
/* Terminate tidaw list. */
tidaw = tcw_get_data(tcw);
if (num_tidaws > 0)
tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
/* Add tcat to tccb. */
tccb = tcw_get_tccb(tcw);
tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
memset(tcat, 0, sizeof(*tcat));
/* Calculate tcw input/output count and tcat transport count. */
count = calc_dcw_count(tccb);
if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
count += calc_cbc_size(tidaw, num_tidaws);
if (tcw->r)
tcw->input_count = count;
else if (tcw->w)
tcw->output_count = count;
tcat->count = ALIGN(count, 4) + 4;
/* Calculate tccbl. */
tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
sizeof(struct tccb_tcat) - 20) >> 2;
}
EXPORT_SYMBOL(tcw_finalize);
/**
* tcw_set_intrg - set the interrogate tcw address of a tcw
* @tcw: the tcw address
* @intrg_tcw: the address of the interrogate tcw
*
* Set the address of the interrogate tcw in the specified tcw.
*/
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
tcw->intrg = (u32) ((addr_t) intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);
/**
* tcw_set_data - set data address and tida flag of a tcw
* @tcw: the tcw address
* @data: the data address
* @use_tidal: zero if the data address specifies a contiguous block of data,
* non-zero if it specifies a list of tidaws.
*
* Set the input/output data address of a tcw (depending on the value of the
* r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
* is set as well.
*/
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
if (tcw->r) {
tcw->input = (u64) ((addr_t) data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_INPUT_TIDA;
} else if (tcw->w) {
tcw->output = (u64) ((addr_t) data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
}
}
EXPORT_SYMBOL(tcw_set_data);
/**
* tcw_set_tccb - set tccb address of a tcw
* @tcw: the tcw address
* @tccb: the tccb address
*
* Set the address of the tccb in the specified tcw.
*/
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
{
tcw->tccb = (u64) ((addr_t) tccb);
}
EXPORT_SYMBOL(tcw_set_tccb);
/**
* tcw_set_tsb - set tsb address of a tcw
* @tcw: the tcw address
* @tsb: the tsb address
*
* Set the address of the tsb in the specified tcw.
*/
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
{
tcw->tsb = (u64) ((addr_t) tsb);
}
EXPORT_SYMBOL(tcw_set_tsb);
/**
* tccb_init - initialize tccb
* @tccb: the tccb address
* @size: the maximum size of the tccb
* @sac: the service-action-code to be used
*
* Initialize the header of the specified tccb by resetting all values to zero
* and filling in defaults for format, sac and initial tcal fields.
*/
void tccb_init(struct tccb *tccb, size_t size, u32 sac)
{
memset(tccb, 0, size);
tccb->tcah.format = TCCB_FORMAT_DEFAULT;
tccb->tcah.sac = sac;
tccb->tcah.tcal = 12;
}
EXPORT_SYMBOL(tccb_init);
/**
* tsb_init - initialize tsb
* @tsb: the tsb address
*
* Initialize the specified tsb by resetting all values to zero.
*/
void tsb_init(struct tsb *tsb)
{
memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);
/**
* tccb_add_dcw - add a dcw to the tccb
* @tccb: the tccb address
* @tccb_size: the maximum tccb size
* @cmd: the dcw command
* @flags: flags for the dcw
* @cd: pointer to control data for this dcw or NULL if none is required
* @cd_count: number of control data bytes for this dcw
* @count: number of data bytes for this dcw
*
* Add a new dcw to the specified tccb by writing the dcw information specified
* by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
* a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
* would exceed the available space as defined by @tccb_size.
*
* Note: the tcal field of the tccb header will be updated to reflect the added
* content.
*/
struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
void *cd, u8 cd_count, u32 count)
{
struct dcw *dcw;
int size;
int tca_offset;
/* Check for space. */
tca_offset = tca_size(tccb);
size = ALIGN(sizeof(struct dcw) + cd_count, 4);
if (sizeof(struct tccb_tcah) + tca_offset + size +
sizeof(struct tccb_tcat) > tccb_size)
return ERR_PTR(-ENOSPC);
/* Add dcw to tca. */
dcw = (struct dcw *) &tccb->tca[tca_offset];
memset(dcw, 0, size);
dcw->cmd = cmd;
dcw->flags = flags;
dcw->count = count;
dcw->cd_count = cd_count;
if (cd)
memcpy(&dcw->cd[0], cd, cd_count);
tccb->tcah.tcal += size;
return dcw;
}
EXPORT_SYMBOL(tccb_add_dcw);
/**
* tcw_add_tidaw - add a tidaw to a tcw
* @tcw: the tcw address
* @num_tidaws: the current number of tidaws
* @flags: flags for the new tidaw
* @addr: address value for the new tidaw
* @count: count value for the new tidaw
*
* Add a new tidaw to the input/output data tidaw-list of the specified tcw
* (depending on the value of the r-flag and w-flag) and return a pointer to
* the new tidaw.
*
* Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
* must ensure that there is enough space for the new tidaw. The last-tidaw
* flag for the last tidaw in the list will be set by tcw_finalize.
*/
struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
void *addr, u32 count)
{
struct tidaw *tidaw;
/* Add tidaw to tidaw-list. */
tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
memset(tidaw, 0, sizeof(struct tidaw));
tidaw->flags = flags;
tidaw->count = count;
tidaw->addr = (u64) ((addr_t) addr);
return tidaw;
}
EXPORT_SYMBOL(tcw_add_tidaw);
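As a rough illustration of how these helpers compose — a hypothetical fragment, not taken from this commit, that assembles a single-dcw read tcw by hand. Buffer allocation, alignment and error checking are glossed over, and the command code is a placeholder; the itcw API in the next file automates exactly this kind of assembly:

/*
 * Hypothetical fragment: build a one-dcw read tcw from pre-allocated,
 * suitably aligned, 31-bit addressable buffers.
 */
static void my_build_read_tcw(struct tcw *tcw, struct tccb *tccb,
                              struct tsb *tsb, void *data, u32 len)
{
        tcw_init(tcw, 1, 0);            /* r=1, w=0: read operation */
        tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
        tsb_init(tsb);
        tcw_set_tccb(tcw, tccb);
        tcw_set_tsb(tcw, tsb);
        tcw_set_data(tcw, data, 0);     /* contiguous buffer, no tida */
        /* 0x02 stands in for a device-specific read command code. */
        tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x02, 0, NULL, 0, len);
        tcw_finalize(tcw, 0);           /* no tidaws in use */
}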

drivers/s390/cio/idset.h

@ -8,7 +8,7 @@
#ifndef S390_IDSET_H
#define S390_IDSET_H S390_IDSET_H
#include "schid.h"
#include <asm/schid.h>
struct idset;

drivers/s390/cio/io_sch.h

@ -1,12 +1,12 @@
#ifndef S390_IO_SCH_H
#define S390_IO_SCH_H
#include "schid.h"
#include <asm/schid.h>
/*
* operation request block
* command-mode operation request block
*/
struct orb {
struct cmd_orb {
u32 intparm; /* interruption parameter */
u32 key : 4; /* flags, like key, suspend control, etc. */
u32 spnd : 1; /* suspend control */
@ -28,8 +28,36 @@ struct orb {
u32 cpa; /* channel program address */
} __attribute__ ((packed, aligned(4)));
/*
* transport-mode operation request block
*/
struct tm_orb {
u32 intparm;
u32 key:4;
u32 :9;
u32 b:1;
u32 :2;
u32 lpm:8;
u32 :7;
u32 x:1;
u32 tcw;
u32 prio:8;
u32 :8;
u32 rsvpgm:8;
u32 :8;
u32 :32;
u32 :32;
u32 :32;
u32 :32;
} __attribute__ ((packed, aligned(4)));
union orb {
struct cmd_orb cmd;
struct tm_orb tm;
} __attribute__ ((packed, aligned(4)));
struct io_subchannel_private {
struct orb orb; /* operation request block */
union orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */
} __attribute__ ((aligned(8)));
@ -95,16 +123,18 @@ struct ccw_device_private {
void *cmb_wait; /* deferred cmb enable/disable */
};
static inline int ssch(struct subchannel_id schid, volatile struct orb *addr)
static inline int ssch(struct subchannel_id schid, volatile union orb *addr)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
int ccode = -EIO;
asm volatile(
" ssch 0(%2)\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
return ccode;
}

drivers/s390/cio/ioasm.h

@ -2,7 +2,7 @@
#define S390_CIO_IOASM_H
#include <asm/chpid.h>
#include "schid.h"
#include <asm/schid.h>
/*
* TPI info structure

drivers/s390/cio/isc.c (new file, 68 lines)

@ -0,0 +1,68 @@
/*
* Functions for registration of I/O interruption subclasses on s390.
*
* Copyright IBM Corp. 2008
* Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/isc.h>
static unsigned int isc_refs[MAX_ISC + 1];
static DEFINE_SPINLOCK(isc_ref_lock);
/**
* isc_register - register an I/O interruption subclass.
* @isc: I/O interruption subclass to register
*
* The number of users for @isc is increased. If this is the first user to
* register @isc, the corresponding I/O interruption subclass mask is enabled.
*
* Context:
* This function must not be called in interrupt context.
*/
void isc_register(unsigned int isc)
{
if (isc > MAX_ISC) {
WARN_ON(1);
return;
}
spin_lock(&isc_ref_lock);
if (isc_refs[isc] == 0)
ctl_set_bit(6, 31 - isc);
isc_refs[isc]++;
spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_register);
/**
* isc_unregister - unregister an I/O interruption subclass.
* @isc: I/O interruption subclass to unregister
*
* The number of users for @isc is decreased. If this is the last user to
* unregister @isc, the corresponding I/O interruption subclass mask is
* disabled.
* Note: This function must only be called after a matching isc_register()
* call for @isc by the same driver.
*
* Context:
* This function must not be called in interrupt context.
*/
void isc_unregister(unsigned int isc)
{
spin_lock(&isc_ref_lock);
/* check for misuse */
if (isc > MAX_ISC || isc_refs[isc] == 0) {
WARN_ON(1);
goto out_unlock;
}
if (isc_refs[isc] == 1)
ctl_clear_bit(6, 31 - isc);
isc_refs[isc]--;
out_unlock:
spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_unregister);
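A minimal usage sketch under assumptions (hypothetical driver; the subclass number is picked for illustration): take a reference on the subclass at initialization time and drop it on exit, much as the qdio changes further down do with QDIO_AIRQ_ISC:

#include <linux/init.h>
#include <asm/isc.h>

#define MY_ISC 3        /* hypothetical subclass; must not exceed MAX_ISC */

static int __init my_driver_init(void)
{
        isc_register(MY_ISC);   /* first user enables the subclass mask */
        return 0;
}

static void __exit my_driver_exit(void)
{
        isc_unregister(MY_ISC); /* last user disables the mask again */
}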

drivers/s390/cio/itcw.c (new file, 327 lines)

@ -0,0 +1,327 @@
/*
* Functions for incremental construction of fcx enabled I/O control blocks.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/fcx.h>
#include <asm/itcw.h>
/**
* struct itcw - incremental tcw helper data type
*
* This structure serves as a handle for the incremental construction of a
* tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
* tcw and associated data. The data structures are contained inside a single
* contiguous buffer provided by the user.
*
* The itcw construction functions take care of overall data integrity:
* - reset unused fields to zero
* - fill in required pointers
* - ensure required alignment for data structures
* - prevent data structures from crossing a 4k-byte boundary where required
* - calculate tccb-related length fields
* - optionally provide ready-made interrogate tcw and associated structures
*
* Restrictions apply to the itcws created with these construction functions:
* - tida only supported for data address, not for tccb
* - only contiguous tidaw-lists (no ttic)
* - total number of bytes required per itcw may not exceed 4k bytes
* - either read or write operation (may not work with r=0 and w=0)
*
* Example:
* struct itcw *itcw;
* void *buffer;
* size_t size;
*
* size = itcw_calc_size(1, 2, 0);
* buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
* if (!buffer)
* return -ENOMEM;
* itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
* if (IS_ERR(itcw))
* return PTR_ERR(itcw);
* itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
* itcw_add_tidaw(itcw, 0, 0x30000, 20);
* itcw_add_tidaw(itcw, 0, 0x40000, 52);
* itcw_finalize(itcw);
*
*/
struct itcw {
struct tcw *tcw;
struct tcw *intrg_tcw;
int num_tidaws;
int max_tidaws;
int intrg_num_tidaws;
int intrg_max_tidaws;
};
/**
* itcw_get_tcw - return pointer to tcw associated with the itcw
* @itcw: address of the itcw
*
* Return pointer to the tcw associated with the itcw.
*/
struct tcw *itcw_get_tcw(struct itcw *itcw)
{
return itcw->tcw;
}
EXPORT_SYMBOL(itcw_get_tcw);
/**
* itcw_calc_size - return the size of an itcw with the given parameters
* @intrg: if non-zero, add an interrogate tcw
* @max_tidaws: maximum number of tidaws to be used for data addressing or zero
* if no tida is to be used.
* @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
* by the interrogate tcw, if specified
*
* Calculate and return the number of bytes required to hold an itcw with the
* given parameters and assuming tccbs with maximum size.
*
* Note that the resulting size also contains bytes needed for alignment
* padding as well as padding to ensure that data structures don't cross a
* 4k-boundary where required.
*/
size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
{
size_t len;
/* Main data. */
len = sizeof(struct itcw);
len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
/* TSB */ sizeof(struct tsb) +
/* TIDAL */ max_tidaws * sizeof(struct tidaw);
/* Interrogate data. */
if (intrg) {
len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
/* TSB */ sizeof(struct tsb) +
/* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
}
/* Maximum required alignment padding. */
len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
/* Maximum padding for structures that may not cross 4k boundary. */
if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
len += max(max_tidaws, intrg_max_tidaws) *
sizeof(struct tidaw) - 1;
return len;
}
EXPORT_SYMBOL(itcw_calc_size);
#define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095))
static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
int align, int check_4k)
{
addr_t addr;
addr = ALIGN(*start, align);
if (check_4k && CROSS4K(addr, len)) {
addr = ALIGN(addr, 4096);
addr = ALIGN(addr, align);
}
if (addr + len > end)
return ERR_PTR(-ENOSPC);
*start = addr + len;
return (void *) addr;
}
/**
* itcw_init - initialize incremental tcw data structure
* @buffer: address of buffer to use for data structures
* @size: number of bytes in buffer
* @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
* operation tcw
* @intrg: if non-zero, add and initialize an interrogate tcw
* @max_tidaws: maximum number of tidaws to be used for data addressing or zero
* if no tida is to be used.
* @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
* by the interrogate tcw, if specified
*
* Prepare the specified buffer to be used as an incremental tcw, i.e. a
* helper data structure that can be used to construct a valid tcw by
* successive calls to other helper functions. Note: the buffer needs to be
* located below the 2G address limit. The resulting tcw has the following
* restrictions:
* - no tccb tidal
* - input/output tidal is contiguous (no ttic)
* - total data should not exceed 4k
* - tcw specifies either read or write operation
*
* On success, return pointer to the resulting incremental tcw data structure,
* ERR_PTR otherwise.
*/
struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
int max_tidaws, int intrg_max_tidaws)
{
struct itcw *itcw;
void *chunk;
addr_t start;
addr_t end;
/* Check for 2G limit. */
start = (addr_t) buffer;
end = start + size;
if (end > (1 << 31))
return ERR_PTR(-EINVAL);
memset(buffer, 0, size);
/* ITCW. */
chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
if (IS_ERR(chunk))
return chunk;
itcw = chunk;
itcw->max_tidaws = max_tidaws;
itcw->intrg_max_tidaws = intrg_max_tidaws;
/* Main TCW. */
chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
if (IS_ERR(chunk))
return chunk;
itcw->tcw = chunk;
tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
(op == ITCW_OP_WRITE) ? 1 : 0);
/* Interrogate TCW. */
if (intrg) {
chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
if (IS_ERR(chunk))
return chunk;
itcw->intrg_tcw = chunk;
tcw_init(itcw->intrg_tcw, 1, 0);
tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
}
/* Data TIDAL. */
if (max_tidaws > 0) {
chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
max_tidaws, 16, 1);
if (IS_ERR(chunk))
return chunk;
tcw_set_data(itcw->tcw, chunk, 1);
}
/* Interrogate data TIDAL. */
if (intrg && (intrg_max_tidaws > 0)) {
chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
intrg_max_tidaws, 16, 1);
if (IS_ERR(chunk))
return chunk;
tcw_set_data(itcw->intrg_tcw, chunk, 1);
}
/* TSB. */
chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
if (IS_ERR(chunk))
return chunk;
tsb_init(chunk);
tcw_set_tsb(itcw->tcw, chunk);
/* Interrogate TSB. */
if (intrg) {
chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
if (IS_ERR(chunk))
return chunk;
tsb_init(chunk);
tcw_set_tsb(itcw->intrg_tcw, chunk);
}
/* TCCB. */
chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
if (IS_ERR(chunk))
return chunk;
tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
tcw_set_tccb(itcw->tcw, chunk);
/* Interrogate TCCB. */
if (intrg) {
chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
if (IS_ERR(chunk))
return chunk;
tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
tcw_set_tccb(itcw->intrg_tcw, chunk);
tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
sizeof(struct dcw_intrg_data), 0);
tcw_finalize(itcw->intrg_tcw, 0);
}
return itcw;
}
EXPORT_SYMBOL(itcw_init);
/**
* itcw_add_dcw - add a dcw to the itcw
* @itcw: address of the itcw
* @cmd: the dcw command
* @flags: flags for the dcw
* @cd: address of control data for this dcw or NULL if none is required
* @cd_count: number of control data bytes for this dcw
* @count: number of data bytes for this dcw
*
* Add a new dcw to the specified itcw by writing the dcw information specified
* by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
* a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
* would exceed the available space.
*
* Note: the tcal field of the tccb header will be updated to reflect added
* content.
*/
struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
u8 cd_count, u32 count)
{
return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
flags, cd, cd_count, count);
}
EXPORT_SYMBOL(itcw_add_dcw);
/**
* itcw_add_tidaw - add a tidaw to the itcw
* @itcw: address of the itcw
* @flags: flags for the new tidaw
* @addr: address value for the new tidaw
* @count: count value for the new tidaw
*
* Add a new tidaw to the input/output data tidaw-list of the specified itcw
* (depending on the value of the r-flag and w-flag). Return a pointer to
* the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
* available space.
*
* Note: the tidaw-list is assumed to be contiguous with no ttics. The
* last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
*/
struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
{
if (itcw->num_tidaws >= itcw->max_tidaws)
return ERR_PTR(-ENOSPC);
return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
}
EXPORT_SYMBOL(itcw_add_tidaw);
/**
* itcw_set_data - set data address and tida flag of the itcw
* @itcw: address of the itcw
* @addr: the data address
* @use_tidal: zero if the data address specifies a contiguous block of data,
* non-zero if it specifies a list of tidaws.
*
* Set the input/output data address of the itcw (depending on the value of the
* r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
* is set as well.
*/
void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
{
tcw_set_data(itcw->tcw, addr, use_tidal);
}
EXPORT_SYMBOL(itcw_set_data);
/**
* itcw_finalize - calculate length and count fields of the itcw
* @itcw: address of the itcw
*
* Calculate tcw input-/output-count and tccbl fields and add a tcat to the tccb.
* In case input- or output-tida is used, the tidaw-list must be stored in
* contiguous storage (no ttic). The tcal field in the tccb must be
* up-to-date.
*/
void itcw_finalize(struct itcw *itcw)
{
tcw_finalize(itcw->tcw, itcw->num_tidaws);
}
EXPORT_SYMBOL(itcw_finalize);
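Once itcw_finalize() has run, the embedded tcw is ready for submission. A hedged sketch of the hand-over to the common I/O layer — a hypothetical fragment in which cdev and itcw are assumed to be set up as in the example at the top of this file:

        rc = ccw_device_tm_start(cdev, itcw_get_tcw(itcw),
                                 (unsigned long) itcw, 0xff);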

drivers/s390/cio/qdio.c

@ -2082,7 +2082,6 @@ qdio_timeout_handler(struct ccw_device *cdev)
default:
BUG();
}
ccw_device_set_timeout(cdev, 0);
wake_up(&cdev->private->wait_q);
}
@ -2121,6 +2120,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case -EIO:
QDIO_PRINT_ERR("i/o error on device %s\n",
cdev->dev.bus_id);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
wake_up(&cdev->private->wait_q);
return;
case -ETIMEDOUT:
qdio_timeout_handler(cdev);
@ -2139,8 +2140,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
cstat = irb->scsw.cstat;
dstat = irb->scsw.dstat;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
@ -2353,9 +2354,6 @@ tiqdio_check_chsc_availability(void)
{
char dbf_text[15];
if (!css_characteristics_avail)
return -EIO;
/* Check for bit 41. */
if (!css_general_characteristics.aif) {
QDIO_PRINT_WARN("Adapter interruption facility not " \
@ -2667,12 +2665,12 @@ qdio_shutdown(struct ccw_device *cdev, int how)
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
} else if (rc == 0) {
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
ccw_device_set_timeout(cdev, timeout);
spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
wait_event(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR);
wait_event_interruptible_timeout(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR,
timeout);
} else {
QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
"device %s\n", result, cdev->dev.bus_id);
@ -2692,7 +2690,6 @@ qdio_shutdown(struct ccw_device *cdev, int how)
/* Ignore errors. */
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
ccw_device_set_timeout(cdev, 0);
out:
up(&irq_ptr->setting_up_sema);
return result;
@ -2907,13 +2904,10 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
ccw_device_set_timeout(cdev, 0);
if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat))
return;
}
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
ccw_device_set_timeout(cdev, 0);
}
int
@ -3196,8 +3190,6 @@ qdio_establish(struct qdio_initialize *init_data)
irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
result, result2);
result=result2;
if (result)
ccw_device_set_timeout(cdev, 0);
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
@ -3279,7 +3271,6 @@ qdio_activate(struct ccw_device *cdev, int flags)
spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
ccw_device_set_timeout(cdev, 0);
ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
0, DOIO_DENY_PREFETCH);
@ -3722,7 +3713,8 @@ tiqdio_register_thinints(void)
char dbf_text[20];
tiqdio_ind =
s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL);
s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL,
TIQDIO_THININT_ISC);
if (IS_ERR(tiqdio_ind)) {
sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
QDIO_DBF_TEXT0(0,setup,dbf_text);
@ -3738,7 +3730,8 @@ static void
tiqdio_unregister_thinints(void)
{
if (tiqdio_ind)
s390_unregister_adapter_interrupt(tiqdio_ind);
s390_unregister_adapter_interrupt(tiqdio_ind,
TIQDIO_THININT_ISC);
}
static int
@ -3899,6 +3892,7 @@ init_QDIO(void)
qdio_mempool_alloc,
qdio_mempool_free, NULL);
isc_register(QDIO_AIRQ_ISC);
if (tiqdio_check_chsc_availability())
QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
@ -3911,6 +3905,7 @@ static void __exit
cleanup_QDIO(void)
{
tiqdio_unregister_thinints();
isc_unregister(QDIO_AIRQ_ISC);
qdio_remove_procfs_entry();
qdio_release_qdio_memory();
qdio_unregister_dbf_views();

drivers/s390/cio/qdio.h

@ -2,8 +2,8 @@
#define _CIO_QDIO_H
#include <asm/page.h>
#include "schid.h"
#include <asm/isc.h>
#include <asm/schid.h>
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_VERBOSE_LEVEL 9
@ -26,7 +26,7 @@
*/
#define IQDIO_FILL_LEVEL_TO_POLL 4
#define TIQDIO_THININT_ISC 3
#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
#define TIQDIO_DELAY_TARGET 0
#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */

drivers/s390/cio/scsw.c (new file, 843 lines)

@ -0,0 +1,843 @@
/*
* Helper functions for scsw access.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/types.h>
#include <linux/module.h>
#include <asm/cio.h>
#include "css.h"
#include "chsc.h"
/**
* scsw_is_tm - check for transport mode scsw
* @scsw: pointer to scsw
*
* Return non-zero if the specified scsw is a transport mode scsw, zero
* otherwise.
*/
int scsw_is_tm(union scsw *scsw)
{
return css_general_characteristics.fcx && (scsw->tm.x == 1);
}
EXPORT_SYMBOL(scsw_is_tm);
/**
* scsw_key - return scsw key field
* @scsw: pointer to scsw
*
* Return the value of the key field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.key;
else
return scsw->cmd.key;
}
EXPORT_SYMBOL(scsw_key);
/**
* scsw_eswf - return scsw eswf field
* @scsw: pointer to scsw
*
* Return the value of the eswf field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.eswf;
else
return scsw->cmd.eswf;
}
EXPORT_SYMBOL(scsw_eswf);
/**
* scsw_cc - return scsw cc field
* @scsw: pointer to scsw
*
* Return the value of the cc field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cc;
else
return scsw->cmd.cc;
}
EXPORT_SYMBOL(scsw_cc);
/**
* scsw_ectl - return scsw ectl field
* @scsw: pointer to scsw
*
* Return the value of the ectl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.ectl;
else
return scsw->cmd.ectl;
}
EXPORT_SYMBOL(scsw_ectl);
/**
* scsw_pno - return scsw pno field
* @scsw: pointer to scsw
*
* Return the value of the pno field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.pno;
else
return scsw->cmd.pno;
}
EXPORT_SYMBOL(scsw_pno);
/**
* scsw_fctl - return scsw fctl field
* @scsw: pointer to scsw
*
* Return the value of the fctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.fctl;
else
return scsw->cmd.fctl;
}
EXPORT_SYMBOL(scsw_fctl);
/**
* scsw_actl - return scsw actl field
* @scsw: pointer to scsw
*
* Return the value of the actl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.actl;
else
return scsw->cmd.actl;
}
EXPORT_SYMBOL(scsw_actl);
/**
* scsw_stctl - return scsw stctl field
* @scsw: pointer to scsw
*
* Return the value of the stctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.stctl;
else
return scsw->cmd.stctl;
}
EXPORT_SYMBOL(scsw_stctl);
/**
* scsw_dstat - return scsw dstat field
* @scsw: pointer to scsw
*
* Return the value of the dstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.dstat;
else
return scsw->cmd.dstat;
}
EXPORT_SYMBOL(scsw_dstat);
/**
* scsw_cstat - return scsw cstat field
* @scsw: pointer to scsw
*
* Return the value of the cstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
u32 scsw_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cstat;
else
return scsw->cmd.cstat;
}
EXPORT_SYMBOL(scsw_cstat);
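Paired with the validity checks that follow, these accessors let callers inspect an irb without caring about its format. A sketch of the intended pattern — a hypothetical helper mirroring what the device_status.c hunks above do with scsw_is_valid_cstat() and scsw_cstat():

/* Hypothetical fragment: format-independent channel-check test. */
static int my_irb_has_channel_check(struct irb *irb)
{
        union scsw *scsw = &irb->scsw;

        if (!(scsw_stctl(scsw) & SCSW_STCTL_STATUS_PEND))
                return 0;       /* remaining fields carry no meaning */
        return scsw_is_valid_cstat(scsw) &&
               (scsw_cstat(scsw) & (SCHN_STAT_CHN_DATA_CHK |
                                    SCHN_STAT_CHN_CTRL_CHK |
                                    SCHN_STAT_INTF_CTRL_CHK));
}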
/**
* scsw_cmd_is_valid_key - check key field validity
* @scsw: pointer to scsw
*
* Return non-zero if the key field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_key(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_key);
/**
* scsw_cmd_is_valid_sctl - check sctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the sctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_sctl(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
/**
* scsw_cmd_is_valid_eswf - check eswf field validity
* @scsw: pointer to scsw
*
* Return non-zero if the eswf field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_eswf(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
/**
* scsw_cmd_is_valid_cc - check cc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cc field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_cc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
/**
* scsw_cmd_is_valid_fmt - check fmt field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fmt field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_fmt(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
/**
* scsw_cmd_is_valid_pfch - check pfch field validity
* @scsw: pointer to scsw
*
* Return non-zero if the pfch field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_pfch(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
/**
* scsw_cmd_is_valid_isic - check isic field validity
* @scsw: pointer to scsw
*
* Return non-zero if the isic field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_isic(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
/**
* scsw_cmd_is_valid_alcc - check alcc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the alcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_alcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
/**
* scsw_cmd_is_valid_ssi - check ssi field validity
* @scsw: pointer to scsw
*
* Return non-zero if the ssi field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_ssi(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
/**
* scsw_cmd_is_valid_zcc - check zcc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the zcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_zcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
/**
* scsw_cmd_is_valid_ectl - check ectl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the ectl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_ectl(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
/**
* scsw_cmd_is_valid_pno - check pno field validity
* @scsw: pointer to scsw
*
* Return non-zero if the pno field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_pno(union scsw *scsw)
{
return (scsw->cmd.fctl != 0) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
}
EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
/**
* scsw_cmd_is_valid_fctl - check fctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_fctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1 */
return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
/**
* scsw_cmd_is_valid_actl - check actl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the actl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_actl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1 */
return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
/**
* scsw_cmd_is_valid_stctl - check stctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the stctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_stctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1 */
return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
/**
* scsw_cmd_is_valid_dstat - check dstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the dstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_dstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
/**
* scsw_cmd_is_valid_cstat - check cstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
int scsw_cmd_is_valid_cstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
/**
* scsw_tm_is_valid_key - check key field validity
* @scsw: pointer to scsw
*
* Return non-zero if the key field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_key(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_tm_is_valid_key);
/**
* scsw_tm_is_valid_eswf - check eswf field validity
* @scsw: pointer to scsw
*
* Return non-zero if the eswf field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_eswf(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
/**
* scsw_tm_is_valid_cc - check cc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cc field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_cc(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cc);
/**
* scsw_tm_is_valid_fmt - check fmt field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fmt field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_fmt(union scsw *scsw)
{
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
/**
* scsw_tm_is_valid_x - check x field validity
* @scsw: pointer to scsw
*
* Return non-zero if the x field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_x(union scsw *scsw)
{
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_x);
/**
* scsw_tm_is_valid_q - check q field validity
* @scsw: pointer to scsw
*
* Return non-zero if the q field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_q(union scsw *scsw)
{
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_q);
/**
* scsw_tm_is_valid_ectl - check ectl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the ectl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_ectl(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
}
EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
/**
* scsw_tm_is_valid_pno - check pno field validity
* @scsw: pointer to scsw
*
* Return non-zero if the pno field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_pno(union scsw *scsw)
{
return (scsw->tm.fctl != 0) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
}
EXPORT_SYMBOL(scsw_tm_is_valid_pno);
/**
* scsw_tm_is_valid_fctl - check fctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_fctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1 */
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
/**
* scsw_tm_is_valid_actl - check actl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the actl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_actl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1 */
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_actl);
/**
* scsw_tm_is_valid_stctl - check stctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the stctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_stctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1 */
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
/**
* scsw_tm_is_valid_dstat - check dstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the dstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_dstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
/**
* scsw_tm_is_valid_cstat - check cstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_cstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
/**
* scsw_tm_is_valid_fcxs - check fcxs field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fcxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_fcxs(union scsw *scsw)
{
return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
/**
* scsw_tm_is_valid_schxs - check schxs field validity
* @scsw: pointer to scsw
*
* Return non-zero if the schxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
int scsw_tm_is_valid_schxs(union scsw *scsw)
{
return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_PROT_CHECK |
SCHN_STAT_CHN_DATA_CHK));
}
EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
/**
* scsw_is_valid_actl - check actl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the actl field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_actl(scsw);
else
return scsw_cmd_is_valid_actl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_actl);
/**
* scsw_is_valid_cc - check cc field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cc field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cc(scsw);
else
return scsw_cmd_is_valid_cc(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cc);
/**
* scsw_is_valid_cstat - check cstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the cstat field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cstat(scsw);
else
return scsw_cmd_is_valid_cstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cstat);
/**
* scsw_is_valid_dstat - check dstat field validity
* @scsw: pointer to scsw
*
* Return non-zero if the dstat field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_dstat(scsw);
else
return scsw_cmd_is_valid_dstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_dstat);
/**
* scsw_is_valid_ectl - check ectl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the ectl field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_ectl(scsw);
else
return scsw_cmd_is_valid_ectl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_ectl);
/**
* scsw_is_valid_eswf - check eswf field validity
* @scsw: pointer to scsw
*
* Return non-zero if the eswf field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_eswf(scsw);
else
return scsw_cmd_is_valid_eswf(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_eswf);
/**
* scsw_is_valid_fctl - check fctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the fctl field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_fctl(scsw);
else
return scsw_cmd_is_valid_fctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_fctl);
/**
* scsw_is_valid_key - check key field validity
* @scsw: pointer to scsw
*
* Return non-zero if the key field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_key(scsw);
else
return scsw_cmd_is_valid_key(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_key);
/**
* scsw_is_valid_pno - check pno field validity
* @scsw: pointer to scsw
*
* Return non-zero if the pno field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_pno(scsw);
else
return scsw_cmd_is_valid_pno(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_pno);
/**
* scsw_is_valid_stctl - check stctl field validity
* @scsw: pointer to scsw
*
* Return non-zero if the stctl field of the specified scsw is valid,
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
int scsw_is_valid_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_stctl(scsw);
else
return scsw_cmd_is_valid_stctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_stctl);
/**
* scsw_cmd_is_solicited - check for solicited scsw
* @scsw: pointer to scsw
*
* Return non-zero if the command mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
int scsw_cmd_is_solicited(union scsw *scsw)
{
return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_cmd_is_solicited);
/**
* scsw_tm_is_solicited - check for solicited scsw
* @scsw: pointer to scsw
*
* Return non-zero if the transport mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
int scsw_tm_is_solicited(union scsw *scsw)
{
return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_tm_is_solicited);
/**
* scsw_is_solicited - check for solicited scsw
* @scsw: pointer to scsw
*
* Return non-zero if the transport or command mode scsw indicates that the
* associated status condition is solicited, zero if it is unsolicited.
*/
int scsw_is_solicited(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_solicited(scsw);
else
return scsw_cmd_is_solicited(scsw);
}
EXPORT_SYMBOL(scsw_is_solicited);
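Taken together, these mode-agnostic wrappers let a driver inspect an scsw without first checking which format the subchannel delivered. A minimal sketch of an interrupt handler combining them follows; the handler, its names, and the includes are illustrative only, not part of this merge:

	#include <asm/ccwdev.h>	/* struct ccw_device, struct irb (assumed) */
	#include <asm/cio.h>	/* union scsw, DEV_STAT_UNIT_CHECK (assumed) */

	static void example_irq_handler(struct ccw_device *cdev,
					unsigned long intparm, struct irb *irb)
	{
		union scsw *scsw = &irb->scsw;
		u32 dstat;

		/* Only trust dstat after the validity helper agrees. */
		if (!scsw_is_valid_dstat(scsw))
			return;
		/* Read the field from whichever layout is active. */
		dstat = scsw_is_tm(scsw) ? scsw->tm.dstat : scsw->cmd.dstat;
		if (dstat & DEV_STAT_UNIT_CHECK) {
			/* sense data handling would go here */
		}
		if (!scsw_is_solicited(scsw)) {
			/* unsolicited status, e.g. a device attention */
		}
	}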


@@ -34,13 +34,15 @@
#include <linux/mutex.h>
#include <asm/s390_rdev.h>
#include <asm/reset.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include "ap_bus.h"
/* Some prototypes. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static void ap_poll_timeout(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
@@ -80,12 +82,15 @@ static DECLARE_WORK(ap_config_work, ap_scan_bus);
/*
* Tasklet & timer for AP request polling.
*/
static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static struct hrtimer ap_poll_timer;
/* In LPAR, poll with 4kHz frequency, i.e. every 250000 nanoseconds.
 * Under z/VM, switch to 1500000 nanoseconds to match z/VM's polling rate. */
static unsigned long long poll_timeout = 250000;
/**
* ap_instructions_available() - Test if AP instructions are available.
@@ -636,11 +641,39 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus,
static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
size_t count)
{
unsigned long long time;
ktime_t hr_time;
/* 120 seconds = maximum poll interval */
if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000)
return -EINVAL;
poll_timeout = time;
hr_time = ktime_set(0, poll_timeout);
if (!hrtimer_is_queued(&ap_poll_timer) ||
!hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) {
ap_poll_timer.expires = hr_time;
hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS);
}
return count;
}
static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
&bus_attr_config_time,
&bus_attr_poll_thread,
NULL
&bus_attr_poll_timeout,
NULL,
};
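The new poll_timeout attribute uses the stock BUS_ATTR() pattern, which generalizes to any bus-wide tunable: a show routine formats the value into the sysfs page buffer, and a store routine validates and applies user input. A self-contained sketch with hypothetical names (not from this patch):

	#include <linux/device.h>	/* BUS_ATTR, struct bus_type */
	#include <linux/kernel.h>

	static unsigned long example_value = 42;

	static ssize_t example_show(struct bus_type *bus, char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%lu\n", example_value);
	}

	static ssize_t example_store(struct bus_type *bus, const char *buf,
				     size_t count)
	{
		unsigned long val;

		if (sscanf(buf, "%lu", &val) != 1)
			return -EINVAL;
		example_value = val;
		return count;	/* consumed all input */
	}

	/* Expands to struct bus_attribute bus_attr_example, mode 0644. */
	static BUS_ATTR(example, 0644, example_show, example_store);

Once listed in the bus's attribute array, the file shows up as /sys/bus/<bus>/example.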
/**
@@ -895,9 +928,10 @@ ap_config_timeout(unsigned long ptr)
*/
static inline void ap_schedule_poll_timer(void)
{
if (timer_pending(&ap_poll_timer))
if (hrtimer_is_queued(&ap_poll_timer))
return;
mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout),
HRTIMER_MODE_ABS);
}
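The conversion replaces the jiffies-resolution timer_list with an hrtimer: the callback itself never re-arms, and ap_schedule_poll_timer() is the single (re)start point, backing off when the timer is already queued. A condensed sketch of the same idiom, mirroring the patch's use of HRTIMER_MODE_ABS (all names hypothetical):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer example_timer;
	static unsigned long long example_interval_ns = 250000;	/* 4 kHz */

	/* Runs in hard irq context: do minimal work, never re-arm here. */
	static enum hrtimer_restart example_timeout(struct hrtimer *unused)
	{
		/* e.g. tasklet_schedule(&example_tasklet); */
		return HRTIMER_NORESTART;
	}

	static void example_schedule(void)
	{
		if (hrtimer_is_queued(&example_timer))
			return;
		hrtimer_start(&example_timer, ktime_set(0, example_interval_ns),
			      HRTIMER_MODE_ABS);
	}

	static void example_setup(void)
	{
		hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		example_timer.function = example_timeout;
	}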
/**
@@ -1115,13 +1149,14 @@ EXPORT_SYMBOL(ap_cancel_message);
/**
* ap_poll_timeout(): AP receive polling for finished AP requests.
* @unused: Unused variable.
* @unused: Unused pointer.
*
* Schedules the AP tasklet.
* Schedules the AP tasklet using a high resolution timer.
*/
static void ap_poll_timeout(unsigned long unused)
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
tasklet_schedule(&ap_tasklet);
return HRTIMER_NORESTART;
}
/**
@@ -1344,6 +1379,14 @@ int __init ap_module_init(void)
ap_config_timer.expires = jiffies + ap_config_time * HZ;
add_timer(&ap_config_timer);
/* Set up the high resolution poll timer.
 * If we are running under z/VM, adjust polling to the z/VM polling rate.
 */
if (MACHINE_IS_VM)
poll_timeout = 1500000;
hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ap_poll_timer.function = ap_poll_timeout;
/* Start the low priority AP bus poll thread. */
if (ap_thread_flag) {
rc = ap_poll_thread_start();
@@ -1355,7 +1398,7 @@ int __init ap_module_init(void)
out_work:
del_timer_sync(&ap_config_timer);
del_timer_sync(&ap_poll_timer);
hrtimer_cancel(&ap_poll_timer);
destroy_workqueue(ap_work_queue);
out_root:
s390_root_dev_unregister(ap_root_device);
@@ -1386,7 +1429,7 @@ void ap_module_exit(void)
ap_reset_domain();
ap_poll_thread_stop();
del_timer_sync(&ap_config_timer);
del_timer_sync(&ap_poll_timer);
hrtimer_cancel(&ap_poll_timer);
destroy_workqueue(ap_work_queue);
tasklet_kill(&ap_tasklet);
s390_root_dev_unregister(ap_root_device);


@@ -92,6 +92,8 @@ struct ap_queue_status {
#define AP_DEVICE_TYPE_PCIXCC 5
#define AP_DEVICE_TYPE_CEX2A 6
#define AP_DEVICE_TYPE_CEX2C 7
#define AP_DEVICE_TYPE_CEX2A2 8
#define AP_DEVICE_TYPE_CEX2C2 9
/*
* AP reset flag states


@@ -1068,10 +1068,8 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
#define LBUFSIZE 1200UL
lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
if (!lbuf) {
PRINTK("kmalloc failed!\n");
if (!lbuf)
return 0;
}
local_count = min(LBUFSIZE - 1, count);
if (copy_from_user(lbuf, buffer, local_count) != 0) {
@@ -1081,23 +1079,15 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
lbuf[local_count] = '\0';
ptr = strstr(lbuf, "Online devices");
if (!ptr) {
PRINTK("Unable to parse data (missing \"Online devices\")\n");
if (!ptr)
goto out;
}
ptr = strstr(ptr, "\n");
if (!ptr) {
PRINTK("Unable to parse data (missing newline "
"after \"Online devices\")\n");
if (!ptr)
goto out;
}
ptr++;
if (strstr(ptr, "Waiting work element counts") == NULL) {
PRINTK("Unable to parse data (missing "
"\"Waiting work element counts\")\n");
if (strstr(ptr, "Waiting work element counts") == NULL)
goto out;
}
for (j = 0; j < 64 && *ptr; ptr++) {
/*
@@ -1197,16 +1187,12 @@ int __init zcrypt_api_init(void)
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
if (rc < 0) {
PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
zcrypt_misc_device.minor, rc);
if (rc < 0)
goto out;
}
/* Set up the proc file system */
zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
if (!zcrypt_entry) {
PRINTK("Couldn't create z90crypt proc entry\n");
rc = -ENOMEM;
goto out_misc;
}
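zcrypt reaches userspace through a misc character device, which is why a failing misc_register() aborts initialization above. A minimal sketch of that registration pattern; the device name and function names are hypothetical:

	#include <linux/fs.h>
	#include <linux/miscdevice.h>
	#include <linux/module.h>

	static const struct file_operations example_fops = {
		.owner = THIS_MODULE,
		/* .read, .write, .unlocked_ioctl as required */
	};

	/* MISC_DYNAMIC_MINOR lets the kernel pick a free minor;
	 * the node appears as /dev/example_crypt. */
	static struct miscdevice example_misc = {
		.minor = MISC_DYNAMIC_MINOR,
		.name  = "example_crypt",
		.fops  = &example_fops,
	};

	static int __init example_init(void)
	{
		return misc_register(&example_misc);
	}

	static void __exit example_exit(void)
	{
		misc_deregister(&example_misc);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");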


@@ -30,34 +30,6 @@
#ifndef _ZCRYPT_API_H_
#define _ZCRYPT_API_H_
/**
* Macro definitions
*
* PDEBUG debugs in the form "zcrypt: function_name -> message"
*
* PRINTK is like PDEBUG, except that it is always enabled
* PRINTKN is like PRINTK, except that it does not include the function name
* PRINTKW is like PRINTK, except that it uses KERN_WARNING
* PRINTKC is like PRINTK, except that it uses KERN_CRIT
*/
#define DEV_NAME "zcrypt"
#define PRINTK(fmt, args...) \
printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
#define PRINTKN(fmt, args...) \
printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
#define PRINTKW(fmt, args...) \
printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
#define PRINTKC(fmt, args...) \
printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)
#ifdef ZCRYPT_DEBUG
#define PDEBUG(fmt, args...) \
printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
#else
#define PDEBUG(fmt, args...) do {} while (0)
#endif
#include "ap_bus.h"
#include <asm/zcrypt.h>
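The wrappers removed here follow the classic conditional debug-printk idiom. The only non-obvious piece is the empty do {} while (0) body: it swallows the trailing semicolon, so EX_DEBUG(...); stays legal in every syntactic position (if/else arms included) when debugging is compiled out. A standalone sketch of the construct with hypothetical names:

	#include <linux/kernel.h>

	#define EXAMPLE_NAME "example"

	#ifdef EXAMPLE_DEBUG
	/* Prefix each message with the module and function name. */
	#define EX_DEBUG(fmt, args...) \
		printk(KERN_DEBUG EXAMPLE_NAME ": %s -> " fmt, __func__, ## args)
	#else
	/* Compiles away entirely; no code, no format-string storage. */
	#define EX_DEBUG(fmt, args...) do {} while (0)
	#endif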


@@ -49,6 +49,7 @@
static struct ap_device_id zcrypt_cex2a_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
{ /* end of list */ },
};
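The AP bus binds drivers to adapters by walking tables like this one, so supporting the new CEX2A2 hardware only requires adding its type code. A hedged sketch of the pattern for a hypothetical driver matching just that type:

	#include <linux/module.h>
	#include "ap_bus.h"	/* struct ap_device_id, AP_DEVICE */

	static struct ap_device_id example_ap_ids[] = {
		{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
		{ /* end of list */ },
	};
	/* Exported so depmod/udev can autoload the module on hotplug. */
	MODULE_DEVICE_TABLE(ap, example_ap_ids);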
@@ -242,9 +243,6 @@ static int convert_response(struct zcrypt_device *zdev,
return convert_type80(zdev, reply,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
PRINTK("Unrecognized Message Header: %08x%08x\n",
*(unsigned int *) reply->message,
*(unsigned int *) (reply->message+4));
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}


@@ -92,10 +92,6 @@ static inline int convert_error(struct zcrypt_device *zdev,
{
struct error_hdr *ehdr = reply->message;
PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n",
ehdr->type, *(unsigned int *) reply->message,
*(unsigned int *) (reply->message + 4));
switch (ehdr->reply_code) {
case REP82_ERROR_OPERAND_INVALID:
case REP82_ERROR_OPERAND_SIZE:
@@ -123,8 +119,6 @@ static inline int convert_error(struct zcrypt_device *zdev,
zdev->online = 0;
return -EAGAIN;
default:
PRINTKW("unknown type %02x reply code = %d\n",
ehdr->type, ehdr->reply_code);
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}


@@ -226,9 +226,6 @@ static int convert_response(struct zcrypt_device *zdev,
return convert_type84(zdev, reply,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
PRINTK("Unrecognized Message Header: %08x%08x\n",
*(unsigned int *) reply->message,
*(unsigned int *) (reply->message+4));
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}


@@ -361,26 +361,18 @@ static int convert_type86(struct zcrypt_device *zdev,
service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
if (unlikely(service_rc != 0)) {
service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
if (service_rc == 8 && service_rs == 66) {
PDEBUG("Bad block format on PCICC\n");
if (service_rc == 8 && service_rs == 66)
return -EINVAL;
}
if (service_rc == 8 && service_rs == 65) {
PDEBUG("Probably an even modulus on PCICC\n");
if (service_rc == 8 && service_rs == 65)
return -EINVAL;
}
if (service_rc == 8 && service_rs == 770) {
PDEBUG("Invalid key length on PCICC\n");
zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
return -EAGAIN;
}
if (service_rc == 8 && service_rs == 783) {
PDEBUG("Extended bitlengths not enabled on PCICC\n");
zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
return -EAGAIN;
}
PRINTK("Unknown service rc/rs (PCICC): %d/%d\n",
service_rc, service_rs);
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
@@ -434,9 +426,6 @@ static int convert_response(struct zcrypt_device *zdev,
outputdata, outputdatalength);
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
PRINTK("Unrecognized Message Header: %08x%08x\n",
*(unsigned int *) reply->message,
*(unsigned int *) (reply->message+4));
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}


@@ -72,6 +72,7 @@ struct response_type {
static struct ap_device_id zcrypt_pcixcc_ids[] = {
{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
{ /* end of list */ },
};
@@ -289,38 +290,19 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
ap_msg->length = sizeof(struct type6_hdr) +
CEIL4(xcRB->request_control_blk_length) +
xcRB->request_data_length;
if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) {
PRINTK("Combined message is too large (%ld/%d/%d).\n",
sizeof(struct type6_hdr),
xcRB->request_control_blk_length,
xcRB->request_data_length);
if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
return -EFAULT;
}
if (CEIL4(xcRB->reply_control_blk_length) >
PCIXCC_MAX_XCRB_REPLY_SIZE) {
PDEBUG("Reply CPRB length is too large (%d).\n",
xcRB->request_control_blk_length);
if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
return -EFAULT;
}
if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) {
PDEBUG("Reply data block length is too large (%d).\n",
xcRB->reply_data_length);
if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
return -EFAULT;
}
replylen = CEIL4(xcRB->reply_control_blk_length) +
CEIL4(xcRB->reply_data_length) +
sizeof(struct type86_fmt2_msg);
if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE"
" (%d/%d/%d).\n",
sizeof(struct type86_fmt2_msg),
xcRB->reply_control_blk_length,
xcRB->reply_data_length);
xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
(sizeof(struct type86_fmt2_msg) +
CEIL4(xcRB->reply_data_length));
PDEBUG("Capping Reply CPRB length at %d\n",
xcRB->reply_control_blk_length);
}
/* prepare type6 header */
@@ -339,11 +321,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
xcRB->request_control_blk_length))
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
xcRB->request_control_blk_length) {
PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len,
xcRB->request_control_blk_length);
xcRB->request_control_blk_length)
return -EFAULT;
}
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
@@ -471,29 +450,18 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
service_rc = msg->cprbx.ccp_rtcode;
if (unlikely(service_rc != 0)) {
service_rs = msg->cprbx.ccp_rscode;
if (service_rc == 8 && service_rs == 66) {
PDEBUG("Bad block format on PCIXCC/CEX2C\n");
if (service_rc == 8 && service_rs == 66)
return -EINVAL;
}
if (service_rc == 8 && service_rs == 65) {
PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n");
if (service_rc == 8 && service_rs == 65)
return -EINVAL;
}
if (service_rc == 8 && service_rs == 770) {
PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
if (service_rc == 8 && service_rs == 770)
return -EINVAL;
}
if (service_rc == 8 && service_rs == 783) {
PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
return -EAGAIN;
}
if (service_rc == 12 && service_rs == 769) {
PDEBUG("Invalid key on PCIXCC/CEX2C\n");
if (service_rc == 12 && service_rs == 769)
return -EINVAL;
}
PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
service_rc, service_rs);
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
@@ -569,11 +537,8 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
} __attribute__((packed)) *msg = reply->message;
char *data = reply->message;
if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) {
PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
rc, rs);
if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
return -EINVAL;
}
memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
return msg->fmt2.count2;
}
@@ -598,9 +563,6 @@ static int convert_response_ica(struct zcrypt_device *zdev,
outputdata, outputdatalength);
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
PRINTK("Unrecognized Message Header: %08x%08x\n",
*(unsigned int *) reply->message,
*(unsigned int *) (reply->message+4));
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
@@ -627,9 +589,6 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
return convert_type86_xcrb(zdev, reply, xcRB);
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
PRINTK("Unrecognized Message Header: %08x%08x\n",
*(unsigned int *) reply->message,
*(unsigned int *) (reply->message+4));
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
@@ -653,9 +612,6 @@ static int convert_response_rng(struct zcrypt_device *zdev,
return convert_type86_rng(zdev, reply, data);
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
PRINTK("Unrecognized Message Header: %08x%08x\n",
*(unsigned int *) reply->message,
*(unsigned int *) (reply->message+4));
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
@@ -700,10 +656,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
memcpy(msg->message, reply->message, length);
break;
default:
PRINTK("Invalid internal response type: %i\n",
resp_type->type);
memcpy(msg->message, &error_reply,
sizeof error_reply);
memcpy(msg->message, &error_reply, sizeof error_reply);
}
} else
memcpy(msg->message, reply->message, sizeof error_reply);


@@ -703,7 +703,8 @@ claw_irq_handler(struct ccw_device *cdev,
if (!cdev->dev.driver_data) {
printk(KERN_WARNING "claw: unsolicited interrupt for device:"
"%s received c-%02x d-%02x\n",
cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat);
cdev->dev.bus_id, irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
#ifdef FUNCTRACE
printk(KERN_INFO "claw: %s() "
"exit on line %d\n",__func__,__LINE__);
@@ -732,22 +733,23 @@ claw_irq_handler(struct ccw_device *cdev,
#ifdef IOTRACE
printk(KERN_INFO "%s: interrupt for device: %04x "
"received c-%02x d-%02x state-%02x\n",
dev->name, p_ch->devno, irb->scsw.cstat,
irb->scsw.dstat, p_ch->claw_state);
dev->name, p_ch->devno, irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat, p_ch->claw_state);
#endif
/* Copy interruption response block. */
memcpy(p_ch->irb, irb, sizeof(struct irb));
/* Check for good subchannel return code, otherwise error message */
if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) {
if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
printk(KERN_INFO "%s: subchannel check for device: %04x -"
" Sch Stat %02x Dev Stat %02x CPA - %04x\n",
dev->name, p_ch->devno,
irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa);
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
irb->scsw.cmd.cpa);
#ifdef IOTRACE
dumpit((char *)irb,sizeof(struct irb));
dumpit((char *)(unsigned long)irb->scsw.cpa,
dumpit((char *)(unsigned long)irb->scsw.cmd.cpa,
sizeof(struct ccw1));
#endif
#ifdef FUNCTRACE
@@ -759,22 +761,24 @@ claw_irq_handler(struct ccw_device *cdev,
}
/* Check the reason-code of a unit check */
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
ccw_check_unit_check(p_ch, irb->ecw[0]);
}
/* State machine to bring the connection up, down and to restart */
p_ch->last_dstat = irb->scsw.dstat;
p_ch->last_dstat = irb->scsw.cmd.dstat;
switch (p_ch->claw_state) {
case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
#ifdef DEBUGMSG
printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name);
#endif
if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
(p_ch->irb->scsw.stctl ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
if (!((p_ch->irb->scsw.cmd.stctl &
SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.cmd.stctl ==
SCSW_STCTL_STATUS_PEND) ||
(p_ch->irb->scsw.cmd.stctl ==
(SCSW_STCTL_ALERT_STATUS |
SCSW_STCTL_STATUS_PEND)))) {
#ifdef FUNCTRACE
printk(KERN_INFO "%s:%s Exit on line %d\n",
dev->name,__func__,__LINE__);
@@ -798,10 +802,13 @@ claw_irq_handler(struct ccw_device *cdev,
printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n",
dev->name);
#endif
if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
(p_ch->irb->scsw.stctl ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
if (!((p_ch->irb->scsw.cmd.stctl &
SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.cmd.stctl ==
SCSW_STCTL_STATUS_PEND) ||
(p_ch->irb->scsw.cmd.stctl ==
(SCSW_STCTL_ALERT_STATUS |
SCSW_STCTL_STATUS_PEND)))) {
#ifdef FUNCTRACE
printk(KERN_INFO "%s:%s Exit on line %d\n",
dev->name,__func__,__LINE__);
@@ -828,8 +835,8 @@ claw_irq_handler(struct ccw_device *cdev,
"interrupt for device:"
"%s received c-%02x d-%02x\n",
cdev->dev.bus_id,
irb->scsw.cstat,
irb->scsw.dstat);
irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
return;
}
#ifdef DEBUGMSG
@@ -844,7 +851,7 @@ claw_irq_handler(struct ccw_device *cdev,
return;
case CLAW_START_READ:
CLAW_DBF_TEXT(4,trace,"ReadIRQ");
if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
clear_bit(0, (void *)&p_ch->IO_active);
if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
(p_ch->irb->ecw[0] & 0x40) == 0x40 ||
@@ -863,8 +870,8 @@ claw_irq_handler(struct ccw_device *cdev,
CLAW_DBF_TEXT(4,trace,"notrdy");
return;
}
if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) &&
(p_ch->irb->scsw.dstat==0)) {
if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
(p_ch->irb->scsw.cmd.dstat == 0)) {
if (test_and_set_bit(CLAW_BH_ACTIVE,
(void *)&p_ch->flag_a) == 0) {
tasklet_schedule(&p_ch->tasklet);
@@ -879,10 +886,13 @@ claw_irq_handler(struct ccw_device *cdev,
CLAW_DBF_TEXT(4,trace,"PCI_read");
return;
}
if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
(p_ch->irb->scsw.stctl ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
if (!((p_ch->irb->scsw.cmd.stctl &
SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.cmd.stctl ==
SCSW_STCTL_STATUS_PEND) ||
(p_ch->irb->scsw.cmd.stctl ==
(SCSW_STCTL_ALERT_STATUS |
SCSW_STCTL_STATUS_PEND)))) {
#ifdef FUNCTRACE
printk(KERN_INFO "%s:%s Exit on line %d\n",
dev->name,__func__,__LINE__);
@@ -911,7 +921,7 @@ claw_irq_handler(struct ccw_device *cdev,
CLAW_DBF_TEXT(4,trace,"RdIRQXit");
return;
case CLAW_START_WRITE:
if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
printk(KERN_INFO "%s: Unit Check Occured in "
"write channel\n",dev->name);
clear_bit(0, (void *)&p_ch->IO_active);
@@ -934,16 +944,19 @@ claw_irq_handler(struct ccw_device *cdev,
CLAW_DBF_TEXT(4,trace,"rstrtwrt");
return;
}
if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) {
if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
clear_bit(0, (void *)&p_ch->IO_active);
printk(KERN_INFO "%s: Unit Exception "
"Occured in write channel\n",
dev->name);
}
if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
(p_ch->irb->scsw.stctl ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
if (!((p_ch->irb->scsw.cmd.stctl &
SCSW_STCTL_SEC_STATUS) ||
(p_ch->irb->scsw.cmd.stctl ==
SCSW_STCTL_STATUS_PEND) ||
(p_ch->irb->scsw.cmd.stctl ==
(SCSW_STCTL_ALERT_STATUS |
SCSW_STCTL_STATUS_PEND)))) {
#ifdef FUNCTRACE
printk(KERN_INFO "%s:%s Exit on line %d\n",
dev->name,__func__,__LINE__);
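The mechanical scsw.cstat -> scsw.cmd.cstat rewrites in this handler follow from struct scsw having become a union of command mode and transport mode layouts; the old field names now live one level down in the cmd variant. An abridged sketch of the shape (bit widths and ordering simplified; the authoritative definition is in the s390 cio headers):

	struct cmd_scsw {
		/* ... key, eswf, cc, fctl, actl, stctl ... */
		__u32 cpa;		/* channel program address */
		__u32 dstat : 8;	/* device status */
		__u32 cstat : 8;	/* subchannel status */
		__u32 count : 16;	/* residual byte count */
	};

	struct tm_scsw {
		/* ... transport mode control fields ... */
		__u32 dstat : 8;
		__u32 cstat : 8;
		__u32 fcxs : 8;		/* FCX status */
		__u32 schxs : 8;	/* extended status */
	};

	union scsw {
		struct cmd_scsw cmd;	/* command mode: irb->scsw.cmd.dstat */
		struct tm_scsw tm;	/* transport mode: irb->scsw.tm.dstat */
	} __attribute__ ((packed));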


@@ -257,9 +257,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
if (duration > ch->prof.tx_time)
ch->prof.tx_time = duration;
if (ch->irb->scsw.count != 0)
if (ch->irb->scsw.cmd.count != 0)
ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
dev->name, ch->irb->scsw.count);
dev->name, ch->irb->scsw.cmd.count);
fsm_deltimer(&ch->timer);
while ((skb = skb_dequeue(&ch->io_queue))) {
priv->stats.tx_packets++;
@@ -353,7 +353,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
struct channel *ch = arg;
struct net_device *dev = ch->netdev;
struct ctcm_priv *priv = dev->priv;
int len = ch->max_bufsize - ch->irb->scsw.count;
int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
struct sk_buff *skb = ch->trans_skb;
__u16 block_len = *((__u16 *)skb->data);
int check_len;
@@ -1234,9 +1234,9 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
if (duration > ch->prof.tx_time)
ch->prof.tx_time = duration;
if (ch->irb->scsw.count != 0)
if (ch->irb->scsw.cmd.count != 0)
ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
dev->name, ch->irb->scsw.count);
dev->name, ch->irb->scsw.cmd.count);
fsm_deltimer(&ch->timer);
while ((skb = skb_dequeue(&ch->io_queue))) {
priv->stats.tx_packets++;
@@ -1394,7 +1394,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
struct sk_buff *skb = ch->trans_skb;
struct sk_buff *new_skb;
unsigned long saveflags = 0; /* avoids compiler warning */
int len = ch->max_bufsize - ch->irb->scsw.count;
int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
if (do_debug_data) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n",
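The len computations in chx_rx() and ctcmpc_chx_rx() rely on the residual-count semantics of the subchannel status word: scsw.cmd.count reports how many of the requested bytes were not transferred, so the received length is the buffer size minus that residue (a 32768-byte read ending with count == 28672 received 4096 bytes). A one-line helper capturing the arithmetic, assuming the ctcm channel layout shown above:

	static inline int example_received_len(struct channel *ch)
	{
		/* requested size minus residual count = bytes received */
		return ch->max_bufsize - ch->irb->scsw.cmd.count;
	}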


@@ -1236,8 +1236,8 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
/* Check for unsolicited interrupts. */
if (cgdev == NULL) {
ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n",
cdev->dev.bus_id, irb->scsw.cstat,
irb->scsw.dstat);
cdev->dev.bus_id, irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
return;
}
@@ -1266,40 +1266,40 @@
"received c-%02x d-%02x\n",
dev->name,
ch->id,
irb->scsw.cstat,
irb->scsw.dstat);
irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
/* Copy interruption response block. */
memcpy(ch->irb, irb, sizeof(struct irb));
/* Check for good subchannel return code, otherwise error message */
if (irb->scsw.cstat) {
if (irb->scsw.cmd.cstat) {
fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n",
dev->name, ch->id, irb->scsw.cstat,
irb->scsw.dstat);
dev->name, ch->id, irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
return;
}
/* Check the reason-code of a unit check */
if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
ccw_unit_check(ch, irb->ecw[0]);
return;
}
if (irb->scsw.dstat & DEV_STAT_BUSY) {
if (irb->scsw.dstat & DEV_STAT_ATTENTION)
if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
else
fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
return;
}
if (irb->scsw.dstat & DEV_STAT_ATTENTION) {
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
return;
}
if ((irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
(irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
(irb->scsw.stctl ==
if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
(irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
(irb->scsw.cmd.stctl ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
else


@@ -36,7 +36,6 @@ const char *cu3088_type[] = {
"CTC/A",
"ESCON channel",
"FICON channel",
"P390 LCS card",
"OSA LCS card",
"CLAW channel device",
"unknown channel type",
@@ -49,7 +48,6 @@ static struct ccw_device_id cu3088_ids[] = {
{ CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
{ CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
{ CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
{ CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
{ CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
{ CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
{ /* end of list */ }
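With the P390 entry gone, cu3088 continues to match devices by control-unit type/model through the remaining entries, and driver_info carries the channel_type enum back to the probe path. A hedged sketch of the same pattern for a hypothetical driver that matches only ESCON channels:

	#include <linux/module.h>
	#include <asm/ccwdev.h>	/* struct ccw_device_id, CCW_DEVICE */

	static struct ccw_device_id example_ids[] = {
		{ CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
		{ /* end of list */ }
	};
	/* Lets udev autoload the module when a matching device appears. */
	MODULE_DEVICE_TABLE(ccw, example_ids);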


@@ -17,9 +17,6 @@ enum channel_types {
/* Device is a FICON channel */
channel_type_ficon,
/* Device is a P390 LCS card */
channel_type_p390,
/* Device is an OSA2 card */
channel_type_osa2,

Some files were not shown because too many files have changed in this diff.