Merge upstream ieee80211.h with us (us == branch 'ieee80211' of netdev-2.6)

Jeff Garzik 2005-06-27 22:03:52 -04:00 committed by Jeff Garzik
commit 716b43303d
318 changed files with 18385 additions and 13983 deletions


@ -0,0 +1,176 @@
Block io priorities
===================
Intro
-----
With the introduction of cfq v3 (aka cfq-ts or time sliced cfq), basic io
priorities are supported for reads on files. This enables users to io nice
processes or process groups, similar to what has been possible with cpu
scheduling for ages. This document mainly details the current possibilities
with cfq; other io schedulers do not support io priorities so far.

Scheduling classes
------------------
CFQ implements three generic scheduling classes that determine how io is
served for a process.
IOPRIO_CLASS_RT: This is the realtime io class. This scheduling class is given
higher priority than any other in the system; processes from this class are
given first access to the disk every time. Thus it needs to be used with some
care: one io RT process can starve the entire system. Within the RT class,
there are 8 levels of class data that determine exactly how much disk time the
process gets on each service. In the future this might change
to be more directly mappable to performance, by passing in a wanted data
rate instead.
IOPRIO_CLASS_BE: This is the best-effort scheduling class, which is the default
for any process that hasn't set a specific io priority. The class data
determines how much io bandwidth the process will get; it's directly mappable
to the cpu nice levels, just more coarsely implemented. 0 is the highest
BE prio level, 7 is the lowest. The mapping between cpu nice level and io
nice level is determined as: io_nice = (cpu_nice + 20) / 5.
IOPRIO_CLASS_IDLE: This is the idle scheduling class; processes running at this
level only get io time when no one else needs the disk. The idle class has no
class data, since it doesn't really apply here.
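
Internally the class and the class data are packed into a single ioprio value,
with the class stored above IOPRIO_CLASS_SHIFT and the level in the low bits
(the sample ionice tool below does exactly this). The following is a small
illustrative sketch, not kernel code: it shows that encoding and the
cpu-nice-to-io-nice mapping quoted above, with the helper macro defined
locally for the example.

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT      13
/* local helper for the example; mirrors how the sample tool builds the value */
#define IOPRIO_VALUE(class, data)       (((class) << IOPRIO_CLASS_SHIFT) | (data))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

int main(void)
{
        int cpu_nice;

        /* cpu nice -20..19 maps onto io nice 0..7 */
        for (cpu_nice = -20; cpu_nice <= 19; cpu_nice++)
                printf("cpu nice %3d -> io nice %d\n", cpu_nice,
                       (cpu_nice + 20) / 5);

        /* best-effort class with the default level of 4 */
        printf("ioprio value: 0x%x\n", IOPRIO_VALUE(IOPRIO_CLASS_BE, 4));
        return 0;
}
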
Tools
-----
See below for a sample ionice tool. Usage:
# ionice -c<class> -n<level> -p<pid>
If pid isn't given, the current process is assumed. IO priority settings
are inherited on fork, so you can use ionice to start the process at a given
level:
# ionice -c2 -n0 /bin/ls
will run ls in the best-effort scheduling class at the highest priority.
For a running process, you can give the pid instead:
# ionice -c1 -n2 -p100
will change pid 100 to run in the realtime scheduling class, at priority 2.
---> snip ionice.c tool <---
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <getopt.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <asm/unistd.h>

extern int sys_ioprio_set(int, int, int);
extern int sys_ioprio_get(int, int);

#if defined(__i386__)
#define __NR_ioprio_set         289
#define __NR_ioprio_get         290
#elif defined(__ppc__)
#define __NR_ioprio_set         273
#define __NR_ioprio_get         274
#elif defined(__x86_64__)
#define __NR_ioprio_set         251
#define __NR_ioprio_get         252
#elif defined(__ia64__)
#define __NR_ioprio_set         1274
#define __NR_ioprio_get         1275
#else
#error "Unsupported arch"
#endif

/* raw syscall stubs for ioprio_set()/ioprio_get() */
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio);
_syscall2(int, ioprio_get, int, which, int, who);

enum {
        IOPRIO_CLASS_NONE,
        IOPRIO_CLASS_RT,
        IOPRIO_CLASS_BE,
        IOPRIO_CLASS_IDLE,
};

enum {
        IOPRIO_WHO_PROCESS = 1,
        IOPRIO_WHO_PGRP,
        IOPRIO_WHO_USER,
};

#define IOPRIO_CLASS_SHIFT      13

const char *to_prio[] = { "none", "realtime", "best-effort", "idle", };

int main(int argc, char *argv[])
{
        int ioprio = 4, set = 0, ioprio_class = IOPRIO_CLASS_BE;
        int c, pid = 0;

        while ((c = getopt(argc, argv, "+n:c:p:")) != EOF) {
                switch (c) {
                case 'n':
                        ioprio = strtol(optarg, NULL, 10);
                        set = 1;
                        break;
                case 'c':
                        ioprio_class = strtol(optarg, NULL, 10);
                        set = 1;
                        break;
                case 'p':
                        pid = strtol(optarg, NULL, 10);
                        break;
                }
        }

        switch (ioprio_class) {
                case IOPRIO_CLASS_NONE:
                        ioprio_class = IOPRIO_CLASS_BE;
                        break;
                case IOPRIO_CLASS_RT:
                case IOPRIO_CLASS_BE:
                        break;
                case IOPRIO_CLASS_IDLE:
                        /* the idle class takes no class data; use 7 */
                        ioprio = 7;
                        break;
                default:
                        printf("bad prio class %d\n", ioprio_class);
                        return 1;
        }

        if (!set) {
                /* no new priority requested: query and report the current one */
                if (!pid && argv[optind])
                        pid = strtol(argv[optind], NULL, 10);

                ioprio = ioprio_get(IOPRIO_WHO_PROCESS, pid);

                printf("pid=%d, %d\n", pid, ioprio);

                if (ioprio == -1)
                        perror("ioprio_get");
                else {
                        ioprio_class = ioprio >> IOPRIO_CLASS_SHIFT;
                        ioprio = ioprio & 0xff;
                        printf("%s: prio %d\n", to_prio[ioprio_class], ioprio);
                }
        } else {
                /* class goes in the top bits, priority data in the low bits */
                if (ioprio_set(IOPRIO_WHO_PROCESS, pid, ioprio | ioprio_class << IOPRIO_CLASS_SHIFT) == -1) {
                        perror("ioprio_set");
                        return 1;
                }

                if (argv[optind])
                        execvp(argv[optind], &argv[optind]);
        }

        return 0;
}
---> snip ionice.c tool <---
March 11 2005, Jens Axboe <axboe@suse.de>
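
Note: on systems where the _syscall2/_syscall3 macros are not available, the
same system calls can be issued through the C library's syscall(2) wrapper.
A minimal sketch (not part of the original tool), assuming the
per-architecture __NR_ioprio_* numbers defined above or the ones provided by
<sys/syscall.h>:

#include <sys/syscall.h>
#include <unistd.h>

/* thin wrappers replacing the legacy _syscall macros */
static inline int ioprio_set(int which, int who, int ioprio)
{
        return syscall(__NR_ioprio_set, which, who, ioprio);
}

static inline int ioprio_get(int which, int who)
{
        return syscall(__NR_ioprio_get, which, who);
}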


@ -17,6 +17,7 @@ This driver is known to work with the following cards:
* SA P600
* SA P800
* SA E400
* SA E300
If nodes are not already created in the /dev/cciss directory, run as root:


@ -1115,7 +1115,7 @@ running once the system is up.
See Documentation/ramdisk.txt.
psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
probe for (bare|imps|exps).
probe for (bare|imps|exps|lifebook|any).
psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports
per second.
psmouse.resetafter=


@ -1149,7 +1149,7 @@ S: Maintained
INFINIBAND SUBSYSTEM
P: Roland Dreier
M: roland@topspin.com
M: rolandd@cisco.com
P: Sean Hefty
M: mshefty@ichips.intel.com
P: Hal Rosenstock


@ -32,6 +32,7 @@
#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/mach/time.h>
extern const char *processor_modes[];
extern void setup_mm_for_reboot(char mode);
@ -85,8 +86,10 @@ EXPORT_SYMBOL(pm_power_off);
void default_idle(void)
{
local_irq_disable();
if (!need_resched() && !hlt_counter)
if (!need_resched() && !hlt_counter) {
timer_dyn_reprogram();
arch_idle();
}
local_irq_enable();
}


@ -424,15 +424,19 @@ static int timer_dyn_tick_disable(void)
return ret;
}
/*
* Reprogram the system timer for at least the calculated time interval.
* This function should be called from the idle thread with IRQs disabled,
* immediately before sleeping.
*/
void timer_dyn_reprogram(void)
{
struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
unsigned long flags;
write_seqlock_irqsave(&xtime_lock, flags);
write_seqlock(&xtime_lock);
if (dyn_tick->state & DYN_TICK_ENABLED)
dyn_tick->reprogram(next_timer_interrupt() - jiffies);
write_sequnlock_irqrestore(&xtime_lock, flags);
write_sequnlock(&xtime_lock);
}
static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)


@ -0,0 +1 @@
zreladdr-y := 0xf0008000


@ -288,8 +288,8 @@ static void usb_release(struct device *dev)
static struct resource udc_resources[] = {
/* order is significant! */
{ /* registers */
.start = IO_ADDRESS(UDC_BASE),
.end = IO_ADDRESS(UDC_BASE + 0xff),
.start = UDC_BASE,
.end = UDC_BASE + 0xff,
.flags = IORESOURCE_MEM,
}, { /* general IRQ */
.start = IH2_BASE + 20,
@ -355,8 +355,8 @@ static struct platform_device ohci_device = {
static struct resource otg_resources[] = {
/* order is significant! */
{
.start = IO_ADDRESS(OTG_BASE),
.end = IO_ADDRESS(OTG_BASE + 0xff),
.start = OTG_BASE,
.end = OTG_BASE + 0xff,
.flags = IORESOURCE_MEM,
}, {
.start = IH2_BASE + 8,


@ -522,6 +522,69 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
}
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
unsigned long pg, pgend;
/*
* Convert start_pfn/end_pfn to a struct page pointer.
*/
start_pg = pfn_to_page(start_pfn);
end_pg = pfn_to_page(end_pfn);
/*
* Convert to physical addresses, and
* round start upwards and end downwards.
*/
pg = PAGE_ALIGN(__pa(start_pg));
pgend = __pa(end_pg) & PAGE_MASK;
/*
* If there are free pages between these,
* free the section of the memmap array.
*/
if (pg < pgend)
free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
/*
* The mem_map array can get very big. Free the unused area of the memory map.
*/
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
unsigned long bank_start, prev_bank_end = 0;
unsigned int i;
/*
* [FIXME] This relies on each bank being in address order. This
* may not be the case, especially if the user has provided the
* information on the command line.
*/
for (i = 0; i < mi->nr_banks; i++) {
if (mi->bank[i].size == 0 || mi->bank[i].node != node)
continue;
bank_start = mi->bank[i].start >> PAGE_SHIFT;
if (bank_start < prev_bank_end) {
printk(KERN_ERR "MEM: unordered memory banks. "
"Not freeing memmap.\n");
break;
}
/*
* If we had a previous bank, and there is a space
* between the current bank and the previous, free it.
*/
if (prev_bank_end && prev_bank_end != bank_start)
free_memmap(node, prev_bank_end, bank_start);
prev_bank_end = (mi->bank[i].start +
mi->bank[i].size) >> PAGE_SHIFT;
}
}
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
@ -540,16 +603,12 @@ void __init mem_init(void)
max_mapnr = virt_to_page(high_memory) - mem_map;
#endif
/*
* We may have non-contiguous memory.
*/
if (meminfo.nr_banks != 1)
create_memmap_holes(&meminfo);
/* this will put all unused low memory onto the freelists */
for_each_online_node(node) {
pg_data_t *pgdat = NODE_DATA(node);
free_unused_memmap_node(node, &meminfo);
if (pgdat->node_spanned_pages != 0)
totalram_pages += free_all_bootmem_node(pgdat);
}


@ -169,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
/*
* Copy over the kernel and IO PGD entries
*/
init_pgd = pgd_offset_k(0);
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
if (!vectors_high()) {
/*
@ -198,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
spin_unlock(&mm->page_table_lock);
}
/*
* Copy over the kernel and IO PGD entries
*/
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
return new_pgd;
no_pte:
@ -698,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
for (i = 0; i < nr; i++)
create_mapping(io_desc + i);
}
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
unsigned long pg, pgend;
/*
* Convert start_pfn/end_pfn to a struct page pointer.
*/
start_pg = pfn_to_page(start_pfn);
end_pg = pfn_to_page(end_pfn);
/*
* Convert to physical addresses, and
* round start upwards and end downwards.
*/
pg = PAGE_ALIGN(__pa(start_pg));
pgend = __pa(end_pg) & PAGE_MASK;
/*
* If there are free pages between these,
* free the section of the memmap array.
*/
if (pg < pgend)
free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
unsigned long bank_start, prev_bank_end = 0;
unsigned int i;
/*
* [FIXME] This relies on each bank being in address order. This
* may not be the case, especially if the user has provided the
* information on the command line.
*/
for (i = 0; i < mi->nr_banks; i++) {
if (mi->bank[i].size == 0 || mi->bank[i].node != node)
continue;
bank_start = mi->bank[i].start >> PAGE_SHIFT;
if (bank_start < prev_bank_end) {
printk(KERN_ERR "MEM: unordered memory banks. "
"Not freeing memmap.\n");
break;
}
/*
* If we had a previous bank, and there is a space
* between the current bank and the previous, free it.
*/
if (prev_bank_end && prev_bank_end != bank_start)
free_memmap(node, prev_bank_end, bank_start);
prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
mi->bank[i].size) >> PAGE_SHIFT;
}
}
/*
* The mem_map array can get very big. Free
* the unused area of the memory map.
*/
void __init create_memmap_holes(struct meminfo *mi)
{
int node;
for_each_online_node(node)
free_unused_memmap_node(node, mi);
}


@ -6,7 +6,7 @@
# To add an entry into this database, please see Documentation/arm/README,
# or contact rmk@arm.linux.org.uk
#
# Last update: Thu Mar 24 14:34:50 2005
# Last update: Thu Jun 23 20:19:33 2005
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@ -243,7 +243,7 @@ yoho ARCH_YOHO YOHO 231
jasper ARCH_JASPER JASPER 232
dsc25 ARCH_DSC25 DSC25 233
omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234
ramses ARCH_RAMSES RAMSES 235
mnci ARCH_RAMSES RAMSES 235
s28x ARCH_S28X S28X 236
mport3 ARCH_MPORT3 MPORT3 237
pxa_eagle250 ARCH_PXA_EAGLE250 PXA_EAGLE250 238
@ -323,7 +323,7 @@ nimbra29x ARCH_NIMBRA29X NIMBRA29X 311
nimbra210 ARCH_NIMBRA210 NIMBRA210 312
hhp_d95xx ARCH_HHP_D95XX HHP_D95XX 313
labarm ARCH_LABARM LABARM 314
m825xx ARCH_M825XX M825XX 315
comcerto ARCH_M825XX M825XX 315
m7100 SA1100_M7100 M7100 316
nipc2 ARCH_NIPC2 NIPC2 317
fu7202 ARCH_FU7202 FU7202 318
@ -724,3 +724,66 @@ lpc22xx MACH_LPC22XX LPC22XX 715
omap_comet3 MACH_COMET3 COMET3 716
omap_comet4 MACH_COMET4 COMET4 717
csb625 MACH_CSB625 CSB625 718
fortunet2 MACH_FORTUNET2 FORTUNET2 719
s5h2200 MACH_S5H2200 S5H2200 720
optorm920 MACH_OPTORM920 OPTORM920 721
adsbitsyxb MACH_ADSBITSYXB ADSBITSYXB 722
adssphere MACH_ADSSPHERE ADSSPHERE 723
adsportal MACH_ADSPORTAL ADSPORTAL 724
ln2410sbc MACH_LN2410SBC LN2410SBC 725
cb3rufc MACH_CB3RUFC CB3RUFC 726
mp2usb MACH_MP2USB MP2USB 727
ntnp425c MACH_NTNP425C NTNP425C 728
colibri MACH_COLIBRI COLIBRI 729
pcm7220 MACH_PCM7220 PCM7220 730
gateway7001 MACH_GATEWAY7001 GATEWAY7001 731
pcm027 MACH_PCM027 PCM027 732
cmpxa MACH_CMPXA CMPXA 733
anubis MACH_ANUBIS ANUBIS 734
ite8152 MACH_ITE8152 ITE8152 735
lpc3xxx MACH_LPC3XXX LPC3XXX 736
puppeteer MACH_PUPPETEER PUPPETEER 737
vt001 MACH_MACH_VADATECH MACH_VADATECH 738
e570 MACH_E570 E570 739
x50 MACH_X50 X50 740
recon MACH_RECON RECON 741
xboardgp8 MACH_XBOARDGP8 XBOARDGP8 742
fpic2 MACH_FPIC2 FPIC2 743
akita MACH_AKITA AKITA 744
a81 MACH_A81 A81 745
svm_sc25x MACH_SVM_SC25X SVM_SC25X 746
vt020 MACH_VADATECH020 VADATECH020 747
tli MACH_TLI TLI 748
edb9315lc MACH_EDB9315LC EDB9315LC 749
passec MACH_PASSEC PASSEC 750
ds_tiger MACH_DS_TIGER DS_TIGER 751
e310 MACH_E310 E310 752
e330 MACH_E330 E330 753
rt3000 MACH_RT3000 RT3000 754
nokia770 MACH_NOKIA770 NOKIA770 755
pnx0106 MACH_PNX0106 PNX0106 756
hx21xx MACH_HX21XX HX21XX 757
faraday MACH_FARADAY FARADAY 758
sbc9312 MACH_SBC9312 SBC9312 759
batman MACH_BATMAN BATMAN 760
jpd201 MACH_JPD201 JPD201 761
mipsa MACH_MIPSA MIPSA 762
kacom MACH_KACOM KACOM 763
swarcocpu MACH_SWARCOCPU SWARCOCPU 764
swarcodsl MACH_SWARCODSL SWARCODSL 765
blueangel MACH_BLUEANGEL BLUEANGEL 766
hairygrama MACH_HAIRYGRAMA HAIRYGRAMA 767
banff MACH_BANFF BANFF 768
carmeva MACH_CARMEVA CARMEVA 769
sam255 MACH_SAM255 SAM255 770
ppm10 MACH_PPM10 PPM10 771
edb9315a MACH_EDB9315A EDB9315A 772
sunset MACH_SUNSET SUNSET 773
stargate2 MACH_STARGATE2 STARGATE2 774
intelmote2 MACH_INTELMOTE2 INTELMOTE2 775
trizeps4 MACH_TRIZEPS4 TRIZEPS4 776
mainstone2 MACH_MAINSTONE2 MAINSTONE2 777
ez_ixp42x MACH_EZ_IXP42X EZ_IXP42X 778
tapwave_zodiac MACH_TAPWAVE_ZODIAC TAPWAVE_ZODIAC 779
universalmeter MACH_UNIVERSALMETER UNIVERSALMETER 780
hicoarm9 MACH_HICOARM9 HICOARM9 781


@ -127,48 +127,23 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->eip = (unsigned long)&p->ainsn.insn;
}
struct task_struct *arch_get_kprobe_task(void *ptr)
{
return ((struct thread_info *) (((unsigned long) ptr) &
(~(THREAD_SIZE -1))))->task;
}
void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
{
unsigned long *sara = (unsigned long *)&regs->esp;
struct kretprobe_instance *ri;
static void *orig_ret_addr;
struct kretprobe_instance *ri;
/*
* Save the return address when the return probe hits
* the first time, and use it to populate the (krprobe
* instance)->ret_addr for subsequent return probes at
* the same addrress since stack address would have
* the kretprobe_trampoline by then.
*/
if (((void*) *sara) != kretprobe_trampoline)
orig_ret_addr = (void*) *sara;
if ((ri = get_free_rp_inst(rp)) != NULL) {
ri->rp = rp;
ri->task = current;
ri->ret_addr = (kprobe_opcode_t *) *sara;
if ((ri = get_free_rp_inst(rp)) != NULL) {
ri->rp = rp;
ri->stack_addr = sara;
ri->ret_addr = orig_ret_addr;
add_rp_inst(ri);
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
} else {
rp->nmissed++;
}
}
void arch_kprobe_flush_task(struct task_struct *tk)
{
struct kretprobe_instance *ri;
while ((ri = get_rp_inst_tsk(tk)) != NULL) {
*((unsigned long *)(ri->stack_addr)) =
(unsigned long) ri->ret_addr;
recycle_rp_inst(ri);
}
add_rp_inst(ri);
} else {
rp->nmissed++;
}
}
/*
@ -286,36 +261,59 @@ no_kprobe:
*/
int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct task_struct *tsk;
struct kretprobe_instance *ri;
struct hlist_head *head;
struct hlist_node *node;
unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
unsigned long orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
tsk = arch_get_kprobe_task(sara);
head = kretprobe_inst_table_head(tsk);
head = kretprobe_inst_table_head(current);
hlist_for_each_entry(ri, node, head, hlist) {
if (ri->stack_addr == sara && ri->rp) {
if (ri->rp->handler)
ri->rp->handler(ri, regs);
}
}
return 0;
}
/*
* It is possible to have multiple instances associated with a given
* task either because an multiple functions in the call path
* have a return probe installed on them, and/or more then one return
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
struct kretprobe_instance *ri;
/* RA already popped */
unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
while ((ri = get_rp_inst(sara))) {
regs->eip = (unsigned long)ri->ret_addr;
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
regs->eflags &= ~TF_MASK;
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
regs->eip = orig_ret_address;
unlock_kprobes();
preempt_enable_no_resched();
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we have handled unlocking
* and re-enabling preemption.
*/
return 1;
}
/*
@ -403,8 +401,7 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
current_kprobe->post_handler(current_kprobe, regs, 0);
}
if (current_kprobe->post_handler != trampoline_post_handler)
resume_execution(current_kprobe, regs);
resume_execution(current_kprobe, regs);
regs->eflags |= kprobe_saved_eflags;
/*Restore back the original saved kprobes variables and continue. */
@ -534,3 +531,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
}
return 0;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init(void)
{
return register_kprobe(&trampoline_p);
}


@ -616,6 +616,33 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
/*
* This function selects if the context switch from prev to next
* has to tweak the TSC disable bit in the cr4.
*/
static inline void disable_tsc(struct task_struct *prev_p,
struct task_struct *next_p)
{
struct thread_info *prev, *next;
/*
* gcc should eliminate the ->thread_info dereference if
* has_secure_computing returns 0 at compile time (SECCOMP=n).
*/
prev = prev_p->thread_info;
next = next_p->thread_info;
if (has_secure_computing(prev) || has_secure_computing(next)) {
/* slow path here */
if (has_secure_computing(prev) &&
!has_secure_computing(next)) {
write_cr4(read_cr4() & ~X86_CR4_TSD);
} else if (!has_secure_computing(prev) &&
has_secure_computing(next))
write_cr4(read_cr4() | X86_CR4_TSD);
}
}
/*
* switch_to(x,yn) should switch tasks from x to y.
*
@ -695,6 +722,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
handle_io_bitmap(next, tss);
disable_tsc(prev_p, next_p);
return prev_p;
}


@ -289,3 +289,5 @@ ENTRY(sys_call_table)
.long sys_add_key
.long sys_request_key
.long sys_keyctl
.long sys_ioprio_set
.long sys_ioprio_get /* 290 */


@ -1577,8 +1577,8 @@ sys_call_table:
data8 sys_add_key
data8 sys_request_key
data8 sys_keyctl
data8 sys_ni_syscall
data8 sys_ni_syscall // 1275
data8 sys_ioprio_set
data8 sys_ioprio_get // 1275
data8 sys_set_zone_reclaim
data8 sys_ni_syscall
data8 sys_ni_syscall


@ -34,6 +34,7 @@
#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/sections.h>
extern void jprobe_inst_return(void);
@ -263,13 +264,33 @@ static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
}
}
/* Returns non-zero if the addr is in the Interrupt Vector Table */
static inline int in_ivt_functions(unsigned long addr)
{
return (addr >= (unsigned long)__start_ivt_text
&& addr < (unsigned long)__end_ivt_text);
}
static int valid_kprobe_addr(int template, int slot, unsigned long addr)
{
if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
addr);
printk(KERN_WARNING "Attempting to insert unaligned kprobe "
"at 0x%lx\n", addr);
return -EINVAL;
}
if (in_ivt_functions(addr)) {
printk(KERN_WARNING "Kprobes can't be inserted inside "
"IVT functions at 0x%lx\n", addr);
return -EINVAL;
}
if (slot == 1 && bundle_encoding[template][1] != L) {
printk(KERN_WARNING "Inserting kprobes on slot #1 "
"is not supported\n");
return -EINVAL;
}
return 0;
}
@ -290,6 +311,94 @@ static inline void set_current_kprobe(struct kprobe *p)
current_kprobe = p;
}
static void kretprobe_trampoline(void)
{
}
/*
* At this point the target function has been tricked into
* returning into our trampoline. Lookup the associated instance
* and then:
* - call the handler function
* - cleanup by marking the instance as unused
* - long jump back to the original return address
*/
int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
unsigned long orig_ret_address = 0;
unsigned long trampoline_address =
((struct fnptr *)kretprobe_trampoline)->ip;
head = kretprobe_inst_table_head(current);
/*
* It is possible to have multiple instances associated with a given
* task either because an multiple functions in the call path
* have a return probe installed on them, and/or more then one return
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
regs->cr_iip = orig_ret_address;
unlock_kprobes();
preempt_enable_no_resched();
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we have handled unlocking
* and re-enabling preemption.
*/
return 1;
}
void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
{
struct kretprobe_instance *ri;
if ((ri = get_free_rp_inst(rp)) != NULL) {
ri->rp = rp;
ri->task = current;
ri->ret_addr = (kprobe_opcode_t *)regs->b0;
/* Replace the return addr with trampoline addr */
regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
add_rp_inst(ri);
} else {
rp->nmissed++;
}
}
int arch_prepare_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long) p->addr;
@ -492,8 +601,8 @@ static int pre_kprobes_handler(struct die_args *args)
if (p->pre_handler && p->pre_handler(p, regs))
/*
* Our pre-handler is specifically requesting that we just
* do a return. This is handling the case where the
* pre-handler is really our special jprobe pre-handler.
* do a return. This is used for both the jprobe pre-handler
* and the kretprobe trampoline
*/
return 1;
@ -599,3 +708,14 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
*regs = jprobe_saved_regs;
return 1;
}
static struct kprobe trampoline_p = {
.pre_handler = trampoline_probe_handler
};
int __init arch_init(void)
{
trampoline_p.addr =
(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
return register_kprobe(&trampoline_p);
}


@ -27,6 +27,7 @@
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <asm/cpu.h>
#include <asm/delay.h>
@ -707,6 +708,13 @@ kernel_thread_helper (int (*fn)(void *), void *arg)
void
flush_thread (void)
{
/*
* Remove function-return probe instances associated with this task
* and put them back on the free list. Do not insert an exit probe for
* this function, it will be disabled by kprobe_flush_task if you do.
*/
kprobe_flush_task(current);
/* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);
@ -721,6 +729,14 @@ flush_thread (void)
void
exit_thread (void)
{
/*
* Remove function-return probe instances associated with this task
* and put them back on the free list. Do not insert an exit probe for
* this function, it will be disabled by kprobe_flush_task if you do.
*/
kprobe_flush_task(current);
ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
/* if needed, stop monitoring and flush state to perfmon context */


@ -8,6 +8,11 @@
#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
#include <asm-generic/vmlinux.lds.h>
#define IVT_TEXT \
VMLINUX_SYMBOL(__start_ivt_text) = .; \
*(.text.ivt) \
VMLINUX_SYMBOL(__end_ivt_text) = .;
OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
@ -39,7 +44,7 @@ SECTIONS
.text : AT(ADDR(.text) - LOAD_OFFSET)
{
*(.text.ivt)
IVT_TEXT
*(.text)
SCHED_TEXT
LOCK_TEXT


@ -457,7 +457,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs)
if (!user_mode(regs))
return 1;
if (try_to_freeze(0))
if (try_to_freeze())
goto no_signal;
if (!oldset)


@ -1449,3 +1449,5 @@ _GLOBAL(sys_call_table)
.long sys_request_key /* 270 */
.long sys_keyctl
.long sys_waitid
.long sys_ioprio_set
.long sys_ioprio_get


@ -606,9 +606,19 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page)
&& !test_bit(PG_arch_1, &page->flags)) {
if (vma->vm_mm == current->active_mm)
if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
/* On 8xx, cache control instructions (particularly
* "dcbst" from flush_dcache_icache) fault as write
* operation if there is an unpopulated TLB entry
* for the address in question. To workaround that,
* we invalidate the TLB here, thus avoiding dcbst
* misbehaviour.
*/
_tlbie(address);
#endif
__flush_dcache_icache((void *) address);
else
} else
flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
}


@ -46,7 +46,7 @@
.section .text
.align 5
#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ_PMAC)
#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
/* This gets called by via-pmu.c late during the sleep process.
* The PMU was already send the sleep command and will shut us down
@ -382,7 +382,7 @@ turn_on_mmu:
isync
rfi
#endif /* defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_CPU_FREQ) */
#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
.section .data
.balign L1_CACHE_LINE_SIZE


@ -206,7 +206,7 @@ via_calibrate_decr(void)
return 1;
}
#ifdef CONFIG_PMAC_PBOOK
#ifdef CONFIG_PM
/*
* Reset the time after a sleep.
*/
@ -238,7 +238,7 @@ time_sleep_notify(struct pmu_sleep_notifier *self, int when)
static struct pmu_sleep_notifier time_sleep_notifier __pmacdata = {
time_sleep_notify, SLEEP_LEVEL_MISC,
};
#endif /* CONFIG_PMAC_PBOOK */
#endif /* CONFIG_PM */
/*
* Query the OF and get the decr frequency.
@ -251,9 +251,9 @@ pmac_calibrate_decr(void)
struct device_node *cpu;
unsigned int freq, *fp;
#ifdef CONFIG_PMAC_PBOOK
#ifdef CONFIG_PM
pmu_register_sleep_notifier(&time_sleep_notifier);
#endif /* CONFIG_PMAC_PBOOK */
#endif /* CONFIG_PM */
/* We assume MacRISC2 machines have correct device-tree
* calibration. That's better since the VIA itself seems


@ -324,6 +324,7 @@ sandpoint_setup_arch(void)
pdata[1].irq = 0;
pdata[1].mapbase = 0;
}
}
printk(KERN_INFO "Motorola SPS Sandpoint Test Platform\n");
printk(KERN_INFO "Port by MontaVista Software, Inc. (source@mvista.com)\n");


@ -370,8 +370,9 @@ void __init openpic_init(int offset)
/* Initialize IPI interrupts */
if ( ppc_md.progress ) ppc_md.progress("openpic: ipi",0x3bb);
for (i = 0; i < OPENPIC_NUM_IPI; i++) {
/* Disabled, Priority 10..13 */
openpic_initipi(i, 10+i, OPENPIC_VEC_IPI+i+offset);
/* Disabled, increased priorities 10..13 */
openpic_initipi(i, OPENPIC_PRIORITY_IPI_BASE+i,
OPENPIC_VEC_IPI+i+offset);
/* IPIs are per-CPU */
irq_desc[OPENPIC_VEC_IPI+i+offset].status |= IRQ_PER_CPU;
irq_desc[OPENPIC_VEC_IPI+i+offset].handler = &open_pic_ipi;
@ -399,8 +400,9 @@ void __init openpic_init(int offset)
if (sense & IRQ_SENSE_MASK)
irq_desc[i+offset].status = IRQ_LEVEL;
/* Enabled, Priority 8 */
openpic_initirq(i, 8, i+offset, (sense & IRQ_POLARITY_MASK),
/* Enabled, Default priority */
openpic_initirq(i, OPENPIC_PRIORITY_DEFAULT, i+offset,
(sense & IRQ_POLARITY_MASK),
(sense & IRQ_SENSE_MASK));
/* Processor 0 */
openpic_mapirq(i, CPU_MASK_CPU0, CPU_MASK_NONE);
@ -655,6 +657,18 @@ static void __init openpic_maptimer(u_int timer, cpumask_t cpumask)
cpus_addr(phys)[0]);
}
/*
* Change the priority of an interrupt
*/
void __init
openpic_set_irq_priority(u_int irq, u_int pri)
{
check_arg_irq(irq);
openpic_safe_writefield(&ISR[irq - open_pic_irq_offset]->Vector_Priority,
OPENPIC_PRIORITY_MASK,
pri << OPENPIC_PRIORITY_SHIFT);
}
/*
* Initalize the interrupt source which will generate an NMI.
* This raises the interrupt's priority from 8 to 9.
@ -665,9 +679,7 @@ void __init
openpic_init_nmi_irq(u_int irq)
{
check_arg_irq(irq);
openpic_safe_writefield(&ISR[irq - open_pic_irq_offset]->Vector_Priority,
OPENPIC_PRIORITY_MASK,
9 << OPENPIC_PRIORITY_SHIFT);
openpic_set_irq_priority(irq, OPENPIC_PRIORITY_NMI);
}
/*


@ -36,6 +36,8 @@
#include <asm/kdebug.h>
#include <asm/sstep.h>
static DECLARE_MUTEX(kprobe_mutex);
static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_saved_msr;
static struct kprobe *kprobe_prev;
@ -54,6 +56,15 @@ int arch_prepare_kprobe(struct kprobe *p)
printk("Cannot register a kprobe on rfid or mtmsrd\n");
ret = -EINVAL;
}
/* insn must be on a special executable page on ppc64 */
if (!ret) {
up(&kprobe_mutex);
p->ainsn.insn = get_insn_slot();
down(&kprobe_mutex);
if (!p->ainsn.insn)
ret = -ENOMEM;
}
return ret;
}
@ -79,16 +90,22 @@ void arch_disarm_kprobe(struct kprobe *p)
void arch_remove_kprobe(struct kprobe *p)
{
up(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
down(&kprobe_mutex);
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = *p->ainsn.insn;
regs->msr |= MSR_SE;
/*single step inline if it a breakpoint instruction*/
if (p->opcode == BREAKPOINT_INSTRUCTION)
/* single step inline if it is a trap variant */
if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn))
regs->nip = (unsigned long)p->addr;
else
regs->nip = (unsigned long)&p->ainsn.insn;
regs->nip = (unsigned long)p->ainsn.insn;
}
static inline void save_previous_kprobe(void)
@ -105,6 +122,23 @@ static inline void restore_previous_kprobe(void)
kprobe_saved_msr = kprobe_saved_msr_prev;
}
void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
{
struct kretprobe_instance *ri;
if ((ri = get_free_rp_inst(rp)) != NULL) {
ri->rp = rp;
ri->task = current;
ri->ret_addr = (kprobe_opcode_t *)regs->link;
/* Replace the return addr with trampoline addr */
regs->link = (unsigned long)kretprobe_trampoline;
add_rp_inst(ri);
} else {
rp->nmissed++;
}
}
static inline int kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
@ -194,6 +228,78 @@ no_kprobe:
return ret;
}
/*
* Function return probe trampoline:
* - init_kprobes() establishes a probepoint here
* - When the probed function returns, this probe
* causes the handlers to fire
*/
void kretprobe_trampoline_holder(void)
{
asm volatile(".global kretprobe_trampoline\n"
"kretprobe_trampoline:\n"
"nop\n");
}
/*
* Called when the probe at kretprobe trampoline is hit
*/
int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
unsigned long orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
head = kretprobe_inst_table_head(current);
/*
* It is possible to have multiple instances associated with a given
* task either because an multiple functions in the call path
* have a return probe installed on them, and/or more then one return
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
regs->nip = orig_ret_address;
unlock_kprobes();
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we have handled unlocking
* and re-enabling preemption.
*/
return 1;
}
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "breakpoint"
@ -205,9 +311,10 @@ no_kprobe:
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
int ret;
unsigned int insn = *p->ainsn.insn;
regs->nip = (unsigned long)p->addr;
ret = emulate_step(regs, p->ainsn.insn[0]);
ret = emulate_step(regs, insn);
if (ret == 0)
regs->nip = (unsigned long)p->addr + 4;
}
@ -331,3 +438,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
return 1;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init(void)
{
return register_kprobe(&trampoline_p);
}


@ -75,6 +75,7 @@ EXPORT_SYMBOL(giveup_fpu);
EXPORT_SYMBOL(giveup_altivec);
#endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_ISERIES


@ -36,6 +36,7 @@
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@ -307,6 +308,8 @@ void show_regs(struct pt_regs * regs)
void exit_thread(void)
{
kprobe_flush_task(current);
#ifndef CONFIG_SMP
if (last_task_used_math == current)
last_task_used_math = NULL;
@ -321,6 +324,7 @@ void flush_thread(void)
{
struct thread_info *t = current_thread_info();
kprobe_flush_task(current);
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);


@ -91,6 +91,7 @@ unsigned long tb_to_xs;
unsigned tb_to_us;
unsigned long processor_freq;
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
unsigned long tb_to_ns_scale;
unsigned long tb_to_ns_shift;


@ -38,7 +38,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
@ -51,8 +51,6 @@ static struct kprobe *kprobe_prev;
static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev;
static struct pt_regs jprobe_saved_regs;
static long *jprobe_saved_rsp;
static kprobe_opcode_t *get_insn_slot(void);
static void free_insn_slot(kprobe_opcode_t *slot);
void jprobe_return_end(void);
/* copy of the kernel stack at the probe fire time */
@ -274,48 +272,23 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->rip = (unsigned long)p->ainsn.insn;
}
struct task_struct *arch_get_kprobe_task(void *ptr)
{
return ((struct thread_info *) (((unsigned long) ptr) &
(~(THREAD_SIZE -1))))->task;
}
void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
{
unsigned long *sara = (unsigned long *)regs->rsp;
struct kretprobe_instance *ri;
static void *orig_ret_addr;
struct kretprobe_instance *ri;
/*
* Save the return address when the return probe hits
* the first time, and use it to populate the (krprobe
* instance)->ret_addr for subsequent return probes at
* the same addrress since stack address would have
* the kretprobe_trampoline by then.
*/
if (((void*) *sara) != kretprobe_trampoline)
orig_ret_addr = (void*) *sara;
if ((ri = get_free_rp_inst(rp)) != NULL) {
ri->rp = rp;
ri->task = current;
ri->ret_addr = (kprobe_opcode_t *) *sara;
if ((ri = get_free_rp_inst(rp)) != NULL) {
ri->rp = rp;
ri->stack_addr = sara;
ri->ret_addr = orig_ret_addr;
add_rp_inst(ri);
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
} else {
rp->nmissed++;
}
}
void arch_kprobe_flush_task(struct task_struct *tk)
{
struct kretprobe_instance *ri;
while ((ri = get_rp_inst_tsk(tk)) != NULL) {
*((unsigned long *)(ri->stack_addr)) =
(unsigned long) ri->ret_addr;
recycle_rp_inst(ri);
}
add_rp_inst(ri);
} else {
rp->nmissed++;
}
}
/*
@ -428,36 +401,59 @@ no_kprobe:
*/
int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct task_struct *tsk;
struct kretprobe_instance *ri;
struct hlist_head *head;
struct hlist_node *node;
unsigned long *sara = (unsigned long *)regs->rsp - 1;
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
unsigned long orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
tsk = arch_get_kprobe_task(sara);
head = kretprobe_inst_table_head(tsk);
head = kretprobe_inst_table_head(current);
hlist_for_each_entry(ri, node, head, hlist) {
if (ri->stack_addr == sara && ri->rp) {
if (ri->rp->handler)
ri->rp->handler(ri, regs);
}
}
return 0;
}
/*
* It is possible to have multiple instances associated with a given
* task either because an multiple functions in the call path
* have a return probe installed on them, and/or more then one return
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
struct kretprobe_instance *ri;
/* RA already popped */
unsigned long *sara = ((unsigned long *)regs->rsp) - 1;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
while ((ri = get_rp_inst(sara))) {
regs->rip = (unsigned long)ri->ret_addr;
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
regs->eflags &= ~TF_MASK;
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
regs->rip = orig_ret_address;
unlock_kprobes();
preempt_enable_no_resched();
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we have handled unlocking
* and re-enabling preemption.
*/
return 1;
}
/*
@ -550,8 +546,7 @@ int post_kprobe_handler(struct pt_regs *regs)
current_kprobe->post_handler(current_kprobe, regs, 0);
}
if (current_kprobe->post_handler != trampoline_post_handler)
resume_execution(current_kprobe, regs);
resume_execution(current_kprobe, regs);
regs->eflags |= kprobe_saved_rflags;
/* Restore the original saved kprobes variables and continue. */
@ -682,111 +677,12 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
return 0;
}
/*
* kprobe->ainsn.insn points to the copy of the instruction to be single-stepped.
* By default on x86_64, pages we get from kmalloc or vmalloc are not
* executable. Single-stepping an instruction on such a page yields an
* oops. So instead of storing the instruction copies in their respective
* kprobe objects, we allocate a page, map it executable, and store all the
* instruction copies there. (We can allocate additional pages if somebody
* inserts a huge number of probes.) Each page can hold up to INSNS_PER_PAGE
* instruction slots, each of which is MAX_INSN_SIZE*sizeof(kprobe_opcode_t)
* bytes.
*/
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE*sizeof(kprobe_opcode_t)))
struct kprobe_insn_page {
struct hlist_node hlist;
kprobe_opcode_t *insns; /* page of instruction slots */
char slot_used[INSNS_PER_PAGE];
int nused;
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
static struct hlist_head kprobe_insn_pages;
/**
* get_insn_slot() - Find a slot on an executable page for an instruction.
* We allocate an executable page if there's no room on existing ones.
*/
static kprobe_opcode_t *get_insn_slot(void)
int __init arch_init(void)
{
struct kprobe_insn_page *kip;
struct hlist_node *pos;
hlist_for_each(pos, &kprobe_insn_pages) {
kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
if (kip->nused < INSNS_PER_PAGE) {
int i;
for (i = 0; i < INSNS_PER_PAGE; i++) {
if (!kip->slot_used[i]) {
kip->slot_used[i] = 1;
kip->nused++;
return kip->insns + (i*MAX_INSN_SIZE);
}
}
/* Surprise! No unused slots. Fix kip->nused. */
kip->nused = INSNS_PER_PAGE;
}
}
/* All out of space. Need to allocate a new page. Use slot 0.*/
kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
if (!kip) {
return NULL;
}
/*
* For the %rip-relative displacement fixups to be doable, we
* need our instruction copy to be within +/- 2GB of any data it
* might access via %rip. That is, within 2GB of where the
* kernel image and loaded module images reside. So we allocate
* a page in the module loading area.
*/
kip->insns = module_alloc(PAGE_SIZE);
if (!kip->insns) {
kfree(kip);
return NULL;
}
INIT_HLIST_NODE(&kip->hlist);
hlist_add_head(&kip->hlist, &kprobe_insn_pages);
memset(kip->slot_used, 0, INSNS_PER_PAGE);
kip->slot_used[0] = 1;
kip->nused = 1;
return kip->insns;
}
/**
* free_insn_slot() - Free instruction slot obtained from get_insn_slot().
*/
static void free_insn_slot(kprobe_opcode_t *slot)
{
struct kprobe_insn_page *kip;
struct hlist_node *pos;
hlist_for_each(pos, &kprobe_insn_pages) {
kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
if (kip->insns <= slot
&& slot < kip->insns+(INSNS_PER_PAGE*MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE;
kip->slot_used[i] = 0;
kip->nused--;
if (kip->nused == 0) {
/*
* Page is no longer in use. Free it unless
* it's the last one. We keep the last one
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
hlist_del(&kip->hlist);
if (hlist_empty(&kprobe_insn_pages)) {
INIT_HLIST_NODE(&kip->hlist);
hlist_add_head(&kip->hlist,
&kprobe_insn_pages);
} else {
module_free(NULL, kip->insns);
kfree(kip);
}
}
return;
}
}
return register_kprobe(&trampoline_p);
}


@ -481,6 +481,33 @@ out:
return err;
}
/*
* This function selects if the context switch from prev to next
* has to tweak the TSC disable bit in the cr4.
*/
static inline void disable_tsc(struct task_struct *prev_p,
struct task_struct *next_p)
{
struct thread_info *prev, *next;
/*
* gcc should eliminate the ->thread_info dereference if
* has_secure_computing returns 0 at compile time (SECCOMP=n).
*/
prev = prev_p->thread_info;
next = next_p->thread_info;
if (has_secure_computing(prev) || has_secure_computing(next)) {
/* slow path here */
if (has_secure_computing(prev) &&
!has_secure_computing(next)) {
write_cr4(read_cr4() & ~X86_CR4_TSD);
} else if (!has_secure_computing(prev) &&
has_secure_computing(next))
write_cr4(read_cr4() | X86_CR4_TSD);
}
}
/*
* This special macro can be used to load a debugging register
*/
@ -599,6 +626,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
}
}
disable_tsc(prev_p, next_p);
return prev_p;
}


@ -1806,7 +1806,8 @@ static void as_put_request(request_queue_t *q, struct request *rq)
rq->elevator_private = NULL;
}
static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
static int as_set_request(request_queue_t *q, struct request *rq,
struct bio *bio, int gfp_mask)
{
struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
@ -1827,7 +1828,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
return 1;
}
static int as_may_queue(request_queue_t *q, int rw)
static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
int ret = ELV_MQUEUE_MAY;
struct as_data *ad = q->elevator->elevator_data;


@ -1,6 +1,6 @@
/*
* Disk Array driver for HP SA 5xxx and 6xxx Controllers
* Copyright 2000, 2002 Hewlett-Packard Development Company, L.P.
* Copyright 2000, 2005 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -54,7 +54,7 @@
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
" SA6i P600 P800 E400");
" SA6i P600 P800 E400 E300");
MODULE_LICENSE("GPL");
#include "cciss_cmd.h"
@ -85,8 +85,10 @@ static const struct pci_device_id cciss_pci_device_id[] = {
0x103C, 0x3225, 0, 0, 0},
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
0x103c, 0x3223, 0, 0, 0},
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
0x103c, 0x3231, 0, 0, 0},
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
0x103c, 0x3233, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
@ -110,6 +112,7 @@ static struct board_type products[] = {
{ 0x3225103C, "Smart Array P600", &SA5_access},
{ 0x3223103C, "Smart Array P800", &SA5_access},
{ 0x3231103C, "Smart Array E400", &SA5_access},
{ 0x3233103C, "Smart Array E300", &SA5_access},
};
/* How long to wait (in millesconds) for board to go into simple mode */
@ -635,6 +638,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
cciss_pci_info_struct pciinfo;
if (!arg) return -EINVAL;
pciinfo.domain = pci_domain_nr(host->pdev->bus);
pciinfo.bus = host->pdev->bus->number;
pciinfo.dev_fn = host->pdev->devfn;
pciinfo.board_id = host->board_id;
@ -787,13 +791,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
luninfo.LunID = drv->LunID;
luninfo.num_opens = drv->usage_count;
luninfo.num_parts = 0;
/* count partitions 1 to 15 with sizes > 0 */
for (i = 0; i < MAX_PART - 1; i++) {
if (!disk->part[i])
continue;
if (disk->part[i]->nr_sects != 0)
luninfo.num_parts++;
}
if (copy_to_user(argp, &luninfo,
sizeof(LogvolInfo_struct)))
return -EFAULT;

File diff suppressed because it is too large.


@ -760,7 +760,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
}
static int
deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
int gfp_mask)
{
struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq;


@ -486,12 +486,13 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
return NULL;
}
int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
int gfp_mask)
{
elevator_t *e = q->elevator;
if (e->ops->elevator_set_req_fn)
return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);
rq->elevator_private = NULL;
return 0;
@ -505,12 +506,12 @@ void elv_put_request(request_queue_t *q, struct request *rq)
e->ops->elevator_put_req_fn(q, rq);
}
int elv_may_queue(request_queue_t *q, int rw)
int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
elevator_t *e = q->elevator;
if (e->ops->elevator_may_queue_fn)
return e->ops->elevator_may_queue_fn(q, rw);
return e->ops->elevator_may_queue_fn(q, rw, bio);
return ELV_MQUEUE_MAY;
}


@ -276,6 +276,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->bio = rq->biotail = NULL;
rq->ioprio = 0;
rq->buffer = NULL;
rq->ref_count = 1;
rq->q = q;
@ -1442,11 +1443,7 @@ void __generic_unplug_device(request_queue_t *q)
if (!blk_remove_plug(q))
return;
/*
* was plugged, fire request_fn if queue has stuff to do
*/
if (elv_next_request(q))
q->request_fn(q);
q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);
@ -1776,8 +1773,8 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
mempool_free(rq, q->rq.rq_pool);
}
static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
int gfp_mask)
static inline struct request *
blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@ -1790,7 +1787,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
*/
rq->flags = rw;
if (!elv_set_request(q, rq, gfp_mask))
if (!elv_set_request(q, rq, bio, gfp_mask))
return rq;
mempool_free(rq, q->rq.rq_pool);
@ -1872,7 +1869,8 @@ static void freed_request(request_queue_t *q, int rw)
/*
* Get a free request, queue_lock must not be held
*/
static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
int gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
@ -1895,7 +1893,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
}
}
switch (elv_may_queue(q, rw)) {
switch (elv_may_queue(q, rw, bio)) {
case ELV_MQUEUE_NO:
goto rq_starved;
case ELV_MQUEUE_MAY:
@ -1920,7 +1918,7 @@ get_rq:
set_queue_congested(q, rw);
spin_unlock_irq(q->queue_lock);
rq = blk_alloc_request(q, rw, gfp_mask);
rq = blk_alloc_request(q, rw, bio, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
@ -1961,7 +1959,8 @@ out:
* No available requests for this queue, unplug the device and wait for some
* requests to become available.
*/
static struct request *get_request_wait(request_queue_t *q, int rw)
static struct request *get_request_wait(request_queue_t *q, int rw,
struct bio *bio)
{
DEFINE_WAIT(wait);
struct request *rq;
@ -1972,7 +1971,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
rq = get_request(q, rw, GFP_NOIO);
rq = get_request(q, rw, bio, GFP_NOIO);
if (!rq) {
struct io_context *ioc;
@ -2003,9 +2002,9 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
BUG_ON(rw != READ && rw != WRITE);
if (gfp_mask & __GFP_WAIT)
rq = get_request_wait(q, rw);
rq = get_request_wait(q, rw, NULL);
else
rq = get_request(q, rw, gfp_mask);
rq = get_request(q, rw, NULL, gfp_mask);
return rq;
}
@ -2333,7 +2332,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
return;
req->rq_status = RQ_INACTIVE;
req->q = NULL;
req->rl = NULL;
/*
@ -2462,6 +2460,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
req->rq_disk->in_flight--;
}
req->ioprio = ioprio_best(req->ioprio, next->ioprio);
__blk_put_request(q, next);
return 1;
}
@ -2514,11 +2514,13 @@ static int __make_request(request_queue_t *q, struct bio *bio)
{
struct request *req, *freereq = NULL;
int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
unsigned short prio;
sector_t sector;
sector = bio->bi_sector;
nr_sectors = bio_sectors(bio);
cur_nr_sectors = bio_cur_sectors(bio);
prio = bio_prio(bio);
rw = bio_data_dir(bio);
sync = bio_sync(bio);
@ -2559,6 +2561,7 @@ again:
req->biotail->bi_next = bio;
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req);
@ -2583,6 +2586,7 @@ again:
req->hard_cur_sectors = cur_nr_sectors;
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req);
@ -2610,7 +2614,7 @@ get_rq:
freereq = NULL;
} else {
spin_unlock_irq(q->queue_lock);
if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
/*
* READA bit set
*/
@ -2618,7 +2622,7 @@ get_rq:
if (bio_rw_ahead(bio))
goto end_io;
freereq = get_request_wait(q, rw);
freereq = get_request_wait(q, rw, bio);
}
goto again;
}
@ -2646,6 +2650,7 @@ get_rq:
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
req->ioprio = prio;
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
@ -2674,7 +2679,7 @@ static inline void blk_partition_remap(struct bio *bio)
if (bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
switch (bio->bi_rw) {
switch (bio_data_dir(bio)) {
case READ:
p->read_sectors += bio_sectors(bio);
p->reads++;
@ -2693,6 +2698,7 @@ void blk_finish_queue_drain(request_queue_t *q)
{
struct request_list *rl = &q->rq;
struct request *rq;
int requeued = 0;
spin_lock_irq(q->queue_lock);
clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
@ -2701,9 +2707,13 @@ void blk_finish_queue_drain(request_queue_t *q)
rq = list_entry_rq(q->drain_list.next);
list_del_init(&rq->queuelist);
__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
elv_requeue_request(q, rq);
requeued++;
}
if (requeued)
q->request_fn(q);
spin_unlock_irq(q->queue_lock);
wake_up(&rl->wait[0]);
@ -2900,7 +2910,7 @@ void submit_bio(int rw, struct bio *bio)
BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);
bio->bi_rw = rw;
bio->bi_rw |= rw;
if (rw & WRITE)
mod_page_state(pgpgout, count);
else
@ -3257,8 +3267,11 @@ void exit_io_context(void)
struct io_context *ioc;
local_irq_save(flags);
task_lock(current);
ioc = current->io_context;
current->io_context = NULL;
ioc->task = NULL;
task_unlock(current);
local_irq_restore(flags);
if (ioc->aic && ioc->aic->exit)
@ -3293,12 +3306,12 @@ struct io_context *get_io_context(int gfp_flags)
ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
if (ret) {
atomic_set(&ret->refcount, 1);
ret->pid = tsk->pid;
ret->task = current;
ret->set_ioprio = NULL;
ret->last_waited = jiffies; /* doesn't matter... */
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
ret->cic = NULL;
spin_lock_init(&ret->lock);
local_irq_save(flags);
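The hunks above carry the bio's io priority into the request: newly allocated requests take the bio's prio directly, while back and front merges combine the two with ioprio_best().  As a rough sketch of what such a merge does (an illustration, not the kernel's actual ioprio_best(); the 13-bit class shift matches the io priority layout, while normalizing an unset priority to best-effort level 4 is an assumption):

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_PRIO_CLASS(prio)		((prio) >> IOPRIO_CLASS_SHIFT)

/* Sketch: return the "stronger" of two io priorities.  Class 0 (none)
 * is treated as best-effort (class 2) level 4; with the class in the
 * top bits, the numerically smaller value is the more urgent one. */
static unsigned short ioprio_best_sketch(unsigned short aprio, unsigned short bprio)
{
	if (!IOPRIO_PRIO_CLASS(aprio))
		aprio = IOPRIO_PRIO_VALUE(2, 4);
	if (!IOPRIO_PRIO_CLASS(bprio))
		bprio = IOPRIO_PRIO_VALUE(2, 4);
	return aprio < bprio ? aprio : bprio;
}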

View File

@ -253,7 +253,7 @@ static int floppy_revalidate(struct gendisk *disk);
static int swim3_add_device(struct device_node *swims);
int swim3_init(void);
#ifndef CONFIG_PMAC_PBOOK
#ifndef CONFIG_PMAC_MEDIABAY
#define check_media_bay(which, what) 1
#endif
@ -297,9 +297,11 @@ static void do_fd_request(request_queue_t * q)
int i;
for(i=0;i<floppy_count;i++)
{
#ifdef CONFIG_PMAC_MEDIABAY
if (floppy_states[i].media_bay &&
check_media_bay(floppy_states[i].media_bay, MB_FD))
continue;
#endif /* CONFIG_PMAC_MEDIABAY */
start_request(&floppy_states[i]);
}
sti();
@ -856,8 +858,10 @@ static int floppy_ioctl(struct inode *inode, struct file *filp,
if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
return -EPERM;
#ifdef CONFIG_PMAC_MEDIABAY
if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
return -ENXIO;
#endif
switch (cmd) {
case FDEJECT:
@ -881,8 +885,10 @@ static int floppy_open(struct inode *inode, struct file *filp)
int n, err = 0;
if (fs->ref_count == 0) {
#ifdef CONFIG_PMAC_MEDIABAY
if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
return -ENXIO;
#endif
out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
out_8(&sw->control_bic, 0xff);
out_8(&sw->mode, 0x95);
@ -967,8 +973,10 @@ static int floppy_revalidate(struct gendisk *disk)
struct swim3 __iomem *sw;
int ret, n;
#ifdef CONFIG_PMAC_MEDIABAY
if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
return -ENXIO;
#endif
sw = fs->swim3;
grab_drive(fs, revalidating, 0);

View File

@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@ -1582,9 +1583,9 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
rc = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
if (!rc) {
rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
pci_name(pdev));
@ -1593,7 +1594,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
pci_dac = 1;
} else {
#endif
rc = pci_set_dma_mask(pdev, 0xffffffffULL);
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
pci_name(pdev));
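The sx8 hunks above swap the open-coded 0xffffffff... constants for DMA_64BIT_MASK and DMA_32BIT_MASK from the newly included <linux/dma-mapping.h>.  The surrounding probe code follows the usual try-wide-then-narrow pattern; a condensed sketch of that pattern (illustrative, not the driver's exact flow):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	/* Prefer 64-bit addressing for both streaming and consistent
	 * mappings ... */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
		return 1;		/* 64-bit DMA usable */

	/* ... otherwise fall back to 32-bit addressing. */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;		/* no usable DMA mask at all */

	return 0;			/* 32-bit DMA */
}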

View File

@ -308,9 +308,6 @@ static int __init misc_init(void)
#endif
#ifdef CONFIG_BVME6000
rtc_DP8570A_init();
#endif
#ifdef CONFIG_PMAC_PBOOK
pmu_device_init();
#endif
if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) {
printk("unable to get major %d for misc devices\n",

View File

@ -1324,9 +1324,9 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
/* XXX FIXME: Media bay stuff need re-organizing */
if (np->parent && np->parent->name
&& strcasecmp(np->parent->name, "media-bay") == 0) {
#ifdef CONFIG_PMAC_PBOOK
#ifdef CONFIG_PMAC_MEDIABAY
media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq, hwif->index);
#endif /* CONFIG_PMAC_PBOOK */
#endif /* CONFIG_PMAC_MEDIABAY */
pmif->mediabay = 1;
if (!bidp)
pmif->aapl_bus_id = 1;
@ -1382,10 +1382,10 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
pmif->mediabay ? " (mediabay)" : "", hwif->irq);
#ifdef CONFIG_PMAC_PBOOK
#ifdef CONFIG_PMAC_MEDIABAY
if (pmif->mediabay && check_media_bay_by_base(pmif->regbase, MB_CD) == 0)
hwif->noprobe = 0;
#endif /* CONFIG_PMAC_PBOOK */
#endif /* CONFIG_PMAC_MEDIABAY */
hwif->sg_max_nents = MAX_DCMDS;

View File

@ -3538,8 +3538,8 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PMAC_PBOOK
{
#ifdef CONFIG_PPC_PMAC
if (_machine == _MACH_Pmac) {
struct device_node *of_node;
/* Re-enable 1394 */
@ -3547,7 +3547,7 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
if (of_node)
pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
}
#endif
#endif /* CONFIG_PPC_PMAC */
pci_enable_device(pdev);
@ -3557,8 +3557,8 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
#ifdef CONFIG_PMAC_PBOOK
{
#ifdef CONFIG_PPC_PMAC
if (_machine == _MACH_Pmac) {
struct device_node *of_node;
/* Disable 1394 */

View File

@ -96,7 +96,7 @@ void ib_pack(const struct ib_field *desc,
else
val = 0;
mask = cpu_to_be64(((1ull << desc[i].size_bits) - 1) << shift);
mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift);
addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words);
*addr = (*addr & ~mask) | (cpu_to_be64(val) & mask);
} else {
@ -176,7 +176,7 @@ void ib_unpack(const struct ib_field *desc,
__be64 *addr;
shift = 64 - desc[i].offset_bits - desc[i].size_bits;
mask = ((1ull << desc[i].size_bits) - 1) << shift;
mask = (~0ull >> (64 - desc[i].size_bits)) << shift;
addr = (__be64 *) buf + desc[i].offset_words;
val = (be64_to_cpup(addr) & mask) >> shift;
value_write(desc[i].struct_offset_bytes,
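Both mask changes above matter for fields that span a full 64 bits: (1ull << 64) is undefined in C and wraps to 1 on common hardware, which turns the mask into zero, while ~0ull >> (64 - size_bits) stays well defined for any size_bits from 1 to 64.  A tiny standalone illustration (assuming 1 <= size_bits <= 64, as in the ib_field tables):

#include <stdint.h>
#include <stdio.h>

static uint64_t field_mask(unsigned int size_bits)
{
	/* Safe for size_bits == 64; ((1ull << size_bits) - 1) is not. */
	return ~0ull >> (64 - size_bits);
}

int main(void)
{
	printf("%016llx\n", (unsigned long long) field_mask(16));	/* 000000000000ffff */
	printf("%016llx\n", (unsigned long long) field_mask(64));	/* ffffffffffffffff */
	return 0;
}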

View File

@ -507,7 +507,13 @@ retry:
spin_unlock_irqrestore(&idr_lock, flags);
}
return ret;
/*
* It's not safe to dereference query any more, because the
* send may already have completed and freed the query in
* another context. So use wr.wr_id, which has a copy of the
* query's id.
*/
return ret ? ret : wr.wr_id;
}
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
@ -598,14 +604,15 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
rec, query->sa_query.mad->data);
*sa_query = &query->sa_query;
ret = send_mad(&query->sa_query, timeout_ms);
if (ret) {
if (ret < 0) {
*sa_query = NULL;
kfree(query->sa_query.mad);
kfree(query);
}
return ret ? ret : query->sa_query.id;
return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
@ -674,14 +681,15 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
rec, query->sa_query.mad->data);
*sa_query = &query->sa_query;
ret = send_mad(&query->sa_query, timeout_ms);
if (ret) {
if (ret < 0) {
*sa_query = NULL;
kfree(query->sa_query.mad);
kfree(query);
}
return ret ? ret : query->sa_query.id;
return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
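With the changes above, both query functions simply return whatever send_mad() produced: a negative errno on failure, or the query id that was copied into wr.wr_id before posting the send, so the query structure itself is never read after a racing completion may have freed it.  A standalone toy (plain pthreads, not the IB stack) of the underlying rule, namely copying what you still need out of an object before handing it to a context that may free it:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct query { int id; };

static void *completer(void *arg)
{
	free(arg);			/* "send completed and freed the query" */
	return NULL;
}

static int submit(struct query *q)
{
	pthread_t t;
	int id = q->id;			/* copy first, like wr.wr_id above */

	pthread_create(&t, NULL, completer, q);
	pthread_join(&t, NULL);
	return id;			/* safe: *q is never touched again */
}

int main(void)
{
	struct query *q = malloc(sizeof *q);

	if (!q)
		return 1;
	q->id = 42;
	printf("query id %d\n", submit(q));
	return 0;
}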

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU

View File

@ -431,6 +431,36 @@ static int mthca_cmd_imm(struct mthca_dev *dev,
timeout, status);
}
int mthca_cmd_init(struct mthca_dev *dev)
{
sema_init(&dev->cmd.hcr_sem, 1);
sema_init(&dev->cmd.poll_sem, 1);
dev->cmd.use_events = 0;
dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE,
MTHCA_HCR_SIZE);
if (!dev->hcr) {
mthca_err(dev, "Couldn't map command register.");
return -ENOMEM;
}
dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev,
MTHCA_MAILBOX_SIZE,
MTHCA_MAILBOX_SIZE, 0);
if (!dev->cmd.pool) {
iounmap(dev->hcr);
return -ENOMEM;
}
return 0;
}
void mthca_cmd_cleanup(struct mthca_dev *dev)
{
pci_pool_destroy(dev->cmd.pool);
iounmap(dev->hcr);
}
/*
* Switch to using events to issue FW commands (should be called after
* event queue to command events has been initialized).
@ -489,6 +519,33 @@ void mthca_cmd_use_polling(struct mthca_dev *dev)
up(&dev->cmd.poll_sem);
}
struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
unsigned int gfp_mask)
{
struct mthca_mailbox *mailbox;
mailbox = kmalloc(sizeof *mailbox, gfp_mask);
if (!mailbox)
return ERR_PTR(-ENOMEM);
mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
}
return mailbox;
}
void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
{
if (!mailbox)
return;
pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
{
u64 out;
@ -513,20 +570,20 @@ int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
u64 virt, u8 *status)
{
u32 *inbox;
dma_addr_t indma;
struct mthca_mailbox *mailbox;
struct mthca_icm_iter iter;
__be64 *pages;
int lg;
int nent = 0;
int i;
int err = 0;
int ts = 0, tc = 0;
inbox = pci_alloc_consistent(dev->pdev, PAGE_SIZE, &indma);
if (!inbox)
return -ENOMEM;
memset(inbox, 0, PAGE_SIZE);
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE);
pages = mailbox->buf;
for (mthca_icm_first(icm, &iter);
!mthca_icm_last(&iter);
@ -546,19 +603,17 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
}
for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) {
if (virt != -1) {
*((__be64 *) (inbox + nent * 4)) =
cpu_to_be64(virt);
pages[nent * 2] = cpu_to_be64(virt);
virt += 1 << lg;
}
*((__be64 *) (inbox + nent * 4 + 2)) =
cpu_to_be64((mthca_icm_addr(&iter) +
(i << lg)) | (lg - 12));
pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) +
(i << lg)) | (lg - 12));
ts += 1 << (lg - 10);
++tc;
if (nent == PAGE_SIZE / 16) {
err = mthca_cmd(dev, indma, nent, 0, op,
if (nent == MTHCA_MAILBOX_SIZE / 16) {
err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
CMD_TIME_CLASS_B, status);
if (err || *status)
goto out;
@ -568,7 +623,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
}
if (nent)
err = mthca_cmd(dev, indma, nent, 0, op,
err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
CMD_TIME_CLASS_B, status);
switch (op) {
@ -585,7 +640,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
}
out:
pci_free_consistent(dev->pdev, PAGE_SIZE, inbox, indma);
mthca_free_mailbox(dev, mailbox);
return err;
}
@ -606,8 +661,8 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
dma_addr_t outdma;
int err = 0;
u8 lg;
@ -625,12 +680,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
#define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40
#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48
outbox = pci_alloc_consistent(dev->pdev, QUERY_FW_OUT_SIZE, &outdma);
if (!outbox) {
return -ENOMEM;
}
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_FW,
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
CMD_TIME_CLASS_A, status);
if (err)
@ -681,15 +736,15 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
}
out:
pci_free_consistent(dev->pdev, QUERY_FW_OUT_SIZE, outbox, outdma);
mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
{
struct mthca_mailbox *mailbox;
u8 info;
u32 *outbox;
dma_addr_t outdma;
int err = 0;
#define ENABLE_LAM_OUT_SIZE 0x100
@ -700,11 +755,12 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
#define ENABLE_LAM_INFO_ECC_MASK 0x3
outbox = pci_alloc_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, &outdma);
if (!outbox)
return -ENOMEM;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_ENABLE_LAM,
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
CMD_TIME_CLASS_C, status);
if (err)
@ -733,7 +789,7 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
(unsigned long long) dev->ddr_end);
out:
pci_free_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, outbox, outdma);
mthca_free_mailbox(dev, mailbox);
return err;
}
@ -744,9 +800,9 @@ int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
{
struct mthca_mailbox *mailbox;
u8 info;
u32 *outbox;
dma_addr_t outdma;
int err = 0;
#define QUERY_DDR_OUT_SIZE 0x100
@ -757,11 +813,12 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
#define QUERY_DDR_INFO_ECC_MASK 0x3
outbox = pci_alloc_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, &outdma);
if (!outbox)
return -ENOMEM;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DDR,
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
CMD_TIME_CLASS_A, status);
if (err)
@ -787,15 +844,15 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
(unsigned long long) dev->ddr_end);
out:
pci_free_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, outbox, outdma);
mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
struct mthca_dev_lim *dev_lim, u8 *status)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
dma_addr_t outdma;
u8 field;
u16 size;
int err;
@ -860,11 +917,12 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
#define QUERY_DEV_LIM_LAMR_OFFSET 0x9f
#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0
outbox = pci_alloc_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, &outdma);
if (!outbox)
return -ENOMEM;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DEV_LIM,
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
CMD_TIME_CLASS_A, status);
if (err)
@ -1020,15 +1078,15 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
}
out:
pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma);
mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
struct mthca_adapter *adapter, u8 *status)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
dma_addr_t outdma;
int err;
#define QUERY_ADAPTER_OUT_SIZE 0x100
@ -1037,23 +1095,24 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
outbox = pci_alloc_consistent(dev->pdev, QUERY_ADAPTER_OUT_SIZE, &outdma);
if (!outbox)
return -ENOMEM;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_ADAPTER,
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
CMD_TIME_CLASS_A, status);
if (err)
goto out;
MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
out:
pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma);
mthca_free_mailbox(dev, mailbox);
return err;
}
@ -1061,8 +1120,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
struct mthca_init_hca_param *param,
u8 *status)
{
struct mthca_mailbox *mailbox;
u32 *inbox;
dma_addr_t indma;
int err;
#define INIT_HCA_IN_SIZE 0x200
@ -1102,9 +1161,10 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
#define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
#define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18)
inbox = pci_alloc_consistent(dev->pdev, INIT_HCA_IN_SIZE, &indma);
if (!inbox)
return -ENOMEM;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
memset(inbox, 0, INIT_HCA_IN_SIZE);
@ -1167,10 +1227,9 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
}
err = mthca_cmd(dev, indma, 0, 0, CMD_INIT_HCA,
HZ, status);
err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);
pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
mthca_free_mailbox(dev, mailbox);
return err;
}
@ -1178,8 +1237,8 @@ int mthca_INIT_IB(struct mthca_dev *dev,
struct mthca_init_ib_param *param,
int port, u8 *status)
{
struct mthca_mailbox *mailbox;
u32 *inbox;
dma_addr_t indma;
int err;
u32 flags;
@ -1199,9 +1258,10 @@ int mthca_INIT_IB(struct mthca_dev *dev,
#define INIT_IB_NODE_GUID_OFFSET 0x18
#define INIT_IB_SI_GUID_OFFSET 0x20
inbox = pci_alloc_consistent(dev->pdev, INIT_IB_IN_SIZE, &indma);
if (!inbox)
return -ENOMEM;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
memset(inbox, 0, INIT_IB_IN_SIZE);
@ -1221,10 +1281,10 @@ int mthca_INIT_IB(struct mthca_dev *dev,
MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET);
err = mthca_cmd(dev, indma, port, 0, CMD_INIT_IB,
err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
CMD_TIME_CLASS_A, status);
pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
mthca_free_mailbox(dev, mailbox);
return err;
}
@ -1241,8 +1301,8 @@ int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
int port, u8 *status)
{
struct mthca_mailbox *mailbox;
u32 *inbox;
dma_addr_t indma;
int err;
u32 flags = 0;
@ -1253,9 +1313,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
#define SET_IB_CAP_MASK_OFFSET 0x04
#define SET_IB_SI_GUID_OFFSET 0x08
inbox = pci_alloc_consistent(dev->pdev, SET_IB_IN_SIZE, &indma);
if (!inbox)
return -ENOMEM;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
memset(inbox, 0, SET_IB_IN_SIZE);
@ -1266,10 +1327,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET);
err = mthca_cmd(dev, indma, port, 0, CMD_SET_IB,
err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
CMD_TIME_CLASS_B, status);
pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
mthca_free_mailbox(dev, mailbox);
return err;
}
@ -1280,20 +1341,22 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st
int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
{
struct mthca_mailbox *mailbox;
u64 *inbox;
dma_addr_t indma;
int err;
inbox = pci_alloc_consistent(dev->pdev, 16, &indma);
if (!inbox)
return -ENOMEM;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
inbox[0] = cpu_to_be64(virt);
inbox[1] = cpu_to_be64(dma_addr);
err = mthca_cmd(dev, indma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B, status);
err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
CMD_TIME_CLASS_B, status);
pci_free_consistent(dev->pdev, 16, inbox, indma);
mthca_free_mailbox(dev, mailbox);
if (!err)
mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
@ -1338,69 +1401,26 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
return 0;
}
int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry,
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int mpt_index, u8 *status)
{
dma_addr_t indma;
int err;
indma = pci_map_single(dev->pdev, mpt_entry,
MTHCA_MPT_ENTRY_SIZE,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(indma))
return -ENOMEM;
err = mthca_cmd(dev, indma, mpt_index, 0, CMD_SW2HW_MPT,
CMD_TIME_CLASS_B, status);
pci_unmap_single(dev->pdev, indma,
MTHCA_MPT_ENTRY_SIZE, PCI_DMA_TODEVICE);
return err;
return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
CMD_TIME_CLASS_B, status);
}
int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry,
int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int mpt_index, u8 *status)
{
dma_addr_t outdma = 0;
int err;
if (mpt_entry) {
outdma = pci_map_single(dev->pdev, mpt_entry,
MTHCA_MPT_ENTRY_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(outdma))
return -ENOMEM;
}
err = mthca_cmd_box(dev, 0, outdma, mpt_index, !mpt_entry,
CMD_HW2SW_MPT,
CMD_TIME_CLASS_B, status);
if (mpt_entry)
pci_unmap_single(dev->pdev, outdma,
MTHCA_MPT_ENTRY_SIZE,
PCI_DMA_FROMDEVICE);
return err;
return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
!mailbox, CMD_HW2SW_MPT,
CMD_TIME_CLASS_B, status);
}
int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry,
int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int num_mtt, u8 *status)
{
dma_addr_t indma;
int err;
indma = pci_map_single(dev->pdev, mtt_entry,
(num_mtt + 2) * 8,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(indma))
return -ENOMEM;
err = mthca_cmd(dev, indma, num_mtt, 0, CMD_WRITE_MTT,
CMD_TIME_CLASS_B, status);
pci_unmap_single(dev->pdev, indma,
(num_mtt + 2) * 8, PCI_DMA_TODEVICE);
return err;
return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
CMD_TIME_CLASS_B, status);
}
int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
@ -1418,92 +1438,38 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
}
int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context,
int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int eq_num, u8 *status)
{
dma_addr_t indma;
int err;
indma = pci_map_single(dev->pdev, eq_context,
MTHCA_EQ_CONTEXT_SIZE,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(indma))
return -ENOMEM;
err = mthca_cmd(dev, indma, eq_num, 0, CMD_SW2HW_EQ,
CMD_TIME_CLASS_A, status);
pci_unmap_single(dev->pdev, indma,
MTHCA_EQ_CONTEXT_SIZE, PCI_DMA_TODEVICE);
return err;
return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
CMD_TIME_CLASS_A, status);
}
int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context,
int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int eq_num, u8 *status)
{
dma_addr_t outdma = 0;
int err;
outdma = pci_map_single(dev->pdev, eq_context,
MTHCA_EQ_CONTEXT_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(outdma))
return -ENOMEM;
err = mthca_cmd_box(dev, 0, outdma, eq_num, 0,
CMD_HW2SW_EQ,
CMD_TIME_CLASS_A, status);
pci_unmap_single(dev->pdev, outdma,
MTHCA_EQ_CONTEXT_SIZE,
PCI_DMA_FROMDEVICE);
return err;
return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
CMD_HW2SW_EQ,
CMD_TIME_CLASS_A, status);
}
int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context,
int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status)
{
dma_addr_t indma;
int err;
indma = pci_map_single(dev->pdev, cq_context,
MTHCA_CQ_CONTEXT_SIZE,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(indma))
return -ENOMEM;
err = mthca_cmd(dev, indma, cq_num, 0, CMD_SW2HW_CQ,
return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
CMD_TIME_CLASS_A, status);
pci_unmap_single(dev->pdev, indma,
MTHCA_CQ_CONTEXT_SIZE, PCI_DMA_TODEVICE);
return err;
}
int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context,
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status)
{
dma_addr_t outdma = 0;
int err;
outdma = pci_map_single(dev->pdev, cq_context,
MTHCA_CQ_CONTEXT_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(outdma))
return -ENOMEM;
err = mthca_cmd_box(dev, 0, outdma, cq_num, 0,
CMD_HW2SW_CQ,
CMD_TIME_CLASS_A, status);
pci_unmap_single(dev->pdev, outdma,
MTHCA_CQ_CONTEXT_SIZE,
PCI_DMA_FROMDEVICE);
return err;
return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
CMD_HW2SW_CQ,
CMD_TIME_CLASS_A, status);
}
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, void *qp_context, u32 optmask,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status)
{
static const u16 op[] = {
@ -1520,36 +1486,34 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
[MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE
};
u8 op_mod = 0;
dma_addr_t indma;
int my_mailbox = 0;
int err;
if (trans < 0 || trans >= ARRAY_SIZE(op))
return -EINVAL;
if (trans == MTHCA_TRANS_ANY2RST) {
indma = 0;
op_mod = 3; /* don't write outbox, any->reset */
/* For debugging */
qp_context = pci_alloc_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE,
&indma);
op_mod = 2; /* write outbox, any->reset */
if (!mailbox) {
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (!IS_ERR(mailbox)) {
my_mailbox = 1;
op_mod = 2; /* write outbox, any->reset */
} else
mailbox = NULL;
}
} else {
indma = pci_map_single(dev->pdev, qp_context,
MTHCA_QP_CONTEXT_SIZE,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(indma))
return -ENOMEM;
if (0) {
int i;
mthca_dbg(dev, "Dumping QP context:\n");
printk(" opt param mask: %08x\n", be32_to_cpup(qp_context));
printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
for (i = 0; i < 0x100 / 4; ++i) {
if (i % 8 == 0)
printk(" [%02x] ", i * 4);
printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2]));
printk(" %08x",
be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
if ((i + 1) % 8 == 0)
printk("\n");
}
@ -1557,55 +1521,39 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
}
if (trans == MTHCA_TRANS_ANY2RST) {
err = mthca_cmd_box(dev, 0, indma, (!!is_ee << 24) | num,
op_mod, op[trans], CMD_TIME_CLASS_C, status);
err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
(!!is_ee << 24) | num, op_mod,
op[trans], CMD_TIME_CLASS_C, status);
if (0) {
if (0 && mailbox) {
int i;
mthca_dbg(dev, "Dumping QP context:\n");
printk(" %08x\n", be32_to_cpup(qp_context));
printk(" %08x\n", be32_to_cpup(mailbox->buf));
for (i = 0; i < 0x100 / 4; ++i) {
if (i % 8 == 0)
printk("[%02x] ", i * 4);
printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2]));
printk(" %08x",
be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
if ((i + 1) % 8 == 0)
printk("\n");
}
}
} else
err = mthca_cmd(dev, indma, (!!is_ee << 24) | num,
err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num,
op_mod, op[trans], CMD_TIME_CLASS_C, status);
if (trans != MTHCA_TRANS_ANY2RST)
pci_unmap_single(dev->pdev, indma,
MTHCA_QP_CONTEXT_SIZE, PCI_DMA_TODEVICE);
else
pci_free_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE,
qp_context, indma);
if (my_mailbox)
mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
void *qp_context, u8 *status)
struct mthca_mailbox *mailbox, u8 *status)
{
dma_addr_t outdma = 0;
int err;
outdma = pci_map_single(dev->pdev, qp_context,
MTHCA_QP_CONTEXT_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(outdma))
return -ENOMEM;
err = mthca_cmd_box(dev, 0, outdma, (!!is_ee << 24) | num, 0,
CMD_QUERY_QPEE,
CMD_TIME_CLASS_A, status);
pci_unmap_single(dev->pdev, outdma,
MTHCA_QP_CONTEXT_SIZE,
PCI_DMA_FROMDEVICE);
return err;
return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
}
int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
@ -1635,11 +1583,11 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
}
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc* in_wc, struct ib_grh* in_grh,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad, u8 *status)
{
void *box;
dma_addr_t dma;
struct mthca_mailbox *inmailbox, *outmailbox;
void *inbox;
int err;
u32 in_modifier = port;
u8 op_modifier = 0;
@ -1653,11 +1601,18 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
#define MAD_IFC_PKEY_OFFSET 0x10e
#define MAD_IFC_GRH_OFFSET 0x140
box = pci_alloc_consistent(dev->pdev, MAD_IFC_BOX_SIZE, &dma);
if (!box)
return -ENOMEM;
inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(inmailbox))
return PTR_ERR(inmailbox);
inbox = inmailbox->buf;
memcpy(box, in_mad, 256);
outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(outmailbox)) {
mthca_free_mailbox(dev, inmailbox);
return PTR_ERR(outmailbox);
}
memcpy(inbox, in_mad, 256);
/*
* Key check traps can't be generated unless we have in_wc to
@ -1671,97 +1626,65 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
if (in_wc) {
u8 val;
memset(box + 256, 0, 256);
memset(inbox + 256, 0, 256);
MTHCA_PUT(box, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET);
MTHCA_PUT(box, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET);
MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
val = in_wc->sl << 4;
MTHCA_PUT(box, val, MAD_IFC_SL_OFFSET);
MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET);
val = in_wc->dlid_path_bits |
(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
MTHCA_PUT(box, val, MAD_IFC_GRH_OFFSET);
MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET);
MTHCA_PUT(box, in_wc->slid, MAD_IFC_RLID_OFFSET);
MTHCA_PUT(box, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
if (in_grh)
memcpy((u8 *) box + MAD_IFC_GRH_OFFSET, in_grh, 40);
memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
op_modifier |= 0x10;
in_modifier |= in_wc->slid << 16;
}
err = mthca_cmd_box(dev, dma, dma + 512, in_modifier, op_modifier,
err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
in_modifier, op_modifier,
CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
if (!err && !*status)
memcpy(response_mad, box + 512, 256);
memcpy(response_mad, outmailbox->buf, 256);
pci_free_consistent(dev->pdev, MAD_IFC_BOX_SIZE, box, dma);
mthca_free_mailbox(dev, inmailbox);
mthca_free_mailbox(dev, outmailbox);
return err;
}
int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm,
u8 *status)
int mthca_READ_MGM(struct mthca_dev *dev, int index,
struct mthca_mailbox *mailbox, u8 *status)
{
dma_addr_t outdma = 0;
int err;
outdma = pci_map_single(dev->pdev, mgm,
MTHCA_MGM_ENTRY_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(outdma))
return -ENOMEM;
err = mthca_cmd_box(dev, 0, outdma, index, 0,
CMD_READ_MGM,
CMD_TIME_CLASS_A, status);
pci_unmap_single(dev->pdev, outdma,
MTHCA_MGM_ENTRY_SIZE,
PCI_DMA_FROMDEVICE);
return err;
return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
CMD_READ_MGM, CMD_TIME_CLASS_A, status);
}
int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm,
u8 *status)
int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
struct mthca_mailbox *mailbox, u8 *status)
{
dma_addr_t indma;
int err;
indma = pci_map_single(dev->pdev, mgm,
MTHCA_MGM_ENTRY_SIZE,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(indma))
return -ENOMEM;
err = mthca_cmd(dev, indma, index, 0, CMD_WRITE_MGM,
CMD_TIME_CLASS_A, status);
pci_unmap_single(dev->pdev, indma,
MTHCA_MGM_ENTRY_SIZE, PCI_DMA_TODEVICE);
return err;
return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
CMD_TIME_CLASS_A, status);
}
int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash,
u8 *status)
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
u16 *hash, u8 *status)
{
dma_addr_t indma;
u64 imm;
int err;
indma = pci_map_single(dev->pdev, gid, 16, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(indma))
return -ENOMEM;
err = mthca_cmd_imm(dev, indma, &imm, 0, 0, CMD_MGID_HASH,
err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
CMD_TIME_CLASS_A, status);
*hash = imm;
pci_unmap_single(dev->pdev, indma, 16, PCI_DMA_TODEVICE);
*hash = imm;
return err;
}

View File

@ -37,8 +37,7 @@
#include <ib_verbs.h>
#define MTHCA_CMD_MAILBOX_ALIGN 16UL
#define MTHCA_CMD_MAILBOX_EXTRA (MTHCA_CMD_MAILBOX_ALIGN - 1)
#define MTHCA_MAILBOX_SIZE 4096
enum {
/* command completed successfully: */
@ -112,6 +111,11 @@ enum {
DEV_LIM_FLAG_UD_MULTI = 1 << 21,
};
struct mthca_mailbox {
dma_addr_t dma;
void *buf;
};
struct mthca_dev_lim {
int max_srq_sz;
int max_qp_sz;
@ -235,11 +239,17 @@ struct mthca_set_ib_param {
u32 cap_mask;
};
int mthca_cmd_init(struct mthca_dev *dev);
void mthca_cmd_cleanup(struct mthca_dev *dev);
int mthca_cmd_use_events(struct mthca_dev *dev);
void mthca_cmd_use_polling(struct mthca_dev *dev);
void mthca_cmd_event(struct mthca_dev *dev, u16 token,
u8 status, u64 out_param);
struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
unsigned int gfp_mask);
void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
@ -270,41 +280,39 @@ int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
u8 *status);
int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry,
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int mpt_index, u8 *status);
int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry,
int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int mpt_index, u8 *status);
int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry,
int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int num_mtt, u8 *status);
int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status);
int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
int eq_num, u8 *status);
int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context,
int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int eq_num, u8 *status);
int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context,
int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int eq_num, u8 *status);
int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context,
int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context,
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, void *qp_context, u32 optmask,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status);
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
void *qp_context, u8 *status);
struct mthca_mailbox *mailbox, u8 *status);
int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
u8 *status);
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc* in_wc, struct ib_grh* in_grh,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad, u8 *status);
int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm,
u8 *status);
int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm,
u8 *status);
int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash,
u8 *status);
int mthca_READ_MGM(struct mthca_dev *dev, int index,
struct mthca_mailbox *mailbox, u8 *status);
int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
struct mthca_mailbox *mailbox, u8 *status);
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
u16 *hash, u8 *status);
int mthca_NOP(struct mthca_dev *dev, u8 *status);
#define MAILBOX_ALIGN(x) ((void *) ALIGN((unsigned long) (x), MTHCA_CMD_MAILBOX_ALIGN))
#endif /* MTHCA_CMD_H */
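The prototypes above replace the old void-pointer arguments (which each command had to DMA-map itself) with a struct mthca_mailbox carrying both the kernel address (buf) and the bus address (dma).  A sketch of the resulting call pattern, written against these prototypes and the driver's other headers (illustrative; example_sw2hw_cq is not a function in the driver):

static int example_sw2hw_cq(struct mthca_dev *dev, void *cq_context,
			    int cq_num, u8 *status)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* Build the command payload in the mailbox buffer ... */
	memcpy(mailbox->buf, cq_context, MTHCA_CQ_CONTEXT_SIZE);

	/* ... the command helper then hands mailbox->dma to the HCA. */
	err = mthca_SW2HW_CQ(dev, mailbox, cq_num, status);

	mthca_free_mailbox(dev, mailbox);
	return err;
}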

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -171,6 +172,17 @@ static inline void set_cqe_hw(struct mthca_cqe *cqe)
cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
__be32 *cqe = cqe_ptr;
(void) cqe; /* avoid warning if mthca_dbg compiled away... */
mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}
/*
* incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
* should be correct before calling update_cons_index().
@ -280,16 +292,12 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
int dbd;
u32 new_wqe;
if (1 && cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {
int j;
mthca_dbg(dev, "%x/%d: error CQE -> QPN %06x, WQE @ %08x\n",
cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
be32_to_cpu(cqe->wqe));
for (j = 0; j < 8; ++j)
printk(KERN_DEBUG " [%2x] %08x\n",
j * 4, be32_to_cpu(((u32 *) cqe)[j]));
if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
mthca_dbg(dev, "local QP operation err "
"(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
cq->cqn, cq->cons_index);
dump_cqe(dev, cqe);
}
/*
@ -377,15 +385,6 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
return 0;
}
static void dump_cqe(struct mthca_cqe *cqe)
{
int j;
for (j = 0; j < 8; ++j)
printk(KERN_DEBUG " [%2x] %08x\n",
j * 4, be32_to_cpu(((u32 *) cqe)[j]));
}
static inline int mthca_poll_one(struct mthca_dev *dev,
struct mthca_cq *cq,
struct mthca_qp **cur_qp,
@ -414,8 +413,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
be32_to_cpu(cqe->wqe));
dump_cqe(cqe);
dump_cqe(dev, cqe);
}
is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
@ -638,19 +636,19 @@ static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
int size;
if (cq->is_direct)
pci_free_consistent(dev->pdev,
(cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
cq->queue.direct.buf,
pci_unmap_addr(&cq->queue.direct,
mapping));
dma_free_coherent(&dev->pdev->dev,
(cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
cq->queue.direct.buf,
pci_unmap_addr(&cq->queue.direct,
mapping));
else {
size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
if (cq->queue.page_list[i].buf)
pci_free_consistent(dev->pdev, PAGE_SIZE,
cq->queue.page_list[i].buf,
pci_unmap_addr(&cq->queue.page_list[i],
mapping));
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
cq->queue.page_list[i].buf,
pci_unmap_addr(&cq->queue.page_list[i],
mapping));
kfree(cq->queue.page_list);
}
@ -670,8 +668,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
npages = 1;
shift = get_order(size) + PAGE_SHIFT;
cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
size, &t);
cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
size, &t, GFP_KERNEL);
if (!cq->queue.direct.buf)
return -ENOMEM;
@ -709,7 +707,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
for (i = 0; i < npages; ++i) {
cq->queue.page_list[i].buf =
pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
&t, GFP_KERNEL);
if (!cq->queue.page_list[i].buf)
goto err_free;
@ -746,7 +745,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
struct mthca_cq *cq)
{
int size = nent * MTHCA_CQ_ENTRY_SIZE;
void *mailbox = NULL;
struct mthca_mailbox *mailbox;
struct mthca_cq_context *cq_context;
int err = -ENOMEM;
u8 status;
@ -780,12 +779,11 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
goto err_out_ci;
}
mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
GFP_KERNEL);
if (!mailbox)
goto err_out_mailbox;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
goto err_out_arm;
cq_context = MAILBOX_ALIGN(mailbox);
cq_context = mailbox->buf;
err = mthca_alloc_cq_buf(dev, size, cq);
if (err)
@ -816,7 +814,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context->state_db = cpu_to_be32(cq->arm_db_index);
}
err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status);
err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
if (err) {
mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
goto err_out_free_mr;
@ -840,7 +838,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq->cons_index = 0;
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
return 0;
@ -849,8 +847,9 @@ err_out_free_mr:
mthca_free_cq_buf(dev, cq);
err_out_mailbox:
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
err_out_arm:
if (mthca_is_memfree(dev))
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
@ -870,28 +869,26 @@ err_out:
void mthca_free_cq(struct mthca_dev *dev,
struct mthca_cq *cq)
{
void *mailbox;
struct mthca_mailbox *mailbox;
int err;
u8 status;
might_sleep();
mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
GFP_KERNEL);
if (!mailbox) {
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
mthca_warn(dev, "No memory for mailbox to free CQ.\n");
return;
}
err = mthca_HW2SW_CQ(dev, MAILBOX_ALIGN(mailbox), cq->cqn, &status);
err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
if (err)
mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
else if (status)
mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n",
status);
mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
if (0) {
u32 *ctx = MAILBOX_ALIGN(mailbox);
u32 *ctx = mailbox->buf;
int j;
printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
@ -919,11 +916,11 @@ void mthca_free_cq(struct mthca_dev *dev,
if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
mthca_table_put(dev, dev->cq_table.table, cq->cqn);
}
mthca_table_put(dev, dev->cq_table.table, cq->cqn);
mthca_free(&dev->cq_table.alloc, cq->cqn);
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
}
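Alongside the mailbox conversion, the buffer-allocation hunks in this file move from the PCI wrappers to the generic DMA API.  For a PCI device the substitution is one-to-one, except that the generic call takes an explicit gfp mask (GFP_KERNEL in these hunks, where sleeping is allowed); a sketch of the mapping:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void *alloc_coherent_sketch(struct pci_dev *pdev, size_t size,
				   dma_addr_t *dma)
{
	/* old form: return pci_alloc_consistent(pdev, size, dma); */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void free_coherent_sketch(struct pci_dev *pdev, size_t size,
				 void *buf, dma_addr_t dma)
{
	/* old form: pci_free_consistent(pdev, size, buf, dma); */
	dma_free_coherent(&pdev->dev, size, buf, dma);
}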
int __devinit mthca_init_cq_table(struct mthca_dev *dev)

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -46,8 +47,8 @@
#define DRV_NAME "ib_mthca"
#define PFX DRV_NAME ": "
#define DRV_VERSION "0.06-pre"
#define DRV_RELDATE "November 8, 2004"
#define DRV_VERSION "0.06"
#define DRV_RELDATE "June 23, 2005"
enum {
MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
@ -98,6 +99,7 @@ enum {
};
struct mthca_cmd {
struct pci_pool *pool;
int use_events;
struct semaphore hcr_sem;
struct semaphore poll_sem;
@ -379,6 +381,12 @@ void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
int mthca_pd_alloc(struct mthca_dev *dev, struct mthca_pd *pd);
void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd);
struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size);
void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
int start_index, u64 *buffer_list, int list_len);
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
u64 iova, u64 total_size, u32 access, struct mthca_mr *mr);
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr);
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU

View File

@ -469,7 +469,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
PAGE_SIZE;
u64 *dma_list = NULL;
dma_addr_t t;
void *mailbox = NULL;
struct mthca_mailbox *mailbox;
struct mthca_eq_context *eq_context;
int err = -ENOMEM;
int i;
@ -494,17 +494,16 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
if (!dma_list)
goto err_out_free;
mailbox = kmalloc(sizeof *eq_context + MTHCA_CMD_MAILBOX_EXTRA,
GFP_KERNEL);
if (!mailbox)
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
goto err_out_free;
eq_context = MAILBOX_ALIGN(mailbox);
eq_context = mailbox->buf;
for (i = 0; i < npages; ++i) {
eq->page_list[i].buf = pci_alloc_consistent(dev->pdev,
PAGE_SIZE, &t);
eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
PAGE_SIZE, &t, GFP_KERNEL);
if (!eq->page_list[i].buf)
goto err_out_free;
goto err_out_free_pages;
dma_list[i] = t;
pci_unmap_addr_set(&eq->page_list[i], mapping, t);
@ -517,7 +516,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
eq->eqn = mthca_alloc(&dev->eq_table.alloc);
if (eq->eqn == -1)
goto err_out_free;
goto err_out_free_pages;
err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
dma_list, PAGE_SHIFT, npages,
@ -548,7 +547,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
eq_context->intr = intr;
eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);
err = mthca_SW2HW_EQ(dev, eq_context, eq->eqn, &status);
err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
if (err) {
mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
goto err_out_free_mr;
@ -561,7 +560,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
}
kfree(dma_list);
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
eq->eqn_mask = swab32(1 << eq->eqn);
eq->cons_index = 0;
@ -579,17 +578,19 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
err_out_free_eq:
mthca_free(&dev->eq_table.alloc, eq->eqn);
err_out_free:
err_out_free_pages:
for (i = 0; i < npages; ++i)
if (eq->page_list[i].buf)
pci_free_consistent(dev->pdev, PAGE_SIZE,
eq->page_list[i].buf,
pci_unmap_addr(&eq->page_list[i],
mapping));
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
pci_unmap_addr(&eq->page_list[i],
mapping));
mthca_free_mailbox(dev, mailbox);
err_out_free:
kfree(eq->page_list);
kfree(dma_list);
kfree(mailbox);
err_out:
return err;
@ -598,25 +599,22 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
static void mthca_free_eq(struct mthca_dev *dev,
struct mthca_eq *eq)
{
void *mailbox = NULL;
struct mthca_mailbox *mailbox;
int err;
u8 status;
int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
PAGE_SIZE;
int i;
mailbox = kmalloc(sizeof (struct mthca_eq_context) + MTHCA_CMD_MAILBOX_EXTRA,
GFP_KERNEL);
if (!mailbox)
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return;
err = mthca_HW2SW_EQ(dev, MAILBOX_ALIGN(mailbox),
eq->eqn, &status);
err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
if (err)
mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
if (status)
mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n",
status);
mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
dev->eq_table.arm_mask &= ~eq->eqn_mask;
@ -625,7 +623,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
if (i % 4 == 0)
printk("[%02x] ", i * 4);
printk(" %08x", be32_to_cpup(MAILBOX_ALIGN(mailbox) + i * 4));
printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
if ((i + 1) % 4 == 0)
printk("\n");
}
@ -638,7 +636,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
pci_unmap_addr(&eq->page_list[i], mapping));
kfree(eq->page_list);
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
}
static void mthca_free_irqs(struct mthca_dev *dev)
@ -709,8 +707,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.eq_arm_base) + 4, 4,
&dev->eq_regs.arbel.eq_arm)) {
mthca_err(dev, "Couldn't map interrupt clear register, "
"aborting.\n");
mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
dev->clr_base);
@ -721,8 +718,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
dev->fw.arbel.eq_set_ci_base,
MTHCA_EQ_SET_CI_SIZE,
&dev->eq_regs.arbel.eq_set_ci_base)) {
mthca_err(dev, "Couldn't map interrupt clear register, "
"aborting.\n");
mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.eq_arm_base) + 4, 4,
dev->eq_regs.arbel.eq_arm);

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -69,7 +70,7 @@ MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");
#endif /* CONFIG_PCI_MSI */
static const char mthca_version[] __devinitdata =
"ib_mthca: Mellanox InfiniBand HCA driver v"
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mthca_profile default_profile = {
@ -927,13 +928,13 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
*/
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
pci_resource_len(pdev, 0) != 1 << 20) {
dev_err(&pdev->dev, "Missing DCS, aborting.");
dev_err(&pdev->dev, "Missing DCS, aborting.\n");
err = -ENODEV;
goto err_disable_pdev;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM) ||
pci_resource_len(pdev, 2) != 1 << 23) {
dev_err(&pdev->dev, "Missing UAR, aborting.");
dev_err(&pdev->dev, "Missing UAR, aborting.\n");
err = -ENODEV;
goto err_disable_pdev;
}
@ -1004,25 +1005,18 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
!pci_enable_msi(pdev))
mdev->mthca_flags |= MTHCA_FLAG_MSI;
sema_init(&mdev->cmd.hcr_sem, 1);
sema_init(&mdev->cmd.poll_sem, 1);
mdev->cmd.use_events = 0;
mdev->hcr = ioremap(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE, MTHCA_HCR_SIZE);
if (!mdev->hcr) {
mthca_err(mdev, "Couldn't map command register, "
"aborting.\n");
err = -ENOMEM;
if (mthca_cmd_init(mdev)) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
goto err_free_dev;
}
err = mthca_tune_pci(mdev);
if (err)
goto err_iounmap;
goto err_cmd;
err = mthca_init_hca(mdev);
if (err)
goto err_iounmap;
goto err_cmd;
if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n",
@ -1070,8 +1064,8 @@ err_cleanup:
err_close:
mthca_close_hca(mdev);
err_iounmap:
iounmap(mdev->hcr);
err_cmd:
mthca_cmd_cleanup(mdev);
err_free_dev:
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
@ -1118,10 +1112,8 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
iounmap(mdev->kar);
mthca_uar_free(mdev, &mdev->driver_uar);
mthca_cleanup_uar_table(mdev);
mthca_close_hca(mdev);
iounmap(mdev->hcr);
mthca_cmd_cleanup(mdev);
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
pci_disable_msix(pdev);
@ -1163,7 +1155,7 @@ static struct pci_device_id mthca_pci_table[] = {
MODULE_DEVICE_TABLE(pci, mthca_pci_table);
static struct pci_driver mthca_driver = {
.name = "ib_mthca",
.name = DRV_NAME,
.id_table = mthca_pci_table,
.probe = mthca_init_one,
.remove = __devexit_p(mthca_remove_one)

View File

@ -66,22 +66,23 @@ static const u8 zero_gid[16]; /* automatically initialized to 0 */
* entry in hash chain and *mgm holds end of hash chain.
*/
static int find_mgm(struct mthca_dev *dev,
u8 *gid, struct mthca_mgm *mgm,
u8 *gid, struct mthca_mailbox *mgm_mailbox,
u16 *hash, int *prev, int *index)
{
void *mailbox;
struct mthca_mailbox *mailbox;
struct mthca_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
u8 status;
mailbox = kmalloc(16 + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
if (!mailbox)
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return -ENOMEM;
mgid = MAILBOX_ALIGN(mailbox);
mgid = mailbox->buf;
memcpy(mgid, gid, 16);
err = mthca_MGID_HASH(dev, mgid, hash, &status);
err = mthca_MGID_HASH(dev, mailbox, hash, &status);
if (err)
goto out;
if (status) {
@ -103,7 +104,7 @@ static int find_mgm(struct mthca_dev *dev,
*prev = -1;
do {
err = mthca_READ_MGM(dev, *index, mgm, &status);
err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status);
if (err)
goto out;
if (status) {
@ -129,14 +130,14 @@ static int find_mgm(struct mthca_dev *dev,
*index = -1;
out:
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
void *mailbox;
struct mthca_mailbox *mailbox;
struct mthca_mgm *mgm;
u16 hash;
int index, prev;
@ -145,15 +146,15 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int err;
u8 status;
mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
if (!mailbox)
return -ENOMEM;
mgm = MAILBOX_ALIGN(mailbox);
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mgm = mailbox->buf;
if (down_interruptible(&dev->mcg_table.sem))
return -EINTR;
err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index);
err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
goto out;
@ -170,7 +171,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
err = mthca_READ_MGM(dev, index, mgm, &status);
err = mthca_READ_MGM(dev, index, mailbox, &status);
if (err)
goto out;
if (status) {
@ -195,7 +196,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
err = mthca_WRITE_MGM(dev, index, mgm, &status);
err = mthca_WRITE_MGM(dev, index, mailbox, &status);
if (err)
goto out;
if (status) {
@ -206,7 +207,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (!link)
goto out;
err = mthca_READ_MGM(dev, prev, mgm, &status);
err = mthca_READ_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
if (status) {
@ -217,7 +218,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mgm->next_gid_index = cpu_to_be32(index << 5);
err = mthca_WRITE_MGM(dev, prev, mgm, &status);
err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
if (status) {
@ -227,14 +228,14 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
out:
up(&dev->mcg_table.sem);
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
void *mailbox;
struct mthca_mailbox *mailbox;
struct mthca_mgm *mgm;
u16 hash;
int prev, index;
@ -242,15 +243,15 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int err;
u8 status;
mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
if (!mailbox)
return -ENOMEM;
mgm = MAILBOX_ALIGN(mailbox);
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mgm = mailbox->buf;
if (down_interruptible(&dev->mcg_table.sem))
return -EINTR;
err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index);
err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
goto out;
@ -285,7 +286,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mgm->qp[loc] = mgm->qp[i - 1];
mgm->qp[i - 1] = 0;
err = mthca_WRITE_MGM(dev, index, mgm, &status);
err = mthca_WRITE_MGM(dev, index, mailbox, &status);
if (err)
goto out;
if (status) {
@ -304,7 +305,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (be32_to_cpu(mgm->next_gid_index) >> 5) {
err = mthca_READ_MGM(dev,
be32_to_cpu(mgm->next_gid_index) >> 5,
mgm, &status);
mailbox, &status);
if (err)
goto out;
if (status) {
@ -316,7 +317,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
} else
memset(mgm->gid, 0, 16);
err = mthca_WRITE_MGM(dev, index, mgm, &status);
err = mthca_WRITE_MGM(dev, index, mailbox, &status);
if (err)
goto out;
if (status) {
@ -327,7 +328,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
} else {
/* Remove entry from AMGM */
index = be32_to_cpu(mgm->next_gid_index) >> 5;
err = mthca_READ_MGM(dev, prev, mgm, &status);
err = mthca_READ_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
if (status) {
@ -338,7 +339,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mgm->next_gid_index = cpu_to_be32(index << 5);
err = mthca_WRITE_MGM(dev, prev, mgm, &status);
err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
if (status) {
@ -350,7 +351,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
out:
up(&dev->mcg_table.sem);
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
return err;
}

View File

@ -179,9 +179,14 @@ out:
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
int i;
u8 status;
if (!mthca_is_memfree(dev))
return;
i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
down(&table->mutex);
if (--table->icm[i]->refcount == 0) {
@ -256,6 +261,9 @@ void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
{
int i;
if (!mthca_is_memfree(dev))
return;
for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
mthca_table_put(dev, table, i);
}
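
These two hunks push the mthca_is_memfree() test down into mthca_table_put() and mthca_table_put_range(), so later hunks can drop the per-caller guards. A toy illustration of that check-in-the-callee refactor, with made-up names:

---> snip: illustrative sketch (check in the callee) <---
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device flag standing in for mthca_is_memfree(). */
struct device {
	bool memfree;
};

/* The feature check lives in the helper, so callers never repeat it. */
static void table_put(struct device *dev, int obj)
{
	if (!dev->memfree)
		return;		/* nothing to release on non-memfree hardware */
	printf("releasing ICM chunk for object %d\n", obj);
}

int main(void)
{
	struct device tavor = { .memfree = false };
	struct device arbel = { .memfree = true };

	table_put(&tavor, 7);	/* silently a no-op */
	table_put(&arbel, 7);	/* actually releases the chunk */
	return 0;
}
---> end snip <---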

View File

@ -40,6 +40,12 @@
#include "mthca_cmd.h"
#include "mthca_memfree.h"
struct mthca_mtt {
struct mthca_buddy *buddy;
int order;
u32 first_seg;
};
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@ -173,8 +179,8 @@ static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy)
kfree(buddy->bits);
}
static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order,
struct mthca_buddy *buddy)
static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
struct mthca_buddy *buddy)
{
u32 seg = mthca_buddy_alloc(buddy, order);
@ -191,14 +197,102 @@ static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order,
return seg;
}
static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order,
struct mthca_buddy* buddy)
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
struct mthca_buddy *buddy)
{
mthca_buddy_free(buddy, seg, order);
struct mthca_mtt *mtt;
int i;
if (mthca_is_memfree(dev))
mthca_table_put_range(dev, dev->mr_table.mtt_table, seg,
seg + (1 << order) - 1);
if (size <= 0)
return ERR_PTR(-EINVAL);
mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
if (!mtt)
return ERR_PTR(-ENOMEM);
mtt->buddy = buddy;
mtt->order = 0;
for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
++mtt->order;
mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
if (mtt->first_seg == -1) {
kfree(mtt);
return ERR_PTR(-ENOMEM);
}
return mtt;
}
struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}
void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
if (!mtt)
return;
mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);
mthca_table_put_range(dev, dev->mr_table.mtt_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
kfree(mtt);
}
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
int start_index, u64 *buffer_list, int list_len)
{
struct mthca_mailbox *mailbox;
u64 *mtt_entry;
int err = 0;
u8 status;
int i;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mtt_entry = mailbox->buf;
while (list_len > 0) {
mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
mtt->first_seg * MTHCA_MTT_SEG_SIZE +
start_index * 8);
mtt_entry[1] = 0;
for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
MTHCA_MTT_FLAG_PRESENT);
/*
* If we have an odd number of entries to write, add
* one more dummy entry for firmware efficiency.
*/
if (i & 1)
mtt_entry[i + 2] = 0;
err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
if (err) {
mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
goto out;
}
if (status) {
mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
status);
err = -EINVAL;
goto out;
}
list_len -= i;
start_index += i;
buffer_list += i;
}
out:
mthca_free_mailbox(dev, mailbox);
return err;
}
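
mthca_write_mtt() above streams an arbitrarily long page list to the device in mailbox-sized chunks: the first two 64-bit slots carry the target MTT address, each chunk is padded with a dummy entry to an even count, and start_index/buffer_list advance between iterations. A user-space sketch of that chunking loop, assuming a 4 KB mailbox and a stand-in write_mtt_cmd() in place of the real firmware command:

---> snip: illustrative sketch (chunked MTT writes) <---
#include <stdint.h>
#include <stdio.h>

#define MAILBOX_SIZE	4096			/* assumed mailbox payload size */
#define ENTRIES_PER_CMD	(MAILBOX_SIZE / 8 - 2)	/* two slots hold the target address */

/* Hypothetical stand-in for the WRITE_MTT firmware command. */
static int write_mtt_cmd(const uint64_t *mbox, int nent)
{
	printf("WRITE_MTT: %d entries, destination 0x%llx\n",
	       nent, (unsigned long long) mbox[0]);
	return 0;
}

/*
 * Chunk an arbitrarily long page list into mailbox-sized WRITE_MTT calls,
 * padding each chunk to an even entry count the way the loop above does.
 */
static int write_mtt(uint64_t mtt_base, int start_index,
		     const uint64_t *buffer_list, int list_len)
{
	uint64_t mbox[MAILBOX_SIZE / 8];
	int i, err;

	while (list_len > 0) {
		mbox[0] = mtt_base + start_index * 8;	/* where this chunk lands */
		mbox[1] = 0;

		for (i = 0; i < list_len && i < ENTRIES_PER_CMD; ++i)
			mbox[i + 2] = buffer_list[i] | 1;	/* mark entry present */

		if (i & 1)
			mbox[i + 2] = 0;	/* dummy entry pads to an even count */

		err = write_mtt_cmd(mbox, (i + 1) & ~1);
		if (err)
			return err;

		list_len    -= i;
		start_index += i;
		buffer_list += i;
	}
	return 0;
}

int main(void)
{
	uint64_t pages[5] = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000 };

	return write_mtt(0x100000, 0, pages, 5);
}
---> end snip <---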
static inline u32 tavor_hw_index_to_key(u32 ind)
@ -237,91 +331,18 @@ static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
return tavor_key_to_hw_index(key);
}
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr)
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
void *mailbox = NULL;
struct mthca_mailbox *mailbox;
struct mthca_mpt_entry *mpt_entry;
u32 key;
int i;
int err;
u8 status;
might_sleep();
mr->order = -1;
key = mthca_alloc(&dev->mr_table.mpt_alloc);
if (key == -1)
return -ENOMEM;
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
if (mthca_is_memfree(dev)) {
err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
if (err)
goto err_out_mpt_free;
}
mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA,
GFP_KERNEL);
if (!mailbox) {
err = -ENOMEM;
goto err_out_table;
}
mpt_entry = MAILBOX_ALIGN(mailbox);
mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
MTHCA_MPT_FLAG_MIO |
MTHCA_MPT_FLAG_PHYSICAL |
MTHCA_MPT_FLAG_REGION |
access);
mpt_entry->page_size = 0;
mpt_entry->key = cpu_to_be32(key);
mpt_entry->pd = cpu_to_be32(pd);
mpt_entry->start = 0;
mpt_entry->length = ~0ULL;
memset(&mpt_entry->lkey, 0,
sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
err = mthca_SW2HW_MPT(dev, mpt_entry,
key & (dev->limits.num_mpts - 1),
&status);
if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_out_table;
} else if (status) {
mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
status);
err = -EINVAL;
goto err_out_table;
}
kfree(mailbox);
return err;
err_out_table:
if (mthca_is_memfree(dev))
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
mthca_free(&dev->mr_table.mpt_alloc, key);
kfree(mailbox);
return err;
}
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
u64 *buffer_list, int buffer_size_shift,
int list_len, u64 iova, u64 total_size,
u32 access, struct mthca_mr *mr)
{
void *mailbox;
u64 *mtt_entry;
struct mthca_mpt_entry *mpt_entry;
u32 key;
int err = -ENOMEM;
u8 status;
int i;
might_sleep();
WARN_ON(buffer_size_shift >= 32);
key = mthca_alloc(&dev->mr_table.mpt_alloc);
@ -335,75 +356,33 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
goto err_out_mpt_free;
}
for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
i < list_len;
i <<= 1, ++mr->order)
; /* nothing */
mr->first_seg = mthca_alloc_mtt(dev, mr->order,
&dev->mr_table.mtt_buddy);
if (mr->first_seg == -1)
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_out_table;
/*
* If list_len is odd, we add one more dummy entry for
* firmware efficiency.
*/
mailbox = kmalloc(max(sizeof *mpt_entry,
(size_t) 8 * (list_len + (list_len & 1) + 2)) +
MTHCA_CMD_MAILBOX_EXTRA,
GFP_KERNEL);
if (!mailbox)
goto err_out_free_mtt;
mtt_entry = MAILBOX_ALIGN(mailbox);
mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
mr->first_seg * MTHCA_MTT_SEG_SIZE);
mtt_entry[1] = 0;
for (i = 0; i < list_len; ++i)
mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
MTHCA_MTT_FLAG_PRESENT);
if (list_len & 1) {
mtt_entry[i + 2] = 0;
++list_len;
}
if (0) {
mthca_dbg(dev, "Dumping MPT entry\n");
for (i = 0; i < list_len + 2; ++i)
printk(KERN_ERR "[%2d] %016llx\n",
i, (unsigned long long) be64_to_cpu(mtt_entry[i]));
}
err = mthca_WRITE_MTT(dev, mtt_entry, list_len, &status);
if (err) {
mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
goto err_out_mailbox_free;
}
if (status) {
mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
status);
err = -EINVAL;
goto err_out_mailbox_free;
}
mpt_entry = MAILBOX_ALIGN(mailbox);
mpt_entry = mailbox->buf;
mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
MTHCA_MPT_FLAG_MIO |
MTHCA_MPT_FLAG_REGION |
access);
if (!mr->mtt)
mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);
mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
mpt_entry->key = cpu_to_be32(key);
mpt_entry->pd = cpu_to_be32(pd);
mpt_entry->start = cpu_to_be64(iova);
mpt_entry->length = cpu_to_be64(total_size);
memset(&mpt_entry->lkey, 0,
sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base +
mr->first_seg * MTHCA_MTT_SEG_SIZE);
if (mr->mtt)
mpt_entry->mtt_seg =
cpu_to_be64(dev->mr_table.mtt_base +
mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
if (0) {
mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
@ -416,45 +395,70 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
}
}
err = mthca_SW2HW_MPT(dev, mpt_entry,
err = mthca_SW2HW_MPT(dev, mailbox,
key & (dev->limits.num_mpts - 1),
&status);
if (err)
if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
else if (status) {
goto err_out_mailbox;
} else if (status) {
mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
status);
err = -EINVAL;
goto err_out_mailbox;
}
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
return err;
err_out_mailbox_free:
kfree(mailbox);
err_out_free_mtt:
mthca_free_mtt(dev, mr->first_seg, mr->order, &dev->mr_table.mtt_buddy);
err_out_mailbox:
mthca_free_mailbox(dev, mailbox);
err_out_table:
if (mthca_is_memfree(dev))
mthca_table_put(dev, dev->mr_table.mpt_table, key);
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
mthca_free(&dev->mr_table.mpt_alloc, key);
return err;
}
/* Free mr or fmr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey, int order,
u32 first_seg, struct mthca_buddy *buddy)
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr)
{
if (order >= 0)
mthca_free_mtt(dev, first_seg, order, buddy);
mr->mtt = NULL;
return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}
if (mthca_is_memfree(dev))
mthca_table_put(dev, dev->mr_table.mpt_table,
arbel_key_to_hw_index(lkey));
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
u64 *buffer_list, int buffer_size_shift,
int list_len, u64 iova, u64 total_size,
u32 access, struct mthca_mr *mr)
{
int err;
mr->mtt = mthca_alloc_mtt(dev, list_len);
if (IS_ERR(mr->mtt))
return PTR_ERR(mr->mtt);
err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
if (err) {
mthca_free_mtt(dev, mr->mtt);
return err;
}
err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
total_size, access, mr);
if (err)
mthca_free_mtt(dev, mr->mtt);
return err;
}
/* Free mr or fmr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
mthca_table_put(dev, dev->mr_table.mpt_table,
arbel_key_to_hw_index(lkey));
mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}
@ -476,15 +480,15 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
status);
mthca_free_region(dev, mr->ibmr.lkey, mr->order, mr->first_seg,
&dev->mr_table.mtt_buddy);
mthca_free_region(dev, mr->ibmr.lkey);
mthca_free_mtt(dev, mr->mtt);
}
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_fmr *mr)
{
struct mthca_mpt_entry *mpt_entry;
void *mailbox;
struct mthca_mailbox *mailbox;
u64 mtt_seg;
u32 key, idx;
u8 status;
@ -522,31 +526,24 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
sizeof *(mr->mem.tavor.mpt) * idx;
for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
i < list_len;
i <<= 1, ++mr->order)
; /* nothing */
mr->first_seg = mthca_alloc_mtt(dev, mr->order,
dev->mr_table.fmr_mtt_buddy);
if (mr->first_seg == -1)
mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
if (IS_ERR(mr->mtt))
goto err_out_table;
mtt_seg = mr->first_seg * MTHCA_MTT_SEG_SIZE;
mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
if (mthca_is_memfree(dev)) {
mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
mr->first_seg);
mr->mtt->first_seg);
BUG_ON(!mr->mem.arbel.mtts);
} else
mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA,
GFP_KERNEL);
if (!mailbox)
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
goto err_out_free_mtt;
mpt_entry = MAILBOX_ALIGN(mailbox);
mpt_entry = mailbox->buf;
mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
MTHCA_MPT_FLAG_MIO |
@ -571,7 +568,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
}
}
err = mthca_SW2HW_MPT(dev, mpt_entry,
err = mthca_SW2HW_MPT(dev, mailbox,
key & (dev->limits.num_mpts - 1),
&status);
if (err) {
@ -585,19 +582,17 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
goto err_out_mailbox_free;
}
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
return 0;
err_out_mailbox_free:
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
err_out_free_mtt:
mthca_free_mtt(dev, mr->first_seg, mr->order,
dev->mr_table.fmr_mtt_buddy);
mthca_free_mtt(dev, mr->mtt);
err_out_table:
if (mthca_is_memfree(dev))
mthca_table_put(dev, dev->mr_table.mpt_table, key);
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
@ -609,8 +604,9 @@ int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
if (fmr->maps)
return -EBUSY;
mthca_free_region(dev, fmr->ibmr.lkey, fmr->order, fmr->first_seg,
dev->mr_table.fmr_mtt_buddy);
mthca_free_region(dev, fmr->ibmr.lkey);
mthca_free_mtt(dev, fmr->mtt);
return 0;
}
@ -826,7 +822,8 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
if (dev->limits.reserved_mtts) {
i = fls(dev->limits.reserved_mtts - 1);
if (mthca_alloc_mtt(dev, i, dev->mr_table.fmr_mtt_buddy) == -1) {
if (mthca_alloc_mtt_range(dev, i,
dev->mr_table.fmr_mtt_buddy) == -1) {
mthca_warn(dev, "MTT table of order %d is too small.\n",
dev->mr_table.fmr_mtt_buddy->max_order);
err = -ENOMEM;

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -52,7 +53,7 @@ static int mthca_query_device(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
memset(props, 0, sizeof props);
memset(props, 0, sizeof *props);
props->fw_ver = mdev->fw_ver;
@ -558,6 +559,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
convert_access(acc), mr);
if (err) {
kfree(page_list);
kfree(mr);
return ERR_PTR(err);
}
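
The two small fixes above close a sizeof-a-pointer slip (sizeof props clears only the pointer's size, sizeof *props clears the whole structure) and a leak of mr on the error path. A tiny self-contained illustration of the memset half; the structure below is made up:

---> snip: illustrative sketch (sizeof pointer vs sizeof object) <---
#include <stdio.h>
#include <string.h>

struct device_attr {
	unsigned long long fw_ver;
	unsigned int max_qp, max_cq, max_mr;
};

int main(void)
{
	struct device_attr attr, *props = &attr;

	/* Bug pattern: clears only sizeof(pointer) bytes, 4 or 8. */
	memset(props, 0, sizeof props);
	printf("sizeof props  = %zu\n", sizeof props);

	/* Fixed form: clears the entire pointed-to structure. */
	memset(props, 0, sizeof *props);
	printf("sizeof *props = %zu\n", sizeof *props);
	return 0;
}
---> end snip <---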

View File

@ -54,18 +54,18 @@ struct mthca_uar {
int index;
};
struct mthca_mtt;
struct mthca_mr {
struct ib_mr ibmr;
int order;
u32 first_seg;
struct ib_mr ibmr;
struct mthca_mtt *mtt;
};
struct mthca_fmr {
struct ib_fmr ibmr;
struct ib_fmr ibmr;
struct ib_fmr_attr attr;
int order;
u32 first_seg;
int maps;
struct mthca_mtt *mtt;
int maps;
union {
struct {
struct mthca_mpt_entry __iomem *mpt;

View File

@ -357,6 +357,9 @@ static const struct {
[UD] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_QKEY),
[UC] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_ACCESS_FLAGS),
[RC] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_ACCESS_FLAGS),
@ -378,6 +381,9 @@ static const struct {
[UD] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_QKEY),
[UC] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_ACCESS_FLAGS),
[RC] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_ACCESS_FLAGS),
@ -388,6 +394,11 @@ static const struct {
[IB_QPS_RTR] = {
.trans = MTHCA_TRANS_INIT2RTR,
.req_param = {
[UC] = (IB_QP_AV |
IB_QP_PATH_MTU |
IB_QP_DEST_QPN |
IB_QP_RQ_PSN |
IB_QP_MAX_DEST_RD_ATOMIC),
[RC] = (IB_QP_AV |
IB_QP_PATH_MTU |
IB_QP_DEST_QPN |
@ -398,6 +409,9 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
[UC] = (IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX),
[RC] = (IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX),
@ -413,6 +427,8 @@ static const struct {
.trans = MTHCA_TRANS_RTR2RTS,
.req_param = {
[UD] = IB_QP_SQ_PSN,
[UC] = (IB_QP_SQ_PSN |
IB_QP_MAX_QP_RD_ATOMIC),
[RC] = (IB_QP_TIMEOUT |
IB_QP_RETRY_CNT |
IB_QP_RNR_RETRY |
@ -423,6 +439,11 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
[UC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX |
IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
@ -442,6 +463,9 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
[UC] = (IB_QP_ACCESS_FLAGS |
IB_QP_ALT_PATH |
IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_ACCESS_FLAGS |
IB_QP_ALT_PATH |
IB_QP_PATH_MIG_STATE |
@ -462,6 +486,10 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
[UC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
@ -476,6 +504,14 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
[UC] = (IB_QP_AV |
IB_QP_MAX_QP_RD_ATOMIC |
IB_QP_MAX_DEST_RD_ATOMIC |
IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX |
IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_AV |
IB_QP_TIMEOUT |
IB_QP_RETRY_CNT |
@ -501,6 +537,7 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
[UC] = (IB_QP_CUR_STATE),
[RC] = (IB_QP_CUR_STATE |
IB_QP_MIN_RNR_TIMER),
[MLX] = (IB_QP_CUR_STATE |
@ -552,7 +589,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
void *mailbox = NULL;
struct mthca_mailbox *mailbox;
struct mthca_qp_param *qp_param;
struct mthca_qp_context *qp_context;
u32 req_param, opt_param;
@ -609,10 +646,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
return -EINVAL;
}
mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
if (!mailbox)
return -ENOMEM;
qp_param = MAILBOX_ALIGN(mailbox);
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
qp_param = mailbox->buf;
qp_context = &qp_param->context;
memset(qp_param, 0, sizeof *qp_param);
@ -683,7 +720,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
if (attr_mask & IB_QP_AV) {
qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f;
qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid);
qp_context->pri_path.static_rate = (!!attr->ah_attr.static_rate) << 3;
qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
if (attr->ah_attr.ah_flags & IB_AH_GRH) {
qp_context->pri_path.g_mylmc |= 1 << 7;
qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
@ -724,9 +761,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ?
ffs(attr->max_dest_rd_atomic) - 1 : 0,
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
ffs(attr->max_rd_atomic) - 1 : 0,
7) << 21);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
}
@ -764,10 +801,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp->atomic_rd_en = attr->qp_access_flags;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
u8 rra_max;
if (qp->resp_depth && !attr->max_rd_atomic) {
if (qp->resp_depth && !attr->max_dest_rd_atomic) {
/*
* Lowering our responder resources to zero.
* Turn off RDMA/atomics as responder.
@ -778,7 +815,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
MTHCA_QP_OPTPAR_RAE);
}
if (!qp->resp_depth && attr->max_rd_atomic) {
if (!qp->resp_depth && attr->max_dest_rd_atomic) {
/*
* Increasing our responder resources from
* zero. Turn on RDMA/atomics as appropriate.
@ -799,7 +836,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
for (rra_max = 0;
1 << rra_max < attr->max_rd_atomic &&
1 << rra_max < attr->max_dest_rd_atomic &&
rra_max < dev->qp_table.rdb_shift;
++rra_max)
; /* nothing */
@ -807,7 +844,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->params2 |= cpu_to_be32(rra_max << 21);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
qp->resp_depth = attr->max_rd_atomic;
qp->resp_depth = attr->max_dest_rd_atomic;
}
qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
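
The rewritten loop above computes rra_max as the smallest power-of-two exponent that covers attr->max_dest_rd_atomic, capped by the RDB shift. The same computation written as a standalone helper, purely for illustration:

---> snip: illustrative sketch (rra_max computation) <---
#include <stdio.h>

/*
 * Smallest exponent such that (1 << rra_max) covers the requested responder
 * resources, clamped to rdb_shift -- the same result as the for loop above.
 */
static int rra_max_for(int max_dest_rd_atomic, int rdb_shift)
{
	int rra_max = 0;

	while ((1 << rra_max) < max_dest_rd_atomic && rra_max < rdb_shift)
		++rra_max;
	return rra_max;
}

int main(void)
{
	printf("%d\n", rra_max_for(1, 4));	/* 0: 1 << 0 already covers 1 */
	printf("%d\n", rra_max_for(5, 4));	/* 3: 1 << 3 = 8 covers 5 */
	printf("%d\n", rra_max_for(64, 4));	/* 4: clamped by rdb_shift */
	return 0;
}
---> end snip <---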
@ -835,7 +872,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
qp->qpn, 0, qp_param, 0, &status);
qp->qpn, 0, mailbox, 0, &status);
if (status) {
mthca_warn(dev, "modify QP %d returned status %02x.\n",
state_table[cur_state][new_state].trans, status);
@ -845,7 +882,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
if (!err)
qp->state = new_state;
kfree(mailbox);
mthca_free_mailbox(dev, mailbox);
if (is_sqp(dev, qp))
store_attrs(to_msqp(qp), attr, attr_mask);
@ -934,7 +971,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
size, shift);
qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
&t, GFP_KERNEL);
if (!qp->queue.direct.buf)
goto err_out;
@ -973,7 +1011,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
for (i = 0; i < npages; ++i) {
qp->queue.page_list[i].buf =
pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
&t, GFP_KERNEL);
if (!qp->queue.page_list[i].buf)
goto err_out_free;
@ -996,16 +1035,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
err_out_free:
if (qp->is_direct) {
pci_free_consistent(dev->pdev, size,
qp->queue.direct.buf,
pci_unmap_addr(&qp->queue.direct, mapping));
dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
pci_unmap_addr(&qp->queue.direct, mapping));
} else
for (i = 0; i < npages; ++i) {
if (qp->queue.page_list[i].buf)
pci_free_consistent(dev->pdev, PAGE_SIZE,
qp->queue.page_list[i].buf,
pci_unmap_addr(&qp->queue.page_list[i],
mapping));
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
qp->queue.page_list[i].buf,
pci_unmap_addr(&qp->queue.page_list[i],
mapping));
}
@ -1073,11 +1111,12 @@ static void mthca_free_memfree(struct mthca_dev *dev,
if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
mthca_table_put(dev, dev->qp_table.rdb_table,
qp->qpn << dev->qp_table.rdb_shift);
mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
mthca_table_put(dev, dev->qp_table.rdb_table,
qp->qpn << dev->qp_table.rdb_shift);
mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static void mthca_wq_init(struct mthca_wq* wq)
@ -1529,6 +1568,26 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case UC:
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.rdma.rkey);
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
default:
/* No extra segments required for sends */
break;
}
break;
case UD:
((struct mthca_tavor_ud_seg *) wqe)->lkey =
cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
@ -1814,9 +1873,29 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
sizeof (struct mthca_atomic_seg);
break;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.rdma.rkey);
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
default:
/* No extra segments required for sends */
break;
}
break;
case UC:
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
case IB_WR_RDMA_READ:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =

View File

@ -21,6 +21,7 @@
#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/compat.h>
struct evdev {
int exist;
@ -145,6 +146,41 @@ static int evdev_open(struct inode * inode, struct file * file)
return 0;
}
#ifdef CONFIG_COMPAT
struct input_event_compat {
struct compat_timeval time;
__u16 type;
__u16 code;
__s32 value;
};
#ifdef CONFIG_X86_64
# define COMPAT_TEST test_thread_flag(TIF_IA32)
#elif defined(CONFIG_IA64)
# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
#elif defined(CONFIG_ARCH_S390)
# define COMPAT_TEST test_thread_flag(TIF_31BIT)
#else
# define COMPAT_TEST test_thread_flag(TIF_32BIT)
#endif
static ssize_t evdev_write_compat(struct file * file, const char __user * buffer, size_t count, loff_t *ppos)
{
struct evdev_list *list = file->private_data;
struct input_event_compat event;
int retval = 0;
while (retval < count) {
if (copy_from_user(&event, buffer + retval, sizeof(struct input_event_compat)))
return -EFAULT;
input_event(list->evdev->handle.dev, event.type, event.code, event.value);
retval += sizeof(struct input_event_compat);
}
return retval;
}
#endif
static ssize_t evdev_write(struct file * file, const char __user * buffer, size_t count, loff_t *ppos)
{
struct evdev_list *list = file->private_data;
@ -153,6 +189,11 @@ static ssize_t evdev_write(struct file * file, const char __user * buffer, size_
if (!list->evdev->exist) return -ENODEV;
#ifdef CONFIG_COMPAT
if (COMPAT_TEST)
return evdev_write_compat(file, buffer, count, ppos);
#endif
while (retval < count) {
if (copy_from_user(&event, buffer + retval, sizeof(struct input_event)))
@ -164,11 +205,56 @@ static ssize_t evdev_write(struct file * file, const char __user * buffer, size_
return retval;
}
#ifdef CONFIG_COMPAT
static ssize_t evdev_read_compat(struct file * file, char __user * buffer, size_t count, loff_t *ppos)
{
struct evdev_list *list = file->private_data;
int retval;
if (count < sizeof(struct input_event_compat))
return -EINVAL;
if (list->head == list->tail && list->evdev->exist && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
retval = wait_event_interruptible(list->evdev->wait,
list->head != list->tail || (!list->evdev->exist));
if (retval)
return retval;
if (!list->evdev->exist)
return -ENODEV;
while (list->head != list->tail && retval + sizeof(struct input_event_compat) <= count) {
struct input_event *event = (struct input_event *) list->buffer + list->tail;
struct input_event_compat event_compat;
event_compat.time.tv_sec = event->time.tv_sec;
event_compat.time.tv_usec = event->time.tv_usec;
event_compat.type = event->type;
event_compat.code = event->code;
event_compat.value = event->value;
if (copy_to_user(buffer + retval, &event_compat,
sizeof(struct input_event_compat))) return -EFAULT;
list->tail = (list->tail + 1) & (EVDEV_BUFFER_SIZE - 1);
retval += sizeof(struct input_event_compat);
}
return retval;
}
#endif
static ssize_t evdev_read(struct file * file, char __user * buffer, size_t count, loff_t *ppos)
{
struct evdev_list *list = file->private_data;
int retval;
#ifdef CONFIG_COMPAT
if (COMPAT_TEST)
return evdev_read_compat(file, buffer, count, ppos);
#endif
if (count < sizeof(struct input_event))
return -EINVAL;
@ -186,7 +272,7 @@ static ssize_t evdev_read(struct file * file, char __user * buffer, size_t count
while (list->head != list->tail && retval + sizeof(struct input_event) <= count) {
if (copy_to_user(buffer + retval, list->buffer + list->tail,
sizeof(struct input_event))) return -EFAULT;
sizeof(struct input_event))) return -EFAULT;
list->tail = (list->tail + 1) & (EVDEV_BUFFER_SIZE - 1);
retval += sizeof(struct input_event);
}
@ -203,7 +289,7 @@ static unsigned int evdev_poll(struct file *file, poll_table *wait)
(list->evdev->exist ? 0 : (POLLHUP | POLLERR));
}
static int evdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct evdev_list *list = file->private_data;
struct evdev *evdev = list->evdev;
@ -285,110 +371,268 @@ static int evdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
default:
if (_IOC_TYPE(cmd) != 'E' || _IOC_DIR(cmd) != _IOC_READ)
if (_IOC_TYPE(cmd) != 'E')
return -EINVAL;
if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) {
if (_IOC_DIR(cmd) == _IOC_READ) {
long *bits;
int len;
if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) {
switch (_IOC_NR(cmd) & EV_MAX) {
case 0: bits = dev->evbit; len = EV_MAX; break;
case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
case EV_REL: bits = dev->relbit; len = REL_MAX; break;
case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
default: return -EINVAL;
long *bits;
int len;
switch (_IOC_NR(cmd) & EV_MAX) {
case 0: bits = dev->evbit; len = EV_MAX; break;
case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
case EV_REL: bits = dev->relbit; len = REL_MAX; break;
case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
default: return -EINVAL;
}
len = NBITS(len) * sizeof(long);
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, bits, len) ? -EFAULT : len;
}
len = NBITS(len) * sizeof(long);
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, bits, len) ? -EFAULT : len;
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) {
int len;
len = NBITS(KEY_MAX) * sizeof(long);
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->key, len) ? -EFAULT : len;
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0))) {
int len;
len = NBITS(LED_MAX) * sizeof(long);
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->led, len) ? -EFAULT : len;
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0))) {
int len;
len = NBITS(SND_MAX) * sizeof(long);
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->snd, len) ? -EFAULT : len;
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) {
int len;
if (!dev->name) return -ENOENT;
len = strlen(dev->name) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->name, len) ? -EFAULT : len;
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) {
int len;
if (!dev->phys) return -ENOENT;
len = strlen(dev->phys) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->phys, len) ? -EFAULT : len;
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) {
int len;
if (!dev->uniq) return -ENOENT;
len = strlen(dev->uniq) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->uniq, len) ? -EFAULT : len;
}
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
int t = _IOC_NR(cmd) & ABS_MAX;
abs.value = dev->abs[t];
abs.minimum = dev->absmin[t];
abs.maximum = dev->absmax[t];
abs.fuzz = dev->absfuzz[t];
abs.flat = dev->absflat[t];
if (copy_to_user(p, &abs, sizeof(struct input_absinfo)))
return -EFAULT;
return 0;
}
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) {
int len;
len = NBITS(KEY_MAX) * sizeof(long);
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->key, len) ? -EFAULT : len;
}
if (_IOC_DIR(cmd) == _IOC_WRITE) {
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0))) {
int len;
len = NBITS(LED_MAX) * sizeof(long);
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->led, len) ? -EFAULT : len;
}
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0))) {
int len;
len = NBITS(SND_MAX) * sizeof(long);
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->snd, len) ? -EFAULT : len;
}
int t = _IOC_NR(cmd) & ABS_MAX;
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) {
int len;
if (!dev->name) return -ENOENT;
len = strlen(dev->name) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->name, len) ? -EFAULT : len;
}
if (copy_from_user(&abs, p, sizeof(struct input_absinfo)))
return -EFAULT;
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) {
int len;
if (!dev->phys) return -ENOENT;
len = strlen(dev->phys) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->phys, len) ? -EFAULT : len;
}
dev->abs[t] = abs.value;
dev->absmin[t] = abs.minimum;
dev->absmax[t] = abs.maximum;
dev->absfuzz[t] = abs.fuzz;
dev->absflat[t] = abs.flat;
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) {
int len;
if (!dev->uniq) return -ENOENT;
len = strlen(dev->uniq) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->uniq, len) ? -EFAULT : len;
}
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
int t = _IOC_NR(cmd) & ABS_MAX;
abs.value = dev->abs[t];
abs.minimum = dev->absmin[t];
abs.maximum = dev->absmax[t];
abs.fuzz = dev->absfuzz[t];
abs.flat = dev->absflat[t];
if (copy_to_user(p, &abs, sizeof(struct input_absinfo)))
return -EFAULT;
return 0;
}
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
int t = _IOC_NR(cmd) & ABS_MAX;
if (copy_from_user(&abs, p, sizeof(struct input_absinfo)))
return -EFAULT;
dev->abs[t] = abs.value;
dev->absmin[t] = abs.minimum;
dev->absmax[t] = abs.maximum;
dev->absfuzz[t] = abs.fuzz;
dev->absflat[t] = abs.flat;
return 0;
return 0;
}
}
}
return -EINVAL;
}
#ifdef CONFIG_COMPAT
#define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8)
#define NBITS_COMPAT(x) ((((x)-1)/BITS_PER_LONG_COMPAT)+1)
#define OFF_COMPAT(x) ((x)%BITS_PER_LONG_COMPAT)
#define BIT_COMPAT(x) (1UL<<OFF_COMPAT(x))
#define LONG_COMPAT(x) ((x)/BITS_PER_LONG_COMPAT)
#define test_bit_compat(bit, array) ((array[LONG_COMPAT(bit)] >> OFF_COMPAT(bit)) & 1)
#ifdef __BIG_ENDIAN
#define bit_to_user(bit, max) \
do { \
int i; \
int len = NBITS_COMPAT((max)) * sizeof(compat_long_t); \
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); \
for (i = 0; i < len / sizeof(compat_long_t); i++) \
if (copy_to_user((compat_long_t*) p + i, \
(compat_long_t*) (bit) + i + 1 - ((i % 2) << 1), \
sizeof(compat_long_t))) \
return -EFAULT; \
return len; \
} while (0)
#else
#define bit_to_user(bit, max) \
do { \
int len = NBITS_COMPAT((max)) * sizeof(compat_long_t); \
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); \
return copy_to_user(p, (bit), len) ? -EFAULT : len; \
} while (0)
#endif
static long evdev_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
{
struct evdev_list *list = file->private_data;
struct evdev *evdev = list->evdev;
struct input_dev *dev = evdev->handle.dev;
struct input_absinfo abs;
void __user *p = compat_ptr(arg);
if (!evdev->exist) return -ENODEV;
switch (cmd) {
case EVIOCGVERSION:
case EVIOCGID:
case EVIOCGKEYCODE:
case EVIOCSKEYCODE:
case EVIOCSFF:
case EVIOCRMFF:
case EVIOCGEFFECTS:
case EVIOCGRAB:
return evdev_ioctl(file, cmd, (unsigned long) p);
default:
if (_IOC_TYPE(cmd) != 'E')
return -EINVAL;
if (_IOC_DIR(cmd) == _IOC_READ) {
if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) {
long *bits;
int max;
switch (_IOC_NR(cmd) & EV_MAX) {
case 0: bits = dev->evbit; max = EV_MAX; break;
case EV_KEY: bits = dev->keybit; max = KEY_MAX; break;
case EV_REL: bits = dev->relbit; max = REL_MAX; break;
case EV_ABS: bits = dev->absbit; max = ABS_MAX; break;
case EV_MSC: bits = dev->mscbit; max = MSC_MAX; break;
case EV_LED: bits = dev->ledbit; max = LED_MAX; break;
case EV_SND: bits = dev->sndbit; max = SND_MAX; break;
case EV_FF: bits = dev->ffbit; max = FF_MAX; break;
default: return -EINVAL;
}
bit_to_user(bits, max);
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0)))
bit_to_user(dev->key, KEY_MAX);
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0)))
bit_to_user(dev->led, LED_MAX);
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0)))
bit_to_user(dev->snd, SND_MAX);
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) {
int len;
if (!dev->name) return -ENOENT;
len = strlen(dev->name) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->name, len) ? -EFAULT : len;
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) {
int len;
if (!dev->phys) return -ENOENT;
len = strlen(dev->phys) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->phys, len) ? -EFAULT : len;
}
if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) {
int len;
if (!dev->uniq) return -ENOENT;
len = strlen(dev->uniq) + 1;
if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
return copy_to_user(p, dev->uniq, len) ? -EFAULT : len;
}
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
int t = _IOC_NR(cmd) & ABS_MAX;
abs.value = dev->abs[t];
abs.minimum = dev->absmin[t];
abs.maximum = dev->absmax[t];
abs.fuzz = dev->absfuzz[t];
abs.flat = dev->absflat[t];
if (copy_to_user(p, &abs, sizeof(struct input_absinfo)))
return -EFAULT;
return 0;
}
}
if (_IOC_DIR(cmd) == _IOC_WRITE) {
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
int t = _IOC_NR(cmd) & ABS_MAX;
if (copy_from_user(&abs, p, sizeof(struct input_absinfo)))
return -EFAULT;
dev->abs[t] = abs.value;
dev->absmin[t] = abs.minimum;
dev->absmax[t] = abs.maximum;
dev->absfuzz[t] = abs.fuzz;
dev->absflat[t] = abs.flat;
return 0;
}
}
}
return -EINVAL;
}
#endif
static struct file_operations evdev_fops = {
.owner = THIS_MODULE,
.read = evdev_read,
@ -396,7 +640,10 @@ static struct file_operations evdev_fops = {
.poll = evdev_poll,
.open = evdev_open,
.release = evdev_release,
.ioctl = evdev_ioctl,
.unlocked_ioctl = evdev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = evdev_ioctl_compat,
#endif
.fasync = evdev_fasync,
.flush = evdev_flush
};
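
The compat support added above declares a 32-bit struct input_event_compat whose timestamp uses compat_timeval, converts each event by hand in evdev_read_compat()/evdev_write_compat(), and routes 32-bit ioctls through evdev_ioctl_compat(). A user-space sketch of the per-event conversion only; the structure and function names below are stand-ins, and the 32-bit layout is merely assumed to mirror the one in the hunk above:

---> snip: illustrative sketch (32-bit event conversion) <---
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

/* Native layout: struct timeval is 16 bytes on a 64-bit libc. */
struct input_event_native {
	struct timeval time;
	uint16_t type, code;
	int32_t value;
};

/* Assumed 32-bit layout: the timestamp shrinks to two 32-bit fields. */
struct input_event_compat32 {
	int32_t tv_sec, tv_usec;
	uint16_t type, code;
	int32_t value;
};

static void to_compat(const struct input_event_native *in,
		      struct input_event_compat32 *out)
{
	out->tv_sec  = (int32_t) in->time.tv_sec;	/* truncates after 2038 */
	out->tv_usec = (int32_t) in->time.tv_usec;
	out->type    = in->type;
	out->code    = in->code;
	out->value   = in->value;
}

int main(void)
{
	struct input_event_native ev = { { 1000, 500 }, 1 /* EV_KEY */, 30, 1 };
	struct input_event_compat32 cev;

	to_compat(&ev, &cev);
	printf("native event: %zu bytes, compat event: %zu bytes\n",
	       sizeof ev, sizeof cev);
	return 0;
}
---> end snip <---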

View File

@ -49,22 +49,8 @@ config GAMEPORT_EMU10K1
To compile this driver as a module, choose M here: the
module will be called emu10k1-gp.
config GAMEPORT_VORTEX
tristate "Aureal Vortex, Vortex 2 gameport support"
depends on PCI
help
Say Y here if you have an Aureal Vortex 1 or 2 card and want
to use its gameport.
To compile this driver as a module, choose M here: the
module will be called vortex.
config GAMEPORT_FM801
tristate "ForteMedia FM801 gameport support"
depends on PCI
config GAMEPORT_CS461X
tristate "Crystal SoundFusion gameport support"
depends on PCI
endif

View File

@ -5,9 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_GAMEPORT) += gameport.o
obj-$(CONFIG_GAMEPORT_CS461X) += cs461x.o
obj-$(CONFIG_GAMEPORT_EMU10K1) += emu10k1-gp.o
obj-$(CONFIG_GAMEPORT_FM801) += fm801-gp.o
obj-$(CONFIG_GAMEPORT_L4) += lightning.o
obj-$(CONFIG_GAMEPORT_NS558) += ns558.o
obj-$(CONFIG_GAMEPORT_VORTEX) += vortex.o

View File

@ -1,322 +0,0 @@
/*
All the defines and part of the code (such as cs461x_*) are
contributed from the ALSA 0.5.8 sources.
See http://www.alsa-project.org/ for sources
Tested on Linux 686 2.4.0-test9, ALSA 0.5.8a and CS4610
*/
#include <asm/io.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/slab.h>
#include <linux/pci.h>
MODULE_AUTHOR("Victor Krapivin");
MODULE_LICENSE("GPL");
/*
These options are experimental
#define CS461X_FULL_MAP
*/
#ifndef PCI_VENDOR_ID_CIRRUS
#define PCI_VENDOR_ID_CIRRUS 0x1013
#endif
#ifndef PCI_DEVICE_ID_CIRRUS_4610
#define PCI_DEVICE_ID_CIRRUS_4610 0x6001
#endif
#ifndef PCI_DEVICE_ID_CIRRUS_4612
#define PCI_DEVICE_ID_CIRRUS_4612 0x6003
#endif
#ifndef PCI_DEVICE_ID_CIRRUS_4615
#define PCI_DEVICE_ID_CIRRUS_4615 0x6004
#endif
/* Registers */
#define BA0_JSPT 0x00000480
#define BA0_JSCTL 0x00000484
#define BA0_JSC1 0x00000488
#define BA0_JSC2 0x0000048C
#define BA0_JSIO 0x000004A0
/* Bits for JSPT */
#define JSPT_CAX 0x00000001
#define JSPT_CAY 0x00000002
#define JSPT_CBX 0x00000004
#define JSPT_CBY 0x00000008
#define JSPT_BA1 0x00000010
#define JSPT_BA2 0x00000020
#define JSPT_BB1 0x00000040
#define JSPT_BB2 0x00000080
/* Bits for JSCTL */
#define JSCTL_SP_MASK 0x00000003
#define JSCTL_SP_SLOW 0x00000000
#define JSCTL_SP_MEDIUM_SLOW 0x00000001
#define JSCTL_SP_MEDIUM_FAST 0x00000002
#define JSCTL_SP_FAST 0x00000003
#define JSCTL_ARE 0x00000004
/* Data register pairs masks */
#define JSC1_Y1V_MASK 0x0000FFFF
#define JSC1_X1V_MASK 0xFFFF0000
#define JSC1_Y1V_SHIFT 0
#define JSC1_X1V_SHIFT 16
#define JSC2_Y2V_MASK 0x0000FFFF
#define JSC2_X2V_MASK 0xFFFF0000
#define JSC2_Y2V_SHIFT 0
#define JSC2_X2V_SHIFT 16
/* JS GPIO */
#define JSIO_DAX 0x00000001
#define JSIO_DAY 0x00000002
#define JSIO_DBX 0x00000004
#define JSIO_DBY 0x00000008
#define JSIO_AXOE 0x00000010
#define JSIO_AYOE 0x00000020
#define JSIO_BXOE 0x00000040
#define JSIO_BYOE 0x00000080
/*
The card initialization code is obfuscated; the cs461x module
needs to be loaded after the ALSA modules are initialized and something
has been played on the CS 4610 chip (see the ALSA sources for details of the
CS4610 initialization code)
*/
/* Card specific definitions */
#define CS461X_BA0_SIZE 0x2000
#define CS461X_BA1_DATA0_SIZE 0x3000
#define CS461X_BA1_DATA1_SIZE 0x3800
#define CS461X_BA1_PRG_SIZE 0x7000
#define CS461X_BA1_REG_SIZE 0x0100
#define BA1_SP_DMEM0 0x00000000
#define BA1_SP_DMEM1 0x00010000
#define BA1_SP_PMEM 0x00020000
#define BA1_SP_REG 0x00030000
#define BA1_DWORD_SIZE (13 * 1024 + 512)
#define BA1_MEMORY_COUNT 3
/*
Only one CS461x card is still supported; the code requires a
redesign to avoid this limitation.
*/
static unsigned long ba0_addr;
static unsigned int __iomem *ba0;
#ifdef CS461X_FULL_MAP
static unsigned long ba1_addr;
static union ba1_t {
struct {
unsigned int __iomem *data0;
unsigned int __iomem *data1;
unsigned int __iomem *pmem;
unsigned int __iomem *reg;
} name;
unsigned int __iomem *idx[4];
} ba1;
static void cs461x_poke(unsigned long reg, unsigned int val)
{
writel(val, &ba1.idx[(reg >> 16) & 3][(reg >> 2) & 0x3fff]);
}
static unsigned int cs461x_peek(unsigned long reg)
{
return readl(&ba1.idx[(reg >> 16) & 3][(reg >> 2) & 0x3fff]);
}
#endif
static void cs461x_pokeBA0(unsigned long reg, unsigned int val)
{
writel(val, &ba0[reg >> 2]);
}
static unsigned int cs461x_peekBA0(unsigned long reg)
{
return readl(&ba0[reg >> 2]);
}
static int cs461x_free(struct pci_dev *pdev)
{
struct gameport *port = pci_get_drvdata(pdev);
if (port)
gameport_unregister_port(port);
if (ba0) iounmap(ba0);
#ifdef CS461X_FULL_MAP
if (ba1.name.data0) iounmap(ba1.name.data0);
if (ba1.name.data1) iounmap(ba1.name.data1);
if (ba1.name.pmem) iounmap(ba1.name.pmem);
if (ba1.name.reg) iounmap(ba1.name.reg);
#endif
return 0;
}
static void cs461x_gameport_trigger(struct gameport *gameport)
{
cs461x_pokeBA0(BA0_JSPT, 0xFF); //outb(gameport->io, 0xFF);
}
static unsigned char cs461x_gameport_read(struct gameport *gameport)
{
return cs461x_peekBA0(BA0_JSPT); //inb(gameport->io);
}
static int cs461x_gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
unsigned js1, js2, jst;
js1 = cs461x_peekBA0(BA0_JSC1);
js2 = cs461x_peekBA0(BA0_JSC2);
jst = cs461x_peekBA0(BA0_JSPT);
*buttons = (~jst >> 4) & 0x0F;
axes[0] = ((js1 & JSC1_Y1V_MASK) >> JSC1_Y1V_SHIFT) & 0xFFFF;
axes[1] = ((js1 & JSC1_X1V_MASK) >> JSC1_X1V_SHIFT) & 0xFFFF;
axes[2] = ((js2 & JSC2_Y2V_MASK) >> JSC2_Y2V_SHIFT) & 0xFFFF;
axes[3] = ((js2 & JSC2_X2V_MASK) >> JSC2_X2V_SHIFT) & 0xFFFF;
for(jst=0;jst<4;++jst)
if(axes[jst]==0xFFFF) axes[jst] = -1;
return 0;
}
static int cs461x_gameport_open(struct gameport *gameport, int mode)
{
switch (mode) {
case GAMEPORT_MODE_COOKED:
case GAMEPORT_MODE_RAW:
return 0;
default:
return -1;
}
return 0;
}
static struct pci_device_id cs461x_pci_tbl[] = {
{ PCI_VENDOR_ID_CIRRUS, 0x6001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Cirrus CS4610 */
{ PCI_VENDOR_ID_CIRRUS, 0x6003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Cirrus CS4612 */
{ PCI_VENDOR_ID_CIRRUS, 0x6005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Cirrus CS4615 */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cs461x_pci_tbl);
static int __devinit cs461x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
struct gameport* port;
rc = pci_enable_device(pdev);
if (rc) {
printk(KERN_ERR "cs461x: Cannot enable PCI gameport (bus %d, devfn %d) error=%d\n",
pdev->bus->number, pdev->devfn, rc);
return rc;
}
ba0_addr = pci_resource_start(pdev, 0);
#ifdef CS461X_FULL_MAP
ba1_addr = pci_resource_start(pdev, 1);
#endif
if (ba0_addr == 0 || ba0_addr == ~0
#ifdef CS461X_FULL_MAP
|| ba1_addr == 0 || ba1_addr == ~0
#endif
) {
printk(KERN_ERR "cs461x: wrong address - ba0 = 0x%lx\n", ba0_addr);
#ifdef CS461X_FULL_MAP
printk(KERN_ERR "cs461x: wrong address - ba1 = 0x%lx\n", ba1_addr);
#endif
cs461x_free(pdev);
return -ENOMEM;
}
ba0 = ioremap(ba0_addr, CS461X_BA0_SIZE);
#ifdef CS461X_FULL_MAP
ba1.name.data0 = ioremap(ba1_addr + BA1_SP_DMEM0, CS461X_BA1_DATA0_SIZE);
ba1.name.data1 = ioremap(ba1_addr + BA1_SP_DMEM1, CS461X_BA1_DATA1_SIZE);
ba1.name.pmem = ioremap(ba1_addr + BA1_SP_PMEM, CS461X_BA1_PRG_SIZE);
ba1.name.reg = ioremap(ba1_addr + BA1_SP_REG, CS461X_BA1_REG_SIZE);
if (ba0 == NULL || ba1.name.data0 == NULL ||
ba1.name.data1 == NULL || ba1.name.pmem == NULL ||
ba1.name.reg == NULL) {
cs461x_free(pdev);
return -ENOMEM;
}
#else
if (ba0 == NULL) {
cs461x_free(pdev);
return -ENOMEM;
}
#endif
if (!(port = gameport_allocate_port())) {
printk(KERN_ERR "cs461x: Memory allocation failed\n");
cs461x_free(pdev);
return -ENOMEM;
}
pci_set_drvdata(pdev, port);
port->open = cs461x_gameport_open;
port->trigger = cs461x_gameport_trigger;
port->read = cs461x_gameport_read;
port->cooked_read = cs461x_gameport_cooked_read;
gameport_set_name(port, "CS416x");
gameport_set_phys(port, "pci%s/gameport0", pci_name(pdev));
port->dev.parent = &pdev->dev;
cs461x_pokeBA0(BA0_JSIO, 0xFF); // ?
cs461x_pokeBA0(BA0_JSCTL, JSCTL_SP_MEDIUM_SLOW);
gameport_register_port(port);
return 0;
}
static void __devexit cs461x_pci_remove(struct pci_dev *pdev)
{
cs461x_free(pdev);
}
static struct pci_driver cs461x_pci_driver = {
.name = "CS461x_gameport",
.id_table = cs461x_pci_tbl,
.probe = cs461x_pci_probe,
.remove = __devexit_p(cs461x_pci_remove),
};
static int __init cs461x_init(void)
{
return pci_register_driver(&cs461x_pci_driver);
}
static void __exit cs461x_exit(void)
{
pci_unregister_driver(&cs461x_pci_driver);
}
module_init(cs461x_init);
module_exit(cs461x_exit);

View File

@ -258,18 +258,18 @@ static int __init ns558_init(void)
{
int i = 0;
if (pnp_register_driver(&ns558_pnp_driver) >= 0)
pnp_registered = 1;
/*
* Probe ISA ports first so that PnP gets to choose free port addresses
* not occupied by the ISA ports.
* Probe ISA ports after PnP, so that PnP ports that are already
* enabled get detected as PnP. This may be suboptimal in multi-device
* configurations, but saves hassle with simple setups.
*/
while (ns558_isa_portlist[i])
ns558_isa_probe(ns558_isa_portlist[i++]);
if (pnp_register_driver(&ns558_pnp_driver) >= 0)
pnp_registered = 1;
return (list_empty(&ns558_list) && !pnp_registered) ? -ENODEV : 0;
}

View File

@ -1,186 +0,0 @@
/*
* $Id: vortex.c,v 1.5 2002/07/01 15:39:30 vojtech Exp $
*
* Copyright (c) 2000-2001 Vojtech Pavlik
*
* Based on the work of:
* Raymond Ingles
*/
/*
* Trident 4DWave and Aureal Vortex gameport driver for Linux
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/gameport.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Aureal Vortex and Vortex2 gameport driver");
MODULE_LICENSE("GPL");
#define VORTEX_GCR 0x0c /* Gameport control register */
#define VORTEX_LEG 0x08 /* Legacy port location */
#define VORTEX_AXD 0x10 /* Axes start */
#define VORTEX_DATA_WAIT 20 /* 20 ms */
struct vortex {
struct gameport *gameport;
struct pci_dev *dev;
unsigned char __iomem *base;
unsigned char __iomem *io;
};
static unsigned char vortex_read(struct gameport *gameport)
{
struct vortex *vortex = gameport->port_data;
return readb(vortex->io + VORTEX_LEG);
}
static void vortex_trigger(struct gameport *gameport)
{
struct vortex *vortex = gameport->port_data;
writeb(0xff, vortex->io + VORTEX_LEG);
}
static int vortex_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
struct vortex *vortex = gameport->port_data;
int i;
*buttons = (~readb(vortex->base + VORTEX_LEG) >> 4) & 0xf;
for (i = 0; i < 4; i++) {
axes[i] = readw(vortex->io + VORTEX_AXD + i * sizeof(u32));
if (axes[i] == 0x1fff) axes[i] = -1;
}
return 0;
}
static int vortex_open(struct gameport *gameport, int mode)
{
struct vortex *vortex = gameport->port_data;
switch (mode) {
case GAMEPORT_MODE_COOKED:
writeb(0x40, vortex->io + VORTEX_GCR);
msleep(VORTEX_DATA_WAIT);
return 0;
case GAMEPORT_MODE_RAW:
writeb(0x00, vortex->io + VORTEX_GCR);
return 0;
default:
return -1;
}
return 0;
}
static int __devinit vortex_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vortex *vortex;
struct gameport *port;
int i;
vortex = kcalloc(1, sizeof(struct vortex), GFP_KERNEL);
port = gameport_allocate_port();
if (!vortex || !port) {
printk(KERN_ERR "vortex: Memory allocation failed.\n");
kfree(vortex);
gameport_free_port(port);
return -ENOMEM;
}
for (i = 0; i < 6; i++)
if (~pci_resource_flags(dev, i) & IORESOURCE_IO)
break;
pci_enable_device(dev);
vortex->dev = dev;
vortex->gameport = port;
vortex->base = ioremap(pci_resource_start(vortex->dev, i),
pci_resource_len(vortex->dev, i));
vortex->io = vortex->base + id->driver_data;
pci_set_drvdata(dev, vortex);
port->port_data = vortex;
port->fuzz = 64;
gameport_set_name(port, "AU88x0");
gameport_set_phys(port, "pci%s/gameport0", pci_name(dev));
port->dev.parent = &dev->dev;
port->read = vortex_read;
port->trigger = vortex_trigger;
port->cooked_read = vortex_cooked_read;
port->open = vortex_open;
gameport_register_port(port);
return 0;
}
static void __devexit vortex_remove(struct pci_dev *dev)
{
struct vortex *vortex = pci_get_drvdata(dev);
gameport_unregister_port(vortex->gameport);
iounmap(vortex->base);
kfree(vortex);
}
static struct pci_device_id vortex_id_table[] = {
{ 0x12eb, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x11000 },
{ 0x12eb, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x28800 },
{ 0 }
};
static struct pci_driver vortex_driver = {
.name = "vortex_gameport",
.id_table = vortex_id_table,
.probe = vortex_probe,
.remove = __devexit_p(vortex_remove),
};
static int __init vortex_init(void)
{
return pci_register_driver(&vortex_driver);
}
static void __exit vortex_exit(void)
{
pci_unregister_driver(&vortex_driver);
}
module_init(vortex_init);
module_exit(vortex_exit);

View File

@ -219,10 +219,24 @@ void input_release_device(struct input_handle *handle)
int input_open_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
int err;
err = down_interruptible(&dev->sem);
if (err)
return err;
handle->open++;
if (handle->dev->open)
return handle->dev->open(handle->dev);
return 0;
if (!dev->users++ && dev->open)
err = dev->open(dev);
if (err)
handle->open--;
up(&dev->sem);
return err;
}
int input_flush_device(struct input_handle* handle, struct file* file)
@ -235,10 +249,17 @@ int input_flush_device(struct input_handle* handle, struct file* file)
void input_close_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
input_release_device(handle);
if (handle->dev->close)
handle->dev->close(handle->dev);
down(&dev->sem);
if (!--dev->users && dev->close)
dev->close(dev);
handle->open--;
up(&dev->sem);
}
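
input_open_device() and input_close_device() now serialize on dev->sem and keep a dev->users count, so the driver's open() runs only for the first opener and close() only for the last. A toy user-space version of that first-open/last-close discipline, using a pthread mutex as a stand-in for the kernel semaphore:

---> snip: illustrative sketch (first-open/last-close refcounting) <---
#include <pthread.h>
#include <stdio.h>

/* Toy device following the same open/close refcounting as struct input_dev. */
struct toy_dev {
	pthread_mutex_t lock;
	int users;
};

static void hw_start(void) { puts("hardware started"); }
static void hw_stop(void)  { puts("hardware stopped"); }

static int toy_open(struct toy_dev *dev)
{
	pthread_mutex_lock(&dev->lock);
	if (dev->users++ == 0)		/* only the first opener touches the HW */
		hw_start();
	pthread_mutex_unlock(&dev->lock);
	return 0;
}

static void toy_close(struct toy_dev *dev)
{
	pthread_mutex_lock(&dev->lock);
	if (--dev->users == 0)		/* only the last closer shuts it down */
		hw_stop();
	pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
	struct toy_dev dev = { .users = 0 };

	pthread_mutex_init(&dev.lock, NULL);
	toy_open(&dev);		/* starts the hardware */
	toy_open(&dev);		/* second user: no-op */
	toy_close(&dev);	/* one user left: no-op */
	toy_close(&dev);	/* last user: stops the hardware */
	pthread_mutex_destroy(&dev.lock);
	return 0;
}
---> end snip <---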
static void input_link_handle(struct input_handle *handle)
@ -415,6 +436,8 @@ void input_register_device(struct input_dev *dev)
set_bit(EV_SYN, dev->evbit);
init_MUTEX(&dev->sem);
/*
* If delay and period are pre-set by the driver, then autorepeating
* is handled by the driver itself and we don't do it in input.c.
@ -674,6 +697,8 @@ static int input_handlers_read(char *buf, char **start, off_t pos, int count, in
return (count > cnt) ? cnt : count;
}
static struct file_operations input_fileops;
static int __init input_proc_init(void)
{
struct proc_dir_entry *entry;
@ -688,6 +713,8 @@ static int __init input_proc_init(void)
return -ENOMEM;
}
entry->owner = THIS_MODULE;
input_fileops = *entry->proc_fops;
entry->proc_fops = &input_fileops;
entry->proc_fops->poll = input_devices_poll;
entry = create_proc_read_entry("handlers", 0, proc_bus_input_dir, input_handlers_read, NULL);
if (entry == NULL) {

View File

@ -285,48 +285,33 @@ static unsigned int joydev_poll(struct file *file, poll_table *wait)
(POLLIN | POLLRDNORM) : 0) | (list->joydev->exist ? 0 : (POLLHUP | POLLERR));
}
static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __user *argp)
{
struct joydev_list *list = file->private_data;
struct joydev *joydev = list->joydev;
struct input_dev *dev = joydev->handle.dev;
void __user *argp = (void __user *)arg;
int i, j;
if (!joydev->exist) return -ENODEV;
switch (cmd) {
case JS_SET_CAL:
return copy_from_user(&joydev->glue.JS_CORR, argp,
sizeof(struct JS_DATA_TYPE)) ? -EFAULT : 0;
sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
case JS_GET_CAL:
return copy_to_user(argp, &joydev->glue.JS_CORR,
sizeof(struct JS_DATA_TYPE)) ? -EFAULT : 0;
sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
case JS_SET_TIMEOUT:
return get_user(joydev->glue.JS_TIMEOUT, (int __user *) arg);
return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
case JS_GET_TIMEOUT:
return put_user(joydev->glue.JS_TIMEOUT, (int __user *) arg);
case JS_SET_TIMELIMIT:
return get_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
case JS_GET_TIMELIMIT:
return put_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
case JS_SET_ALL:
return copy_from_user(&joydev->glue, argp,
sizeof(struct JS_DATA_SAVE_TYPE)) ? -EFAULT : 0;
case JS_GET_ALL:
return copy_to_user(argp, &joydev->glue,
sizeof(struct JS_DATA_SAVE_TYPE)) ? -EFAULT : 0;
return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
case JSIOCGVERSION:
return put_user(JS_VERSION, (__u32 __user *) arg);
return put_user(JS_VERSION, (__u32 __user *) argp);
case JSIOCGAXES:
return put_user(joydev->nabs, (__u8 __user *) arg);
return put_user(joydev->nabs, (__u8 __user *) argp);
case JSIOCGBUTTONS:
return put_user(joydev->nkey, (__u8 __user *) arg);
return put_user(joydev->nkey, (__u8 __user *) argp);
case JSIOCSCORR:
if (copy_from_user(joydev->corr, argp,
sizeof(struct js_corr) * joydev->nabs))
sizeof(joydev->corr[0]) * joydev->nabs))
return -EFAULT;
for (i = 0; i < joydev->nabs; i++) {
j = joydev->abspam[i];
@ -335,7 +320,7 @@ static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
return 0;
case JSIOCGCORR:
return copy_to_user(argp, joydev->corr,
sizeof(struct js_corr) * joydev->nabs) ? -EFAULT : 0;
sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;
case JSIOCSAXMAP:
if (copy_from_user(joydev->abspam, argp, sizeof(__u8) * (ABS_MAX + 1)))
return -EFAULT;
@ -371,6 +356,84 @@ static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
return -EINVAL;
}
#ifdef CONFIG_COMPAT
static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct joydev_list *list = file->private_data;
struct joydev *joydev = list->joydev;
void __user *argp = (void __user *)arg;
s32 tmp32;
struct JS_DATA_SAVE_TYPE_32 ds32;
int err;
if (!joydev->exist) return -ENODEV;
switch(cmd) {
case JS_SET_TIMELIMIT:
err = get_user(tmp32, (s32 __user *) arg);
if (err == 0)
joydev->glue.JS_TIMELIMIT = tmp32;
break;
case JS_GET_TIMELIMIT:
tmp32 = joydev->glue.JS_TIMELIMIT;
err = put_user(tmp32, (s32 __user *) arg);
break;
case JS_SET_ALL:
err = copy_from_user(&ds32, argp,
sizeof(ds32)) ? -EFAULT : 0;
if (err == 0) {
joydev->glue.JS_TIMEOUT = ds32.JS_TIMEOUT;
joydev->glue.BUSY = ds32.BUSY;
joydev->glue.JS_EXPIRETIME = ds32.JS_EXPIRETIME;
joydev->glue.JS_TIMELIMIT = ds32.JS_TIMELIMIT;
joydev->glue.JS_SAVE = ds32.JS_SAVE;
joydev->glue.JS_CORR = ds32.JS_CORR;
}
break;
case JS_GET_ALL:
ds32.JS_TIMEOUT = joydev->glue.JS_TIMEOUT;
ds32.BUSY = joydev->glue.BUSY;
ds32.JS_EXPIRETIME = joydev->glue.JS_EXPIRETIME;
ds32.JS_TIMELIMIT = joydev->glue.JS_TIMELIMIT;
ds32.JS_SAVE = joydev->glue.JS_SAVE;
ds32.JS_CORR = joydev->glue.JS_CORR;
err = copy_to_user(argp, &ds32,
sizeof(ds32)) ? -EFAULT : 0;
break;
default:
err = joydev_ioctl_common(joydev, cmd, argp);
}
return err;
}
#endif /* CONFIG_COMPAT */
static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
struct joydev_list *list = file->private_data;
struct joydev *joydev = list->joydev;
void __user *argp = (void __user *)arg;
if (!joydev->exist) return -ENODEV;
switch(cmd) {
case JS_SET_TIMELIMIT:
return get_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
case JS_GET_TIMELIMIT:
return put_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
case JS_SET_ALL:
return copy_from_user(&joydev->glue, argp,
sizeof(joydev->glue)) ? -EFAULT : 0;
case JS_GET_ALL:
return copy_to_user(argp, &joydev->glue,
sizeof(joydev->glue)) ? -EFAULT : 0;
default:
return joydev_ioctl_common(joydev, cmd, argp);
}
}
static struct file_operations joydev_fops = {
.owner = THIS_MODULE,
.read = joydev_read,
@ -379,6 +442,9 @@ static struct file_operations joydev_fops = {
.open = joydev_open,
.release = joydev_release,
.ioctl = joydev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = joydev_compat_ioctl,
#endif
.fasync = joydev_fasync,
};


@ -185,7 +185,7 @@ static void a3d_poll(struct gameport *gameport)
a3d->reads++;
if (a3d_read_packet(a3d->gameport, a3d->length, data) != a3d->length ||
data[0] != a3d->mode || a3d_csum(data, a3d->length))
a3d->bads++;
a3d->bads++;
else
a3d_read(a3d, data);
}


@ -82,7 +82,7 @@ static char adi_cm2_abs[] = { ABS_X, ABS_Y, ABS_Z, ABS_RX, ABS_RY, ABS_RZ };
static char adi_wmf_abs[] = { ABS_WHEEL, ABS_GAS, ABS_BRAKE, ABS_HAT0X, ABS_HAT0Y, ABS_HAT1X, ABS_HAT1Y, ABS_HAT2X, ABS_HAT2Y };
static short adi_wmgpe_key[] = { BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_TL, BTN_TR, BTN_START, BTN_MODE, BTN_SELECT };
static short adi_wmi_key[] = { BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_EXTRA };
static short adi_wmi_key[] = { BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_EXTRA };
static short adi_wmed3d_key[] = { BTN_TRIGGER, BTN_THUMB, BTN_THUMB2, BTN_TOP, BTN_TOP2, BTN_BASE, BTN_BASE2 };
static short adi_cm2_key[] = { BTN_1, BTN_2, BTN_3, BTN_4, BTN_5, BTN_6, BTN_7, BTN_8 };
@ -183,7 +183,7 @@ static void adi_move_bits(struct adi_port *port, int length)
int i;
struct adi *adi = port->adi;
adi[0].idx = adi[1].idx = 0;
adi[0].idx = adi[1].idx = 0;
if (adi[0].ret <= 0 || adi[1].ret <= 0) return;
if (adi[0].data[0] & 0x20 || ~adi[1].data[0] & 0x20) return;


@ -51,7 +51,8 @@ MODULE_PARM_DESC(map, "Map of attached joysticks in form of <a>,<b> (default is
__obsolete_setup("amijoy=");
static int amijoy_used[2] = { 0, 0 };
static int amijoy_used;
static DECLARE_MUTEX(amijoy_sem);
static struct input_dev amijoy_dev[2];
static char *amijoy_phys[2] = { "amijoy/input0", "amijoy/input1" };
@ -84,26 +85,30 @@ static irqreturn_t amijoy_interrupt(int irq, void *dummy, struct pt_regs *fp)
static int amijoy_open(struct input_dev *dev)
{
int *used = dev->private;
int err;
if ((*used)++)
return 0;
err = down_interruptible(&amijoy_sem);
if (err)
return err;
if (request_irq(IRQ_AMIGA_VERTB, amijoy_interrupt, 0, "amijoy", amijoy_interrupt)) {
(*used)--;
if (!amijoy_used && request_irq(IRQ_AMIGA_VERTB, amijoy_interrupt, 0, "amijoy", amijoy_interrupt)) {
printk(KERN_ERR "amijoy.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
return -EBUSY;
err = -EBUSY;
goto out;
}
return 0;
amijoy_used++;
out:
up(&amijoy_sem);
return err;
}
static void amijoy_close(struct input_dev *dev)
{
int *used = dev->private;
if (!--(*used))
down(&amijoy_sem);
if (!--amijoy_used)
free_irq(IRQ_AMIGA_VERTB, amijoy_interrupt);
up(&amijoy_sem);
}
static int __init amijoy_init(void)
@ -138,8 +143,6 @@ static int __init amijoy_init(void)
amijoy_dev[i].id.product = 0x0003;
amijoy_dev[i].id.version = 0x0100;
amijoy_dev[i].private = amijoy_used + i;
input_register_device(amijoy_dev + i);
printk(KERN_INFO "input: %s at joy%ddat\n", amijoy_name, i);
}


@ -87,7 +87,7 @@ __obsolete_setup("db9_3=");
#define DB9_NORMAL 0x0a
#define DB9_NOSELECT 0x08
#define DB9_MAX_DEVICES 2
#define DB9_MAX_DEVICES 2
#define DB9_GENESIS6_DELAY 14
#define DB9_REFRESH_TIME HZ/100
@ -98,6 +98,7 @@ struct db9 {
struct pardevice *pd;
int mode;
int used;
struct semaphore sem;
char phys[2][32];
};
@ -503,6 +504,11 @@ static int db9_open(struct input_dev *dev)
{
struct db9 *db9 = dev->private;
struct parport *port = db9->pd->port;
int err;
err = down_interruptible(&db9->sem);
if (err)
return err;
if (!db9->used++) {
parport_claim(db9->pd);
@ -514,6 +520,7 @@ static int db9_open(struct input_dev *dev)
mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME);
}
up(&db9->sem);
return 0;
}
@ -522,12 +529,14 @@ static void db9_close(struct input_dev *dev)
struct db9 *db9 = dev->private;
struct parport *port = db9->pd->port;
down(&db9->sem);
if (!--db9->used) {
del_timer(&db9->timer);
del_timer_sync(&db9->timer);
parport_write_control(port, 0x00);
parport_data_forward(port);
parport_release(db9->pd);
}
up(&db9->sem);
}
static struct db9 __init *db9_probe(int *config, int nargs)
@ -563,12 +572,12 @@ static struct db9 __init *db9_probe(int *config, int nargs)
}
}
if (!(db9 = kmalloc(sizeof(struct db9), GFP_KERNEL))) {
if (!(db9 = kcalloc(1, sizeof(struct db9), GFP_KERNEL))) {
parport_put_port(pp);
return NULL;
}
memset(db9, 0, sizeof(struct db9));
init_MUTEX(&db9->sem);
db9->mode = config[1];
init_timer(&db9->timer);
db9->timer.data = (long) db9;


@ -1,12 +1,12 @@
/*
* NES, SNES, N64, MultiSystem, PSX gamepad driver for Linux
*
* Copyright (c) 1999-2004 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2004 Peter Nelson <rufus-kernel@hackish.org>
* Copyright (c) 1999-2004 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2004 Peter Nelson <rufus-kernel@hackish.org>
*
* Based on the work of:
* Andree Borrmann John Dahlstrom
* David Kuder Nathan Hand
* Andree Borrmann John Dahlstrom
* David Kuder Nathan Hand
*/
/*
@ -81,6 +81,7 @@ struct gc {
struct timer_list timer;
unsigned char pads[GC_MAX + 1];
int used;
struct semaphore sem;
char phys[5][32];
};
@ -433,7 +434,7 @@ static void gc_timer(unsigned long private)
gc_psx_read_packet(gc, data_psx, data);
for (i = 0; i < 5; i++) {
switch (data[i]) {
switch (data[i]) {
case GC_PSX_RUMBLE:
@ -503,22 +504,33 @@ static void gc_timer(unsigned long private)
static int gc_open(struct input_dev *dev)
{
struct gc *gc = dev->private;
int err;
err = down_interruptible(&gc->sem);
if (err)
return err;
if (!gc->used++) {
parport_claim(gc->pd);
parport_write_control(gc->pd->port, 0x04);
mod_timer(&gc->timer, jiffies + GC_REFRESH_TIME);
}
up(&gc->sem);
return 0;
}
static void gc_close(struct input_dev *dev)
{
struct gc *gc = dev->private;
down(&gc->sem);
if (!--gc->used) {
del_timer(&gc->timer);
del_timer_sync(&gc->timer);
parport_write_control(gc->pd->port, 0x00);
parport_release(gc->pd);
}
up(&gc->sem);
}
static struct gc __init *gc_probe(int *config, int nargs)
@ -542,11 +554,12 @@ static struct gc __init *gc_probe(int *config, int nargs)
return NULL;
}
if (!(gc = kmalloc(sizeof(struct gc), GFP_KERNEL))) {
if (!(gc = kcalloc(1, sizeof(struct gc), GFP_KERNEL))) {
parport_put_port(pp);
return NULL;
}
memset(gc, 0, sizeof(struct gc));
init_MUTEX(&gc->sem);
gc->pd = parport_register_device(pp, "gamecon", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);


@ -329,7 +329,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
for (i = 0; i < gf2k_axes[gf2k->id]; i++) {
gf2k->dev.absmax[gf2k_abs[i]] = (i < 2) ? gf2k->dev.abs[gf2k_abs[i]] * 2 - 32 :
gf2k->dev.abs[gf2k_abs[0]] + gf2k->dev.abs[gf2k_abs[1]] - 32;
gf2k->dev.abs[gf2k_abs[0]] + gf2k->dev.abs[gf2k_abs[1]] - 32;
gf2k->dev.absmin[gf2k_abs[i]] = 32;
gf2k->dev.absfuzz[gf2k_abs[i]] = 8;
gf2k->dev.absflat[gf2k_abs[i]] = (i < 2) ? 24 : 0;


@ -171,7 +171,7 @@ static int mp_io(struct gameport* gameport, int sendflags, int sendcode, u32 *pa
*packet = 0;
raw_data = gameport_read(gameport);
if (raw_data & 1)
return IO_RETRY;
return IO_RETRY;
for (i = 0; i < 64; i++) {
raw_data = gameport_read(gameport);


@ -78,6 +78,7 @@ static struct iforce_device iforce_device[] = {
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
{ 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce }
};


@ -229,6 +229,7 @@ static struct usb_device_id iforce_usb_ids [] = {
{ USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */
{ USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */
{ USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */
{ USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */
{ } /* Terminating entry */
};


@ -4,8 +4,8 @@
* Copyright (c) 1999-2001 Vojtech Pavlik
*
* Based on the work of:
* David Thompson
* Joseph Krahn
* David Thompson
* Joseph Krahn
*/
/*


@ -4,7 +4,7 @@
* Copyright (c) 1999-2001 Vojtech Pavlik
*
* Based on the work of:
* David Thompson
* David Thompson
*/
/*


@ -79,7 +79,7 @@ static short tmdc_btn_pad[TMDC_BTN] =
{ BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_START, BTN_SELECT, BTN_TL, BTN_TR };
static short tmdc_btn_joy[TMDC_BTN] =
{ BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_THUMB2, BTN_PINKIE,
BTN_BASE3, BTN_BASE4, BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z };
BTN_BASE3, BTN_BASE4, BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z };
static short tmdc_btn_fm[TMDC_BTN] =
{ BTN_TRIGGER, BTN_C, BTN_B, BTN_A, BTN_THUMB, BTN_X, BTN_Y, BTN_Z, BTN_TOP, BTN_TOP2 };
static short tmdc_btn_at[TMDC_BTN] =


@ -84,6 +84,7 @@ static struct tgfx {
char phys[7][32];
int sticks;
int used;
struct semaphore sem;
} *tgfx_base[3];
/*
@ -99,7 +100,7 @@ static void tgfx_timer(unsigned long private)
for (i = 0; i < 7; i++)
if (tgfx->sticks & (1 << i)) {
dev = tgfx->dev + i;
dev = tgfx->dev + i;
parport_write_data(tgfx->pd->port, ~(1 << i));
data1 = parport_read_status(tgfx->pd->port) ^ 0x7f;
@ -122,23 +123,34 @@ static void tgfx_timer(unsigned long private)
static int tgfx_open(struct input_dev *dev)
{
struct tgfx *tgfx = dev->private;
if (!tgfx->used++) {
struct tgfx *tgfx = dev->private;
int err;
err = down_interruptible(&tgfx->sem);
if (err)
return err;
if (!tgfx->used++) {
parport_claim(tgfx->pd);
parport_write_control(tgfx->pd->port, 0x04);
mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);
mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);
}
return 0;
up(&tgfx->sem);
return 0;
}
static void tgfx_close(struct input_dev *dev)
{
struct tgfx *tgfx = dev->private;
if (!--tgfx->used) {
del_timer(&tgfx->timer);
struct tgfx *tgfx = dev->private;
down(&tgfx->sem);
if (!--tgfx->used) {
del_timer_sync(&tgfx->timer);
parport_write_control(tgfx->pd->port, 0x00);
parport_release(tgfx->pd);
parport_release(tgfx->pd);
}
up(&tgfx->sem);
}
/*
@ -166,11 +178,12 @@ static struct tgfx __init *tgfx_probe(int *config, int nargs)
return NULL;
}
if (!(tgfx = kmalloc(sizeof(struct tgfx), GFP_KERNEL))) {
if (!(tgfx = kcalloc(1, sizeof(struct tgfx), GFP_KERNEL))) {
parport_put_port(pp);
return NULL;
}
memset(tgfx, 0, sizeof(struct tgfx));
init_MUTEX(&tgfx->sem);
tgfx->pd = parport_register_device(pp, "turbografx", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);


@ -227,7 +227,7 @@ static ssize_t atkbd_do_set_##_name(struct device *d, struct device_attribute *a
{ \
return atkbd_attr_set_helper(d, b, s, atkbd_set_##_name); \
} \
static struct device_attribute atkbd_attr_##_name = \
static struct device_attribute atkbd_attr_##_name = \
__ATTR(_name, S_IWUSR | S_IRUGO, atkbd_do_show_##_name, atkbd_do_set_##_name);
ATKBD_DEFINE_ATTR(extra);
@ -388,7 +388,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
value = atkbd->release ? 0 :
(1 + (!atkbd->softrepeat && test_bit(atkbd->keycode[code], atkbd->dev.key)));
switch (value) { /* Workaround Toshiba laptop multiple keypress */
switch (value) { /* Workaround Toshiba laptop multiple keypress */
case 0:
atkbd->last = 0;
break;
@ -894,7 +894,7 @@ static int atkbd_reconnect(struct serio *serio)
if (atkbd->write) {
param[0] = (test_bit(LED_SCROLLL, atkbd->dev.led) ? 1 : 0)
| (test_bit(LED_NUML, atkbd->dev.led) ? 2 : 0)
| (test_bit(LED_CAPSL, atkbd->dev.led) ? 4 : 0);
| (test_bit(LED_CAPSL, atkbd->dev.led) ? 4 : 0);
if (atkbd_probe(atkbd))
return -1;


@ -39,6 +39,7 @@
#define CORGI_KEY_CALENDER KEY_F1
#define CORGI_KEY_ADDRESS KEY_F2
#define CORGI_KEY_FN KEY_F3
#define CORGI_KEY_CANCEL KEY_F4
#define CORGI_KEY_OFF KEY_SUSPEND
#define CORGI_KEY_EXOK KEY_F5
#define CORGI_KEY_EXCANCEL KEY_F6
@ -46,6 +47,7 @@
#define CORGI_KEY_EXJOGUP KEY_F8
#define CORGI_KEY_JAP1 KEY_LEFTCTRL
#define CORGI_KEY_JAP2 KEY_LEFTALT
#define CORGI_KEY_MAIL KEY_F10
#define CORGI_KEY_OK KEY_F11
#define CORGI_KEY_MENU KEY_F12
#define CORGI_HINGE_0 KEY_KP0
@ -59,8 +61,8 @@ static unsigned char corgikbd_keycode[NR_SCANCODES] = {
KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
CORGI_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
CORGI_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, 0, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, /* 65-80 */
KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
CORGI_HINGE_0, CORGI_HINGE_1, CORGI_HINGE_2 /* 125-127 */
};


@ -15,10 +15,10 @@
* information given below, I will _not_ be liable!
*
* RJ10 pinout: To DE9: Or DB25:
* 1 - RxD <----> Pin 3 (TxD) <-> Pin 2 (TxD)
* 2 - GND <----> Pin 5 (GND) <-> Pin 7 (GND)
* 4 - TxD <----> Pin 2 (RxD) <-> Pin 3 (RxD)
* 3 - +12V (from HDD drive connector), DON'T connect to DE9 or DB25!!!
* 1 - RxD <----> Pin 3 (TxD) <-> Pin 2 (TxD)
* 2 - GND <----> Pin 5 (GND) <-> Pin 7 (GND)
* 4 - TxD <----> Pin 2 (RxD) <-> Pin 3 (RxD)
* 3 - +12V (from HDD drive connector), DON'T connect to DE9 or DB25!!!
*
* Pin numbers for DE9 and DB25 are noted on the plug (quite small:). For
* RJ10, it's like this:


@ -42,7 +42,7 @@ MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
MODULE_DESCRIPTION("LoCoMo keyboard driver");
MODULE_LICENSE("GPL");
#define LOCOMOKBD_NUMKEYS 128
#define LOCOMOKBD_NUMKEYS 128
#define KEY_ACTIVITY KEY_F16
#define KEY_CONTACT KEY_F18
@ -61,7 +61,7 @@ static unsigned char locomokbd_keycode[LOCOMOKBD_NUMKEYS] = {
KEY_G, KEY_F, KEY_X, KEY_S, 0, 0, 0, 0, 0, 0, /* 90 - 99 */
0, 0, KEY_DOT, 0, KEY_COMMA, KEY_N, KEY_B, KEY_C, KEY_Z, KEY_A, /* 100 - 109 */
KEY_LEFTSHIFT, KEY_TAB, KEY_LEFTCTRL, 0, 0, 0, 0, 0, 0, 0, /* 110 - 119 */
KEY_M, KEY_SPACE, KEY_V, KEY_APOSTROPHE, KEY_SLASH, 0, 0, 0 /* 120 - 128 */
KEY_M, KEY_SPACE, KEY_V, KEY_APOSTROPHE, KEY_SLASH, 0, 0, 0 /* 120 - 128 */
};
#define KB_ROWS 16
@ -82,7 +82,7 @@ struct locomokbd {
struct locomo_dev *ldev;
unsigned long base;
spinlock_t lock;
struct timer_list timer;
};
@ -95,7 +95,7 @@ static inline void locomokbd_charge_all(unsigned long membase)
static inline void locomokbd_activate_all(unsigned long membase)
{
unsigned long r;
locomo_writel(0, membase + LOCOMO_KSC);
r = locomo_readl(membase + LOCOMO_KIC);
r &= 0xFEFF;
@ -127,7 +127,7 @@ static inline void locomokbd_reset_col(unsigned long membase, int col)
*/
/* Scan the hardware keyboard and push any changes up through the input layer */
static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *regs)
static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *regs)
{
unsigned int row, col, rowd, scancode;
unsigned long flags;
@ -138,7 +138,7 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *
if (regs)
input_regs(&locomokbd->input, regs);
locomokbd_charge_all(membase);
num_pressed = 0;
@ -146,9 +146,9 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *
locomokbd_activate_col(membase, col);
udelay(KB_DELAY);
rowd = ~locomo_readl(membase + LOCOMO_KIB);
for (row = 0; row < KB_ROWS; row++ ) {
for (row = 0; row < KB_ROWS; row++) {
scancode = SCANCODE(col, row);
if (rowd & KB_ROWMASK(row)) {
num_pressed += 1;
@ -170,7 +170,7 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *
spin_unlock_irqrestore(&locomokbd->lock, flags);
}
/*
/*
* LoCoMo keyboard interrupt handler.
*/
static irqreturn_t locomokbd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@ -205,8 +205,8 @@ static int locomokbd_probe(struct locomo_dev *dev)
memset(locomokbd, 0, sizeof(struct locomokbd));
/* try and claim memory region */
if (!request_mem_region((unsigned long) dev->mapbase,
dev->length,
if (!request_mem_region((unsigned long) dev->mapbase,
dev->length,
LOCOMO_DRIVER_NAME(dev))) {
ret = -EBUSY;
printk(KERN_ERR "locomokbd: Can't acquire access to io memory for keyboard\n");
@ -225,7 +225,7 @@ static int locomokbd_probe(struct locomo_dev *dev)
locomokbd->timer.data = (unsigned long) locomokbd;
locomokbd->input.evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
init_input_dev(&locomokbd->input);
locomokbd->input.keycode = locomokbd->keycode;
locomokbd->input.keycodesize = sizeof(unsigned char);
@ -271,11 +271,11 @@ free:
static int locomokbd_remove(struct locomo_dev *dev)
{
struct locomokbd *locomokbd = locomo_get_drvdata(dev);
free_irq(dev->irq[0], locomokbd);
del_timer_sync(&locomokbd->timer);
input_unregister_device(&locomokbd->input);
locomo_set_drvdata(dev, NULL);


@ -1,6 +1,6 @@
/*
* $Id: maple_keyb.c,v 1.4 2004/03/22 01:18:15 lethal Exp $
* SEGA Dreamcast keyboard driver
* SEGA Dreamcast keyboard driver
* Based on drivers/usb/usbkbd.c
*/
@ -40,7 +40,6 @@ struct dc_kbd {
struct input_dev dev;
unsigned char new[8];
unsigned char old[8];
int open;
};
@ -95,22 +94,6 @@ static void dc_kbd_callback(struct mapleq *mq)
}
}
static int dc_kbd_open(struct input_dev *dev)
{
struct dc_kbd *kbd = dev->private;
kbd->open++;
return 0;
}
static void dc_kbd_close(struct input_dev *dev)
{
struct dc_kbd *kbd = dev->private;
kbd->open--;
}
static int dc_kbd_connect(struct maple_device *dev)
{
int i;
@ -133,9 +116,6 @@ static int dc_kbd_connect(struct maple_device *dev)
clear_bit(0, kbd->dev.keybit);
kbd->dev.private = kbd;
kbd->dev.open = dc_kbd_open;
kbd->dev.close = dc_kbd_close;
kbd->dev.event = NULL;
kbd->dev.name = dev->product_name;
kbd->dev.id.bustype = BUS_MAPLE;


@ -298,9 +298,11 @@ static int uinput_alloc_device(struct file *file, const char __user *buffer, siz
/* check if absmin/absmax/absfuzz/absflat are filled as
* told in Documentation/input/input-programming.txt */
if (test_bit(EV_ABS, dev->evbit)) {
retval = uinput_validate_absbits(dev);
if (retval < 0)
int err = uinput_validate_absbits(dev);
if (err < 0) {
retval = err;
kfree(dev->name);
}
}
exit:
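A hedged illustration of the requirement the comment above cites: every axis advertised in evbit/absbit needs its range filled in before the device is registered, which is what uinput_validate_absbits() checks. The helper name and axis values below are made up; input_set_abs_params() is the same call the lifebook driver later in this diff uses.

#include <linux/input.h>

/* Illustrative only: advertise ABS_X and give it the range data that
 * uinput_validate_absbits()-style checks look for. */
static void example_setup_abs_axis(struct input_dev *dev)
{
	set_bit(EV_ABS, dev->evbit);
	set_bit(ABS_X, dev->absbit);
	/* min 0, max 1023, fuzz 4, flat 8 -- arbitrary but valid values */
	input_set_abs_params(dev, ABS_X, 0, 1023, 4, 8);
}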


@ -15,4 +15,4 @@ obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o
obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o
obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o
psmouse-objs := psmouse-base.o alps.o logips2pp.o synaptics.o
psmouse-objs := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o


@ -30,10 +30,11 @@
#define ALPS_DUALPOINT 0x01
#define ALPS_WHEEL 0x02
#define ALPS_FW_BK 0x04
#define ALPS_FW_BK_1 0x04
#define ALPS_4BTN 0x08
#define ALPS_OLDPROTO 0x10
#define ALPS_PASS 0x20
#define ALPS_FW_BK_2 0x40
static struct alps_model_info alps_model_data[] = {
{ { 0x33, 0x02, 0x0a }, 0x88, 0xf8, ALPS_OLDPROTO }, /* UMAX-530T */
@ -43,11 +44,11 @@ static struct alps_model_info alps_model_data[] = {
{ { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
{ { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, 0 },
{ { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
{ { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK }, /* NEC Versa L320 */
{ { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
{ { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
{ { 0x63, 0x03, 0xc8 }, 0xf8, 0xf8, ALPS_PASS }, /* Dell Latitude D800 */
{ { 0x73, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
{ { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
{ { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
{ { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
{ { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
{ { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
@ -61,11 +62,11 @@ static struct alps_model_info alps_model_data[] = {
/*
* ALPS absolute Mode - new format
*
* byte 0: 1 ? ? ? 1 ? ? ?
*
* byte 0: 1 ? ? ? 1 ? ? ?
* byte 1: 0 x6 x5 x4 x3 x2 x1 x0
* byte 2: 0 x10 x9 x8 x7 ? fin ges
* byte 3: 0 y9 y8 y7 1 M R L
* byte 3: 0 y9 y8 y7 1 M R L
* byte 4: 0 y6 y5 y4 y3 y2 y1 y0
* byte 5: 0 z6 z5 z4 z3 z2 z1 z0
*
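A decoding sketch for the packet layout documented above (not part of the patch; the struct and helper names are illustrative). The bit masks follow the byte map in the comment and match the x/y/z and fin/ges extraction used elsewhere in alps_process_packet().

/* Illustrative only: pull the documented fields out of a validated
 * 6-byte ALPS absolute-mode packet. */
struct alps_abs_sample {
	int x, y, z;
	int left, right, middle;
	int fin, ges;
};

static inline void example_alps_decode(const unsigned char *packet,
				       struct alps_abs_sample *s)
{
	s->x      = packet[1] | ((packet[2] & 0x78) << 4);	/* x10..x0 */
	s->y      = packet[4] | ((packet[3] & 0x70) << 3);	/* y9..y0  */
	s->z      = packet[5];					/* z6..z0  */
	s->ges    = packet[2] & 1;
	s->fin    = packet[2] & 2;
	s->left   = packet[3] & 1;
	s->right  = packet[3] & 2;
	s->middle = packet[3] & 4;
}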
@ -81,11 +82,12 @@ static void alps_process_packet(struct psmouse *psmouse, struct pt_regs *regs)
struct input_dev *dev = &psmouse->dev;
struct input_dev *dev2 = &priv->dev2;
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
input_regs(dev, regs);
if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */
input_report_key(dev2, BTN_LEFT, packet[0] & 1);
input_report_key(dev2, BTN_LEFT, packet[0] & 1);
input_report_key(dev2, BTN_RIGHT, packet[0] & 2);
input_report_key(dev2, BTN_MIDDLE, packet[0] & 4);
input_report_rel(dev2, REL_X,
@ -112,6 +114,18 @@ static void alps_process_packet(struct psmouse *psmouse, struct pt_regs *regs)
z = packet[5];
}
if (priv->i->flags & ALPS_FW_BK_1) {
back = packet[2] & 4;
forward = packet[0] & 0x10;
}
if (priv->i->flags & ALPS_FW_BK_2) {
back = packet[3] & 4;
forward = packet[2] & 4;
if ((middle = forward && back))
forward = back = 0;
}
ges = packet[2] & 1;
fin = packet[2] & 2;
@ -155,13 +169,12 @@ static void alps_process_packet(struct psmouse *psmouse, struct pt_regs *regs)
input_report_abs(dev, ABS_PRESSURE, z);
input_report_key(dev, BTN_TOOL_FINGER, z > 0);
if (priv->i->flags & ALPS_WHEEL)
input_report_rel(dev, REL_WHEEL, ((packet[0] >> 4) & 0x07) | ((packet[2] >> 2) & 0x08));
if (priv->i->flags & ALPS_FW_BK) {
input_report_key(dev, BTN_FORWARD, packet[0] & 0x10);
input_report_key(dev, BTN_BACK, packet[2] & 0x04);
if (priv->i->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
input_report_key(dev, BTN_FORWARD, forward);
input_report_key(dev, BTN_BACK, back);
}
input_sync(dev);
@ -257,7 +270,6 @@ static struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *vers
static int alps_passthrough_mode(struct psmouse *psmouse, int enable)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[3];
int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11;
if (ps2_command(ps2dev, NULL, cmd) ||
@ -267,7 +279,7 @@ static int alps_passthrough_mode(struct psmouse *psmouse, int enable)
return -1;
/* we may get 3 more bytes, just ignore them */
ps2_command(ps2dev, param, 0x0300);
ps2_drain(ps2dev, 3, 100);
return 0;
}
@ -425,7 +437,7 @@ int alps_init(struct psmouse *psmouse)
psmouse->dev.relbit[LONG(REL_WHEEL)] |= BIT(REL_WHEEL);
}
if (priv->i->flags & ALPS_FW_BK) {
if (priv->i->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
psmouse->dev.keybit[LONG(BTN_FORWARD)] |= BIT(BTN_FORWARD);
psmouse->dev.keybit[LONG(BTN_BACK)] |= BIT(BTN_BACK);
}
@ -436,8 +448,8 @@ int alps_init(struct psmouse *psmouse)
priv->dev2.id.bustype = BUS_I8042;
priv->dev2.id.vendor = 0x0002;
priv->dev2.id.product = PSMOUSE_ALPS;
priv->dev2.id.version = 0x0000;
priv->dev2.id.version = 0x0000;
priv->dev2.evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
priv->dev2.relbit[LONG(REL_X)] |= BIT(REL_X) | BIT(REL_Y);
priv->dev2.keybit[LONG(BTN_LEFT)] |= BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT);
@ -461,17 +473,15 @@ init_fail:
int alps_detect(struct psmouse *psmouse, int set_properties)
{
int version;
struct alps_model_info *model;
struct alps_model_info *model;
if (!(model = alps_get_model(psmouse, &version)))
return -1;
if (set_properties) {
psmouse->vendor = "ALPS";
if (model->flags & ALPS_DUALPOINT)
psmouse->name = "DualPoint TouchPad";
else
psmouse->name = "GlidePoint";
psmouse->name = model->flags & ALPS_DUALPOINT ?
"DualPoint TouchPad" : "GlidePoint";
psmouse->model = version;
}
return 0;


@ -33,7 +33,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga mouse driver");
MODULE_LICENSE("GPL");
static int amimouse_used = 0;
static int amimouse_lastx, amimouse_lasty;
static struct input_dev amimouse_dev;
@ -81,16 +80,12 @@ static int amimouse_open(struct input_dev *dev)
{
unsigned short joy0dat;
if (amimouse_used++)
return 0;
joy0dat = custom.joy0dat;
amimouse_lastx = joy0dat & 0xff;
amimouse_lasty = joy0dat >> 8;
if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) {
amimouse_used--;
printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
return -EBUSY;
}
@ -100,8 +95,7 @@ static int amimouse_open(struct input_dev *dev)
static void amimouse_close(struct input_dev *dev)
{
if (!--amimouse_used)
free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt);
free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt);
}
static int __init amimouse_init(void)


@ -17,18 +17,18 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
@ -87,29 +87,23 @@ MODULE_PARM_DESC(irq, "IRQ number (5=default)");
__obsolete_setup("inport_irq=");
static int inport_used;
static irqreturn_t inport_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int inport_open(struct input_dev *dev)
{
if (!inport_used++) {
if (request_irq(inport_irq, inport_interrupt, 0, "inport", NULL))
return -EBUSY;
outb(INPORT_REG_MODE, INPORT_CONTROL_PORT);
outb(INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT);
}
if (request_irq(inport_irq, inport_interrupt, 0, "inport", NULL))
return -EBUSY;
outb(INPORT_REG_MODE, INPORT_CONTROL_PORT);
outb(INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT);
return 0;
}
static void inport_close(struct input_dev *dev)
{
if (!--inport_used) {
outb(INPORT_REG_MODE, INPORT_CONTROL_PORT);
outb(INPORT_MODE_BASE, INPORT_DATA_PORT);
free_irq(inport_irq, NULL);
}
outb(INPORT_REG_MODE, INPORT_CONTROL_PORT);
outb(INPORT_MODE_BASE, INPORT_DATA_PORT);
free_irq(inport_irq, NULL);
}
static struct input_dev inport_dev = {
@ -120,11 +114,11 @@ static struct input_dev inport_dev = {
.close = inport_close,
.name = INPORT_NAME,
.phys = "isa023c/input0",
.id = {
.bustype = BUS_ISA,
.vendor = INPORT_VENDOR,
.product = 0x0001,
.version = 0x0100,
.id = {
.bustype = BUS_ISA,
.vendor = INPORT_VENDOR,
.product = 0x0001,
.version = 0x0100,
},
};


@ -0,0 +1,134 @@
/*
* Fujitsu B-series Lifebook PS/2 TouchScreen driver
*
* Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Kenan Esau <kenan.esau@conan.de>
*
* TouchScreen detection, absolute mode setting and packet layout is taken from
* Harald Hoyer's description of the device.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/libps2.h>
#include <linux/dmi.h>
#include "psmouse.h"
#include "lifebook.h"
static struct dmi_system_id lifebook_dmi_table[] = {
{
.ident = "Lifebook B",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK B Series"),
},
},
{ }
};
static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse, struct pt_regs *regs)
{
unsigned char *packet = psmouse->packet;
struct input_dev *dev = &psmouse->dev;
if (psmouse->pktcnt != 3)
return PSMOUSE_GOOD_DATA;
input_regs(dev, regs);
/* calculate X and Y */
if ((packet[0] & 0x08) == 0x00) {
input_report_abs(dev, ABS_X,
(packet[1] | ((packet[0] & 0x30) << 4)));
input_report_abs(dev, ABS_Y,
1024 - (packet[2] | ((packet[0] & 0xC0) << 2)));
} else {
input_report_rel(dev, REL_X,
((packet[0] & 0x10) ? packet[1] - 256 : packet[1]));
input_report_rel(dev, REL_Y,
-(int)((packet[0] & 0x20) ? packet[2] - 256 : packet[2]));
}
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
input_report_key(dev, BTN_TOUCH, packet[0] & 0x04);
input_sync(dev);
return PSMOUSE_FULL_PACKET;
}
static int lifebook_absolute_mode(struct psmouse *psmouse)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param;
if (psmouse_reset(psmouse))
return -1;
/*
Enable absolute output -- ps2_command always fails, but if
you leave this call out the touchscreen will never send
absolute coordinates
*/
param = 0x07;
ps2_command(ps2dev, &param, PSMOUSE_CMD_SETRES);
return 0;
}
static void lifebook_set_resolution(struct psmouse *psmouse, unsigned int resolution)
{
unsigned char params[] = { 0, 1, 2, 2, 3 };
if (resolution == 0 || resolution > 400)
resolution = 400;
ps2_command(&psmouse->ps2dev, &params[resolution / 100], PSMOUSE_CMD_SETRES);
psmouse->resolution = 50 << params[resolution / 100];
}
static void lifebook_disconnect(struct psmouse *psmouse)
{
psmouse_reset(psmouse);
}
int lifebook_detect(struct psmouse *psmouse, int set_properties)
{
if (!dmi_check_system(lifebook_dmi_table))
return -1;
if (set_properties) {
psmouse->vendor = "Fujitsu";
psmouse->name = "Lifebook TouchScreen";
}
return 0;
}
int lifebook_init(struct psmouse *psmouse)
{
if (lifebook_absolute_mode(psmouse))
return -1;
psmouse->dev.evbit[0] = BIT(EV_ABS) | BIT(EV_KEY) | BIT(EV_REL);
psmouse->dev.keybit[LONG(BTN_LEFT)] = BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT);
psmouse->dev.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
psmouse->dev.relbit[0] = BIT(REL_X) | BIT(REL_Y);
input_set_abs_params(&psmouse->dev, ABS_X, 0, 1024, 0, 0);
input_set_abs_params(&psmouse->dev, ABS_Y, 0, 1024, 0, 0);
psmouse->protocol_handler = lifebook_process_byte;
psmouse->set_resolution = lifebook_set_resolution;
psmouse->disconnect = lifebook_disconnect;
psmouse->reconnect = lifebook_absolute_mode;
psmouse->pktsize = 3;
return 0;
}


@ -0,0 +1,17 @@
/*
* Fujitsu B-series Lifebook PS/2 TouchScreen driver
*
* Copyright (c) 2005 Vojtech Pavlik
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#ifndef _LIFEBOOK_H
#define _LIFEBOOK_H
int lifebook_detect(struct psmouse *psmouse, int set_properties);
int lifebook_init(struct psmouse *psmouse);
#endif


@ -18,18 +18,18 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
@ -77,16 +77,11 @@ MODULE_PARM_DESC(irq, "IRQ number (5=default)");
__obsolete_setup("logibm_irq=");
static int logibm_used = 0;
static irqreturn_t logibm_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int logibm_open(struct input_dev *dev)
{
if (logibm_used++)
return 0;
if (request_irq(logibm_irq, logibm_interrupt, 0, "logibm", NULL)) {
logibm_used--;
printk(KERN_ERR "logibm.c: Can't allocate irq %d\n", logibm_irq);
return -EBUSY;
}
@ -96,8 +91,6 @@ static int logibm_open(struct input_dev *dev)
static void logibm_close(struct input_dev *dev)
{
if (--logibm_used)
return;
outb(LOGIBM_DISABLE_IRQ, LOGIBM_CONTROL_PORT);
free_irq(logibm_irq, NULL);
}
@ -167,7 +160,7 @@ static int __init logibm_init(void)
outb(LOGIBM_DISABLE_IRQ, LOGIBM_CONTROL_PORT);
input_register_device(&logibm_dev);
printk(KERN_INFO "input: Logitech bus mouse at %#x irq %d\n", LOGIBM_BASE, logibm_irq);
return 0;


@ -1,6 +1,6 @@
/*
* $Id: maplemouse.c,v 1.2 2004/03/22 01:18:15 lethal Exp $
* SEGA Dreamcast mouse driver
* SEGA Dreamcast mouse driver
* Based on drivers/usb/usbmouse.c
*/
@ -15,80 +15,51 @@
MODULE_AUTHOR("YAEGASHI Takeshi <t@keshi.org>");
MODULE_DESCRIPTION("SEGA Dreamcast mouse driver");
struct dc_mouse {
struct input_dev dev;
int open;
};
static void dc_mouse_callback(struct mapleq *mq)
{
int buttons, relx, rely, relz;
struct maple_device *mapledev = mq->dev;
struct dc_mouse *mouse = mapledev->private_data;
struct input_dev *dev = &mouse->dev;
struct input_dev *dev = mapledev->private_data;
unsigned char *res = mq->recvbuf;
buttons = ~res[8];
relx=*(unsigned short *)(res+12)-512;
rely=*(unsigned short *)(res+14)-512;
relz=*(unsigned short *)(res+16)-512;
relx = *(unsigned short *)(res + 12) - 512;
rely = *(unsigned short *)(res + 14) - 512;
relz = *(unsigned short *)(res + 16) - 512;
input_report_key(dev, BTN_LEFT, buttons&4);
input_report_key(dev, BTN_MIDDLE, buttons&9);
input_report_key(dev, BTN_RIGHT, buttons&2);
input_report_key(dev, BTN_LEFT, buttons & 4);
input_report_key(dev, BTN_MIDDLE, buttons & 9);
input_report_key(dev, BTN_RIGHT, buttons & 2);
input_report_rel(dev, REL_X, relx);
input_report_rel(dev, REL_Y, rely);
input_report_rel(dev, REL_WHEEL, relz);
input_sync(dev);
}
static int dc_mouse_open(struct input_dev *dev)
{
struct dc_mouse *mouse = dev->private;
mouse->open++;
return 0;
}
static void dc_mouse_close(struct input_dev *dev)
{
struct dc_mouse *mouse = dev->private;
mouse->open--;
}
static int dc_mouse_connect(struct maple_device *dev)
{
unsigned long data = be32_to_cpu(dev->devinfo.function_data[0]);
struct dc_mouse *mouse;
struct input_dev *input_dev;
if (!(mouse = kmalloc(sizeof(struct dc_mouse), GFP_KERNEL)))
if (!(input_dev = kmalloc(sizeof(struct input_dev), GFP_KERNEL)))
return -1;
memset(mouse, 0, sizeof(struct dc_mouse));
dev->private_data = mouse;
dev->private_data = input_dev;
mouse->dev.evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
mouse->dev.keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) | BIT(BTN_RIGHT) | BIT(BTN_MIDDLE);
mouse->dev.relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
memset(input_dev, 0, sizeof(struct input_dev));
init_input_dev(input_dev);
input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
input_dev->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) | BIT(BTN_RIGHT) | BIT(BTN_MIDDLE);
input_dev->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
init_input_dev(&mouse->dev);
input_dev->name = dev->product_name;
input_dev->id.bustype = BUS_MAPLE;
mouse->dev.private = mouse;
mouse->dev.open = dc_mouse_open;
mouse->dev.close = dc_mouse_close;
mouse->dev.event = NULL;
mouse->dev.name = dev->product_name;
mouse->dev.id.bustype = BUS_MAPLE;
input_register_device(&mouse->dev);
input_register_device(input_dev);
maple_getcond_callback(dev, dc_mouse_callback, 1, MAPLE_FUNC_MOUSE);
printk(KERN_INFO "input: mouse(0x%lx): %s\n", data, mouse->dev.name);
printk(KERN_INFO "input: mouse(0x%lx): %s\n", data, input_dev->name);
return 0;
}
@ -96,10 +67,10 @@ static int dc_mouse_connect(struct maple_device *dev)
static void dc_mouse_disconnect(struct maple_device *dev)
{
struct dc_mouse *mouse = dev->private_data;
struct input_dev *input_dev = dev->private_data;
input_unregister_device(&mouse->dev);
kfree(mouse);
input_unregister_device(input_dev);
kfree(input_dev);
}

View File

@ -4,7 +4,7 @@
* Copyright (c) 2000-2001 Vojtech Pavlik
*
* Based on the work of:
* Alan Cox Robin O'Leary
* Alan Cox Robin O'Leary
*/
/*
@ -56,7 +56,6 @@ static int pc110pad_io = 0x15e0;
static struct input_dev pc110pad_dev;
static int pc110pad_data[3];
static int pc110pad_count;
static int pc110pad_used;
static char *pc110pad_name = "IBM PC110 TouchPad";
static char *pc110pad_phys = "isa15e0/input0";
@ -74,7 +73,7 @@ static irqreturn_t pc110pad_interrupt(int irq, void *ptr, struct pt_regs *regs)
if (pc110pad_count < 3)
return IRQ_HANDLED;
input_regs(&pc110pad_dev, regs);
input_report_key(&pc110pad_dev, BTN_TOUCH,
pc110pad_data[0] & 0x01);
@ -90,15 +89,11 @@ static irqreturn_t pc110pad_interrupt(int irq, void *ptr, struct pt_regs *regs)
static void pc110pad_close(struct input_dev *dev)
{
if (!--pc110pad_used)
outb(PC110PAD_OFF, pc110pad_io + 2);
outb(PC110PAD_OFF, pc110pad_io + 2);
}
static int pc110pad_open(struct input_dev *dev)
{
if (pc110pad_used++)
return 0;
pc110pad_interrupt(0,NULL,NULL);
pc110pad_interrupt(0,NULL,NULL);
pc110pad_interrupt(0,NULL,NULL);
@ -145,7 +140,7 @@ static int __init pc110pad_init(void)
pc110pad_dev.absmax[ABS_X] = 0x1ff;
pc110pad_dev.absmax[ABS_Y] = 0x0ff;
pc110pad_dev.open = pc110pad_open;
pc110pad_dev.close = pc110pad_close;
@ -156,17 +151,17 @@ static int __init pc110pad_init(void)
pc110pad_dev.id.product = 0x0001;
pc110pad_dev.id.version = 0x0100;
input_register_device(&pc110pad_dev);
input_register_device(&pc110pad_dev);
printk(KERN_INFO "input: %s at %#x irq %d\n",
pc110pad_name, pc110pad_io, pc110pad_irq);
return 0;
}
static void __exit pc110pad_exit(void)
{
input_unregister_device(&pc110pad_dev);
input_unregister_device(&pc110pad_dev);
outb(PC110PAD_OFF, pc110pad_io + 2);


@ -24,6 +24,7 @@
#include "synaptics.h"
#include "logips2pp.h"
#include "alps.h"
#include "lifebook.h"
#define DRIVER_DESC "PS/2 mouse driver"
@ -31,10 +32,9 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static unsigned int psmouse_max_proto = -1U;
static unsigned int psmouse_max_proto = PSMOUSE_AUTO;
static int psmouse_set_maxproto(const char *val, struct kernel_param *kp);
static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp);
static char *psmouse_proto_abbrev[] = { NULL, "bare", NULL, NULL, NULL, "imps", "exps", NULL, NULL, NULL };
#define param_check_proto_abbrev(name, p) __param_check(name, p, unsigned int)
#define param_set_proto_abbrev psmouse_set_maxproto
#define param_get_proto_abbrev psmouse_get_maxproto
@ -57,6 +57,7 @@ static unsigned int psmouse_resetafter;
module_param_named(resetafter, psmouse_resetafter, uint, 0644);
MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never).");
PSMOUSE_DEFINE_ATTR(protocol);
PSMOUSE_DEFINE_ATTR(rate);
PSMOUSE_DEFINE_ATTR(resolution);
PSMOUSE_DEFINE_ATTR(resetafter);
@ -67,7 +68,23 @@ __obsolete_setup("psmouse_smartscroll=");
__obsolete_setup("psmouse_resetafter=");
__obsolete_setup("psmouse_rate=");
static char *psmouse_protocols[] = { "None", "PS/2", "PS2++", "ThinkPS/2", "GenPS/2", "ImPS/2", "ImExPS/2", "SynPS/2", "AlpsPS/2" };
/*
* psmouse_sem protects all operations changing state of mouse
* (connecting, disconnecting, changing rate or resolution via
* sysfs). We could use a per-device semaphore but since there
* is rarely more than one PS/2 mouse connected and since the semaphore
* is taken in "slow" paths it is not worth it.
*/
static DECLARE_MUTEX(psmouse_sem);
struct psmouse_protocol {
enum psmouse_type type;
char *name;
char *alias;
int maxproto;
int (*detect)(struct psmouse *, int);
int (*init)(struct psmouse *);
};
/*
* psmouse_process_byte() analyzes the PS/2 data stream and reports
@ -407,12 +424,15 @@ static int thinking_detect(struct psmouse *psmouse, int set_properties)
*/
static int ps2bare_detect(struct psmouse *psmouse, int set_properties)
{
if (!psmouse->vendor) psmouse->vendor = "Generic";
if (!psmouse->name) psmouse->name = "Mouse";
if (set_properties) {
if (!psmouse->vendor) psmouse->vendor = "Generic";
if (!psmouse->name) psmouse->name = "Mouse";
}
return 0;
}
/*
* psmouse_extensions() probes for any extensions to the basic PS/2 protocol
* the mouse may have.
@ -423,6 +443,17 @@ static int psmouse_extensions(struct psmouse *psmouse,
{
int synaptics_hardware = 0;
/*
* We always check for lifebook because it does not disturb mouse
* (it only checks DMI information).
*/
if (lifebook_detect(psmouse, set_properties) == 0) {
if (max_proto > PSMOUSE_IMEX) {
if (!set_properties || lifebook_init(psmouse) == 0)
return PSMOUSE_LIFEBOOK;
}
}
/*
* Try Kensington ThinkingMouse (we try first, because synaptics probe
* upsets the thinkingmouse).
@ -506,6 +537,103 @@ static int psmouse_extensions(struct psmouse *psmouse,
return PSMOUSE_PS2;
}
static struct psmouse_protocol psmouse_protocols[] = {
{
.type = PSMOUSE_PS2,
.name = "PS/2",
.alias = "bare",
.maxproto = 1,
.detect = ps2bare_detect,
},
{
.type = PSMOUSE_PS2PP,
.name = "PS2++",
.alias = "logitech",
.detect = ps2pp_init,
},
{
.type = PSMOUSE_THINKPS,
.name = "ThinkPS/2",
.alias = "thinkps",
.detect = thinking_detect,
},
{
.type = PSMOUSE_GENPS,
.name = "GenPS/2",
.alias = "genius",
.detect = genius_detect,
},
{
.type = PSMOUSE_IMPS,
.name = "ImPS/2",
.alias = "imps",
.maxproto = 1,
.detect = intellimouse_detect,
},
{
.type = PSMOUSE_IMEX,
.name = "ImExPS/2",
.alias = "exps",
.maxproto = 1,
.detect = im_explorer_detect,
},
{
.type = PSMOUSE_SYNAPTICS,
.name = "SynPS/2",
.alias = "synaptics",
.detect = synaptics_detect,
.init = synaptics_init,
},
{
.type = PSMOUSE_ALPS,
.name = "AlpsPS/2",
.alias = "alps",
.detect = alps_detect,
.init = alps_init,
},
{
.type = PSMOUSE_LIFEBOOK,
.name = "LBPS/2",
.alias = "lifebook",
.init = lifebook_init,
},
{
.type = PSMOUSE_AUTO,
.name = "auto",
.alias = "any",
.maxproto = 1,
},
};
static struct psmouse_protocol *psmouse_protocol_by_type(enum psmouse_type type)
{
int i;
for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++)
if (psmouse_protocols[i].type == type)
return &psmouse_protocols[i];
WARN_ON(1);
return &psmouse_protocols[0];
}
static struct psmouse_protocol *psmouse_protocol_by_name(const char *name, size_t len)
{
struct psmouse_protocol *p;
int i;
for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++) {
p = &psmouse_protocols[i];
if ((strlen(p->name) == len && !strncmp(p->name, name, len)) ||
(strlen(p->alias) == len && !strncmp(p->alias, name, len)))
return &psmouse_protocols[i];
}
return NULL;
}
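A usage sketch for the two lookup helpers above (not part of the patch; the wrapper function is hypothetical). This is the mapping the proto= module parameter and the new protocol sysfs attribute rely on: either the table's name or its alias resolves to the same entry.

/* Illustrative only: both lookups land on the same psmouse_protocols[]
 * entry -- the "ImPS/2" protocol whose alias is "imps". */
static void example_protocol_lookup(void)
{
	struct psmouse_protocol *by_type = psmouse_protocol_by_type(PSMOUSE_IMPS);
	struct psmouse_protocol *by_name = psmouse_protocol_by_name("imps", 4);

	WARN_ON(by_type != by_name);
}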
/*
* psmouse_probe() probes for a PS/2 mouse.
*/
@ -653,30 +781,84 @@ static void psmouse_cleanup(struct serio *serio)
static void psmouse_disconnect(struct serio *serio)
{
struct psmouse *psmouse, *parent;
struct psmouse *psmouse, *parent = NULL;
psmouse = serio_get_drvdata(serio);
device_remove_file(&serio->dev, &psmouse_attr_protocol);
device_remove_file(&serio->dev, &psmouse_attr_rate);
device_remove_file(&serio->dev, &psmouse_attr_resolution);
device_remove_file(&serio->dev, &psmouse_attr_resetafter);
psmouse = serio_get_drvdata(serio);
down(&psmouse_sem);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
if (parent->pt_deactivate)
parent->pt_deactivate(parent);
psmouse_deactivate(parent);
}
if (psmouse->disconnect)
psmouse->disconnect(psmouse);
if (parent && parent->pt_deactivate)
parent->pt_deactivate(parent);
psmouse_set_state(psmouse, PSMOUSE_IGNORE);
input_unregister_device(&psmouse->dev);
serio_close(serio);
serio_set_drvdata(serio, NULL);
kfree(psmouse);
if (parent)
psmouse_activate(parent);
up(&psmouse_sem);
}
static int psmouse_switch_protocol(struct psmouse *psmouse, struct psmouse_protocol *proto)
{
memset(&psmouse->dev, 0, sizeof(struct input_dev));
init_input_dev(&psmouse->dev);
psmouse->dev.private = psmouse;
psmouse->dev.dev = &psmouse->ps2dev.serio->dev;
psmouse->dev.evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
psmouse->dev.keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT);
psmouse->dev.relbit[0] = BIT(REL_X) | BIT(REL_Y);
psmouse->set_rate = psmouse_set_rate;
psmouse->set_resolution = psmouse_set_resolution;
psmouse->protocol_handler = psmouse_process_byte;
psmouse->pktsize = 3;
if (proto && (proto->detect || proto->init)) {
if (proto->detect && proto->detect(psmouse, 1) < 0)
return -1;
if (proto->init && proto->init(psmouse) < 0)
return -1;
psmouse->type = proto->type;
}
else
psmouse->type = psmouse_extensions(psmouse, psmouse_max_proto, 1);
sprintf(psmouse->devname, "%s %s %s",
psmouse_protocol_by_type(psmouse->type)->name, psmouse->vendor, psmouse->name);
psmouse->dev.name = psmouse->devname;
psmouse->dev.phys = psmouse->phys;
psmouse->dev.id.bustype = BUS_I8042;
psmouse->dev.id.vendor = 0x0002;
psmouse->dev.id.product = psmouse->type;
psmouse->dev.id.version = psmouse->model;
return 0;
}
/*
@ -688,6 +870,8 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
struct psmouse *psmouse, *parent = NULL;
int retval;
down(&psmouse_sem);
/*
* If this is a pass-through port deactivate parent so the device
* connected to this port can be successfully identified
@ -697,20 +881,14 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
psmouse_deactivate(parent);
}
if (!(psmouse = kmalloc(sizeof(struct psmouse), GFP_KERNEL))) {
if (!(psmouse = kcalloc(1, sizeof(struct psmouse), GFP_KERNEL))) {
retval = -ENOMEM;
goto out;
}
memset(psmouse, 0, sizeof(struct psmouse));
ps2_init(&psmouse->ps2dev, serio);
sprintf(psmouse->phys, "%s/input0", serio->phys);
psmouse->dev.evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
psmouse->dev.keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT);
psmouse->dev.relbit[0] = BIT(REL_X) | BIT(REL_Y);
psmouse->dev.private = psmouse;
psmouse->dev.dev = &serio->dev;
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
serio_set_drvdata(serio, psmouse);
@ -734,25 +912,10 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
psmouse->resolution = psmouse_resolution;
psmouse->resetafter = psmouse_resetafter;
psmouse->smartscroll = psmouse_smartscroll;
psmouse->set_rate = psmouse_set_rate;
psmouse->set_resolution = psmouse_set_resolution;
psmouse->protocol_handler = psmouse_process_byte;
psmouse->pktsize = 3;
psmouse->type = psmouse_extensions(psmouse, psmouse_max_proto, 1);
sprintf(psmouse->devname, "%s %s %s",
psmouse_protocols[psmouse->type], psmouse->vendor, psmouse->name);
psmouse->dev.name = psmouse->devname;
psmouse->dev.phys = psmouse->phys;
psmouse->dev.id.bustype = BUS_I8042;
psmouse->dev.id.vendor = 0x0002;
psmouse->dev.id.product = psmouse->type;
psmouse->dev.id.version = psmouse->model;
psmouse_switch_protocol(psmouse, NULL);
input_register_device(&psmouse->dev);
printk(KERN_INFO "input: %s on %s\n", psmouse->devname, serio->phys);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
@ -762,6 +925,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
if (parent && parent->pt_activate)
parent->pt_activate(parent);
device_create_file(&serio->dev, &psmouse_attr_protocol);
device_create_file(&serio->dev, &psmouse_attr_rate);
device_create_file(&serio->dev, &psmouse_attr_resolution);
device_create_file(&serio->dev, &psmouse_attr_resetafter);
@ -771,10 +935,11 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
retval = 0;
out:
/* If this is a pass-through port the parent awaits to be activated */
/* If this is a pass-through port the parent needs to be re-activated */
if (parent)
psmouse_activate(parent);
up(&psmouse_sem);
return retval;
}
@ -791,6 +956,8 @@ static int psmouse_reconnect(struct serio *serio)
return -1;
}
down(&psmouse_sem);
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
psmouse_deactivate(parent);
@ -823,6 +990,7 @@ out:
if (parent)
psmouse_activate(parent);
up(&psmouse_sem);
return rc;
}
@ -893,26 +1061,109 @@ ssize_t psmouse_attr_set_helper(struct device *dev, const char *buf, size_t coun
if (serio->drv != &psmouse_drv) {
retval = -ENODEV;
goto out;
goto out_unpin;
}
retval = down_interruptible(&psmouse_sem);
if (retval)
goto out_unpin;
if (psmouse->state == PSMOUSE_IGNORE) {
retval = -ENODEV;
goto out_up;
}
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
psmouse_deactivate(parent);
}
psmouse_deactivate(psmouse);
retval = handler(psmouse, buf, count);
psmouse_activate(psmouse);
if (retval != -ENODEV)
psmouse_activate(psmouse);
if (parent)
psmouse_activate(parent);
out:
out_up:
up(&psmouse_sem);
out_unpin:
serio_unpin_driver(serio);
return retval;
}
static ssize_t psmouse_attr_show_protocol(struct psmouse *psmouse, char *buf)
{
return sprintf(buf, "%s\n", psmouse_protocol_by_type(psmouse->type)->name);
}
static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, const char *buf, size_t count)
{
struct serio *serio = psmouse->ps2dev.serio;
struct psmouse *parent = NULL;
struct psmouse_protocol *proto;
int retry = 0;
if (!(proto = psmouse_protocol_by_name(buf, count)))
return -EINVAL;
if (psmouse->type == proto->type)
return count;
while (serio->child) {
if (++retry > 3) {
printk(KERN_WARNING "psmouse: failed to destroy child port, protocol change aborted.\n");
return -EIO;
}
up(&psmouse_sem);
serio_unpin_driver(serio);
serio_unregister_child_port(serio);
serio_pin_driver_uninterruptible(serio);
down(&psmouse_sem);
if (serio->drv != &psmouse_drv)
return -ENODEV;
if (psmouse->type == proto->type)
return count; /* switched by other thread */
}
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
if (parent->pt_deactivate)
parent->pt_deactivate(parent);
}
if (psmouse->disconnect)
psmouse->disconnect(psmouse);
psmouse_set_state(psmouse, PSMOUSE_IGNORE);
input_unregister_device(&psmouse->dev);
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
if (psmouse_switch_protocol(psmouse, proto) < 0) {
psmouse_reset(psmouse);
/* default to PSMOUSE_PS2 */
psmouse_switch_protocol(psmouse, &psmouse_protocols[0]);
}
psmouse_initialize(psmouse);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
input_register_device(&psmouse->dev);
printk(KERN_INFO "input: %s on %s\n", psmouse->devname, serio->phys);
if (parent && parent->pt_activate)
parent->pt_activate(parent);
return count;
}
static ssize_t psmouse_attr_show_rate(struct psmouse *psmouse, char *buf)
{
return sprintf(buf, "%d\n", psmouse->rate);
@ -969,34 +1220,26 @@ static ssize_t psmouse_attr_set_resetafter(struct psmouse *psmouse, const char *
static int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
{
int i;
struct psmouse_protocol *proto;
if (!val)
return -EINVAL;
if (!strncmp(val, "any", 3)) {
*((unsigned int *)kp->arg) = -1U;
return 0;
}
proto = psmouse_protocol_by_name(val, strlen(val));
for (i = 0; i < ARRAY_SIZE(psmouse_proto_abbrev); i++) {
if (!psmouse_proto_abbrev[i])
continue;
if (!proto || !proto->maxproto)
return -EINVAL;
if (!strncmp(val, psmouse_proto_abbrev[i], strlen(psmouse_proto_abbrev[i]))) {
*((unsigned int *)kp->arg) = i;
return 0;
}
}
*((unsigned int *)kp->arg) = proto->type;
return -EINVAL;
return 0;
}
static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
{
return sprintf(buffer, "%s\n",
psmouse_max_proto < ARRAY_SIZE(psmouse_proto_abbrev) ?
psmouse_proto_abbrev[psmouse_max_proto] : "any");
int type = *((unsigned int *)kp->arg);
return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
}
static int __init psmouse_init(void)


@ -77,6 +77,8 @@ enum psmouse_type {
PSMOUSE_IMEX,
PSMOUSE_SYNAPTICS,
PSMOUSE_ALPS,
PSMOUSE_LIFEBOOK,
PSMOUSE_AUTO /* This one should always be last */
};
int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command);
@ -99,7 +101,7 @@ static ssize_t psmouse_do_set_##_name(struct device *d, struct device_attribute
{ \
return psmouse_attr_set_helper(d, b, s, psmouse_attr_set_##_name); \
} \
static struct device_attribute psmouse_attr_##_name = \
static struct device_attribute psmouse_attr_##_name = \
__ATTR(_name, S_IWUSR | S_IRUGO, \
psmouse_do_show_##_name, psmouse_do_set_##_name);


@ -59,7 +59,7 @@ static irqreturn_t rpcmouse_irq(int irq, void *dev_id, struct pt_regs *regs)
b = (short) (__raw_readl(0xe0310000) ^ 0x70);
dx = x - rpcmouse_lastx;
dy = y - rpcmouse_lasty;
dy = y - rpcmouse_lasty;
rpcmouse_lastx = x;
rpcmouse_lasty = y;


@ -1,7 +1,7 @@
/*
* Driver for DEC VSXXX-AA mouse (hockey-puck mouse, ball or two rollers)
* DEC VSXXX-GA mouse (rectangular mouse, with ball)
* DEC VSXXX-AB tablet (digitizer with hair cross or stylus)
* DEC VSXXX-GA mouse (rectangular mouse, with ball)
* DEC VSXXX-AB tablet (digitizer with hair cross or stylus)
*
* Copyright (C) 2003-2004 by Jan-Benedict Glaw <jbglaw@lug-owl.de>
*


@ -220,6 +220,7 @@ static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_h
struct mousedev_list *list;
struct mousedev_motion *p;
unsigned long flags;
int wake_readers = 0;
list_for_each_entry(list, &mousedev->list, node) {
spin_lock_irqsave(&list->packet_lock, flags);
@ -255,11 +256,14 @@ static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_h
spin_unlock_irqrestore(&list->packet_lock, flags);
if (list->ready)
if (list->ready) {
kill_fasync(&list->fasync, SIGIO, POLL_IN);
wake_readers = 1;
}
}
wake_up_interruptible(&mousedev->wait);
if (wake_readers)
wake_up_interruptible(&mousedev->wait);
}
static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)


@ -29,6 +29,7 @@ MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ps2_init);
EXPORT_SYMBOL(ps2_sendbyte);
EXPORT_SYMBOL(ps2_drain);
EXPORT_SYMBOL(ps2_command);
EXPORT_SYMBOL(ps2_schedule_command);
EXPORT_SYMBOL(ps2_handle_ack);
@ -45,11 +46,11 @@ struct ps2work {
/*
* ps2_sendbyte() sends a byte to the mouse, and waits for acknowledge.
* It doesn't handle retransmission, though it could - because when there would
* be need for retransmissions, the mouse has to be replaced anyway.
* ps2_sendbyte() sends a byte to the device and waits for acknowledge.
* It doesn't handle retransmission, though it could - because if there
* is a need for retransmissions device has to be replaced anyway.
*
* ps2_sendbyte() can only be called from a process context
* ps2_sendbyte() can only be called from a process context.
*/
int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout)
@ -71,6 +72,91 @@ int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout)
return -ps2dev->nak;
}
/*
* ps2_drain() waits for device to transmit requested number of bytes
* and discards them.
*/
void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout)
{
if (maxbytes > sizeof(ps2dev->cmdbuf)) {
WARN_ON(1);
maxbytes = sizeof(ps2dev->cmdbuf);
}
down(&ps2dev->cmd_sem);
serio_pause_rx(ps2dev->serio);
ps2dev->flags = PS2_FLAG_CMD;
ps2dev->cmdcnt = maxbytes;
serio_continue_rx(ps2dev->serio);
wait_event_timeout(ps2dev->wait,
!(ps2dev->flags & PS2_FLAG_CMD),
msecs_to_jiffies(timeout));
up(&ps2dev->cmd_sem);
}
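A hedged usage sketch for ps2_drain() (not part of the patch), mirroring the alps.c passthrough hunk earlier in this diff where a dummy ps2_command() read was replaced by a drain. The helper name is made up; PSMOUSE_CMD_SETSCALE11 comes from psmouse.h as used in that hunk, and the byte count and timeout are example values.

/* Illustrative only: send a command after which the device may emit a
 * few bytes we do not care about, then discard up to three of them,
 * waiting at most 100 ms. */
static int example_flush_reply(struct ps2dev *ps2dev)
{
	if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))
		return -1;

	ps2_drain(ps2dev, 3, 100);
	return 0;
}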
/*
* ps2_is_keyboard_id() checks received ID byte against the list of
* known keyboard IDs.
*/
static inline int ps2_is_keyboard_id(char id_byte)
{
static char keyboard_ids[] = {
0xab, /* Regular keyboards */
0xac, /* NCD Sun keyboard */
0x2b, /* Trust keyboard, translated */
0x5d, /* Trust keyboard */
0x60, /* NMB SGI keyboard, translated */
0x47, /* NMB SGI keyboard */
};
return memchr(keyboard_ids, id_byte, sizeof(keyboard_ids)) != NULL;
}
/*
* ps2_adjust_timeout() is called after receiving 1st byte of command
* response and tries to reduce remaining timeout to speed up command
* completion.
*/
static int ps2_adjust_timeout(struct ps2dev *ps2dev, int command, int timeout)
{
switch (command) {
case PS2_CMD_RESET_BAT:
/*
* Device has sent the first response byte after
* reset command, reset is thus done, so we can
* shorten the timeout.
* The next byte will come soon (keyboard) or not
* at all (mouse).
*/
if (timeout > msecs_to_jiffies(100))
timeout = msecs_to_jiffies(100);
break;
case PS2_CMD_GETID:
/*
* If device behind the port is not a keyboard there
* won't be 2nd byte of ID response.
*/
if (!ps2_is_keyboard_id(ps2dev->cmdbuf[1])) {
serio_pause_rx(ps2dev->serio);
ps2dev->flags = ps2dev->cmdcnt = 0;
serio_continue_rx(ps2dev->serio);
timeout = 0;
}
break;
default:
break;
}
return timeout;
}
/*
* ps2_command() sends a command and its parameters to the mouse,
* then waits for the response and puts it in the param array.
@ -86,6 +172,11 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
int rc = -1;
int i;
if (receive > sizeof(ps2dev->cmdbuf)) {
WARN_ON(1);
return -1;
}
down(&ps2dev->cmd_sem);
serio_pause_rx(ps2dev->serio);
@ -101,10 +192,9 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
* ACKing the reset command, and so it can take a long
* time before the ACK arrives.
*/
if (command & 0xff)
if (ps2_sendbyte(ps2dev, command & 0xff,
command == PS2_CMD_RESET_BAT ? 1000 : 200))
goto out;
if (ps2_sendbyte(ps2dev, command & 0xff,
command == PS2_CMD_RESET_BAT ? 1000 : 200))
goto out;
for (i = 0; i < send; i++)
if (ps2_sendbyte(ps2dev, param[i], 200))
@ -120,33 +210,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
if (ps2dev->cmdcnt && timeout > 0) {
if (command == PS2_CMD_RESET_BAT && timeout > msecs_to_jiffies(100)) {
/*
* Device has sent the first response byte
* after a reset command, reset is thus done,
* shorten the timeout. The next byte will come
* soon (keyboard) or not at all (mouse).
*/
timeout = msecs_to_jiffies(100);
}
if (command == PS2_CMD_GETID &&
ps2dev->cmdbuf[receive - 1] != 0xab && /* Regular keyboards */
ps2dev->cmdbuf[receive - 1] != 0xac && /* NCD Sun keyboard */
ps2dev->cmdbuf[receive - 1] != 0x2b && /* Trust keyboard, translated */
ps2dev->cmdbuf[receive - 1] != 0x5d && /* Trust keyboard */
ps2dev->cmdbuf[receive - 1] != 0x60 && /* NMB SGI keyboard, translated */
ps2dev->cmdbuf[receive - 1] != 0x47) { /* NMB SGI keyboard */
/*
* Device behind the port is not a keyboard
* so we don't need to wait for the 2nd byte
* of ID response.
*/
serio_pause_rx(ps2dev->serio);
ps2dev->flags = ps2dev->cmdcnt = 0;
serio_continue_rx(ps2dev->serio);
}
timeout = ps2_adjust_timeout(ps2dev, command, timeout);
wait_event_timeout(ps2dev->wait,
!(ps2dev->flags & PS2_FLAG_CMD), timeout);
}
@ -160,7 +224,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
rc = 0;
out:
out:
serio_pause_rx(ps2dev->serio);
ps2dev->flags = 0;
serio_continue_rx(ps2dev->serio);

Some files were not shown because too many files have changed in this diff Show More