Merge branch 'parisc-for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
 "The PA-RISC updates for v3.11 include a gcc miscompilation fix,
  gzip-compressed vmlinuz support, a fix in the PCI code for ATI FireGL
  support on c8000 machines, a fix to prevent %sr1 from being clobbered,
  and a few smaller optimizations and documentation updates"

* 'parisc-for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Fix gcc miscompilation in pa_memcpy()
  parisc: Ensure volatile space register %sr1 is not clobbered
  parisc: optimize mtsp(0,sr) inline assembly
  parisc: switch to gzip-compressed vmlinuz kernel
  parisc: document the shadow registers
  parisc: more capabilities info in /proc/cpuinfo
  parisc: fix LMMIO mismatch between PAT length and MASK register
Linus Torvalds 2013-07-10 10:10:02 -07:00
commit b247759642
10 changed files with 158 additions and 49 deletions

View File: Documentation/parisc/registers

@@ -77,6 +77,14 @@ PSW default E value 0
Shadow Registers used by interruption handler code
TOC enable bit 1
=========================================================================
The PA-RISC architecture defines 7 registers as "shadow registers".
They are used by the RETURN FROM INTERRUPTION AND RESTORE instruction to
reduce state save and restore time by eliminating the need for general register
(GR) saves and restores in interruption handlers.
Shadow registers are the GRs 1, 8, 9, 16, 17, 24, and 25.
=========================================================================
Register usage notes, originally from John Marvin, with some additional
notes from Randolph Chung.

View File: arch/parisc/Makefile

@@ -17,6 +17,8 @@
# Mike Shaver, Helge Deller and Martin K. Petersen
#
KBUILD_IMAGE := vmlinuz
KBUILD_DEFCONFIG := default_defconfig
NM = sh $(srctree)/arch/parisc/nm
@@ -92,7 +94,7 @@ PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \
else echo $(obj)/palo.conf; \
fi)
palo: vmlinux
palo: vmlinuz
@if test ! -x "$(PALO)"; then \
echo 'ERROR: Please install palo first (apt-get install palo)';\
echo 'or build it from source and install it somewhere in your $$PATH';\
@@ -107,10 +109,14 @@ palo: vmlinux
fi
$(PALO) -f $(PALOCONF)
# Shorthands for known targets not supported by parisc, use vmlinux as default
Image zImage bzImage: vmlinux
# Shorthands for known targets not supported by parisc, use vmlinux/vmlinuz as default
Image: vmlinux
zImage bzImage: vmlinuz
install: vmlinux
vmlinuz: vmlinux
@gzip -cf -9 $< > $@
install: vmlinuz
sh $(src)/arch/parisc/install.sh \
$(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
@@ -119,6 +125,7 @@ MRPROPER_FILES += palo.conf
define archhelp
@echo '* vmlinux - Uncompressed kernel image (./vmlinux)'
@echo ' vmlinuz - Compressed kernel image (./vmlinuz)'
@echo ' palo - Bootable image (./lifimage)'
@echo ' install - Install kernel using'
@echo ' (your) ~/bin/$(INSTALLKERNEL) or'
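The new vmlinuz target is nothing more than a gzip -9 of the uncompressed
vmlinux, which palo can then boot directly. Below is a minimal userspace
sketch of that single step, assuming zlib is available (link with -lz); it
is illustrative only and not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <zlib.h>

/* Sketch of the Makefile rule "vmlinuz: vmlinux" with its
 * "@gzip -cf -9 $< > $@" recipe, expressed with zlib's gzip file API. */
int main(void)
{
    FILE *in = fopen("vmlinux", "rb");
    gzFile out = gzopen("vmlinuz", "wb9");  /* "9" = gzip -9 */
    char buf[65536];
    size_t n;

    if (!in || !out)
        return EXIT_FAILURE;
    while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
        gzwrite(out, buf, (unsigned)n);     /* deflate with gzip framing */
    fclose(in);
    gzclose(out);                           /* writes the gzip trailer */
    return EXIT_SUCCESS;
}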

View File: arch/parisc/defpalo.conf

@@ -4,7 +4,7 @@
# Most people using 'make palo' want a bootable file, usable for
# network or tape booting for example.
--init-tape=lifimage
--recoverykernel=vmlinux
--recoverykernel=vmlinuz
########## Pick your ROOT here! ##########
# You need at least one 'root='!
@@ -12,10 +12,10 @@
# If you want a root ramdisk, use the next 2 lines
# (Edit the ramdisk image name!!!!)
--ramdisk=ram-disk-image-file
--commandline=0/vmlinux HOME=/ root=/dev/ram initrd=0/ramdisk
--commandline=0/vmlinuz HOME=/ root=/dev/ram initrd=0/ramdisk panic_timeout=60 panic=-1
# If you want NFS root, use the following command line (Edit the HOSTNAME!!!)
#--commandline=0/vmlinux HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp
#--commandline=0/vmlinuz HOME=/ root=/dev/nfs nfsroot=HOSTNAME ip=bootp
# If you have root on a disk partition, use this (Edit the partition name!!!)
#--commandline=0/vmlinux HOME=/ root=/dev/sda1
#--commandline=0/vmlinuz HOME=/ root=/dev/sda1

View File: arch/parisc/include/asm/special_insns.h

@@ -32,9 +32,12 @@ static inline void set_eiem(unsigned long val)
cr; \
})
#define mtsp(gr, cr) \
__asm__ __volatile__("mtsp %0,%1" \
#define mtsp(val, cr) \
{ if (__builtin_constant_p(val) && ((val) == 0)) \
__asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \
else \
__asm__ __volatile__("mtsp %0,%1" \
: /* no outputs */ \
: "r" (gr), "i" (cr) : "memory")
: "r" (val), "i" (cr) : "memory"); }
#endif /* __PARISC_SPECIAL_INSNS_H */
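The rewritten mtsp() uses __builtin_constant_p() to emit "mtsp %r0,sr" when
the value is a compile-time constant zero: %r0 is hardwired to zero on
PA-RISC, so no general register has to be loaded first. A stand-alone sketch
of the same compile-time dispatch pattern follows; set_reg() and its messages
are invented for illustration.

#include <stdio.h>

/* The test is evaluated at compile time, so the untaken branch is
 * discarded entirely; this mirrors the mtsp() macro above. */
#define set_reg(val)                                            \
    do {                                                        \
        if (__builtin_constant_p(val) && (val) == 0)            \
            puts("fast path: source is %r0, no GR load");       \
        else                                                    \
            printf("general path: move %d into a GR first\n",   \
                   (int)(val));                                 \
    } while (0)

int main(void)
{
    int x = 5;

    set_reg(0);     /* constant zero: compiler picks the fast path */
    set_reg(x);     /* runtime value: general path */
    return 0;
}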

View File: arch/parisc/include/asm/tlbflush.h

@@ -63,13 +63,14 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
unsigned long flags;
unsigned long flags, sid;
/* For one page, it's not worth testing the split_tlb variable */
mb();
mtsp(vma->vm_mm->context,1);
sid = vma->vm_mm->context;
purge_tlb_start(flags);
mtsp(sid, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
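The fix here is ordering: %sr1 is a scratch space register that other code
paths may also write, so the mtsp() that loads it now sits inside the
purge_tlb_start()/purge_tlb_end() critical section, immediately before the
pdtlb/pitlb that consume it (the cache.c hunk further below makes the same
change). A toy userspace model of the invariant, with a global standing in
for %sr1 and a mutex for the TLB lock; all names are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tlb_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long sr1;   /* stand-in for the space register */

static void purge_one_page(unsigned long sid, unsigned long addr)
{
    pthread_mutex_lock(&tlb_lock);    /* purge_tlb_start(flags) */
    sr1 = sid;                        /* mtsp(sid, 1): nothing can
                                       * clobber sr1 past this point */
    printf("purge: sr1=%lu addr=%#lx\n", sr1, addr); /* pdtlb/pitlb */
    pthread_mutex_unlock(&tlb_lock);  /* purge_tlb_end(flags) */
}

int main(void)
{
    purge_one_page(3, 0x1000);
    return 0;
}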

View File: arch/parisc/install.sh

@@ -26,13 +26,13 @@ if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install
if [ -f $4/vmlinux ]; then
mv $4/vmlinux $4/vmlinux.old
if [ -f $4/vmlinuz ]; then
mv $4/vmlinuz $4/vmlinuz.old
fi
if [ -f $4/System.map ]; then
mv $4/System.map $4/System.old
fi
cat $2 > $4/vmlinux
cat $2 > $4/vmlinuz
cp $3 $4/System.map

View File: arch/parisc/kernel/cache.c

@@ -440,8 +440,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
else {
unsigned long flags;
mtsp(sid, 1);
purge_tlb_start(flags);
mtsp(sid, 1);
if (split_tlb) {
while (npages--) {
pdtlb(start);

View File: arch/parisc/kernel/processor.c

@@ -371,10 +371,23 @@ show_cpuinfo (struct seq_file *m, void *v)
seq_printf(m, "capabilities\t:");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
seq_printf(m, " os32");
seq_puts(m, " os32");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
seq_printf(m, " os64");
seq_printf(m, "\n");
seq_puts(m, " os64");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
seq_puts(m, " iopdir_fdc");
switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
case PDC_MODEL_NVA_SUPPORTED:
seq_puts(m, " nva_supported");
break;
case PDC_MODEL_NVA_SLOW:
seq_puts(m, " nva_slow");
break;
case PDC_MODEL_NVA_UNSUPPORTED:
seq_puts(m, " needs_equivalent_aliasing");
break;
}
seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",

View File: arch/parisc/lib/memcpy.c

@@ -2,6 +2,7 @@
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
* Copyright (C) 2013 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -153,17 +154,21 @@ static inline void prefetch_dst(const void *addr)
#define prefetch_dst(addr) do { } while(0)
#endif
#define PA_MEMCPY_OK 0
#define PA_MEMCPY_LOAD_ERROR 1
#define PA_MEMCPY_STORE_ERROR 2
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
* per loop. This code is derived from glibc.
*/
static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len)
static inline unsigned long copy_dstaligned(unsigned long dst,
unsigned long src, unsigned long len)
{
/* gcc complains that a2 and a3 may be uninitialized, but actually
* they cannot be. Initialize a2/a3 to shut gcc up.
*/
register unsigned int a0, a1, a2 = 0, a3 = 0;
int sh_1, sh_2;
struct exception_data *d;
/* prefetch_src((const void *)src); */
@@ -197,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src
goto do2;
case 0:
if (len == 0)
return 0;
return PA_MEMCPY_OK;
/* a3 = ((unsigned int *) src)[0];
a0 = ((unsigned int *) src)[1]; */
ldw(s_space, 0, src, a3, cda_ldw_exc);
@@ -256,42 +261,35 @@ do0:
preserve_branch(handle_load_error);
preserve_branch(handle_store_error);
return 0;
return PA_MEMCPY_OK;
handle_load_error:
__asm__ __volatile__ ("cda_ldw_exc:\n");
d = &__get_cpu_var(exception_data);
DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
return o_len * 4 - d->fault_addr + o_src;
return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("cda_stw_exc:\n");
d = &__get_cpu_var(exception_data);
DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
return o_len * 4 - d->fault_addr + o_dst;
return PA_MEMCPY_STORE_ERROR;
}
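For reference, the shift-merge technique copy_dstaligned() implements: load
aligned words straddling the misaligned source and splice adjacent pairs with
shifts so every store to dst is word-aligned. A little-endian userspace
sketch of the core loop (the kernel version is big-endian, unrolls four words
per iteration, and adds the fault handling above; all names here are
invented).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Little-endian toy version of the shift-merge loop (requires
 * 0 < off < 4, i.e. a genuinely misaligned source). */
static void shift_copy(uint32_t *dst, const uint8_t *src,
                       size_t words, unsigned off)
{
    const uint32_t *ws = (const uint32_t *)(src - off); /* align down */
    unsigned sh1 = 8 * off;     /* bits consumed from the low word */
    unsigned sh2 = 32 - sh1;    /* bits taken from the next word   */
    uint32_t lo = *ws++;

    while (words--) {
        uint32_t hi = *ws++;
        *dst++ = (lo >> sh1) | (hi << sh2); /* aligned store to dst */
        lo = hi;
    }
}

int main(void)
{
    uint32_t src_buf[6] = { 0 }, dst[4];

    /* place 16 bytes at a 1-byte-misaligned address inside src_buf */
    memcpy((uint8_t *)src_buf + 1, "0123456789abcdef", 16);
    shift_copy(dst, (uint8_t *)src_buf + 1, 4, 1);
    printf("%.16s\n", (const char *)dst); /* 0123456789abcdef */
    return 0;
}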
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
 * In case of an access fault, the faulting address can be read from the
 * per-cpu exception data struct. */
static unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
register unsigned char *pcs, *pcd;
register unsigned int *pws, *pwd;
register double *pds, *pdd;
unsigned long ret = 0;
unsigned long o_dst, o_src, o_len;
struct exception_data *d;
unsigned long ret;
src = (unsigned long)srcp;
dst = (unsigned long)dstp;
pcs = (unsigned char *)srcp;
pcd = (unsigned char *)dstp;
o_dst = dst; o_src = src; o_len = len;
/* prefetch_src((const void *)srcp); */
if (len < THRESHOLD)
@@ -401,7 +399,7 @@ byte_copy:
len--;
}
return 0;
return PA_MEMCPY_OK;
unaligned_copy:
/* possibly we are aligned on a word, but not on a double... */
@@ -438,8 +436,7 @@ unaligned_copy:
src = (unsigned long)pcs;
}
ret = copy_dstaligned(dst, src, len / sizeof(unsigned int),
o_dst, o_src, o_len);
ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
if (ret)
return ret;
@@ -454,17 +451,41 @@
handle_load_error:
__asm__ __volatile__ ("pmc_load_exc:\n");
d = &__get_cpu_var(exception_data);
DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
return o_len - d->fault_addr + o_src;
return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("pmc_store_exc:\n");
return PA_MEMCPY_STORE_ERROR;
}
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
{
unsigned long ret, fault_addr, reference;
struct exception_data *d;
ret = pa_memcpy_internal(dstp, srcp, len);
if (likely(ret == PA_MEMCPY_OK))
return 0;
/* if a load or store fault occurred we can get the faulty addr */
d = &__get_cpu_var(exception_data);
DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
return o_len - d->fault_addr + o_dst;
fault_addr = d->fault_addr;
/* error in load or store? */
if (ret == PA_MEMCPY_LOAD_ERROR)
reference = (unsigned long) srcp;
else
reference = (unsigned long) dstp;
DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
ret, len, fault_addr, reference);
if (fault_addr >= reference)
return len - (fault_addr - reference);
else
return len;
}
#ifdef __KERNEL__
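pa_memcpy() keeps its old contract of returning the number of bytes not
transferred, but now derives it from the faulting address reported in the
per-cpu exception data. A worked sketch of just that arithmetic, with
invented numbers.

#include <stdio.h>

/* Bytes NOT copied, given where the fault hit. `reference` is srcp for
 * a load fault and dstp for a store fault, as in pa_memcpy() above. */
static unsigned long bytes_left(unsigned long len, unsigned long fault_addr,
                                unsigned long reference)
{
    if (fault_addr >= reference)
        return len - (fault_addr - reference);
    return len; /* fault before the buffer: nothing counts as copied */
}

int main(void)
{
    /* invented example: 128-byte copy, load fault 96 bytes in */
    unsigned long src = 0x1000;

    printf("%lu bytes left\n", bytes_left(128, src + 96, src)); /* 32 */
    return 0;
}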

View File: drivers/parisc/lba_pci.c

@@ -613,6 +613,54 @@ truncate_pat_collision(struct resource *root, struct resource *new)
return 0; /* truncation successful */
}
/*
* extend_lmmio_len: extend lmmio range to maximum length
*
* This is needed at least on C8000 systems to get the ATI FireGL card
* working. On other systems we will currently not extend the lmmio space.
*/
static unsigned long
extend_lmmio_len(unsigned long start, unsigned long end, unsigned long lba_len)
{
struct resource *tmp;
pr_debug("LMMIO mismatch: PAT length = 0x%lx, MASK register = 0x%lx\n",
end - start, lba_len);
lba_len = min(lba_len+1, 256UL*1024*1024); /* limit to 256 MB */
pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - original\n", start, end);
if (boot_cpu_data.cpu_type < mako) {
pr_info("LBA: Not a C8000 system - not extending LMMIO range.\n");
return end;
}
end += lba_len;
if (end < start) /* fix overflow */
end = -1ULL;
pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - current\n", start, end);
/* first overlap */
for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
pr_debug("LBA: testing %pR\n", tmp);
if (tmp->start == start)
continue; /* ignore ourself */
if (tmp->end < start)
continue;
if (tmp->start > end)
continue;
if (end >= tmp->start)
end = tmp->start - 1;
}
pr_info("LBA: lmmio_space [0x%lx-0x%lx] - new\n", start, end);
/* return new end */
return end;
}
#else
#define truncate_pat_collision(r,n) (0)
#endif
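extend_lmmio_len() grows the PAT-reported range toward the length implied by
the LBA MASK register (capped at 256 MB), then walks iomem_resource and pulls
the new end back so it stops just before the first other resource it would
overlap. A toy version of that trim over a plain interval array; the layout
and numbers are invented.

#include <stdio.h>

/* Invented stand-in for struct resource: just an interval. */
struct res { unsigned long start, end; };

static unsigned long trim_end(unsigned long start, unsigned long end,
                              const struct res *all, int n)
{
    for (int i = 0; i < n; i++) {
        const struct res *tmp = &all[i];

        if (tmp->start == start)                  /* ignore ourself */
            continue;
        if (tmp->end < start || tmp->start > end) /* no overlap     */
            continue;
        if (end >= tmp->start)
            end = tmp->start - 1;                 /* stop before it */
    }
    return end;
}

int main(void)
{
    /* invented layout: our range starts at 0xf8000000 and wants to
     * grow to 0xffffffff, but another resource sits at 0xfc000000 */
    struct res others[] = { { 0xfc000000UL, 0xfcffffffUL } };
    unsigned long end = trim_end(0xf8000000UL, 0xffffffffUL, others, 1);

    printf("new end = 0x%lx\n", end); /* 0xfbffffff */
    return 0;
}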
@@ -994,6 +1042,14 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
case PAT_LMMIO:
/* used to fix up pre-initialized MEM BARs */
if (!lba_dev->hba.lmmio_space.flags) {
unsigned long lba_len;
lba_len = ~READ_REG32(lba_dev->hba.base_addr
+ LBA_LMMIO_MASK);
if ((p->end - p->start) != lba_len)
p->end = extend_lmmio_len(p->start,
p->end, lba_len);
sprintf(lba_dev->hba.lmmio_name,
"PCI%02x LMMIO",
(int)lba_dev->hba.bus_num.start);