Merge branch 'core-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  do_generic_file_read: s/EINTR/EIO/ if lock_page_killable() fails
  softirq, warning fix: correct a format to avoid a warning
  softirqs, debug: preemption check
  x86, pci-hotplug, calgary / rio: fix EBDA ioremap()
  IO resources, x86: ioremap sanity check to catch mapping requests exceeding, fix
  IO resources, x86: ioremap sanity check to catch mapping requests exceeding the BAR sizes
  softlockup: Documentation/sysctl/kernel.txt: fix softlockup_thresh description
  dmi scan: warn about too early calls to dmi_check_system()
  generic: redefine resource_size_t as phys_addr_t
  generic: make PFN_PHYS explicitly return phys_addr_t
  generic: add phys_addr_t for holding physical addresses
  softirq: allocate less vectors
  IO resources: fix/remove printk
  printk: robustify printk, update comment
  printk: robustify printk, fix #2
  printk: robustify printk, fix
  printk: robustify printk

Fixed up conflicts in:
	arch/powerpc/include/asm/types.h
	arch/powerpc/platforms/Kconfig.cputype
manually.
Linus Torvalds 2008-10-16 15:17:40 -07:00
commit e533b22705
26 changed files with 144 additions and 68 deletions
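Taken together, the "generic:" patches in this pull move the phys_addr_t definition into common code, make resource_size_t an alias for it, and replace the old CONFIG_RESOURCES_64BIT tests in the converted files with either sizeof(resource_size_t) checks or the new CONFIG_PHYS_ADDR_T_64BIT, which is selected for 64BIT kernels or when an architecture sets ARCH_PHYS_ADDR_T_64BIT (x86 via X86_64 || X86_PAE, powerpc via PPC64 || PHYS_64BIT, as the hunks below show). A condensed sketch of the resulting type setup, paraphrased from the <linux/types.h> hunk further down rather than copied verbatim:

	#ifdef CONFIG_PHYS_ADDR_T_64BIT
	typedef u64 phys_addr_t;		/* physical addresses may exceed 32 bits */
	#else
	typedef u32 phys_addr_t;
	#endif

	typedef phys_addr_t resource_size_t;	/* resources now follow phys_addr_t */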


@@ -111,9 +111,9 @@ unsigned long __init setup_memory(void)
 		       initrd_start, INITRD_SIZE);
 	} else {
 		printk("initrd extends beyond end of memory "
-		       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+		       "(0x%08lx > 0x%08llx)\ndisabling initrd\n",
 		       INITRD_START + INITRD_SIZE,
-		       PFN_PHYS(max_low_pfn));
+		       (unsigned long long)PFN_PHYS(max_low_pfn));
 		initrd_start = 0;
 	}


@@ -22,6 +22,9 @@ config WORD_SIZE
 config PPC_MERGE
 	def_bool y
 
+config ARCH_PHYS_ADDR_T_64BIT
+	def_bool PPC64 || PHYS_64BIT
+
 config MMU
 	bool
 	default y


@@ -48,13 +48,6 @@ typedef struct {
 typedef __vector128 vector128;
 
-/* Physical address used by some IO functions */
-#if defined(CONFIG_PPC64) || defined(CONFIG_PHYS_64BIT)
-typedef u64 phys_addr_t;
-#else
-typedef u32 phys_addr_t;
-#endif
-
 #if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)
 typedef u64 dma_addr_t;
 #else


@@ -135,7 +135,6 @@ config PTE_64BIT
 config PHYS_64BIT
 	bool 'Large physical address support' if E500 || PPC_86xx
 	depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx
-	select RESOURCES_64BIT
 	---help---
 	  This option enables kernel support for larger than 32-bit physical
 	  addresses. This feature may not be available on all cores.


@@ -39,13 +39,10 @@ static int dma_offset_set;
 #define U64_TO_U32_LOW(val)	((u32)((val) & 0x00000000ffffffffULL))
 #define U64_TO_U32_HIGH(val)	((u32)((val) >> 32))
 
-#ifdef CONFIG_RESOURCES_64BIT
-#define RES_TO_U32_LOW(val)	U64_TO_U32_LOW(val)
-#define RES_TO_U32_HIGH(val)	U64_TO_U32_HIGH(val)
-#else
-#define RES_TO_U32_LOW(val)	(val)
-#define RES_TO_U32_HIGH(val)	(0)
-#endif
+#define RES_TO_U32_LOW(val)	\
+	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
+#define RES_TO_U32_HIGH(val)	\
+	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
 
 static inline int ppc440spe_revA(void)
 {
@@ -144,12 +141,11 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
 			/* Use that */
 			res->start = pci_addr;
-#ifndef CONFIG_RESOURCES_64BIT
 			/* Beware of 32 bits resources */
-			if ((pci_addr + size) > 0x100000000ull)
+			if (sizeof(resource_size_t) == sizeof(u32) &&
+			    (pci_addr + size) > 0x100000000ull)
 				res->end = 0xffffffff;
 			else
-#endif
 				res->end = res->start + size - 1;
 			break;
 		}


@@ -935,13 +935,15 @@ config HIGHMEM
 config X86_PAE
 	bool "PAE (Physical Address Extension) Support"
 	depends on X86_32 && !HIGHMEM4G
-	select RESOURCES_64BIT
 	help
 	  PAE is required for NX support, and furthermore enables
 	  larger swapspace support for non-overcommit purposes. It
 	  has the cost of more pagetable lookup overhead, and also
 	  consumes more pagetable space per process.
 
+config ARCH_PHYS_ADDR_T_64BIT
+	def_bool X86_64 || X86_PAE
+
 # Common NUMA Features
 config NUMA
 	bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"


@@ -1282,12 +1282,10 @@ void __init e820_reserve_resources(void)
 	e820_res = res;
 	for (i = 0; i < e820.nr_map; i++) {
 		end = e820.map[i].addr + e820.map[i].size - 1;
-#ifndef CONFIG_RESOURCES_64BIT
-		if (end > 0x100000000ULL) {
+		if (end != (resource_size_t)end) {
 			res++;
 			continue;
 		}
-#endif
 		res->name = e820_type_to_string(e820.map[i].type);
 		res->start = e820.map[i].addr;
 		res->end = end;
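The new test, if (end != (resource_size_t)end), replaces the old explicit 4GB comparison: when resource_size_t is only 32 bits wide, an e820 end address at or above 4GB does not survive the round-trip cast and the entry is skipped just as before, while with a 64-bit resource_size_t the comparison is never true and the branch can be optimized away. A minimal illustration with hypothetical values, assuming a 32-bit resource_size_t:

	/* stand-ins for an e820 end address and a u32 resource_size_t */
	unsigned long long end = 0x100000000ULL;	/* first byte above 4 GiB */
	unsigned int as_res = (unsigned int)end;	/* truncates to 0x0 */
	/* as_res != end, so this entry cannot be represented and is skipped */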


@@ -219,6 +219,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	if (is_ISA_range(phys_addr, last_addr))
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
+	/*
+	 * Check if the request spans more than any BAR in the iomem resource
+	 * tree.
+	 */
+	WARN_ON(iomem_map_sanity_check(phys_addr, size));
+
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */


@@ -15,6 +15,11 @@
  */
 static char dmi_empty_string[] = " ";
 
+/*
+ * Catch too early calls to dmi_check_system():
+ */
+static int dmi_initialized;
+
 static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
 {
 	const u8 *bp = ((u8 *) dm) + dm->length;
@@ -366,7 +371,7 @@ void __init dmi_scan_machine(void)
 
 	if (efi_enabled) {
 		if (efi.smbios == EFI_INVALID_TABLE_ADDR)
-			goto out;
+			goto error;
 
 		/* This is called as a core_initcall() because it isn't
 		 * needed during early boot. This also means we can
@@ -374,13 +379,13 @@ void __init dmi_scan_machine(void)
 		 */
 		p = dmi_ioremap(efi.smbios, 32);
 		if (p == NULL)
-			goto out;
+			goto error;
 
 		rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
 		dmi_iounmap(p, 32);
 		if (!rc) {
 			dmi_available = 1;
-			return;
+			goto out;
 		}
 	}
 	else {
@@ -391,19 +396,22 @@ void __init dmi_scan_machine(void)
 		 */
 		p = dmi_ioremap(0xF0000, 0x10000);
 		if (p == NULL)
-			goto out;
+			goto error;
 
 		for (q = p; q < p + 0x10000; q += 16) {
 			rc = dmi_present(q);
 			if (!rc) {
 				dmi_available = 1;
 				dmi_iounmap(p, 0x10000);
-				return;
+				goto out;
 			}
 		}
 		dmi_iounmap(p, 0x10000);
 	}
-out:	printk(KERN_INFO "DMI not present or invalid.\n");
+error:
+	printk(KERN_INFO "DMI not present or invalid.\n");
+out:
+	dmi_initialized = 1;
 }
@@ -424,6 +432,8 @@ int dmi_check_system(const struct dmi_system_id *list)
 	int i, count = 0;
 	const struct dmi_system_id *d = list;
 
+	WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n");
+
 	while (d->ident) {
 		for (i = 0; i < ARRAY_SIZE(d->matches); i++) {
 			int s = d->matches[i].slot;


@@ -276,7 +276,7 @@ int __init ibmphp_access_ebda (void)
 	iounmap (io_mem);
 	debug ("returned ebda segment: %x\n", ebda_seg);
 
-	io_mem = ioremap (ebda_seg<<4, 65000);
+	io_mem = ioremap(ebda_seg<<4, 1024);
 	if (!io_mem )
 		return -ENOMEM;
 	next_offset = 0x180;


@@ -378,11 +378,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
 	align = 0;
 	min_align = 0;
 	for (order = 0; order <= max_order; order++) {
-#ifdef CONFIG_RESOURCES_64BIT
-		resource_size_t align1 = 1ULL << (order + 20);
-#else
-		resource_size_t align1 = 1U << (order + 20);
-#endif
+		resource_size_t align1 = 1;
+
+		align1 <<= (order + 20);
+
 		if (!align)
 			min_align = align1;
 		else if (ALIGN(align + min_align, min_align) < align1)


@@ -39,7 +39,6 @@ typedef u64 pmdval_t;
 typedef u64 pudval_t;
 typedef u64 pgdval_t;
 typedef u64 pgprotval_t;
-typedef u64 phys_addr_t;
 
 typedef union {
 	struct {
@@ -60,7 +59,6 @@ typedef unsigned long pmdval_t;
 typedef unsigned long pudval_t;
 typedef unsigned long pgdval_t;
 typedef unsigned long pgprotval_t;
-typedef unsigned long phys_addr_t;
 
 typedef union {
 	pteval_t pte;


@@ -79,7 +79,6 @@ typedef unsigned long pmdval_t;
 typedef unsigned long pudval_t;
 typedef unsigned long pgdval_t;
 typedef unsigned long pgprotval_t;
-typedef unsigned long phys_addr_t;
 
 typedef struct page *pgtable_t;


@@ -76,13 +76,13 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
 static inline xmaddr_t phys_to_machine(xpaddr_t phys)
 {
 	unsigned offset = phys.paddr & ~PAGE_MASK;
-	return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
+	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
 }
 
 static inline xpaddr_t machine_to_phys(xmaddr_t machine)
 {
 	unsigned offset = machine.maddr & ~PAGE_MASK;
-	return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
+	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
 }
 
 /*


@@ -252,6 +252,8 @@ enum
 	HRTIMER_SOFTIRQ,
 #endif
 	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
+
+	NR_SOFTIRQS
 };
 
 /* softirq mask and active fields moved to irq_cpustat_t in


@@ -174,6 +174,7 @@ extern struct resource * __devm_request_region(struct device *dev,
 extern void __devm_release_region(struct device *dev, struct resource *parent,
 				  resource_size_t start, resource_size_t n);
 
+extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
 #endif /* __ASSEMBLY__ */
 #endif /* _LINUX_IOPORT_H */


@@ -214,6 +214,9 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
 		{ return false; }
 #endif
 
+extern int printk_needs_cpu(int cpu);
+extern void printk_tick(void);
+
 extern void asmlinkage __attribute__((format(printf, 1, 2)))
 	early_printk(const char *fmt, ...);


@@ -1,9 +1,13 @@
 #ifndef _LINUX_PFN_H_
 #define _LINUX_PFN_H_
 
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+
 #define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
 #define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
 #define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
-#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
+#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)
 
 #endif
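The explicit cast matters on 32-bit kernels that carry 64-bit physical addresses (x86 PAE, or the 44x/E500 PHYS_64BIT case above): without it, the pfn is shifted as a 32-bit unsigned long and the high bits are lost before the result ever reaches a 64-bit type. A small worked example with hypothetical values, assuming PAGE_SHIFT is 12 and a 64-bit phys_addr_t:

	unsigned long pfn = 0x100000;			/* page frame sitting at 4 GiB */
	unsigned long old_way = pfn << 12;		/* old PFN_PHYS: wraps to 0 on 32-bit */
	phys_addr_t new_way = (phys_addr_t)pfn << 12;	/* new PFN_PHYS: 0x100000000 */

This is also what lets the phys_to_machine()/machine_to_phys() hunk above drop its open-coded (u64) casts around PFN_PHYS().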


@@ -191,12 +191,14 @@ typedef __u32 __bitwise __wsum;
 #ifdef __KERNEL__
 typedef unsigned __bitwise__ gfp_t;
 
-#ifdef CONFIG_RESOURCES_64BIT
-typedef u64 resource_size_t;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+typedef u64 phys_addr_t;
 #else
-typedef u32 resource_size_t;
+typedef u32 phys_addr_t;
 #endif
 
+typedef phys_addr_t resource_size_t;
+
 struct ustat {
 	__kernel_daddr_t	f_tfree;
 	__kernel_ino_t		f_tinode;


@@ -577,9 +577,6 @@ static int have_callable_console(void)
  * @fmt: format string
  *
  * This is printk(). It can be called from any context. We want it to work.
- * Be aware of the fact that if oops_in_progress is not set, we might try to
- * wake klogd up which could deadlock on runqueue lock if printk() is called
- * from scheduler code.
  *
  * We try to grab the console_sem. If we succeed, it's easy - we log the output and
  * call the console drivers. If we fail to get the semaphore we place the output
@@ -984,10 +981,25 @@ int is_console_locked(void)
 	return console_locked;
 }
 
+static DEFINE_PER_CPU(int, printk_pending);
+
+void printk_tick(void)
+{
+	if (__get_cpu_var(printk_pending)) {
+		__get_cpu_var(printk_pending) = 0;
+		wake_up_interruptible(&log_wait);
+	}
+}
+
+int printk_needs_cpu(int cpu)
+{
+	return per_cpu(printk_pending, cpu);
+}
+
 void wake_up_klogd(void)
 {
-	if (!oops_in_progress && waitqueue_active(&log_wait))
-		wake_up_interruptible(&log_wait);
+	if (waitqueue_active(&log_wait))
+		__raw_get_cpu_var(printk_pending) = 1;
 }
 
 /**


@@ -38,10 +38,6 @@ EXPORT_SYMBOL(iomem_resource);
 
 static DEFINE_RWLOCK(resource_lock);
 
-#ifdef CONFIG_PROC_FS
-
-enum { MAX_IORES_LEVEL = 5 };
-
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct resource *p = v;
@@ -53,6 +49,10 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 	return p->sibling;
 }
 
+#ifdef CONFIG_PROC_FS
+
+enum { MAX_IORES_LEVEL = 5 };
+
 static void *r_start(struct seq_file *m, loff_t *pos)
 	__acquires(resource_lock)
 {
@@ -549,13 +549,9 @@ static void __init __reserve_region_with_split(struct resource *root,
 	}
 
 	if (!res) {
-		printk(KERN_DEBUG " __reserve_region_with_split: (%s) [%llx, %llx], res: (%s) [%llx, %llx]\n",
-			conflict->name, conflict->start, conflict->end,
-			name, start, end);
 		/* failed, split and try again */
-		/* conflict coverred whole area */
+		/* conflict covered whole area */
 		if (conflict->start <= start && conflict->end >= end)
 			return;
@@ -832,3 +828,40 @@ static int __init reserve_setup(char *str)
 }
 
 __setup("reserve=", reserve_setup);
+
+/*
+ * Check if the requested addr and size spans more than any slot in the
+ * iomem resource tree.
+ */
+int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
+{
+	struct resource *p = &iomem_resource;
+	int err = 0;
+	loff_t l;
+
+	read_lock(&resource_lock);
+	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+		/*
+		 * We can probably skip the resources without
+		 * IORESOURCE_IO attribute?
+		 */
+		if (p->start >= addr + size)
+			continue;
+		if (p->end < addr)
+			continue;
+		if (p->start <= addr && (p->end >= addr + size - 1))
+			continue;
+		printk(KERN_WARNING "resource map sanity check conflict: "
+		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
+		       (unsigned long long)addr,
+		       (unsigned long long)(addr + size - 1),
+		       (unsigned long long)p->start,
+		       (unsigned long long)p->end,
+		       p->name);
+		err = -1;
+		break;
+	}
+	read_unlock(&resource_lock);
+
+	return err;
+}


@@ -46,7 +46,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 EXPORT_SYMBOL(irq_stat);
 #endif
 
-static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
@@ -205,7 +205,18 @@ restart:
 
 	do {
 		if (pending & 1) {
+			int prev_count = preempt_count();
+
 			h->action(h);
+
+			if (unlikely(prev_count != preempt_count())) {
+				printk(KERN_ERR "huh, entered softirq %d %p"
+				       "with preempt_count %08x,"
+				       " exited with %08x?\n", h - softirq_vec,
+				       h->action, prev_count, preempt_count());
+				preempt_count() = prev_count;
+			}
+
 			rcu_bh_qsctr_inc(cpu);
 		}
 		h++;
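The new check snapshots preempt_count() before calling the handler and complains (then forcibly restores the count) if the handler returns with a different value, which previously corrupted preemption state silently for whatever ran next. Purely for illustration, a hypothetical handler like the one below would trip the warning; the lock is made up and not part of this commit:

	/* illustration only: a handler that forgets to release a lock */
	static DEFINE_SPINLOCK(demo_lock);

	static void broken_softirq_action(struct softirq_action *h)
	{
		spin_lock(&demo_lock);	/* raises preempt_count */
		/* ... work ..., but no spin_unlock(&demo_lock) */
	}				/* exits with preempt_count != prev_count */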


@@ -270,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	next_jiffies = get_next_timer_interrupt(last_jiffies);
 	delta_jiffies = next_jiffies - last_jiffies;
 
-	if (rcu_needs_cpu(cpu))
+	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
 		delta_jiffies = 1;
 	/*
 	 * Do not stop the tick, if we are only one off


@@ -978,6 +978,7 @@ void update_process_times(int user_tick)
 	run_local_timers();
 	if (rcu_pending(cpu))
 		rcu_check_callbacks(cpu, user_tick);
+	printk_tick();
 	scheduler_tick();
 	run_posix_cpu_timers(p);
 }


@@ -187,6 +187,9 @@ config RESOURCES_64BIT
 	help
 	  This option allows memory and IO resources to be 64 bit.
 
+config PHYS_ADDR_T_64BIT
+	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
+
 config ZONE_DMA_FLAG
 	int
 	default "0" if !ZONE_DMA


@@ -1100,8 +1100,9 @@ page_ok:
 
 page_not_up_to_date:
 	/* Get exclusive access to the page ... */
-	if (lock_page_killable(page))
-		goto readpage_eio;
+	error = lock_page_killable(page);
+	if (unlikely(error))
+		goto readpage_error;
 
 page_not_up_to_date_locked:
 	/* Did it get truncated before we got the lock? */
@@ -1130,8 +1131,9 @@ readpage:
 	}
 
 	if (!PageUptodate(page)) {
-		if (lock_page_killable(page))
-			goto readpage_eio;
+		error = lock_page_killable(page);
+		if (unlikely(error))
+			goto readpage_error;
 		if (!PageUptodate(page)) {
 			if (page->mapping == NULL) {
 				/*
@@ -1143,15 +1145,14 @@ readpage:
 			}
 			unlock_page(page);
 			shrink_readahead_size_eio(filp, ra);
-			goto readpage_eio;
+			error = -EIO;
+			goto readpage_error;
 		}
 		unlock_page(page);
 	}
 
 	goto page_ok;
 
-readpage_eio:
-	error = -EIO;
 readpage_error:
 	/* UHHUH! A synchronous read error occurred. Report it */
 	desc->error = error;