From ef1fd2df85f5ea8ca8876a45d0ebb152d3a144d1 Mon Sep 17 00:00:00 2001
From: Prabhakar Kushwaha
Date: Thu, 31 Mar 2011 12:31:09 +0530
Subject: [PATCH 01/11] powerpc/85xx: Don't add disabled PCIe devices

PCIe nodes with the property status="disabled" are not usable, so avoid
adding a disabled PCIe bridge to the system.

Signed-off-by: Prabhakar Kushwaha
Signed-off-by: Kumar Gala
---
 arch/powerpc/sysdev/fsl_pci.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index f8f7f28c6343..68ca9290df94 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
 	struct resource rsrc;
 	const int *bus_range;
 
+	if (!of_device_is_available(dev)) {
+		pr_warning("%s: disabled\n", dev->full_name);
+		return -ENODEV;
+	}
+
 	pr_debug("Adding PCI host bridge %s\n", dev->full_name);
 
 	/* Fetch host bridge registers address */

From 07d9fce24d871785dbd25458469032fea73f17b8 Mon Sep 17 00:00:00 2001
From: Prabhakar Kushwaha
Date: Wed, 6 Apr 2011 12:56:29 +0530
Subject: [PATCH 02/11] powerpc: Check device status before adding serial device

Serial port nodes with the property status="disabled" are not usable, so
avoid adding a disabled port to the system.

Signed-off-by: Prabhakar Kushwaha
Signed-off-by: Kumar Gala
---
 arch/powerpc/kernel/legacy_serial.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c834757bebc0..2b97b80d6d7d 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
 		if (!parent)
 			continue;
 		if (of_match_node(legacy_serial_parents, parent) != NULL) {
-			index = add_legacy_soc_port(np, np);
-			if (index >= 0 && np == stdout)
-				legacy_serial_console = index;
+			if (of_device_is_available(np)) {
+				index = add_legacy_soc_port(np, np);
+				if (index >= 0 && np == stdout)
+					legacy_serial_console = index;
+			}
 		}
 		of_node_put(parent);
 	}

From 11ed0db9f6c7811233632d2ab79c50c011b89902 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Wed, 6 Apr 2011 00:11:06 -0500
Subject: [PATCH 03/11] powerpc/book3e: Fix CPU feature handling on 64-bit e5500

The CPU_FTRS_POSSIBLE and CPU_FTRS_ALWAYS defines did not encompass e5500
CPU features when built for 64-bit. This causes issues with
cpu_has_feature() as it utilizes the POSSIBLE & ALWAYS defines as part of
its check.

Create a unique CPU_FTRS_E5500 (as it's different from CPU_FTRS_E500MC),
create a new group for 64-bit Book3e based CPUs, and add CPU_FTRS_E5500 to
that group.
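
For reference, cpu_has_feature() folds these compile-time masks into its
test, roughly as in the (paraphrased) cputable.h helper below, so a feature
bit missing from CPU_FTRS_POSSIBLE is reported as absent even when the
running CPU sets it in cpu_features:

    static inline int cpu_has_feature(unsigned long feature)
    {
    	return (CPU_FTRS_ALWAYS & feature) ||
    	       (CPU_FTRS_POSSIBLE
    		& cur_cpu_spec->cpu_features
    		& feature);
    }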

Signed-off-by: Kumar Gala
---
 arch/powerpc/include/asm/cputable.h | 13 +++++++++++++
 arch/powerpc/kernel/cputable.c      |  2 +-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index be3cdf9134ce..f1fbf6092ed2 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -386,6 +386,9 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
 	    CPU_FTR_DBELL)
+#define CPU_FTRS_E5500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -435,11 +438,15 @@
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE	(CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_POSSIBLE	\
 	    (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
 	    CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
 	    CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
 	    CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
 #else
 enum {
 	CPU_FTRS_POSSIBLE =
@@ -473,16 +480,21 @@
 #endif
 #ifdef CONFIG_E500
 	    CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+	    CPU_FTRS_E5500 |
 #endif
 	    0,
 };
 #endif /* __powerpc64__ */
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS		(CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_ALWAYS	\
 	    (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
 	    CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
 	    CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
 #else
 enum {
 	CPU_FTRS_ALWAYS =
@@ -513,6 +525,7 @@
 #endif
 #ifdef CONFIG_E500
 	    CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+	    CPU_FTRS_E5500 &
 #endif
 	    CPU_FTRS_POSSIBLE,
 };
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c9b68d07ac4f..b9602ee06deb 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80240000,
 		.cpu_name		= "e5500",
-		.cpu_features		= CPU_FTRS_E500MC,
+		.cpu_features		= CPU_FTRS_E5500,
 		.cpu_user_features	= COMMON_USER_BOOKE,
 		.mmu_features		= MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
 			MMU_FTR_USE_TLBILX,

From d51ad91535b75c043f074f093ef913fe20ff2b5e Mon Sep 17 00:00:00 2001
From: Scott Wood
Date: Thu, 27 May 2010 17:35:12 -0500
Subject: [PATCH 04/11] powerpc/e500mc: Remove CPU_FTR_MAYBE_CAN_NAP/CPU_FTR_MAYBE_CAN_DOZE

e500mc does not support the HID0/MSR mechanism that is used by e500_idle
(and there are also issues with waking on certain types of interrupts).

Further, even if napping is never actually enabled, just having
CPU_FTR_CAN_NAP will cause machine_init() to overwrite the board's
supplied ppc_md.power_save().

We drop CPU_FTR_MAYBE_CAN_DOZE because we should use 'wait' instead on
e500mc.
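
To illustrate the overwrite path, the machine_init() logic looks roughly
like the sketch below (paraphrased, not part of this patch; the exact
guard in setup_32.c may differ): any CPU advertising doze/nap capability
gets the generic e500 idle loop installed over whatever the board set up.

    /* sketch of the relevant machine_init() check */
    if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
        cpu_has_feature(CPU_FTR_CAN_NAP))
    	ppc_md.power_save = e500_idle;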

Signed-off-by: Scott Wood
Signed-off-by: Kumar Gala
---
 arch/powerpc/include/asm/cputable.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f1fbf6092ed2..1833d1a07e79 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -382,8 +382,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_E500_2	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
 	    CPU_FTR_DBELL)
 #define CPU_FTRS_E5500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \

From e5462d16f76ad7a9156a82a97fbafba298da9ca6 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Fri, 8 Apr 2011 04:20:54 -0500
Subject: [PATCH 05/11] powerpc/85xx: disable Suspend support if SMP enabled

We currently don't have CPU Hotplug support working on 85xx, so we need to
disable Suspend support as it will force enabling of CPU Hotplug:

  arch/powerpc/kernel/built-in.o: In function `cpu_die':
  arch/powerpc/kernel/smp.c:702: undefined reference to `start_secondary_resume'
  make: *** [.tmp_vmlinux1] Error 1

Signed-off-by: Kumar Gala
---
 arch/powerpc/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b6ff882f695b..8f4d50b0adfa 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 	depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-		   PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+		   (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
 	bool

From 7c7a81b53e581d727d069cc45df5510516faac31 Mon Sep 17 00:00:00 2001
From: Paul Gortmaker
Date: Wed, 13 Apr 2011 06:30:08 +0000
Subject: [PATCH 06/11] powerpc/kexec: Fix regression causing compile failure on UP

Recent commit b987812b3fcaf70fdf0037589e5d2f5f2453e6ce caused a compile
failure on UP because a considerably large block of the file was included
within CONFIG_SMP, leaving a stub function unexposed on UP builds when it
needed to be.

Relocate the stub to the #else /* ! CONFIG_SMP */ section, and annotate the
relevant else/endif so that nobody else falls into the same trap I did.
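
Schematically, the intended layout after the move is (illustration only,
not the literal file contents):

    #ifdef CONFIG_SMP
    /* SMP implementations, including the PPC_STD_MMU_64-only
     * crash_kexec_wait_realmode() */
    #else	/* ! CONFIG_SMP */
    static inline void crash_kexec_wait_realmode(int cpu) {}
    /* remaining UP stubs */
    #endif	/* CONFIG_SMP */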

Reported-by: Michael Guntsche
Signed-off-by: Paul Gortmaker
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/crash.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d3d416339dd..5b5e1f002a8e 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
 static void crash_kexec_wait_realmode(int cpu)
 {
 	unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
 	}
 	mb();
 }
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif	/* CONFIG_PPC_STD_MMU_64 */
 
 /*
  * This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
 		crash_ipi_callback(regs);
 }
 
-#else
+#else	/* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
 	/*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
 	cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
+#endif	/* CONFIG_SMP */
 
 /*
  * Register a function to be called on shutdown. Only use this if you

From 127493d5dc73589cbe00ea5ec8357cc2a4c0d82a Mon Sep 17 00:00:00 2001
From: Nishanth Aravamudan
Date: Wed, 13 Apr 2011 19:45:59 +0000
Subject: [PATCH 07/11] powerpc/pseries: Use a kmem cache for DTL buffers

PAPR specifies that DTL buffers cannot cross AMS environments (aka CMO in
the PAPR) and cannot cross a memory entitlement granule boundary (4k).
This is found in section 14.11.3.2 H_REGISTER_VPA of the PAPR. kmalloc,
though, does not guarantee the alignment of the allocation beyond 8 bytes
(at least in my understanding). Create a special kmem cache for DTL
buffers with the required alignment.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/platforms/pseries/setup.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 000724149089..6c42cfde8415 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void)
 	int cpu, ret;
 	struct paca_struct *pp;
 	struct dtl_entry *dtl;
+	struct kmem_cache *dtl_cache;
 
 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
 		return 0;
 
+	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+				      DISPATCH_LOG_BYTES, 0, NULL);
+	if (!dtl_cache) {
+		pr_warn("Failed to create dispatch trace log buffer cache\n");
+		pr_warn("Stolen time statistics will be unreliable\n");
+		return 0;
+	}
+
 	for_each_possible_cpu(cpu) {
 		pp = &paca[cpu];
-		dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
-				   cpu_to_node(cpu));
+		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
 		if (!dtl) {
 			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
 				cpu);

From 84ffae55af79d7b8834fd0c08d0d1ebf2c77f91e Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Thu, 7 Apr 2011 21:44:21 +0000
Subject: [PATCH 08/11] powerpc: Fix oops if scan_dispatch_log is called too early

We currently enable interrupts before the dispatch log for the boot cpu is
set up. If a timer interrupt comes in early enough we oops in
scan_dispatch_log:

Unable to handle kernel paging request for data at address 0x00000010
...
.scan_dispatch_log+0xb0/0x170
.account_system_vtime+0xa0/0x220
.irq_enter+0x88/0xc0
.do_IRQ+0x48/0x230

The patch below adds a check to scan_dispatch_log to ensure the dispatch
log has been allocated.

Signed-off-by: Anton Blanchard
Cc:
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/time.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 375480c56eb9..f33acfd872ad 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
 	u64 stolen = 0;
 	u64 dtb;
 
+	if (!dtl)
+		return 0;
+
 	if (i == vpa->dtl_idx)
 		return 0;
 	while (i < vpa->dtl_idx) {

From 09597cfe93d3cc2c6e064a3ead5956b882511560 Mon Sep 17 00:00:00 2001
From: Stefan Roese
Date: Thu, 14 Apr 2011 23:49:53 +0000
Subject: [PATCH 09/11] powerpc: Don't write protect kernel text with CONFIG_DYNAMIC_FTRACE enabled

This problem was noticed on an MPC855T platform. Ftrace oopsed when trying
to write to the kernel text segment. Many thanks to Joakim for finding the
root cause of this problem.

Signed-off-by: Stefan Roese
Cc: Joakim Tjernlund
Cc: Benjamin Herrenschmidt
Cc: Steven Rostedt
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/pte-common.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 811f04ac3660..8d1569c29042 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  * on platforms where such control is possible.
  */
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-	defined(CONFIG_KPROBES)
+	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
 #define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
 #else
 #define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX

From 86c74ab317c1ef4d37325e0d7ca8a01a796b0bd7 Mon Sep 17 00:00:00 2001
From: Eric B Munson
Date: Fri, 15 Apr 2011 08:12:30 +0000
Subject: [PATCH 10/11] powerpc/perf_event: Skip updating kernel counters if register value shrinks

Because of speculative event roll back, it is possible for some event
counters to decrease between reads on POWER7. This causes a problem with
the way that counters are updated: delta values are calculated in a 64-bit
value and the top 32 bits are masked. If the register value has decreased,
this leaves us with a very large positive value added to the kernel
counters.

This patch protects against this by skipping the update if the delta would
be negative. This can lead to a lack of precision in the counter values,
but from my testing the value is typically fewer than 10 samples at a time.
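
As a hypothetical illustration of the failure mode (numbers invented for
this example, not taken from the patch), a counter that rolls back by 16
events turns into a roughly 4 billion increment once the 64-bit difference
is masked to 32 bits:

    u64 prev  = 0x1010;	/* last value we accumulated */
    u64 val   = 0x1000;	/* PMC reads back 16 lower after a roll back */
    u64 delta = (val - prev) & 0xfffffffful;	/* 0xfffffff0, ~4.29e9 */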

Signed-off-by: Eric B Munson
Cc: stable@kernel.org
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/perf_event.c | 37 ++++++++++++++++++++++------
 1 file changed, 30 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c4063b7f49a0..822f63008ae1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 	return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+	u64 delta = (val - prev) & 0xfffffffful;
+
+	/*
+	 * POWER7 can roll back counter values, if the new value is smaller
+	 * than the previous value it will cause the delta and the counter to
+	 * have bogus values unless we rolled a counter over. If a counter is
+	 * rolled back, it will be smaller, but within 256, which is the maximum
+	 * number of events to roll back at once. If we detect a rollback
+	 * return 0. This can lead to a small lack of precision in the
+	 * counters.
+	 */
+	if (prev > val && (prev - val) < 256)
+		delta = 0;
+
+	return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
 	s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
 		prev = local64_read(&event->hw.prev_count);
 		barrier();
 		val = read_pmc(event->hw.idx);
+		delta = check_and_compute_delta(prev, val);
+		if (!delta)
+			return;
 	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-	/* The counters are only 32 bits wide */
-	delta = (val - prev) & 0xfffffffful;
 	local64_add(delta, &event->count);
 	local64_sub(delta, &event->hw.period_left);
 }
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
 		prev = local64_read(&event->hw.prev_count);
 		event->hw.idx = 0;
-		delta = (val - prev) & 0xfffffffful;
-		local64_add(delta, &event->count);
+		delta = check_and_compute_delta(prev, val);
+		if (delta)
+			local64_add(delta, &event->count);
 	}
 }
 
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 				  unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
-	u64 val;
+	u64 val, prev;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
 		event = cpuhw->limited_counter[i];
 		event->hw.idx = cpuhw->limited_hwidx[i];
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
-		local64_set(&event->hw.prev_count, val);
+		prev = local64_read(&event->hw.prev_count);
+		if (check_and_compute_delta(prev, val))
+			local64_set(&event->hw.prev_count, val);
 		perf_event_update_userpage(event);
 	}
 }
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
 	/* we don't have to worry about interrupts here */
 	prev = local64_read(&event->hw.prev_count);
-	delta = (val - prev) & 0xfffffffful;
+	delta = check_and_compute_delta(prev, val);
 	local64_add(delta, &event->count);
 
 	/*

From 7b84b29b8c2711fe64e0dba4db22f02ce0f16015 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 18 Apr 2011 15:46:35 +1000
Subject: [PATCH 11/11] powerpc/powermac: Build fix with SMP and CPU hotplug

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/platforms/powermac/smp.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a830c5e80657..bc5f0dc6ae1e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 	mpic_setup_this_cpu();
 }
 
+#ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_core99_cpu_notify(struct notifier_block *self,
 				 unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
 
 static void __init smp_core99_bringup_done(void)
 {
-#ifdef CONFIG_PPC64
 	extern void g5_phy_disable_cpu1(void);
 
 	/* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
 		set_cpu_present(1, false);
 		g5_phy_disable_cpu1();
 	}
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_HOTPLUG_CPU
 	register_cpu_notifier(&smp_core99_cpu_nb);
 #endif
+
 	if (ppc_md.progress)
 		ppc_md.progress("smp_core99_bringup_done", 0x349);
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
 	.message_pass	= smp_mpic_message_pass,
 	.probe		= smp_core99_probe,
+#ifdef CONFIG_PPC64
 	.bringup_done	= smp_core99_bringup_done,
+#endif
 	.kick_cpu	= smp_core99_kick_cpu,
 	.setup_cpu	= smp_core99_setup_cpu,
 	.give_timebase	= smp_core99_give_timebase,