arm64 fixes:

- Fix icache/dcache sync for anonymous pages under migration
- Correct the ASID limit check
- Fix parallel builds of Image and Image.gz
- Refuse to hibernate when we have CPUs that we can't offline

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJXasPyAAoJELescNyEwWM0nYAIAJhcPoeaSEgnVGfnh4gAup/F
Wu8JLRaibaGGnLxF7Lt00N4+oe/oIi1SIJrPAe7YwzxpLcChP/SvaOBNnIa/PUm1
QC7EuYtDXJnzj483k3Iu5+XXKX5iSdzM1F3YLmFnV1IeScCDCAmSqDCwJ5mXpAOj
xFvNvI8P7WAOCKD32kiahm/38lwDgMkIY/DQq6+7li6ZMrDk5W3b6NP+8Og2D3qE
mRb/uNLZ3hBe5bDYGyqiBAwHEAmB9u7kFydh2g4gq1IKy17QjEXv4U7hhsW0RyEP
VU0ha+fQKIcFMjx2FvMPUuzejoY0SiynF0Z4K48xkhaQQQUxkWIudmLUrFvv5YM=
=ABKe
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Here are a few more arm64 fixes, but things do finally appear to be
  slowing down. The main fix is avoiding hibernation in a previously
  unanticipated situation where we have CPUs parked in the kernel, but
  it's all good stuff.

   - Fix icache/dcache sync for anonymous pages under migration
   - Correct the ASID limit check
   - Fix parallel builds of Image and Image.gz
   - Refuse to hibernate when we have CPUs that we can't offline"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hibernate: Don't hibernate on systems with stuck CPUs
  arm64: smp: Add function to determine if cpus are stuck in the kernel
  arm64: mm: remove page_mapping check in __sync_icache_dcache
  arm64: fix boot image dependencies to not generate invalid images
  arm64: update ASID limit
commit d05be0d7e8
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -95,7 +95,7 @@ boot := arch/arm64/boot
 Image: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
-Image.%: vmlinux
+Image.%: Image
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install:
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
 	cpu_park_loop();
 }
 
+/*
+ * If a secondary CPU enters the kernel but fails to come online,
+ * (e.g. due to mismatched features), and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiesecent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* ifndef __ASM_SMP_H */
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sections.h>
+#include <asm/smp.h>
 #include <asm/suspend.h>
 #include <asm/virt.h>
 
@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
 	unsigned long flags;
 	struct sleep_stack_data state;
 
+	if (cpus_are_stuck_in_kernel()) {
+		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+		return -EBUSY;
+	}
+
 	local_dbg_save(flags);
 
 	if (__cpu_suspend_enter(&state)) {
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	int any_cpu = raw_smp_processor_id();
+
+	if (cpu_ops[any_cpu]->cpu_die)
+		return true;
+#endif
+	return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+	return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
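To make the logic of the new helper explicit, here is a minimal standalone sketch of the same decision, not kernel code: the function and parameter names below are invented for illustration. A system counts as having stuck CPUs either when some secondary actually failed to come online, or when it has several CPUs but no cpu_die method (e.g. spin-table boot), so its secondaries can never be offlined.

#include <stdbool.h>
#include <stdio.h>

static bool cpus_stuck(unsigned int stuck_count, unsigned int possible_cpus,
                       bool have_cpu_die_method)
{
        /* Spin-table-like system: several CPUs but no way to offline them. */
        bool spin_table_like = possible_cpus > 1 && !have_cpu_die_method;

        return stuck_count > 0 || spin_table_like;
}

int main(void)
{
        /* PSCI-like system, everything came online: hibernate is fine. */
        printf("psci, all online: %d\n", cpus_stuck(0, 4, true));
        /* One secondary failed to come online: hibernate must refuse. */
        printf("one stuck cpu   : %d\n", cpus_stuck(1, 4, true));
        /* Spin-table SMP: secondaries can never be offlined. */
        printf("spin-table smp  : %d\n", cpus_stuck(0, 4, false));
        return 0;
}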
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 						 &asid_generation);
 	flush_context(cpu);
 
-	/* We have at least 1 ASID per CPU, so this will always succeed */
+	/* We have more ASIDs than CPUs, so this will always succeed */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 
 set_asid:
@@ -227,8 +227,11 @@ switch_mm_fastpath:
 static int asids_init(void)
 {
 	asid_bits = get_cpu_asid_bits();
-	/* If we end up with more CPUs than ASIDs, expect things to crash */
-	WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+	/*
+	 * Expect allocation after rollover to fail if we don't have at least
+	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+	 */
+	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
 	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
 	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
 			   GFP_KERNEL);
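The arithmetic behind the tightened check can be worked through in a standalone sketch, not kernel code; the example CPU and ASID-width values below are hypothetical. ASID #0 is reserved for init_mm, so only NUM_USER_ASIDS - 1 values are allocatable, and after a generation rollover each possible CPU may keep one ASID reserved for the task it is running, while the new allocation still needs one more free ASID. Hence the kernel warns unless the usable ASIDs strictly exceed the possible CPUs.

#include <stdio.h>

static int asid_limit_ok(unsigned long num_user_asids,
                         unsigned int num_possible_cpus)
{
        /* Mirrors the patched WARN_ON(): complain unless usable ASIDs > CPUs. */
        return !(num_user_asids - 1 <= num_possible_cpus);
}

int main(void)
{
        /* 16-bit ASIDs: 65535 usable values, plenty for 256 CPUs. */
        printf("16-bit ASIDs, 256 CPUs: %s\n",
               asid_limit_ok(1UL << 16, 256) ? "ok" : "warn");
        /* 8-bit ASIDs on a hypothetical 255-CPU machine: after a rollover all
         * 255 usable ASIDs could be reserved by running tasks, so warn. */
        printf("8-bit ASIDs, 255 CPUs : %s\n",
               asid_limit_ok(1UL << 8, 255) ? "ok" : "warn");
        return 0;
}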
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
 	struct page *page = pte_page(pte);
 
-	/* no flushing needed for anonymous pages */
-	if (!page_mapping(page))
-		return;
-
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		sync_icache_aliases(page_address(page),
 				    PAGE_SIZE << compound_order(page));