arch/csky patches for 6.0-rc1

This pull request includes:
  - Add jump-label
  - Add qspinlock
  - Enable ARCH_INLINE_READ*/WRITE*/SPIN*
  - Some fixups and a coding-convention cleanup
 -----BEGIN PGP SIGNATURE-----
 
 iQJGBAABCAAwFiEE2KAv+isbWR/viAKHAXH1GYaIxXsFAmLrPFoSHGd1b3JlbkBr
 ZXJuZWwub3JnAAoJEAFx9RmGiMV737kQAI7SZ6LhbD/7i900RxMTNf4cZdb5t5XF
 wX3ajKB6uUaYRvB9SPotUsqulw7divDJiTrGWlJXMa//us0Z48AzIp15ampXtp64
 tJ9PSl/BHYWAXbuNbl43qMkDtgNGWp7beTLjsV0dWr9o1AbSV+6ae3qA1xdizLpQ
 DDnl3EwvuQKPJuWV37ewr3wl+X5Mq/BpgA2yhCBZebo10ob+YusYo9vzTQ1ywW13
 aIi3ROZI0NanX0pozcHpr2grK3UADp/Ut7nZ6udGs9pl7eDeRsTm8Zxgh+W5B6iO
 LvR6VXMCAlyNcs/y4iGyh01lC9B5z4fka5eeSY0/7wwmJOTiLwxCl4e72lA9Tme7
 hHMRZYTez2GGxpBo+69hx16nkQ/I95bHLtJGpyuMkyBhm4QWTF3WPHqljS6klfeO
 N6uPzQyhQfJVKJpe44H4hGE6y0KHbvkYEkRnSsST6Uc4hh5Ie8WBbtJo3zvjQfvp
 +ub1zBqu9lfLGs1beNSJYmI/A19lIEllFXT28Ni/itRrzaR5pYXGh7bp3SMxjOmt
 gwRhQax4WQzZh+NIu72/Di3H5MWcW1sU2zazO9FA7uiibfyzeoUWaLuiqU1Zef76
 rSduc2v23dg87sX0ZegFPiATZFZfaLve5l0qru8m35rk0rSdzgevdfBL6wqiosib
 5+yH6Jsif8vf
 =rQLI
 -----END PGP SIGNATURE-----

Merge tag 'csky-for-linus-6.0-rc1' of https://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:

 - Add jump-label implementation

 - Add qspinlock support

 - Enable ARCH_INLINE_READ*/WRITE*/SPIN*

 - Some fixups and a coding-convention cleanup

* tag 'csky-for-linus-6.0-rc1' of https://github.com/c-sky/csky-linux:
  csky: abiv1: Fixup compile error
  csky: cmpxchg: Coding convention for BUILD_BUG()
  csky: Enable ARCH_INLINE_READ*/WRITE*/SPIN*
  csky: Add qspinlock support
  csky: Add jump-label implementation
  csky: Move HEAD_TEXT_SECTION out of __init_begin-end
  csky: Correct position of _stext
  csky: Use the bitmap API to allocate bitmaps
  csky/kprobe: reclaim insn_slot on kprobe unregistration
Linus Torvalds 2022-08-04 15:27:20 -07:00
commit 7df9075e23
14 changed files with 211 additions and 20 deletions

@@ -8,6 +8,33 @@ config CSKY
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select COMMON_CLK
@@ -40,6 +67,8 @@ config CSKY
select GX6605S_TIMER if CPU_CK610
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !CPU_CK610
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_CONTEXT_TRACKING_USER

@@ -6,4 +6,10 @@
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
#endif /* __ABI_CSKY_STRING_H */

@@ -3,10 +3,10 @@ generic-y += asm-offsets.h
generic-y += extable.h
generic-y += gpio.h
generic-y += kvm_para.h
generic-y += spinlock.h
generic-y += spinlock_types.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qrwlock_types.h
generic-y += qspinlock.h
generic-y += parport.h
generic-y += user.h
generic-y += vmlinux.lds.h

@@ -4,10 +4,9 @@
#define __ASM_CSKY_CMPXCHG_H
#ifdef CONFIG_SMP
#include <linux/bug.h>
#include <asm/barrier.h>
extern void __bad_xchg(void);
#define __xchg_relaxed(new, ptr, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
@@ -15,6 +14,26 @@ extern void __bad_xchg(void);
__typeof__(*(ptr)) __ret; \
unsigned long tmp; \
switch (size) { \
case 2: { \
u32 ret; \
u32 shif = ((ulong)__ptr & 2) ? 16 : 0; \
u32 mask = 0xffff << shif; \
__ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \
__asm__ __volatile__ ( \
"1: ldex.w %0, (%4)\n" \
" and %1, %0, %2\n" \
" or %1, %1, %3\n" \
" stex.w %1, (%4)\n" \
" bez %1, 1b\n" \
: "=&r" (ret), "=&r" (tmp) \
: "r" (~mask), \
"r" ((u32)__new << shif), \
"r" (__ptr) \
: "memory"); \
__ret = (__typeof__(*(ptr))) \
((ret & mask) >> shif); \
break; \
} \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -26,7 +45,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
__bad_xchg(); \
BUILD_BUG(); \
} \
__ret; \
})
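
For readers unfamiliar with the trick used by the new "case 2" above: a 16-bit
exchange is emulated on top of the 32-bit ldex.w/stex.w pair by operating on
the aligned word that contains the halfword, which is what the queued spinlock
code needs for its 16-bit tail exchange. The following stand-alone C11 sketch
is illustrative only, not the kernel code; it assumes a little-endian layout
like csky's and uses a compare-and-swap loop in place of ldex/stex, but the
mask-and-shift logic is the same:

#include <stdatomic.h>
#include <stdint.h>

/* Sketch of the mask/shift idea behind the case-2 path above: exchange a
 * 16-bit value by looping a 32-bit CAS on the containing aligned word.
 * Relaxed ordering mirrors the __xchg_relaxed() variant. */
static uint16_t xchg16_sketch(uint16_t *ptr, uint16_t newval)
{
	uintptr_t addr = (uintptr_t)ptr;
	uint32_t shift = (addr & 2) ? 16 : 0;	/* little-endian layout assumed */
	uint32_t mask = 0xffffu << shift;
	_Atomic uint32_t *word = (_Atomic uint32_t *)(addr & ~(uintptr_t)2);
	uint32_t old = atomic_load_explicit(word, memory_order_relaxed);
	uint32_t val;

	do {
		val = (old & ~mask) | ((uint32_t)newval << shift);
	} while (!atomic_compare_exchange_weak_explicit(word, &old, val,
							memory_order_relaxed,
							memory_order_relaxed));

	return (uint16_t)((old & mask) >> shift);
}
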
@@ -56,7 +75,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
__bad_xchg(); \
BUILD_BUG(); \
} \
__ret; \
})
@@ -87,7 +106,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
__bad_xchg(); \
BUILD_BUG(); \
} \
__ret; \
})
@@ -119,7 +138,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
__bad_xchg(); \
BUILD_BUG(); \
} \
__ret; \
})

@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CSKY_JUMP_LABEL_H
#define __ASM_CSKY_JUMP_LABEL_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 4
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
asm_volatile_goto(
"1: nop32 \n"
" .pushsection __jump_table, \"aw\" \n"
" .align 2 \n"
" .long 1b - ., %l[label] - . \n"
" .long %0 - . \n"
" .popsection \n"
: : "i"(&((char *)key)[branch]) : : label);
return false;
label:
return true;
}
static __always_inline bool arch_static_branch_jump(struct static_key *key,
bool branch)
{
asm_volatile_goto(
"1: bsr32 %l[label] \n"
" .pushsection __jump_table, \"aw\" \n"
" .align 2 \n"
" .long 1b - ., %l[label] - . \n"
" .long %0 - . \n"
" .popsection \n"
: : "i"(&((char *)key)[branch]) : : label);
return false;
label:
return true;
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_JUMP_LABEL_H */
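
For context, here is a minimal sketch of how the arch_static_branch() /
arch_static_branch_jump() hooks above are consumed through the generic
static-key API (standard <linux/jump_label.h> usage; the key, function names,
and message are made up for illustration):

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical key, defaulting to false (out-of-line branch disabled). */
static DEFINE_STATIC_KEY_FALSE(csky_trace_enabled);

void hot_path(void)
{
	/* Compiles down to the nop32 emitted by arch_static_branch(); the
	 * branch is only taken after the key is enabled and
	 * arch_jump_label_transform() rewrites the nop into a bsr32. */
	if (static_branch_unlikely(&csky_trace_enabled))
		pr_info("trace point hit\n");
}

void enable_tracing(void)
{
	static_branch_enable(&csky_trace_enabled);	/* patches every call site */
}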

@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SECTIONS_H
#define __ASM_SECTIONS_H
#include <asm-generic/sections.h>
extern char _start[];
#endif /* __ASM_SECTIONS_H */

@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CSKY_SPINLOCK_H
#define __ASM_CSKY_SPINLOCK_H
#include <asm/qspinlock.h>
#include <asm/qrwlock.h>
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
#endif /* __ASM_CSKY_SPINLOCK_H */

@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
#define __ASM_CSKY_SPINLOCK_TYPES_H
#include <asm-generic/qspinlock_types.h>
#include <asm-generic/qrwlock_types.h>
#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */

@@ -13,6 +13,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)

@@ -0,0 +1,54 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#define NOP32_HI 0xc400
#define NOP32_LO 0x4820
#define BSR_LINK 0xe000
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
unsigned long addr = jump_entry_code(entry);
u16 insn[2];
int ret = 0;
if (type == JUMP_LABEL_JMP) {
long offset = jump_entry_target(entry) - jump_entry_code(entry);
if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864))
return;
offset = offset >> 1;
insn[0] = BSR_LINK |
((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
insn[1] = (uint16_t)((unsigned long) offset & 0xffff);
} else {
insn[0] = NOP32_HI;
insn[1] = NOP32_LO;
}
ret = copy_to_kernel_nofault((void *)addr, insn, 4);
WARN_ON(ret);
flush_icache_range(addr, addr + 4);
}
void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
/*
* We use the same instructions in the arch_static_branch and
* arch_static_branch_jump inline functions, so there's no
* need to patch them up here.
* The core will call arch_jump_label_transform when those
* instructions need to be replaced.
*/
arch_jump_label_transform(entry, type);
}
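
To make the arithmetic in the JUMP_LABEL_JMP branch concrete, here is a small
stand-alone sketch (plain user-space C, not kernel code) that encodes a branch
displacement the same way. The range check limits the byte displacement to
+/- 64 MiB (67108864 = 2^26 bytes); after halving, the result is a signed
26-bit value that is split into 10 high bits in the first halfword and 16 low
bits in the second.

#include <stdint.h>
#include <stdio.h>

#define BSR_LINK 0xe000			/* same opcode constant as above */

/* Encode a csky bsr32 displacement as arch_jump_label_transform() does. */
static int encode_bsr32(long offset, uint16_t insn[2])
{
	if ((offset & 1) || offset < -67108864 || offset >= 67108864)
		return -1;		/* misaligned or outside +/- 64 MiB */
	offset >>= 1;			/* instructions are 2-byte aligned */
	insn[0] = BSR_LINK | ((uint16_t)((unsigned long)offset >> 16) & 0x3ff);
	insn[1] = (uint16_t)((unsigned long)offset & 0xffff);
	return 0;
}

int main(void)
{
	uint16_t insn[2];

	if (!encode_bsr32(0x1234, insn))
		printf("%04x %04x\n", insn[0], insn[1]);	/* prints: e000 091a */
	return 0;
}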

@@ -124,6 +124,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.api.insn) {
free_insn_slot(p->ainsn.api.insn, 0);
p->ainsn.api.insn = NULL;
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)

@@ -31,7 +31,7 @@ static void __init csky_memblock_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
signed long size;
memblock_reserve(__pa(_stext), _end - _stext);
memblock_reserve(__pa(_start), _end - _start);
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
@@ -78,7 +78,7 @@ void __init setup_arch(char **cmdline_p)
pr_info("Phys. mem: %ldMB\n",
(unsigned long) memblock_phys_mem_size()/1024/1024);
setup_initial_init_mm(_stext, _etext, _edata, _end);
setup_initial_init_mm(_start, _etext, _edata, _end);
parse_early_param();

@@ -22,17 +22,13 @@ SECTIONS
{
. = PAGE_OFFSET + PHYS_OFFSET_OFFSET;
_stext = .;
__init_begin = .;
_start = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(PAGE_SIZE)
PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
__init_end = .;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
_stext = .;
VBR_BASE
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -48,7 +44,12 @@ SECTIONS
/* __init_begin __init_end must be page aligned for free_initmem */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(PAGE_SIZE)
PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
__init_end = .;
_sdata = .;
RO_DATA(PAGE_SIZE)

@@ -27,7 +27,7 @@ static void flush_context(struct asid_info *info)
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
bitmap_zero(info->map, NUM_CTXT_ASIDS(info));
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
@@ -178,8 +178,7 @@ int asid_allocator_init(struct asid_info *info,
*/
WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
sizeof(*info->map), GFP_KERNEL);
info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL);
if (!info->map)
return -ENOMEM;