Currently we don't have any coverage of the syscall ABI so let's add a very dumb test program which sets up register patterns, does a syscall and then checks that the register state after the syscall matches what we expect. The program is written in an extremely simplistic fashion with the goal of making it easy to verify that it's doing what it thinks it's doing, it is not a model of how one should write actual code. Currently we validate the general purpose, FPSIMD and SVE registers. There are other things that could be covered like FPCR and flags registers, these can be covered incrementally - my main focus at the minute is covering the ABI for the SVE registers. The program repeats the tests for all possible SVE vector lengths in case some vector length specific optimisation causes issues, as well as testing FPSIMD only. It tries two syscalls, getpid() and sched_yield(), in an effort to cover both immediate return to userspace and scheduling another task, though there are no guarantees which cases will be hit. A new test directory "abi" is added to hold the test, it doesn't seem to fit well into any of the existing directories. Signed-off-by: Mark Brown <broonie@kernel.org> Link: https://lore.kernel.org/r/20211210184133.320748-7-broonie@kernel.org Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
//
// Assembly portion of the syscall ABI test
//
// Load values from memory into registers, invoke a syscall and save the
// register values back to memory for later checking.  The syscall to be
// invoked is configured in x8 of the input GPR data.
//
// x0:	SVE VL, 0 for FP only
//
//	GPRs:	gpr_in, gpr_out
//	FPRs:	fpr_in, fpr_out
//	Zn:	z_in, z_out
//	Pn:	p_in, p_out
//	FFR:	ffr_in, ffr_out
// Permit SVE instructions regardless of the assembler's default -march.
.arch_extension sve

// do_syscall(x0 = SVE VL, 0 for FPSIMD only)
//
// Loads register patterns from the *_in buffers, issues the syscall whose
// number is held in the x8 slot of gpr_in, then dumps the post-syscall
// register state into the *_out buffers for the C caller to compare.
.globl do_syscall
do_syscall:
	// Build a 112 byte frame: frame record (x29/x30) at [sp], the
	// incoming x0/x1 at [sp, #16], and the callee saved registers
	// x19-x28 from [sp, #32] up.  112 keeps sp 16 byte aligned.
	stp	x29, x30, [sp, #-112]!
	mov	x29, sp
	stp	x0, x1, [sp, #16]
	stp	x19, x20, [sp, #32]
	stp	x21, x22, [sp, #48]
	stp	x23, x24, [sp, #64]
	stp	x25, x26, [sp, #80]
	stp	x27, x28, [sp, #96]

	// Load GPRs x8-x28 from gpr_in (x8 carries the syscall number),
	// and store our actual FP/LR into the gpr_in buffer so the later
	// comparison against gpr_out uses the real pre-syscall values.
	ldr	x2, =gpr_in
	add	x2, x2, #64		// skip the x0-x7 slots of the buffer
	ldp	x8, x9, [x2], #16
	ldp	x10, x11, [x2], #16
	ldp	x12, x13, [x2], #16
	ldp	x14, x15, [x2], #16
	ldp	x16, x17, [x2], #16
	ldp	x18, x19, [x2], #16
	ldp	x20, x21, [x2], #16
	ldp	x22, x23, [x2], #16
	ldp	x24, x25, [x2], #16
	ldp	x26, x27, [x2], #16
	ldr	x28, [x2], #8
	str	x29, [x2], #8		// FP
	str	x30, [x2], #8		// LR

	// Load FPRs if we're not doing SVE (the SVE loads below would
	// overwrite them, as V0-V31 alias the low bits of Z0-Z31)
	cbnz	x0, 1f
	ldr	x2, =fpr_in
	ldp	q0, q1, [x2]
	ldp	q2, q3, [x2, #16 * 2]
	ldp	q4, q5, [x2, #16 * 4]
	ldp	q6, q7, [x2, #16 * 6]
	ldp	q8, q9, [x2, #16 * 8]
	ldp	q10, q11, [x2, #16 * 10]
	ldp	q12, q13, [x2, #16 * 12]
	ldp	q14, q15, [x2, #16 * 14]
	ldp	q16, q17, [x2, #16 * 16]
	ldp	q18, q19, [x2, #16 * 18]
	ldp	q20, q21, [x2, #16 * 20]
	ldp	q22, q23, [x2, #16 * 22]
	ldp	q24, q25, [x2, #16 * 24]
	ldp	q26, q27, [x2, #16 * 26]
	ldp	q28, q29, [x2, #16 * 28]
	ldp	q30, q31, [x2, #16 * 30]
1:

	// Load the SVE registers if we're doing SVE
	cbz	x0, 1f

	ldr	x2, =z_in
	ldr	z0, [x2, #0, MUL VL]
	ldr	z1, [x2, #1, MUL VL]
	ldr	z2, [x2, #2, MUL VL]
	ldr	z3, [x2, #3, MUL VL]
	ldr	z4, [x2, #4, MUL VL]
	ldr	z5, [x2, #5, MUL VL]
	ldr	z6, [x2, #6, MUL VL]
	ldr	z7, [x2, #7, MUL VL]
	ldr	z8, [x2, #8, MUL VL]
	ldr	z9, [x2, #9, MUL VL]
	ldr	z10, [x2, #10, MUL VL]
	ldr	z11, [x2, #11, MUL VL]
	ldr	z12, [x2, #12, MUL VL]
	ldr	z13, [x2, #13, MUL VL]
	ldr	z14, [x2, #14, MUL VL]
	ldr	z15, [x2, #15, MUL VL]
	ldr	z16, [x2, #16, MUL VL]
	ldr	z17, [x2, #17, MUL VL]
	ldr	z18, [x2, #18, MUL VL]
	ldr	z19, [x2, #19, MUL VL]
	ldr	z20, [x2, #20, MUL VL]
	ldr	z21, [x2, #21, MUL VL]
	ldr	z22, [x2, #22, MUL VL]
	ldr	z23, [x2, #23, MUL VL]
	ldr	z24, [x2, #24, MUL VL]
	ldr	z25, [x2, #25, MUL VL]
	ldr	z26, [x2, #26, MUL VL]
	ldr	z27, [x2, #27, MUL VL]
	ldr	z28, [x2, #28, MUL VL]
	ldr	z29, [x2, #29, MUL VL]
	ldr	z30, [x2, #30, MUL VL]
	ldr	z31, [x2, #31, MUL VL]

	// FFR has no direct load, it is set via WRFFR from a predicate;
	// p0 is used as scratch and reloaded with its test value below.
	ldr	x2, =ffr_in
	ldr	p0, [x2, #0]
	wrffr	p0.b

	ldr	x2, =p_in
	ldr	p0, [x2, #0, MUL VL]
	ldr	p1, [x2, #1, MUL VL]
	ldr	p2, [x2, #2, MUL VL]
	ldr	p3, [x2, #3, MUL VL]
	ldr	p4, [x2, #4, MUL VL]
	ldr	p5, [x2, #5, MUL VL]
	ldr	p6, [x2, #6, MUL VL]
	ldr	p7, [x2, #7, MUL VL]
	ldr	p8, [x2, #8, MUL VL]
	ldr	p9, [x2, #9, MUL VL]
	ldr	p10, [x2, #10, MUL VL]
	ldr	p11, [x2, #11, MUL VL]
	ldr	p12, [x2, #12, MUL VL]
	ldr	p13, [x2, #13, MUL VL]
	ldr	p14, [x2, #14, MUL VL]
	ldr	p15, [x2, #15, MUL VL]
1:

	// Do the syscall (number in x8, loaded from gpr_in above)
	svc	#0

	// Save GPRs x8-x30, again skipping the x0-x7 slots of the buffer
	ldr	x2, =gpr_out
	add	x2, x2, #64
	stp	x8, x9, [x2], #16
	stp	x10, x11, [x2], #16
	stp	x12, x13, [x2], #16
	stp	x14, x15, [x2], #16
	stp	x16, x17, [x2], #16
	stp	x18, x19, [x2], #16
	stp	x20, x21, [x2], #16
	stp	x22, x23, [x2], #16
	stp	x24, x25, [x2], #16
	stp	x26, x27, [x2], #16
	stp	x28, x29, [x2], #16
	str	x30, [x2]

	// Restore x0 and x1 for feature checks (x0 = SVE VL is used to
	// decide whether to dump the SVE state below)
	ldp	x0, x1, [sp, #16]

	// Save FPSIMD state unconditionally; in the SVE case V0-V31 are
	// the low 128 bits of Z0-Z31
	ldr	x2, =fpr_out
	stp	q0, q1, [x2]
	stp	q2, q3, [x2, #16 * 2]
	stp	q4, q5, [x2, #16 * 4]
	stp	q6, q7, [x2, #16 * 6]
	stp	q8, q9, [x2, #16 * 8]
	stp	q10, q11, [x2, #16 * 10]
	stp	q12, q13, [x2, #16 * 12]
	stp	q14, q15, [x2, #16 * 14]
	stp	q16, q17, [x2, #16 * 16]
	stp	q18, q19, [x2, #16 * 18]
	stp	q20, q21, [x2, #16 * 20]
	stp	q22, q23, [x2, #16 * 22]
	stp	q24, q25, [x2, #16 * 24]
	stp	q26, q27, [x2, #16 * 26]
	stp	q28, q29, [x2, #16 * 28]
	stp	q30, q31, [x2, #16 * 30]

	// Save the SVE state if we have some
	cbz	x0, 1f

	ldr	x2, =z_out
	str	z0, [x2, #0, MUL VL]
	str	z1, [x2, #1, MUL VL]
	str	z2, [x2, #2, MUL VL]
	str	z3, [x2, #3, MUL VL]
	str	z4, [x2, #4, MUL VL]
	str	z5, [x2, #5, MUL VL]
	str	z6, [x2, #6, MUL VL]
	str	z7, [x2, #7, MUL VL]
	str	z8, [x2, #8, MUL VL]
	str	z9, [x2, #9, MUL VL]
	str	z10, [x2, #10, MUL VL]
	str	z11, [x2, #11, MUL VL]
	str	z12, [x2, #12, MUL VL]
	str	z13, [x2, #13, MUL VL]
	str	z14, [x2, #14, MUL VL]
	str	z15, [x2, #15, MUL VL]
	str	z16, [x2, #16, MUL VL]
	str	z17, [x2, #17, MUL VL]
	str	z18, [x2, #18, MUL VL]
	str	z19, [x2, #19, MUL VL]
	str	z20, [x2, #20, MUL VL]
	str	z21, [x2, #21, MUL VL]
	str	z22, [x2, #22, MUL VL]
	str	z23, [x2, #23, MUL VL]
	str	z24, [x2, #24, MUL VL]
	str	z25, [x2, #25, MUL VL]
	str	z26, [x2, #26, MUL VL]
	str	z27, [x2, #27, MUL VL]
	str	z28, [x2, #28, MUL VL]
	str	z29, [x2, #29, MUL VL]
	str	z30, [x2, #30, MUL VL]
	str	z31, [x2, #31, MUL VL]

	// Save the predicates before FFR: reading FFR below clobbers p0
	ldr	x2, =p_out
	str	p0, [x2, #0, MUL VL]
	str	p1, [x2, #1, MUL VL]
	str	p2, [x2, #2, MUL VL]
	str	p3, [x2, #3, MUL VL]
	str	p4, [x2, #4, MUL VL]
	str	p5, [x2, #5, MUL VL]
	str	p6, [x2, #6, MUL VL]
	str	p7, [x2, #7, MUL VL]
	str	p8, [x2, #8, MUL VL]
	str	p9, [x2, #9, MUL VL]
	str	p10, [x2, #10, MUL VL]
	str	p11, [x2, #11, MUL VL]
	str	p12, [x2, #12, MUL VL]
	str	p13, [x2, #13, MUL VL]
	str	p14, [x2, #14, MUL VL]
	str	p15, [x2, #15, MUL VL]

	// FFR has no direct store, read it into p0 (already saved) first
	ldr	x2, =ffr_out
	rdffr	p0.b
	str	p0, [x2, #0]
1:

	// Restore callee saved registers x19-x28 and the frame record,
	// then pop the 112 byte frame
	ldp	x19, x20, [sp, #32]
	ldp	x21, x22, [sp, #48]
	ldp	x23, x24, [sp, #64]
	ldp	x25, x26, [sp, #80]
	ldp	x27, x28, [sp, #96]
	ldp	x29, x30, [sp], #112

	ret