execve updates for v6.11-rc1

- Use value of kernel.randomize_va_space once per exec (Alexey Dobriyan)
 
 - Honor PT_LOAD alignment for static PIE
 
 - Make bprm->argmin only visible under CONFIG_MMU
 
 - Add KUnit testing of bprm_stack_limits()
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEpcP2jyKd1g9yPm4TiXL039xtwCYFAmaVTFAACgkQiXL039xt
 wCYyZw//ZcPV2hu48WqqOImL8LI9HIUaZqKQpixGQRD5VcTRb5MKg8g3Wi4EBHz+
 Kg6QvTEOQdg6NbfE9fH8VIIwcp3dAxdWN6g+3A0HHDSRdb8Ye1ucnzB2kgmEkM1l
 huBRn5tnoS0vn2fxafu1O5tj330kKAvTsemsy316cxmbKNs7ckHdfwuVgZHcuyEt
 OrOA3ZSTWwjkSiA9tatsi5iAQ34tQYGwDEosf06avlnPkQqsRzn3wNlohAPjQF6V
 kjRfX/Mxz2EHa0mjXy2OkhNyPSn6wu0OcmF0ympySHzxm726uRggG+olT5ziUc+2
 DW6Gz6TJ1P8Gu+uTEoz6AY+l5Bpo9ZLYSBm+Mp88sxAT6+Xcc68XeZsFZHmefJzs
 6g6EdmwhDEP/Xd3sIsNphdkS5q1RMgc7tdAtyK8GCaACsHUlU4CfzRYh2mWxpIg6
 hA7oM5KF9FuToLtaIS6K/yYQIVsTKAaA7t+5K/a1RUyKzcJ0O7UpMx1oEge2sPEK
 RnETCYhQs0Cxm11iJ/eqEFzWm0Puxjsjz/P/j5H5U8usx9VUoz0HuS91fNEIY3S9
 y7bn09wxuUv4QddKYgltkurxCCB//Nv7jPYo96pKIW3T56XkfsrYLvNH2W95cCNz
 OMvZImA1J/vQubSODrgeQsfMRsaJodHU3acWyYQ90HmmoWx4JS4=
 =bO7x
 -----END PGP SIGNATURE-----

Merge tag 'execve-v6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull execve updates from Kees Cook:

 - Use value of kernel.randomize_va_space once per exec (Alexey
   Dobriyan)

 - Honor PT_LOAD alignment for static PIE

 - Make bprm->argmin only visible under CONFIG_MMU

 - Add KUnit testing of bprm_stack_limits()

* tag 'execve-v6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  exec: Avoid pathological argc, envc, and bprm->p values
  execve: Keep bprm->argmin behind CONFIG_MMU
  ELF: fix kernel.randomize_va_space double read
  exec: Add KUnit test for bprm_stack_limits()
  binfmt_elf: Honor PT_LOAD alignment for static PIE
  binfmt_elf: Calculate total_size earlier
  selftests/exec: Build both static and non-static load_address tests
Linus Torvalds 2024-07-16 12:59:20 -07:00
commit 72fda6c8e5
8 changed files with 337 additions and 70 deletions

MAINTAINERS

@@ -8285,7 +8285,9 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
F: Documentation/userspace-api/ELF.rst
F: fs/*binfmt_*.c
F: fs/Kconfig.binfmt
F: fs/exec.c
F: fs/exec_test.c
F: include/linux/binfmts.h
F: include/linux/elf.h
F: include/uapi/linux/binfmts.h

fs/Kconfig.binfmt

@@ -176,4 +176,12 @@ config COREDUMP
certainly want to say Y here. Not necessary on systems that never
need debugging or only ever run flawless code.
config EXEC_KUNIT_TEST
bool "Build execve tests" if !KUNIT_ALL_TESTS
depends on KUNIT=y
default KUNIT_ALL_TESTS
help
This builds the exec KUnit tests, which test boundary conditions
of various aspects of the exec internals.
endmenu

fs/binfmt_elf.c

@@ -1003,7 +1003,8 @@ out_free_interp:
if (elf_read_implies_exec(*elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
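
The "double read" fixed above is that randomize_va_space was previously read twice during a single exec: once for the PF_RANDOMIZE decision here, and again later for the brk-randomization check further down in this file, so a concurrent sysctl write could make the two decisions disagree. A minimal standalone sketch of the snapshot pattern, with a C11 atomic standing in for the kernel's sysctl variable and READ_ONCE():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel.randomize_va_space sysctl, which may change at any time. */
static _Atomic int randomize_va_space = 2;

static void load_binary_sketch(bool addr_no_randomize)
{
	/*
	 * Read the tunable exactly once per exec and reuse the snapshot for
	 * every later decision, mirroring snapshot_randomize_va_space above.
	 */
	const int snapshot = atomic_load(&randomize_va_space);
	bool pf_randomize = !addr_no_randomize && snapshot;
	bool randomize_brk = pf_randomize && snapshot > 1;

	printf("PF_RANDOMIZE=%d randomize_brk=%d\n", pf_randomize, randomize_brk);
}

int main(void)
{
	load_binary_sketch(false);
	return 0;
}
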
@@ -1061,47 +1062,7 @@ out_free_interp:
* Header for ET_DYN binaries to calculate the
* randomization (load_bias) for all the LOAD
* Program Headers.
*
* There are effectively two types of ET_DYN
* binaries: programs (i.e. PIE: ET_DYN with INTERP)
* and loaders (ET_DYN without INTERP, since they
* _are_ the ELF interpreter). The loaders must
* be loaded away from programs since the program
* may otherwise collide with the loader (especially
* for ET_EXEC which does not have a randomized
* position). For example to handle invocations of
* "./ld.so someprog" to test out a new version of
* the loader, the subsequent program that the
* loader loads must avoid the loader itself, so
* they cannot share the same load range. Sufficient
* room for the brk must be allocated with the
* loader as well, since brk must be available with
* the loader.
*
* Therefore, programs are loaded offset from
* ELF_ET_DYN_BASE and loaders are loaded into the
* independently randomized mmap region (0 load_bias
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
*/
if (interpreter) {
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
if (alignment)
load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE;
} else
load_bias = 0;
/*
* Since load_bias is used for all subsequent loading
* calculations, we must lower it by the first vaddr
* so that the remaining calculations based on the
* ELF vaddrs will be correctly offset. The result
* is then page aligned.
*/
load_bias = ELF_PAGESTART(load_bias - vaddr);
/*
* Calculate the entire size of the ELF mapping
@@ -1127,6 +1088,80 @@ out_free_interp:
retval = -EINVAL;
goto out_free_dentry;
}
/* Calculate any requested alignment. */
alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
/*
* There are effectively two types of ET_DYN
* binaries: programs (i.e. PIE: ET_DYN with PT_INTERP)
* and loaders (ET_DYN without PT_INTERP, since they
* _are_ the ELF interpreter). The loaders must
* be loaded away from programs since the program
* may otherwise collide with the loader (especially
* for ET_EXEC which does not have a randomized
* position). For example to handle invocations of
* "./ld.so someprog" to test out a new version of
* the loader, the subsequent program that the
* loader loads must avoid the loader itself, so
* they cannot share the same load range. Sufficient
* room for the brk must be allocated with the
* loader as well, since brk must be available with
* the loader.
*
* Therefore, programs are loaded offset from
* ELF_ET_DYN_BASE and loaders are loaded into the
* independently randomized mmap region (0 load_bias
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
*/
if (interpreter) {
/* On ET_DYN with PT_INTERP, we do the ASLR. */
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
/* Adjust alignment as requested. */
if (alignment)
load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE;
} else {
/*
* For ET_DYN without PT_INTERP, we rely on
* the architecture's (potentially ASLR) mmap
* base address (via a load_bias of 0).
*
* When a large alignment is requested, we
* must do the allocation at address "0" right
* now to discover where things will load so
* that we can adjust the resulting alignment.
* In this case (load_bias != 0), we can use
* MAP_FIXED_NOREPLACE to make sure the mapping
* doesn't collide with anything.
*/
if (alignment > ELF_MIN_ALIGN) {
load_bias = elf_load(bprm->file, 0, elf_ppnt,
elf_prot, elf_flags, total_size);
if (BAD_ADDR(load_bias)) {
retval = IS_ERR_VALUE(load_bias) ?
PTR_ERR((void*)load_bias) : -EINVAL;
goto out_free_dentry;
}
vm_munmap(load_bias, total_size);
/* Adjust alignment as requested. */
if (alignment)
load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE;
} else
load_bias = 0;
}
/*
* Since load_bias is used for all subsequent loading
* calculations, we must lower it by the first vaddr
* so that the remaining calculations based on the
* ELF vaddrs will be correctly offset. The result
* is then page aligned.
*/
load_bias = ELF_PAGESTART(load_bias - vaddr);
}
error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
@@ -1250,7 +1285,7 @@ out_free_interp:
mm->end_data = end_data;
mm->start_stack = bprm->p;
if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
/*
* For architectures with ELF randomization, when executing
* a loader directly (i.e. no interpreter listed in ELF
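
To make the alignment handling in the new ET_DYN code concrete, here is a small standalone sketch (plain userspace C; the base, random offset, and alignment values are made up, and PAGE_SIZE stands in for ELF_MIN_ALIGN) of how a randomized base is rounded down to the largest PT_LOAD p_align and then offset and page-aligned, as binfmt_elf.c now does for both PIE cases:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		0x1000ULL
#define ELF_PAGESTART(v)	((v) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical inputs, not the kernel's real constants. */
	uint64_t base      = 0x555555554000ULL;	/* ELF_ET_DYN_BASE-like base */
	uint64_t rnd       = 0xa3f000ULL;		/* arch_mmap_rnd()-like offset */
	uint64_t alignment = 0x200000ULL;		/* largest PT_LOAD p_align (2 MiB) */
	uint64_t vaddr     = 0;				/* first PT_LOAD p_vaddr */
	uint64_t load_bias = base + rnd;

	/* Round down to the requested alignment (assumed to be a power of two). */
	if (alignment)
		load_bias &= ~(alignment - 1);

	/* Offset by the first vaddr and page-align, as the code above does. */
	load_bias = ELF_PAGESTART(load_bias - vaddr);

	printf("load_bias = %#llx, multiple of %#llx: %s\n",
	       (unsigned long long)load_bias, (unsigned long long)alignment,
	       (load_bias & (alignment - 1)) ? "no" : "yes");
	return 0;
}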

fs/exec.c

@@ -486,6 +486,35 @@ static int count_strings_kernel(const char *const *argv)
return i;
}
static inline int bprm_set_stack_limit(struct linux_binprm *bprm,
unsigned long limit)
{
#ifdef CONFIG_MMU
/* Avoid a pathological bprm->p. */
if (bprm->p < limit)
return -E2BIG;
bprm->argmin = bprm->p - limit;
#endif
return 0;
}
static inline bool bprm_hit_stack_limit(struct linux_binprm *bprm)
{
#ifdef CONFIG_MMU
return bprm->p < bprm->argmin;
#else
return false;
#endif
}
/*
* Calculate bprm->argmin from:
* - _STK_LIM
* - ARG_MAX
* - bprm->rlim_stack.rlim_cur
* - bprm->argc
* - bprm->envc
* - bprm->p
*/
static int bprm_stack_limits(struct linux_binprm *bprm)
{
unsigned long limit, ptr_size;
@@ -505,6 +534,9 @@ static int bprm_stack_limits(struct linux_binprm *bprm)
* of argument strings even with small stacks
*/
limit = max_t(unsigned long, limit, ARG_MAX);
/* Reject totally pathological counts. */
if (bprm->argc < 0 || bprm->envc < 0)
return -E2BIG;
/*
* We must account for the size of all the argv and envp pointers to
* the argv and envp strings, since they will also take up space in
@@ -518,13 +550,14 @@ static int bprm_stack_limits(struct linux_binprm *bprm)
* argc can never be 0, to keep them from walking envp by accident.
* See do_execveat_common().
*/
ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *);
if (check_add_overflow(max(bprm->argc, 1), bprm->envc, &ptr_size) ||
check_mul_overflow(ptr_size, sizeof(void *), &ptr_size))
return -E2BIG;
if (limit <= ptr_size)
return -E2BIG;
limit -= ptr_size;
bprm->argmin = bprm->p - limit;
return 0;
return bprm_set_stack_limit(bprm, limit);
}
/*
@@ -562,10 +595,8 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
pos = bprm->p;
str += len;
bprm->p -= len;
#ifdef CONFIG_MMU
if (bprm->p < bprm->argmin)
if (bprm_hit_stack_limit(bprm))
goto out;
#endif
while (len > 0) {
int offset, bytes_to_copy;
@@ -640,7 +671,7 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
/* We're going to work our way backwards. */
arg += len;
bprm->p -= len;
if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
if (bprm_hit_stack_limit(bprm))
return -E2BIG;
while (len > 0) {
@@ -2203,3 +2234,7 @@ static int __init init_fs_exec_sysctls(void)
fs_initcall(init_fs_exec_sysctls);
#endif /* CONFIG_SYSCTL */
#ifdef CONFIG_EXEC_KUNIT_TEST
#include "exec_test.c"
#endif
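
The check_add_overflow()/check_mul_overflow() change in bprm_stack_limits() closes the 32-bit overflow called out in the new KUnit comments (for example argc = envc = 0x20000001). As a rough userspace illustration, the same pattern can be written with the GCC/Clang builtins that the kernel helpers wrap; the -E2BIG convention is kept from the exec code, but this is only a sketch, not the kernel function:

#include <errno.h>
#include <stdio.h>

/* Sketch of the overflow-safe pointer accounting now done in bprm_stack_limits(). */
static int ptr_size_for(int argc, int envc, unsigned long *ptr_size)
{
	unsigned long count;

	/* Reject pathological counts up front, as the patched code does. */
	if (argc < 0 || envc < 0)
		return -E2BIG;

	/* (max(argc, 1) + envc) * sizeof(void *), with both steps overflow-checked. */
	if (__builtin_add_overflow(argc > 1 ? argc : 1, envc, &count) ||
	    __builtin_mul_overflow(count, sizeof(void *), ptr_size))
		return -E2BIG;

	return 0;
}

int main(void)
{
	unsigned long sz;

	/* On a 32-bit unsigned long, this many pointers overflows the multiply. */
	printf("rc=%d\n", ptr_size_for(0x20000001, 0x20000001, &sz));
	printf("rc=%d sz=%lu\n", ptr_size_for(2, 3, &sz), sz);
	return 0;
}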

fs/exec_test.c (new file, 141 lines)

@@ -0,0 +1,141 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <kunit/test.h>
struct bprm_stack_limits_result {
struct linux_binprm bprm;
int expected_rc;
unsigned long expected_argmin;
};
static const struct bprm_stack_limits_result bprm_stack_limits_results[] = {
/* Negative argc/envc counts produce -E2BIG */
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
.argc = INT_MIN, .envc = INT_MIN }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
.argc = 5, .envc = -1 }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
.argc = -1, .envc = 10 }, .expected_rc = -E2BIG },
/* The max value of argc or envc is MAX_ARG_STRINGS. */
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
.argc = INT_MAX, .envc = INT_MAX }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
.argc = MAX_ARG_STRINGS, .envc = MAX_ARG_STRINGS }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
.argc = 0, .envc = MAX_ARG_STRINGS }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
.argc = MAX_ARG_STRINGS, .envc = 0 }, .expected_rc = -E2BIG },
/*
* On 32-bit systems these argc and envc counts, while likely impossible
* to represent within the associated TASK_SIZE, could overflow the
* limit calculation, and bypass the ptr_size <= limit check.
*/
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
.argc = 0x20000001, .envc = 0x20000001 }, .expected_rc = -E2BIG },
#ifdef CONFIG_MMU
/* Make sure a pathological bprm->p doesn't cause an overflow. */
{ { .p = sizeof(void *), .rlim_stack.rlim_cur = ULONG_MAX,
.argc = 10, .envc = 10 }, .expected_rc = -E2BIG },
#endif
/*
* 0 rlim_stack will get raised to ARG_MAX. With 1 string pointer,
* we should see p - ARG_MAX + sizeof(void *).
*/
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
.argc = 1, .envc = 0 }, .expected_argmin = ULONG_MAX - ARG_MAX + sizeof(void *)},
/* Validate that argc is always raised to a minimum of 1. */
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
.argc = 0, .envc = 0 }, .expected_argmin = ULONG_MAX - ARG_MAX + sizeof(void *)},
/*
* 0 rlim_stack will get raised to ARG_MAX. With pointers filling ARG_MAX,
* we should see -E2BIG. (Note argc is always raised to at least 1.)
*/
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
.argc = ARG_MAX / sizeof(void *), .envc = 0 }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
.argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
.argc = ARG_MAX / sizeof(void *) + 1, .envc = 0 }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
.argc = 0, .envc = ARG_MAX / sizeof(void *) }, .expected_rc = -E2BIG },
/* And with one less, we see space for exactly 1 pointer. */
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
.argc = (ARG_MAX / sizeof(void *)) - 1, .envc = 0 },
.expected_argmin = ULONG_MAX - sizeof(void *) },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
.argc = 0, .envc = (ARG_MAX / sizeof(void *)) - 2, },
.expected_argmin = ULONG_MAX - sizeof(void *) },
/* If we raise rlim_stack / 4 to exactly ARG_MAX, nothing changes. */
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
.argc = ARG_MAX / sizeof(void *), .envc = 0 }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
.argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
.argc = ARG_MAX / sizeof(void *) + 1, .envc = 0 }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
.argc = 0, .envc = ARG_MAX / sizeof(void *) }, .expected_rc = -E2BIG },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
.argc = (ARG_MAX / sizeof(void *)) - 1, .envc = 0 },
.expected_argmin = ULONG_MAX - sizeof(void *) },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
.argc = 0, .envc = (ARG_MAX / sizeof(void *)) - 2, },
.expected_argmin = ULONG_MAX - sizeof(void *) },
/* But raising it another pointer * 4 will provide space for 1 more pointer. */
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = (ARG_MAX + sizeof(void *)) * 4,
.argc = ARG_MAX / sizeof(void *), .envc = 0 },
.expected_argmin = ULONG_MAX - sizeof(void *) },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = (ARG_MAX + sizeof(void *)) * 4,
.argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 },
.expected_argmin = ULONG_MAX - sizeof(void *) },
/* Raising rlim_stack / 4 to _STK_LIM / 4 * 3 will see more space. */
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3),
.argc = 0, .envc = 0 },
.expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3),
.argc = 0, .envc = 0 },
.expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
/* But raising it any further will see no increase. */
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3 + sizeof(void *)),
.argc = 0, .envc = 0 },
.expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3 + sizeof(void *)),
.argc = 0, .envc = 0 },
.expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * _STK_LIM,
.argc = 0, .envc = 0 },
.expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * _STK_LIM,
.argc = 0, .envc = 0 },
.expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
};
static void exec_test_bprm_stack_limits(struct kunit *test)
{
/* Double-check the constants. */
KUNIT_EXPECT_EQ(test, _STK_LIM, SZ_8M);
KUNIT_EXPECT_EQ(test, ARG_MAX, 32 * SZ_4K);
KUNIT_EXPECT_EQ(test, MAX_ARG_STRINGS, 0x7FFFFFFF);
for (int i = 0; i < ARRAY_SIZE(bprm_stack_limits_results); i++) {
const struct bprm_stack_limits_result *result = &bprm_stack_limits_results[i];
struct linux_binprm bprm = result->bprm;
int rc;
rc = bprm_stack_limits(&bprm);
KUNIT_EXPECT_EQ_MSG(test, rc, result->expected_rc, "on loop %d", i);
#ifdef CONFIG_MMU
KUNIT_EXPECT_EQ_MSG(test, bprm.argmin, result->expected_argmin, "on loop %d", i);
#endif
}
}
static struct kunit_case exec_test_cases[] = {
KUNIT_CASE(exec_test_bprm_stack_limits),
{},
};
static struct kunit_suite exec_test_suite = {
.name = "exec",
.test_cases = exec_test_cases,
};
kunit_test_suite(exec_test_suite);
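
These limits are also observable from userspace: execve() returns E2BIG when the argument and environment strings plus their pointers exceed the computed limit (a single oversized string also trips the separate MAX_ARG_STRLEN check, which likewise yields E2BIG). A hypothetical standalone check, not part of this series' selftests, might look like:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* One argument far larger than the default quarter-of-rlimit budget. */
	size_t big = 8 * 1024 * 1024;
	char *arg = malloc(big);

	if (!arg)
		return 1;
	memset(arg, 'A', big - 1);
	arg[big - 1] = '\0';

	char *argv[] = { "/bin/true", arg, NULL };
	char *envp[] = { NULL };

	execve("/bin/true", argv, envp);

	/* Only reached if execve() failed; E2BIG is the expected errno here. */
	printf("execve failed with %s\n",
	       errno == E2BIG ? "E2BIG (as expected)" : strerror(errno));
	free(arg);
	return 0;
}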

include/linux/binfmts.h

@@ -19,13 +19,13 @@ struct linux_binprm {
#ifdef CONFIG_MMU
struct vm_area_struct *vma;
unsigned long vma_pages;
unsigned long argmin; /* rlimit marker for copy_strings() */
#else
# define MAX_ARG_PAGES 32
struct page *page[MAX_ARG_PAGES];
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
/* Should an execfd be passed to userspace? */
have_execfd:1,

tools/testing/selftests/exec/Makefile

@@ -3,8 +3,13 @@ CFLAGS = -Wall
CFLAGS += -Wno-nonnull
CFLAGS += -D_GNU_SOURCE
ALIGNS := 0x1000 0x200000 0x1000000
ALIGN_PIES := $(patsubst %,load_address.%,$(ALIGNS))
ALIGN_STATIC_PIES := $(patsubst %,load_address.static.%,$(ALIGNS))
ALIGNMENT_TESTS := $(ALIGN_PIES) $(ALIGN_STATIC_PIES)
TEST_PROGS := binfmt_script.py
TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular
TEST_GEN_PROGS := execveat non-regular $(ALIGNMENT_TESTS)
TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
# Makefile is a run-time dependency, since it's accessed by the execveat test
TEST_FILES := Makefile
@@ -28,9 +33,9 @@ $(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat
$(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
cp $< $@
chmod -x $@
$(OUTPUT)/load_address_4096: load_address.c
$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
$(OUTPUT)/load_address_2097152: load_address.c
$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
$(OUTPUT)/load_address_16777216: load_address.c
$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
$(OUTPUT)/load_address.0x%: load_address.c
$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \
-fPIE -pie $< -o $@
$(OUTPUT)/load_address.static.0x%: load_address.c
$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \
-fPIE -static-pie $< -o $@
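
With these pattern rules, a target such as load_address.0x200000 is linked as a regular PIE with -Wl,-z,max-page-size=0x200000 (the alignment is taken from the last dot-separated word of the target name), while load_address.static.0x200000 gets the same max-page-size but is linked with -static-pie, so every value in ALIGNS is exercised both with and without a PT_INTERP program header.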

tools/testing/selftests/exec/load_address.c

@@ -5,11 +5,13 @@
#include <link.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "../kselftest.h"
struct Statistics {
unsigned long long load_address;
unsigned long long alignment;
bool interp;
};
int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
@@ -26,11 +28,20 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
stats->alignment = 0;
for (i = 0; i < info->dlpi_phnum; i++) {
unsigned long long align;
if (info->dlpi_phdr[i].p_type == PT_INTERP) {
stats->interp = true;
continue;
}
if (info->dlpi_phdr[i].p_type != PT_LOAD)
continue;
if (info->dlpi_phdr[i].p_align > stats->alignment)
stats->alignment = info->dlpi_phdr[i].p_align;
align = info->dlpi_phdr[i].p_align;
if (align > stats->alignment)
stats->alignment = align;
}
return 1; // Terminate dl_iterate_phdr.
@@ -38,27 +49,57 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
int main(int argc, char **argv)
{
struct Statistics extracted;
unsigned long long misalign;
struct Statistics extracted = { };
unsigned long long misalign, pow2;
bool interp_needed;
char buf[1024];
FILE *maps;
int ret;
ksft_print_header();
ksft_set_plan(1);
ksft_set_plan(4);
/* Dump maps file for debugging reference. */
maps = fopen("/proc/self/maps", "r");
if (!maps)
ksft_exit_fail_msg("FAILED: /proc/self/maps: %s\n", strerror(errno));
while (fgets(buf, sizeof(buf), maps)) {
ksft_print_msg("%s", buf);
}
fclose(maps);
/* Walk the program headers. */
ret = dl_iterate_phdr(ExtractStatistics, &extracted);
if (ret != 1)
ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n");
if (extracted.alignment == 0)
ksft_exit_fail_msg("FAILED: No alignment found\n");
else if (extracted.alignment & (extracted.alignment - 1))
ksft_exit_fail_msg("FAILED: Alignment is not a power of 2\n");
/* Report our findings. */
ksft_print_msg("load_address=%#llx alignment=%#llx\n",
extracted.load_address, extracted.alignment);
/* If we're named with ".static." we expect no INTERP. */
interp_needed = strstr(argv[0], ".static.") == NULL;
/* Were we built as expected? */
ksft_test_result(interp_needed == extracted.interp,
"%s INTERP program header %s\n",
interp_needed ? "Wanted" : "Unwanted",
extracted.interp ? "seen" : "missing");
/* Did we find an alignment? */
ksft_test_result(extracted.alignment != 0,
"Alignment%s found\n", extracted.alignment ? "" : " NOT");
/* Is the alignment sane? */
pow2 = extracted.alignment & (extracted.alignment - 1);
ksft_test_result(pow2 == 0,
"Alignment is%s a power of 2: %#llx\n",
pow2 == 0 ? "" : " NOT", extracted.alignment);
/* Is the load address aligned? */
misalign = extracted.load_address & (extracted.alignment - 1);
if (misalign)
ksft_exit_fail_msg("FAILED: alignment = %llu, load_address = %llu\n",
extracted.alignment, extracted.load_address);
ksft_test_result(misalign == 0, "Load Address is %saligned (%#llx)\n",
misalign ? "MIS" : "", misalign);
ksft_test_result_pass("Completed\n");
ksft_finished();
}
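
The two bit tests used by the selftest above are worth spelling out: for a power-of-two alignment a, (a & (a - 1)) is zero, and (addr & (a - 1)) is addr modulo a, i.e. the misalignment. A tiny standalone demonstration with made-up values:

#include <stdbool.h>
#include <stdio.h>

/* True when a is a nonzero power of two. */
static bool is_pow2(unsigned long long a)
{
	return a && (a & (a - 1)) == 0;
}

int main(void)
{
	unsigned long long align = 0x200000ULL;		/* 2 MiB, made-up value */
	unsigned long long load_address = 0x7f1234600000ULL;

	printf("alignment %#llx is a power of two: %d\n", align, is_pow2(align));
	printf("misalignment: %#llx\n", load_address & (align - 1));
	return 0;
}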