
Merge tag 'mm-hotfixes-stable-2024-05-25-09-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "16 hotfixes, 11 of which are cc:stable.

  A few nilfs2 fixes, the remainder are for MM: a couple of selftests
  fixes, various singletons fixing various issues in various parts"

* tag 'mm-hotfixes-stable-2024-05-25-09-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/ksm: fix possible UAF of stable_node
  mm/memory-failure: fix handling of dissolved but not taken off from buddy pages
  mm: /proc/pid/smaps_rollup: avoid skipping vma after getting mmap_lock again
  nilfs2: fix potential hang in nilfs_detach_log_writer()
  nilfs2: fix unexpected freezing of nilfs_segctor_sync()
  nilfs2: fix use-after-free of timer for log writer thread
  selftests/mm: fix build warnings on ppc64
  arm64: patching: fix handling of execmem addresses
  selftests/mm: compaction_test: fix bogus test success and reduce probability of OOM-killer invocation
  selftests/mm: compaction_test: fix incorrect write of zero to nr_hugepages
  selftests/mm: compaction_test: fix bogus test success on Aarch64
  mailmap: update email address for Satya Priya
  mm/huge_memory: don't unpoison huge_zero_folio
  kasan, fortify: properly rename memintrinsics
  lib: add version into /proc/allocinfo output
  mm/vmalloc: fix vmalloc which may return null if called with __GFP_NOFAIL

commit 9b62e02e63 by Linus Torvalds, 2024-05-25 15:10:33 -07:00
13 changed files with 187 additions and 69 deletions

diff --git a/.mailmap b/.mailmap

@@ -572,7 +572,7 @@ Sarangdhar Joshi <spjoshi@codeaurora.org>
Sascha Hauer <s.hauer@pengutronix.de>
Sahitya Tummala <quic_stummala@quicinc.com> <stummala@codeaurora.org>
Sathishkumar Muruganandam <quic_murugana@quicinc.com> <murugana@codeaurora.org>
Satya Priya <quic_c_skakit@quicinc.com> <skakit@codeaurora.org>
Satya Priya <quic_skakitap@quicinc.com> <quic_c_skakit@quicinc.com> <skakit@codeaurora.org>
S.Çağlar Onur <caglar@pardus.org.tr>
Sayali Lokhande <quic_sayalil@quicinc.com> <sayalil@codeaurora.org>
Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>

diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst

@@ -961,13 +961,14 @@ Provides information about memory allocations at all locations in the code
base. Each allocation in the code is identified by its source file, line
number, module (if originates from a loadable module) and the function calling
the allocation. The number of bytes allocated and number of calls at each
location are reported.
location are reported. The first line indicates the version of the file, the
second line is the header listing fields in the file.
Example output.
::
> sort -rn /proc/allocinfo
> tail -n +3 /proc/allocinfo | sort -rn
127664128 31168 mm/page_ext.c:270 func:alloc_page_ext
56373248 4737 mm/slub.c:2259 func:alloc_slab_page
14880768 3633 mm/readahead.c:247 func:page_cache_ra_unbounded
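
The new format means consumers must skip two header lines before the per-site records (hence the tail -n +3 above). A minimal userspace sketch of such a consumer, not part of this patch; the field layout is assumed from the documented example:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/allocinfo", "r");
	char line[512], tag[256], best_tag[256] = "";
	unsigned long long size, calls, best_size = 0;
	int lineno = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (++lineno <= 2)	/* version line + field header */
			continue;
		if (sscanf(line, "%llu %llu %255[^\n]", &size, &calls, tag) == 3 &&
		    size > best_size) {
			best_size = size;
			strcpy(best_tag, tag);
		}
	}
	fclose(f);
	printf("largest site: %llu bytes at %s\n", best_size, best_tag);
	return 0;
}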

diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c

@@ -36,7 +36,7 @@ static void __kprobes *patch_map(void *addr, int fixmap)
if (image)
page = phys_to_page(__pa_symbol(addr));
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
else if (IS_ENABLED(CONFIG_EXECMEM))
page = vmalloc_to_page(addr);
else
return addr;

diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c

@@ -2118,8 +2118,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
spin_lock(&sci->sc_state_lock);
if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
sci->sc_timer.expires = jiffies + sci->sc_interval;
add_timer(&sci->sc_timer);
if (sci->sc_task) {
sci->sc_timer.expires = jiffies + sci->sc_interval;
add_timer(&sci->sc_timer);
}
sci->sc_state |= NILFS_SEGCTOR_COMMIT;
}
spin_unlock(&sci->sc_state_lock);
@@ -2166,19 +2168,36 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
struct nilfs_segctor_wait_request wait_req;
int err = 0;
spin_lock(&sci->sc_state_lock);
init_wait(&wait_req.wq);
wait_req.err = 0;
atomic_set(&wait_req.done, 0);
init_waitqueue_entry(&wait_req.wq, current);
/*
* To prevent a race issue where completion notifications from the
* log writer thread are missed, increment the request sequence count
* "sc_seq_request" and insert a wait queue entry using the current
* sequence number into the "sc_wait_request" queue at the same time
* within the lock section of "sc_state_lock".
*/
spin_lock(&sci->sc_state_lock);
wait_req.seq = ++sci->sc_seq_request;
add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
spin_unlock(&sci->sc_state_lock);
init_waitqueue_entry(&wait_req.wq, current);
add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
set_current_state(TASK_INTERRUPTIBLE);
wake_up(&sci->sc_wait_daemon);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
/*
* Synchronize only while the log writer thread is alive.
* Leave flushing out after the log writer thread exits to
* the cleanup work in nilfs_segctor_destroy().
*/
if (!sci->sc_task)
break;
if (atomic_read(&wait_req.done)) {
err = wait_req.err;
break;
@@ -2194,7 +2213,7 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
return err;
}
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
{
struct nilfs_segctor_wait_request *wrq, *n;
unsigned long flags;
@@ -2202,7 +2221,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
if (!atomic_read(&wrq->done) &&
nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
(force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
wrq->err = err;
atomic_set(&wrq->done, 1);
}
@@ -2320,10 +2339,21 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
*/
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
bool thread_is_alive;
spin_lock(&sci->sc_state_lock);
sci->sc_seq_accepted = sci->sc_seq_request;
thread_is_alive = (bool)sci->sc_task;
spin_unlock(&sci->sc_state_lock);
del_timer_sync(&sci->sc_timer);
/*
* This function does not race with the log writer thread's
* termination. Therefore, deleting sc_timer, which should not be
* done after the log writer thread exits, can be done safely outside
* the area protected by sc_state_lock.
*/
if (thread_is_alive)
del_timer_sync(&sci->sc_timer);
}
/**
@@ -2340,7 +2370,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
if (mode == SC_LSEG_SR) {
sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
sci->sc_seq_done = sci->sc_seq_accepted;
nilfs_segctor_wakeup(sci, err);
nilfs_segctor_wakeup(sci, err, false);
sci->sc_flush_request = 0;
} else {
if (mode == SC_FLUSH_FILE)
@@ -2349,7 +2379,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
/* re-enable timer if checkpoint creation was not done */
if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
time_before(jiffies, sci->sc_timer.expires))
add_timer(&sci->sc_timer);
}
@@ -2539,6 +2569,7 @@ static int nilfs_segctor_thread(void *arg)
int timeout = 0;
sci->sc_timer_task = current;
timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
/* start sync. */
sci->sc_task = current;
@@ -2606,6 +2637,7 @@ static int nilfs_segctor_thread(void *arg)
end_thread:
/* end sync. */
sci->sc_task = NULL;
timer_shutdown_sync(&sci->sc_timer);
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
spin_unlock(&sci->sc_state_lock);
return 0;
@@ -2669,7 +2701,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
INIT_LIST_HEAD(&sci->sc_gc_inodes);
INIT_LIST_HEAD(&sci->sc_iput_queue);
INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
@@ -2723,6 +2754,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
/*
* Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
* be called from delayed iput() via nilfs_evict_inode() and can race
* with the above log writer thread termination.
*/
nilfs_segctor_wakeup(sci, 0, true);
if (flush_work(&sci->sc_iput_work))
flag = true;
@@ -2748,7 +2786,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
down_write(&nilfs->ns_segctor_sem);
timer_shutdown_sync(&sci->sc_timer);
kfree(sci);
}
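
Taken together, the nilfs2 changes enforce the classic missed-wakeup discipline: the request sequence number is published and the waiter registered under the same lock the completing side takes. A standalone pthreads sketch of that pattern (illustrative names only, not nilfs2 or kernel API; a condition variable plays the role of the wait queue):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_cv = PTHREAD_COND_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static unsigned long seq_request, seq_done;

/* waiter, in the role of nilfs_segctor_sync() */
static void sync_request(void)
{
	pthread_mutex_lock(&lock);
	/* publish the request and start waiting under one lock, so a
	 * completion can never slip in between the two steps */
	unsigned long my_seq = ++seq_request;

	pthread_cond_signal(&work_cv);	/* like waking sc_wait_daemon */
	while (seq_done < my_seq)
		pthread_cond_wait(&done_cv, &lock);
	pthread_mutex_unlock(&lock);
}

/* log writer, in the role of nilfs_segctor_wakeup() */
static void *log_writer(void *arg)
{
	pthread_mutex_lock(&lock);
	while (seq_done == seq_request)	/* predicate checked under lock */
		pthread_cond_wait(&work_cv, &lock);
	seq_done = seq_request;
	pthread_cond_broadcast(&done_cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, log_writer, NULL);
	sync_request();
	pthread_join(t, NULL);
	puts("synced");
	return 0;
}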

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c

@@ -970,12 +970,17 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
break;
/* Case 1 and 2 above */
if (vma->vm_start >= last_vma_end)
if (vma->vm_start >= last_vma_end) {
smap_gather_stats(vma, &mss, 0);
last_vma_end = vma->vm_end;
continue;
}
/* Case 4 above */
if (vma->vm_end > last_vma_end)
if (vma->vm_end > last_vma_end) {
smap_gather_stats(vma, &mss, last_vma_end);
last_vma_end = vma->vm_end;
}
}
} for_each_vma(vmi, vma);

diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h

@@ -75,17 +75,30 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
__ret; \
})
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#if defined(__SANITIZE_ADDRESS__)
#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
#elif defined(CONFIG_KASAN_GENERIC)
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
#else /* CONFIG_KASAN_SW_TAGS */
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
#endif
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
#else
#if defined(__SANITIZE_MEMORY__)
@@ -110,6 +123,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
#define __underlying_strlen __builtin_strlen
#define __underlying_strncat __builtin_strncat
#define __underlying_strncpy __builtin_strncpy
#endif
/**
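
The whole patch hinges on __RENAME(), which the kernel defines in include/linux/compiler_types.h as __asm__(#x): the declaration keeps its C name while binding to a different linker symbol, which is how the fortified wrappers can now be pointed at the __asan_/__hwasan_ variants. The same mechanism in a standalone userspace sketch (function names here are illustrative, not kernel symbols):

#include <stdio.h>

/* calls to checked_strlen() link against the symbol "real_strlen" */
extern unsigned long checked_strlen(const char *s) __asm__("real_strlen");

/* the definition lives under the renamed symbol */
unsigned long real_strlen(const char *s)
{
	unsigned long n = 0;

	while (s[n])
		n++;
	return n;
}

int main(void)
{
	printf("%lu\n", checked_strlen("fortify"));	/* prints 7 */
	return 0;
}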

diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c

@@ -16,47 +16,60 @@ EXPORT_SYMBOL(_shared_alloc_tag);
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
struct allocinfo_private {
struct codetag_iterator iter;
bool print_header;
};
static void *allocinfo_start(struct seq_file *m, loff_t *pos)
{
struct codetag_iterator *iter;
struct allocinfo_private *priv;
struct codetag *ct;
loff_t node = *pos;
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
m->private = iter;
if (!iter)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
m->private = priv;
if (!priv)
return NULL;
priv->print_header = (node == 0);
codetag_lock_module_list(alloc_tag_cttype, true);
*iter = codetag_get_ct_iter(alloc_tag_cttype);
while ((ct = codetag_next_ct(iter)) != NULL && node)
priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
node--;
return ct ? iter : NULL;
return ct ? priv : NULL;
}
static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
{
struct codetag_iterator *iter = (struct codetag_iterator *)arg;
struct codetag *ct = codetag_next_ct(iter);
struct allocinfo_private *priv = (struct allocinfo_private *)arg;
struct codetag *ct = codetag_next_ct(&priv->iter);
(*pos)++;
if (!ct)
return NULL;
return iter;
return priv;
}
static void allocinfo_stop(struct seq_file *m, void *arg)
{
struct codetag_iterator *iter = (struct codetag_iterator *)m->private;
struct allocinfo_private *priv = (struct allocinfo_private *)m->private;
if (iter) {
if (priv) {
codetag_lock_module_list(alloc_tag_cttype, false);
kfree(iter);
kfree(priv);
}
}
static void print_allocinfo_header(struct seq_buf *buf)
{
/* Output format version, so we can change it. */
seq_buf_printf(buf, "allocinfo - version: 1.0\n");
seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
}
static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
{
struct alloc_tag *tag = ct_to_alloc_tag(ct);
@@ -71,13 +84,17 @@ static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
static int allocinfo_show(struct seq_file *m, void *arg)
{
struct codetag_iterator *iter = (struct codetag_iterator *)arg;
struct allocinfo_private *priv = (struct allocinfo_private *)arg;
char *bufp;
size_t n = seq_get_buf(m, &bufp);
struct seq_buf buf;
seq_buf_init(&buf, bufp, n);
alloc_tag_to_text(&buf, iter->ct);
if (priv->print_header) {
print_allocinfo_header(&buf);
priv->print_header = false;
}
alloc_tag_to_text(&buf, priv->iter.ct);
seq_commit(m, seq_buf_used(&buf));
return 0;
}

diff --git a/mm/ksm.c b/mm/ksm.c

@@ -2153,7 +2153,6 @@ again:
INIT_HLIST_HEAD(&stable_node_dup->hlist);
stable_node_dup->kpfn = kpfn;
folio_set_stable_node(kfolio, stable_node_dup);
stable_node_dup->rmap_hlist_len = 0;
DO_NUMA(stable_node_dup->nid = nid);
if (!need_chain) {
@@ -2172,6 +2171,8 @@ again:
stable_node_chain_add_dup(stable_node_dup, stable_node);
}
folio_set_stable_node(kfolio, stable_node_dup);
return stable_node_dup;
}
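
The fix is an instance of publish-after-init: folio_set_stable_node() now runs only once stable_node_dup is fully initialized and linked into its chain, so a concurrent reader can no longer observe a half-built node. The same discipline in portable C11, as a hedged sketch rather than KSM code:

#include <stdatomic.h>
#include <stdlib.h>

struct node { int key; int nlinks; };

static _Atomic(struct node *) published;

/* writer: finish every field first, then publish with release ordering */
int publish_node(int key)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->key = key;
	n->nlinks = 0;
	atomic_store_explicit(&published, n, memory_order_release);
	return 0;
}

/* reader: the acquire load pairs with the release store above, so any
 * non-NULL result is fully initialized */
struct node *lookup_node(void)
{
	return atomic_load_explicit(&published, memory_order_acquire);
}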

diff --git a/mm/memory-failure.c b/mm/memory-failure.c

@@ -1221,7 +1221,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
* subpages.
*/
folio_put(folio);
if (__page_handle_poison(p) >= 0) {
if (__page_handle_poison(p) > 0) {
page_ref_inc(p);
res = MF_RECOVERED;
} else {
@@ -2091,7 +2091,7 @@ retry:
*/
if (res == 0) {
folio_unlock(folio);
if (__page_handle_poison(p) >= 0) {
if (__page_handle_poison(p) > 0) {
page_ref_inc(p);
res = MF_RECOVERED;
} else {
@@ -2546,6 +2546,13 @@ int unpoison_memory(unsigned long pfn)
goto unlock_mutex;
}
if (is_huge_zero_folio(folio)) {
unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n",
pfn, &unpoison_rs);
ret = -EOPNOTSUPP;
goto unlock_mutex;
}
if (!PageHWPoison(p)) {
unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
pfn, &unpoison_rs);

diff --git a/mm/vmalloc.c b/mm/vmalloc.c

@@ -3498,7 +3498,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
{
unsigned int nr_allocated = 0;
gfp_t alloc_gfp = gfp;
bool nofail = false;
bool nofail = gfp & __GFP_NOFAIL;
struct page *page;
int i;
@@ -3555,12 +3555,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* and compaction etc.
*/
alloc_gfp &= ~__GFP_NOFAIL;
nofail = true;
}
/* High-order pages or fallback path if "bulk" fails. */
while (nr_allocated < nr_pages) {
if (fatal_signal_pending(current))
if (!nofail && fatal_signal_pending(current))
break;
if (nid == NUMA_NO_NODE)
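
With both hunks applied, __GFP_NOFAIL is honored from the first attempt and also disables the fatal-signal bailout, since a NOFAIL allocation has no error path to return through. A userspace caricature of that contract (xmalloc_nofail() is an illustrative name, not a kernel or libc API):

#include <stdlib.h>
#include <sched.h>

void *xmalloc_nofail(size_t size)
{
	void *p;

	/* no early exits: keep retrying instead of ever returning NULL */
	while (!(p = malloc(size)))
		sched_yield();
	return p;
}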

diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c

@@ -82,12 +82,16 @@ int prereq(void)
return -1;
}
int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
unsigned long initial_nr_hugepages)
{
unsigned long nr_hugepages_ul;
int fd, ret = -1;
int compaction_index = 0;
char initial_nr_hugepages[10] = {0};
char nr_hugepages[10] = {0};
char nr_hugepages[20] = {0};
char init_nr_hugepages[20] = {0};
sprintf(init_nr_hugepages, "%lu", initial_nr_hugepages);
/* We want to test with 80% of available memory. Else, OOM killer comes
in to play */
@@ -101,21 +105,6 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
goto out;
}
if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno));
goto close_fd;
}
/* Start with the initial condition of 0 huge pages*/
if (write(fd, "0", sizeof(char)) != sizeof(char)) {
ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno));
goto close_fd;
}
lseek(fd, 0, SEEK_SET);
/* Request a large number of huge pages. The Kernel will allocate
as much as it can */
if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
@@ -134,22 +123,27 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
/* We should have been able to request at least 1/3 rd of the memory in
huge pages */
compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);
nr_hugepages_ul = strtoul(nr_hugepages, NULL, 10);
if (!nr_hugepages_ul) {
ksft_print_msg("ERROR: No memory is available as huge pages\n");
goto close_fd;
}
compaction_index = mem_free/(nr_hugepages_ul * hugepage_size);
lseek(fd, 0, SEEK_SET);
if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
!= strlen(initial_nr_hugepages)) {
if (write(fd, init_nr_hugepages, strlen(init_nr_hugepages))
!= strlen(init_nr_hugepages)) {
ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno));
goto close_fd;
}
ksft_print_msg("Number of huge pages allocated = %d\n",
atoi(nr_hugepages));
ksft_print_msg("Number of huge pages allocated = %lu\n",
nr_hugepages_ul);
if (compaction_index > 3) {
ksft_print_msg("ERROR: Less that 1/%d of memory is available\n"
ksft_print_msg("ERROR: Less than 1/%d of memory is available\n"
"as huge pages\n", compaction_index);
goto close_fd;
}
@@ -163,6 +157,41 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
return ret;
}
int set_zero_hugepages(unsigned long *initial_nr_hugepages)
{
int fd, ret = -1;
char nr_hugepages[20] = {0};
fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
if (fd < 0) {
ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno));
goto out;
}
if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno));
goto close_fd;
}
lseek(fd, 0, SEEK_SET);
/* Start with the initial condition of 0 huge pages */
if (write(fd, "0", sizeof(char)) != sizeof(char)) {
ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno));
goto close_fd;
}
*initial_nr_hugepages = strtoul(nr_hugepages, NULL, 10);
ret = 0;
close_fd:
close(fd);
out:
return ret;
}
int main(int argc, char **argv)
{
@@ -173,6 +202,7 @@ int main(int argc, char **argv)
unsigned long mem_free = 0;
unsigned long hugepage_size = 0;
long mem_fragmentable_MB = 0;
unsigned long initial_nr_hugepages;
ksft_print_header();
@@ -181,6 +211,10 @@ int main(int argc, char **argv)
ksft_set_plan(1);
/* Start the test without hugepages reducing mem_free */
if (set_zero_hugepages(&initial_nr_hugepages))
ksft_exit_fail();
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_MEMLOCK, &lim))
@@ -224,7 +258,8 @@ int main(int argc, char **argv)
entry = entry->next;
}
if (check_compaction(mem_free, hugepage_size) == 0)
if (check_compaction(mem_free, hugepage_size,
initial_nr_hugepages) == 0)
ksft_exit_pass();
ksft_exit_fail();
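
For reference, the pass criterion the test computes: compaction_index = mem_free / (nr_hugepages * hugepage_size), with both sizes in kB as read from /proc/meminfo, and an index above 3 fails because less than a third of free memory ended up backed by huge pages. A standalone restatement with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long mem_free = 8UL * 1024 * 1024;	/* 8 GiB, in kB */
	unsigned long hugepage_size = 2048;		/* 2 MiB, in kB */
	unsigned long nr_hugepages = 1500;		/* allocated by the kernel */
	unsigned long compaction_index =
		mem_free / (nr_hugepages * hugepage_size);

	/* index <= 3  <=>  nr_hugepages * hugepage_size >= mem_free / 3 */
	printf("compaction_index = %lu -> %s\n", compaction_index,
	       compaction_index > 3 ? "FAIL" : "PASS");
	return 0;
}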

diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c

@@ -1,3 +1,4 @@
#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
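
The define works because linux/types.h switches to the ll64 convention when __SANE_USERSPACE_TYPES__ is set, making __u64 an unsigned long long on every architecture, whereas ppc64 otherwise exports it as unsigned long and every %llu format string warns. A hedged standalone illustration (requires installed kernel headers):

#define __SANE_USERSPACE_TYPES__	/* pick ll64 __u64, even on ppc64 */
#include <linux/types.h>
#include <stdio.h>

int main(void)
{
	__u64 pages = 4;

	/* clean under -Wformat; without the define this line would warn
	 * when built on ppc64 */
	printf("%llu pages\n", pages);
	return 0;
}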

diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h

@@ -8,6 +8,7 @@
#define __UFFD_COMMON_H__
#define _GNU_SOURCE
#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <stdio.h>
#include <errno.h>
#include <unistd.h>