Merge tag 'mm-hotfixes-stable-2023-01-16-15-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc hotfixes from Andrew Morton:
 "21 hotfixes. Thirteen of these address pre-6.1 issues and hence have
  the cc:stable tag"

* tag 'mm-hotfixes-stable-2023-01-16-15-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (21 commits)
  init/Kconfig: fix typo (usafe -> unsafe)
  nommu: fix split_vma() map_count error
  nommu: fix do_munmap() error path
  nommu: fix memory leak in do_mmap() error path
  MAINTAINERS: update Robert Foss' email address
  proc: fix PIE proc-empty-vm, proc-pid-vm tests
  mm: update mmap_sem comments to refer to mmap_lock
  include/linux/mm: fix release_pages_arg kernel doc comment
  lib/win_minmax: use /* notation for regular comments
  kasan: mark kasan_kunit_executing as static
  nilfs2: fix general protection fault in nilfs_btree_insert()
  Docs/admin-guide/mm/zswap: remove zsmalloc's lack of writeback warning
  mm/hugetlb: pre-allocate pgtable pages for uffd wr-protects
  hugetlb: unshare some PMDs when splitting VMAs
  mm: fix vma->anon_name memory leak for anonymous shmem VMAs
  mm/shmem: restore SHMEM_HUGE_DENY precedence over MADV_COLLAPSE
  mm/MADV_COLLAPSE: don't expand collapse when vm_end is past requested end
  mm/userfaultfd: enable writenotify while userfaultfd-wp is enabled for a VMA
  mm/khugepaged: fix collapse_pte_mapped_thp() to allow anon_vma
  mm/hugetlb: fix uffd-wp handling for migration entries in hugetlb_change_protection()
  ...
Commit 6e50979a9c by Linus Torvalds, 2023-01-16 16:36:39 -08:00
20 changed files with 146 additions and 88 deletions

.mailmap

@@ -371,6 +371,7 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>

Documentation/admin-guide/mm/zswap.rst

@@ -70,9 +70,7 @@ e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs
The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which
means the compression ratio will always be 2:1 or worse (because of half-full
zbud pages). The zsmalloc type zpool has a more complex compressed page
storage method, and it can achieve greater storage densities. However,
zsmalloc does not implement compressed page eviction, so once zswap fills it
cannot evict the oldest page, it can only reject new pages.
storage method, and it can achieve greater storage densities.
When a swap page is passed from frontswap to zswap, zswap maintains a mapping
of the swap entry, a combination of the swap type and swap offset, to the zpool

MAINTAINERS

@@ -6948,7 +6948,7 @@ F: drivers/gpu/drm/atmel-hlcdc/
DRM DRIVERS FOR BRIDGE CHIPS
M: Andrzej Hajda <andrzej.hajda@intel.com>
M: Neil Armstrong <neil.armstrong@linaro.org>
M: Robert Foss <robert.foss@linaro.org>
M: Robert Foss <rfoss@kernel.org>
R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
R: Jonas Karlman <jonas@kwiboo.se>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
@@ -17238,7 +17238,7 @@ F: Documentation/devicetree/bindings/net/qcom,bam-dmux.yaml
F: drivers/net/wwan/qcom_bam_dmux.c
QUALCOMM CAMERA SUBSYSTEM DRIVER
M: Robert Foss <robert.foss@linaro.org>
M: Robert Foss <rfoss@kernel.org>
M: Todor Tomov <todor.too@gmail.com>
L: linux-media@vger.kernel.org
S: Maintained
@@ -17318,7 +17318,7 @@ F: drivers/dma/qcom/hidma*
QUALCOMM I2C CCI DRIVER
M: Loic Poulain <loic.poulain@linaro.org>
M: Robert Foss <robert.foss@linaro.org>
M: Robert Foss <rfoss@kernel.org>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained

fs/nilfs2/btree.c

@@ -480,9 +480,18 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, &bh,
&submit_ptr);
if (ret) {
if (ret != -EEXIST)
return ret;
if (likely(ret == -EEXIST))
goto out_check;
if (ret == -ENOENT) {
/*
* Block address translation failed due to invalid
* value of 'ptr'. In this case, return internal code
* -EINVAL (broken bmap) to notify bmap layer of fatal
* metadata corruption.
*/
ret = -EINVAL;
}
return ret;
}
if (ra) {

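The pattern here generalizes: the lower layer's lookup miss (-ENOENT) is folded into a single "broken metadata" code (-EINVAL), so an untranslatable block address is treated as fatal corruption rather than a routine cache miss. A minimal userspace sketch of that translation, with hypothetical function names:

#include <errno.h>
#include <stdio.h>

/* Hypothetical lower layer: -EEXIST means the block is already cached,
 * -ENOENT means the address could not be translated at all. */
static int submit_block(unsigned long ptr)
{
	return ptr ? -EEXIST : -ENOENT;
}

static int get_block(unsigned long ptr)
{
	int ret = submit_block(ptr);

	if (ret) {
		if (ret == -EEXIST)
			return 0;	/* cached copy is usable */
		if (ret == -ENOENT)
			ret = -EINVAL;	/* invalid 'ptr': broken bmap */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", get_block(0));	/* prints -22 (-EINVAL), not -2 */
	return 0;
}
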
fs/userfaultfd.c

@@ -108,6 +108,21 @@ static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
return ctx->features & UFFD_FEATURE_INITIALIZED;
}
static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
vm_flags_t flags)
{
const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
vma->vm_flags = flags;
/*
* For shared mappings, we want to enable writenotify while
* userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
* recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
*/
if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
vma_set_page_prot(vma);
}
static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
int wake_flags, void *key)
{
@@ -618,7 +633,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
for_each_vma(vmi, vma) {
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~__VM_UFFD_FLAGS;
userfaultfd_set_vm_flags(vma,
vma->vm_flags & ~__VM_UFFD_FLAGS);
}
}
mmap_write_unlock(mm);
@@ -652,7 +668,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
octx = vma->vm_userfaultfd_ctx.ctx;
if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~__VM_UFFD_FLAGS;
userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
return 0;
}
@@ -733,7 +749,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
} else {
/* Drop uffd context if remap feature not enabled */
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~__VM_UFFD_FLAGS;
userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
}
}
@@ -895,7 +911,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
prev = vma;
}
vma->vm_flags = new_flags;
userfaultfd_set_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
mmap_write_unlock(mm);
@@ -1463,7 +1479,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
vma->vm_flags = new_flags;
userfaultfd_set_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx.ctx = ctx;
if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
@@ -1651,7 +1667,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
vma->vm_flags = new_flags;
userfaultfd_set_vm_flags(vma, new_flags);
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
skip:

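The helper's XOR test is the heart of this fix: it detects a transition of the VM_UFFD_WP bit, and only then, for shared mappings, recomputes the page protection so write-notify takes effect. A standalone sketch of that bit trick, with illustrative flag values rather than the kernel's:

#include <stdio.h>

#define VM_SHARED	0x8UL		/* illustrative values only */
#define VM_UFFD_WP	0x1000UL

static int uffd_wp_changed(unsigned long old_flags, unsigned long new_flags)
{
	/* XOR leaves only the bits that differ; the mask picks out VM_UFFD_WP. */
	return !!((old_flags ^ new_flags) & VM_UFFD_WP);
}

int main(void)
{
	unsigned long old = VM_SHARED;
	unsigned long new = VM_SHARED | VM_UFFD_WP;

	if ((new & VM_SHARED) && uffd_wp_changed(old, new))
		printf("recalculate vm_page_prot\n");
	return 0;
}
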
include/linux/mm.h

@@ -1270,10 +1270,10 @@ static inline void folio_put_refs(struct folio *folio, int refs)
__folio_put(folio);
}
/**
* release_pages - release an array of pages or folios
/*
* union release_pages_arg - an array of pages or folios
*
* This just releases a simple array of multiple pages, and
* release_pages() releases a simple array of multiple pages, and
* accepts various different forms of said page array: either
* a regular old boring array of pages, an array of folios, or
* an array of encoded page pointers.

include/linux/mm_inline.h

@@ -413,7 +413,6 @@ static inline void free_anon_vma_name(struct vm_area_struct *vma)
* Not using anon_vma_name because it generates a warning if mmap_lock
* is not held, which might be the case here.
*/
if (!vma->vm_file)
anon_vma_name_put(vma->anon_name);
}

include/linux/mm_types.h

@@ -581,7 +581,7 @@ struct vm_area_struct {
/*
* For private and shared anonymous mappings, a pointer to a null
* terminated string containing the name given to the vma, or NULL if
* unnamed. Serialized by mmap_sem. Use anon_vma_name to access.
* unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
*/
struct anon_vma_name *anon_name;
#endif

include/linux/page_ref.h

@@ -301,7 +301,7 @@ static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
*
* You can also use this function if you're holding a lock that prevents
* pages being frozen & removed; eg the i_pages lock for the page cache
* or the mmap_sem or page table lock for page tables. In this case,
* or the mmap_lock or page table lock for page tables. In this case,
* it will always succeed, and you could have used a plain folio_get(),
* but it's sometimes more convenient to have a common function called
* from both locked and RCU-protected contexts.

init/Kconfig

@@ -776,7 +776,7 @@ config PRINTK_SAFE_LOG_BUF_SHIFT
depends on PRINTK
help
Select the size of an alternate printk per-CPU buffer where messages
printed from usafe contexts are temporary stored. One example would
printed from unsafe contexts are temporary stored. One example would
be NMI messages, another one - printk recursion. The messages are
copied to the main log buffer in a safe context to avoid a deadlock.
The value defines the size as a power of 2.

lib/win_minmax.c

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/**
/*
* lib/minmax.c: windowed min/max tracker
*
* Kathleen Nichols' algorithm for tracking the minimum (or maximum)

mm/hugetlb.c

@@ -94,6 +94,8 @@ static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
@@ -1181,7 +1183,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
/*
* Reset and decrement one ref on hugepage private reservation.
* Called with mm->mmap_sem writer semaphore held.
* Called with mm->mmap_lock writer semaphore held.
* This function should be only used by move_vma() and operate on
* same sized vma. It should never come here with last ref on the
* reservation.
@@ -4834,6 +4836,25 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
if (addr & ~(huge_page_mask(hstate_vma(vma))))
return -EINVAL;
/*
* PMD sharing is only possible for PUD_SIZE-aligned address ranges
* in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
* split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
*/
if (addr & ~PUD_MASK) {
/*
* hugetlb_vm_op_split is called right before we attempt to
* split the VMA. We will need to unshare PMDs in the old and
* new VMAs, so let's unshare before we split.
*/
unsigned long floor = addr & PUD_MASK;
unsigned long ceil = floor + PUD_SIZE;
if (floor >= vma->vm_start && ceil <= vma->vm_end)
hugetlb_unshare_pmds(vma, floor, ceil);
}
return 0;
}
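
A worked example of the split hook's new arithmetic, assuming the x86-64 geometry where PUD_SIZE is 1 GiB: a split at a non-PUD-aligned address makes the surrounding 1 GiB interval ineligible for PMD sharing, so any PMDs shared there must be unshared first.

#include <stdio.h>

#define PUD_SIZE	(1UL << 30)	/* 1 GiB on x86-64 */
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	unsigned long addr = (1UL << 30) + (2UL << 20);	/* 1 GiB + 2 MiB */

	if (addr & ~PUD_MASK) {		/* split point not PUD-aligned */
		unsigned long floor = addr & PUD_MASK;
		unsigned long ceil = floor + PUD_SIZE;

		/* prints: unshare PMDs in [0x40000000, 0x80000000) */
		printf("unshare PMDs in [%#lx, %#lx)\n", floor, ceil);
	}
	return 0;
}
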
@@ -5131,7 +5152,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
/*
* We don't have to worry about the ordering of src and dst ptlocks
* because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
* because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
*/
if (src_ptl != dst_ptl)
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
@@ -6639,9 +6660,18 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
spinlock_t *ptl;
ptep = huge_pte_offset(mm, address, psize);
if (!ptep) {
if (!uffd_wp) {
address |= last_addr_mask;
continue;
}
/*
* Userfaultfd wr-protect requires pgtable
* pre-allocations to install pte markers.
*/
ptep = huge_pte_alloc(mm, vma, address, psize);
if (!ptep)
break;
}
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, vma, address, ptep)) {
/*
@@ -6658,16 +6688,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
}
pte = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
spin_unlock(ptl);
continue;
}
if (unlikely(is_hugetlb_entry_migration(pte))) {
/* Nothing to do. */
} else if (unlikely(is_hugetlb_entry_migration(pte))) {
swp_entry_t entry = pte_to_swp_entry(pte);
struct page *page = pfn_swap_entry_to_page(entry);
pte_t newpte = pte;
if (!is_readable_migration_entry(entry)) {
pte_t newpte;
if (is_writable_migration_entry(entry)) {
if (PageAnon(page))
entry = make_readable_exclusive_migration_entry(
swp_offset(entry));
@@ -6675,25 +6702,22 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
entry = make_readable_migration_entry(
swp_offset(entry));
newpte = swp_entry_to_pte(entry);
pages++;
}
if (uffd_wp)
newpte = pte_swp_mkuffd_wp(newpte);
else if (uffd_wp_resolve)
newpte = pte_swp_clear_uffd_wp(newpte);
if (!pte_same(pte, newpte))
set_huge_pte_at(mm, address, ptep, newpte);
pages++;
}
spin_unlock(ptl);
continue;
}
if (unlikely(pte_marker_uffd_wp(pte))) {
/*
* This is changing a non-present pte into a none pte,
* no need for huge_ptep_modify_prot_start/commit().
*/
} else if (unlikely(is_pte_marker(pte))) {
/* No other markers apply for now. */
WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
if (uffd_wp_resolve)
/* Safe to modify directly (non-present->none). */
huge_pte_clear(mm, address, ptep, psize);
}
if (!huge_pte_none(pte)) {
} else if (!huge_pte_none(pte)) {
pte_t old_pte;
unsigned int shift = huge_page_shift(hstate_vma(vma));
@@ -7328,26 +7352,21 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
}
}
/*
* This function will unconditionally remove all the shared pmd pgtable entries
* within the specific vma for a hugetlbfs memory range.
*/
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mm_struct *mm = vma->vm_mm;
struct mmu_notifier_range range;
unsigned long address, start, end;
unsigned long address;
spinlock_t *ptl;
pte_t *ptep;
if (!(vma->vm_flags & VM_MAYSHARE))
return;
start = ALIGN(vma->vm_start, PUD_SIZE);
end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
if (start >= end)
return;
@@ -7379,6 +7398,16 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
mmu_notifier_invalidate_range_end(&range);
}
/*
* This function will unconditionally remove all the shared pmd pgtable entries
* within the specific vma for a hugetlbfs memory range.
*/
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}
#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

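The retained wrapper reduces to two alignment operations. A self-contained sketch of the ALIGN()/ALIGN_DOWN() arithmetic it relies on, again assuming a 1 GiB PUD:

#include <assert.h>

/* Round up / down to a power-of-two boundary, as the kernel's
 * ALIGN()/ALIGN_DOWN() macros do. */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long pud = 1UL << 30;

	/* Rounding vm_start up and vm_end down yields the PUD-aligned
	 * sub-range where PMDs can be shared; for a VMA smaller than
	 * that, start >= end and the helper returns early. */
	assert(ALIGN(0x40200000UL, pud) == 0x80000000UL);
	assert(ALIGN_DOWN(0xbff00000UL, pud) == 0x80000000UL);
	return 0;
}
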
mm/kasan/report.c

@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
* Whether the KASAN KUnit test suite is currently being executed.
* Updated in kasan_test.c.
*/
bool kasan_kunit_executing;
static bool kasan_kunit_executing;
void kasan_kunit_test_suite_start(void)
{

mm/khugepaged.c

@@ -1460,14 +1460,6 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
return SCAN_VMA_CHECK;
/*
* Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
* that got written to. Without this, we'd have to also lock the
* anon_vma if one exists.
*/
if (vma->anon_vma)
return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
if (userfaultfd_wp(vma))
return SCAN_PTE_UFFD_WP;
@@ -1567,8 +1559,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
}
/* step 4: remove pte entries */
/* we make no change to anon, but protect concurrent anon page lookup */
if (vma->anon_vma)
anon_vma_lock_write(vma->anon_vma);
collapse_and_free_pmd(mm, vma, haddr, pmd);
if (vma->anon_vma)
anon_vma_unlock_write(vma->anon_vma);
i_mmap_unlock_write(vma->vm_file->f_mapping);
maybe_install_pmd:
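
Instead of refusing VMAs that have an anon_vma (the deleted early check above), the collapse now serializes against concurrent anonymous page lookups by write-locking the anon_vma around the PMD teardown. A loose userspace analogue of that conditional lock pattern, using a pthread rwlock as a stand-in for the anon_vma lock:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t anon_vma_lock = PTHREAD_RWLOCK_INITIALIZER;
static int have_anon_vma = 1;	/* stand-in for vma->anon_vma */

static void collapse_and_free_pmd(void)
{
	puts("collapsing pte-mapped THP");
}

int main(void)
{
	/* The write lock excludes concurrent readers for the duration
	 * of the collapse; skip it when there is no anon_vma at all. */
	if (have_anon_vma)
		pthread_rwlock_wrlock(&anon_vma_lock);
	collapse_and_free_pmd();
	if (have_anon_vma)
		pthread_rwlock_unlock(&anon_vma_lock);
	return 0;
}
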
@@ -2649,7 +2647,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
goto out_nolock;
}
hend = vma->vm_end & HPAGE_PMD_MASK;
hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
}
mmap_assert_locked(mm);
memset(cc->node_load, 0, sizeof(cc->node_load));

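The min() clamp fixes an off-the-end collapse: hend was recomputed from the VMA's end, discarding the caller's already-rounded requested end. A worked example with a hypothetical 10 MiB VMA and a 6 MiB request:

#include <stdio.h>

#define HPAGE_PMD_SIZE	(2UL << 20)	/* 2 MiB */
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long req_end = 6UL << 20;	/* caller asked up to 6 MiB */
	unsigned long vm_end = 10UL << 20;	/* the VMA extends to 10 MiB */
	unsigned long hend = req_end & HPAGE_PMD_MASK;

	/* The old code overwrote hend with the VMA bound, collapsing
	 * huge pages the caller never asked for. */
	hend = min(hend, vm_end & HPAGE_PMD_MASK);
	printf("collapse up to %#lx\n", hend);	/* 0x600000, not 0xa00000 */
	return 0;
}
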
mm/madvise.c

@@ -130,7 +130,7 @@ static int replace_anon_vma_name(struct vm_area_struct *vma,
#endif /* CONFIG_ANON_VMA_NAME */
/*
* Update the vm_flags on region of a vma, splitting it or merging it as
* necessary. Must be called with mmap_sem held for writing;
* necessary. Must be called with mmap_lock held for writing;
* Caller should ensure anon_name stability by raising its refcount even when
* anon_name belongs to a valid vma because this function might free that vma.
*/

mm/mmap.c

@@ -1524,6 +1524,10 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
return 1;
/* Do we need write faults for uffd-wp tracking? */
if (userfaultfd_wp(vma))
return 1;
/* Specialty mapping? */
if (vm_flags & VM_PFNMAP)
return 0;
@@ -2290,7 +2294,7 @@ static inline int munmap_sidetree(struct vm_area_struct *vma,
* @start: The aligned start address to munmap.
* @end: The aligned end address to munmap.
* @uf: The userfaultfd list_head
* @downgrade: Set to true to attempt a write downgrade of the mmap_sem
* @downgrade: Set to true to attempt a write downgrade of the mmap_lock
*
* If @downgrade is true, check return code for potential release of the lock.
*/
@@ -2465,7 +2469,7 @@ map_count_exceeded:
* @len: The length of the range to munmap
* @uf: The userfaultfd list_head
* @downgrade: set to true if the user wants to attempt to write_downgrade the
* mmap_sem
* mmap_lock
*
* This function takes a @mas that is either pointing to the previous VMA or set
* to MA_START and sets it up to remove the mapping(s). The @len will be

mm/nommu.c

@@ -559,7 +559,6 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
{
mm->map_count++;
vma->vm_mm = mm;
/* add the VMA to the mapping */
@@ -587,6 +586,7 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
BUG_ON(!vma->vm_region);
setup_vma_to_mm(vma, mm);
mm->map_count++;
/* add the VMA to the tree */
vma_mas_store(vma, mas);
@@ -1240,6 +1240,7 @@ share:
error_just_free:
up_write(&nommu_region_sem);
error:
mas_destroy(&mas);
if (region->vm_file)
fput(region->vm_file);
kmem_cache_free(vm_region_jar, region);
@@ -1250,7 +1251,6 @@ error:
sharing_violation:
up_write(&nommu_region_sem);
mas_destroy(&mas);
pr_warn("Attempt to share mismatched mappings\n");
ret = -EINVAL;
goto error;
@@ -1347,6 +1347,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
if (vma->vm_file)
return -ENOMEM;
mm = vma->vm_mm;
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
@@ -1398,6 +1399,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
mas_store(&mas, vma);
vma_mas_store(new, &mas);
mm->map_count++;
return 0;
err_mas_preallocate:
@@ -1509,6 +1511,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
erase_whole_vma:
if (delete_vma_from_mm(vma))
ret = -ENOMEM;
else
delete_vma(mm, vma);
return ret;
}

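The map_count churn above has one theme: the increment moves out of the shared setup helper and next to each tree insertion, because split_vma() reuses the helper for a VMA that is already counted. A toy model of the accounting, with simplified stand-in functions:

#include <assert.h>

struct mm { int map_count; };

/* Links a VMA to the mm; deliberately does not count it, because it
 * also runs for VMAs that are already in the tree. */
static void setup_vma_to_mm(struct mm *mm) { (void)mm; }

static void add_vma_to_mm(struct mm *mm)
{
	setup_vma_to_mm(mm);
	mm->map_count++;		/* one insertion, one increment */
}

static void split_vma(struct mm *mm)
{
	setup_vma_to_mm(mm);		/* re-link the shrunk old VMA */
	mm->map_count++;		/* only the new half is counted */
}

int main(void)
{
	struct mm mm = { 0 };

	add_vma_to_mm(&mm);
	split_vma(&mm);
	assert(mm.map_count == 2);	/* two VMAs, not three */
	return 0;
}
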
mm/shmem.c

@@ -478,12 +478,10 @@ bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
return false;
if (shmem_huge_force)
return true;
if (shmem_huge == SHMEM_HUGE_FORCE)
return true;
if (shmem_huge == SHMEM_HUGE_DENY)
return false;
if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
return true;
switch (SHMEM_SB(inode->i_sb)->huge) {
case SHMEM_HUGE_ALWAYS:

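The reordering is the entire fix: SHMEM_HUGE_DENY is now tested before either force condition, so a global deny beats MADV_COLLAPSE's shmem_huge_force. A condensed sketch of the decision order, omitting the per-mount cases:

#include <stdbool.h>
#include <stdio.h>

#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

static bool shmem_is_huge(int shmem_huge, bool shmem_huge_force)
{
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;	/* checked first: deny always wins */
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;
	return false;		/* the kernel falls through to mount policy */
}

int main(void)
{
	/* MADV_COLLAPSE (force) versus a global deny: deny wins. */
	printf("%d\n", shmem_is_huge(SHMEM_HUGE_DENY, true));	/* 0 */
	return 0;
}
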
tools/testing/selftests/proc/proc-empty-vm.c

@@ -25,6 +25,7 @@
#undef NDEBUG
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -41,7 +42,7 @@
* 1: vsyscall VMA is --xp vsyscall=xonly
* 2: vsyscall VMA is r-xp vsyscall=emulate
*/
static int g_vsyscall;
static volatile int g_vsyscall;
static const char *g_proc_pid_maps_vsyscall;
static const char *g_proc_pid_smaps_vsyscall;
@@ -147,11 +148,12 @@ static void vsyscall(void)
g_vsyscall = 0;
/* gettimeofday(NULL, NULL); */
uint64_t rax = 0xffffffffff600000;
asm volatile (
"call %P0"
:
: "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
: "rax", "rcx", "r11"
"call *%[rax]"
: [rax] "+a" (rax)
: "D" (NULL), "S" (NULL)
: "rcx", "r11"
);
g_vsyscall = 1;

tools/testing/selftests/proc/proc-pid-vm.c

@@ -257,11 +257,12 @@ static void vsyscall(void)
g_vsyscall = 0;
/* gettimeofday(NULL, NULL); */
uint64_t rax = 0xffffffffff600000;
asm volatile (
"call %P0"
:
: "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
: "rax", "rcx", "r11"
"call *%[rax]"
: [rax] "+a" (rax)
: "D" (NULL), "S" (NULL)
: "rcx", "r11"
);
g_vsyscall = 1;
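
Both tests get the same treatment because "call %P0" with an immediate operand emits a rel32 direct call, which cannot reach the fixed vsyscall address 0xffffffffff600000 once the test binary is built as a position-independent executable; calling through a register works at any load address. A standalone version of the fixed sequence (x86-64 only; it faults unless the kernel runs with vsyscall=emulate or vsyscall=xonly):

/* gcc -pie -fPIE call_vsyscall.c */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* gettimeofday(NULL, NULL) via the legacy vsyscall page. */
	uint64_t rax = 0xffffffffff600000;

	asm volatile (
		"call *%[rax]"		/* indirect call: PIE-safe */
		: [rax] "+a" (rax)
		: "D" (NULL), "S" (NULL)
		: "rcx", "r11"
	);
	printf("vsyscall returned %llu\n", (unsigned long long)rax);
	return 0;
}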