xen: branch for v6.12-rc1

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCZuu+BAAKCRCAXGG7T9hj
vs3bAP4mp0NnxnDbvPObWoPKmLk5OvHdfY9cV+/M+r/UObfyswD+OYaZH0hVCHP6
L96RzSHE+Q1pKPNpQfMOPcCDFmO3wwI=
=cN0H
-----END PGP SIGNATURE-----

Merge tag 'for-linus-6.12-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - fix a boot problem as a Xen dom0 on some AMD systems

 - fix Xen PVH boot problems with KASAN enabled

 - fix for a build warning

 - fixes to swiotlb-xen

* tag 'for-linus-6.12-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/swiotlb: fix allocated size
  xen/swiotlb: add alignment check for dma buffers
  xen/pci: Avoid -Wflex-array-member-not-at-end warning
  xen/xenbus: Convert to use ERR_CAST()
  xen, pvh: fix unbootable VMs by inlining memset() in xen_prepare_pvh()
  x86/cpu: fix unbootable VMs by inlining memcmp() in hypervisor_cpuid_base()
  xen, pvh: fix unbootable VMs (PVH + KASAN - AMD_MEM_ENCRYPT)
  xen: tolerate ACPI NVS memory overlapping with Xen allocated memory
  xen: allow mapping ACPI data using a different physical address
  xen: add capability to remap non-RAM pages to different PFNs
  xen: move max_pfn in xen_memory_setup() out of function scope
  xen: move checks for e820 conflicts further up
  xen: introduce generic helper checking for memory map conflicts
  xen: use correct end address of kernel for conflict checking
commit 19a519ca87

arch/x86/include/asm/acpi.h
@@ -174,6 +174,14 @@ void acpi_generic_reduced_hw_init(void);
 void x86_default_set_root_pointer(u64 addr);
 u64 x86_default_get_root_pointer(void);
 
+#ifdef CONFIG_XEN_PV
+/* A Xen PV domain needs a special acpi_os_ioremap() handling. */
+extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys,
+					 acpi_size size);
+void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+#define acpi_os_ioremap acpi_os_ioremap
+#endif
+
 #else /* !CONFIG_ACPI */
 
 #define acpi_lapic 0
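
A note on the self-referential define above: the generic ACPI code only provides its own acpi_os_ioremap() when no architecture has claimed the name, so defining the macro to itself is what disables the fallback. Quoted from include/acpi/acpi_io.h from memory, so treat the exact wording as a sketch:

#ifndef acpi_os_ioremap
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
					    acpi_size size)
{
	return ioremap_cache(phys, size);
}
#endif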

arch/x86/include/asm/cpuid.h
@@ -196,7 +196,12 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
 	for_each_possible_hypervisor_cpuid_base(base) {
 		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
 
-		if (!memcmp(sig, signature, 12) &&
+		/*
+		 * This must not compile to "call memcmp" because it's called
+		 * from PVH early boot code before instrumentation is set up
+		 * and memcmp() itself may be instrumented.
+		 */
+		if (!__builtin_memcmp(sig, signature, 12) &&
 		    (leaves == 0 || ((eax - base) >= leaves)))
 			return base;
 	}
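
The point of __builtin_memcmp() here is that a compare with a small constant length is expanded inline by GCC/Clang rather than compiled to a call into an (instrumentable) memcmp(). A minimal user-space sketch of the same pattern, not taken from this patch:

#include <stdio.h>

static int sig_matches(const char *sig, const unsigned int sig_regs[3])
{
	/* 12 bytes = the three 4-byte CPUID signature registers. */
	return __builtin_memcmp(sig, sig_regs, 12) == 0;
}

int main(void)
{
	/* "XenVMMXenVMM" packed little-endian into ebx/ecx/edx. */
	const unsigned int regs[3] = { 0x566e6558, 0x65584d4d, 0x4d4d566e };

	printf("%d\n", sig_matches("XenVMMXenVMM", regs));	/* prints 1 */
	return 0;
}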

arch/x86/kernel/acpi/boot.c
@@ -1778,3 +1778,14 @@ u64 x86_default_get_root_pointer(void)
 {
 	return boot_params.acpi_rsdp_addr;
 }
+
+#ifdef CONFIG_XEN_PV
+void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+	return ioremap_cache(phys, size);
+}
+
+void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) =
+	x86_acpi_os_ioremap;
+EXPORT_SYMBOL_GPL(acpi_os_ioremap);
+#endif

arch/x86/kernel/jailhouse.c
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/reboot.h>
 #include <linux/serial_8250.h>
+#include <linux/acpi.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/acpi.h>

arch/x86/kernel/mmconf-fam10h_64.c
@@ -9,6 +9,7 @@
 #include <linux/pci.h>
 #include <linux/dmi.h>
 #include <linux/range.h>
+#include <linux/acpi.h>
 
 #include <asm/pci-direct.h>
 #include <linux/sort.h>

arch/x86/kernel/smpboot.c
@@ -60,6 +60,7 @@
 #include <linux/stackprotector.h>
 #include <linux/cpuhotplug.h>
 #include <linux/mc146818rtc.h>
+#include <linux/acpi.h>
 
 #include <asm/acpi.h>
 #include <asm/cacheinfo.h>

arch/x86/kernel/x86_init.c
@@ -8,6 +8,7 @@
 #include <linux/ioport.h>
 #include <linux/export.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
 
 #include <asm/acpi.h>
 #include <asm/bios_ebda.h>

arch/x86/platform/pvh/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 OBJECT_FILES_NON_STANDARD_head.o := y
+KASAN_SANITIZE := n
 
 obj-$(CONFIG_PVH) += enlighten.o
 obj-$(CONFIG_PVH) += head.o

arch/x86/platform/pvh/enlighten.c
@@ -130,7 +130,11 @@ void __init xen_prepare_pvh(void)
 		BUG();
 	}
 
-	memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
+	/*
+	 * This must not compile to "call memset" because memset() may be
+	 * instrumented.
+	 */
+	__builtin_memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
 
 	hypervisor_specific_init(xen_guest);
 

arch/x86/xen/mmu_pv.c
@@ -2018,10 +2018,7 @@ void __init xen_reserve_special_pages(void)
 
 void __init xen_pt_check_e820(void)
 {
-	if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
-		xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
-		BUG();
-	}
+	xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
 }
 
 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

arch/x86/xen/p2m.c
@@ -70,6 +70,7 @@
 #include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/acpi.h>
 
 #include <asm/cache.h>
 #include <asm/setup.h>
@@ -80,6 +81,7 @@
 #include <asm/xen/hypervisor.h>
 #include <xen/balloon.h>
 #include <xen/grant_table.h>
+#include <xen/hvc-console.h>
 
 #include "xen-ops.h"
 
@@ -792,6 +794,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 	return ret;
 }
 
+/* Remapped non-RAM areas */
+#define NR_NONRAM_REMAP 4
+static struct nonram_remap {
+	phys_addr_t maddr;
+	phys_addr_t paddr;
+	size_t size;
+} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init;
+static unsigned int nr_nonram_remap __ro_after_init;
+
+/*
+ * Do the real remapping of non-RAM regions as specified in the
+ * xen_nonram_remap[] array.
+ * In case of an error just crash the system.
+ */
+void __init xen_do_remap_nonram(void)
+{
+	unsigned int i;
+	unsigned int remapped = 0;
+	const struct nonram_remap *remap = xen_nonram_remap;
+	unsigned long pfn, mfn, end_pfn;
+
+	for (i = 0; i < nr_nonram_remap; i++) {
+		end_pfn = PFN_UP(remap->paddr + remap->size);
+		pfn = PFN_DOWN(remap->paddr);
+		mfn = PFN_DOWN(remap->maddr);
+		while (pfn < end_pfn) {
+			if (!set_phys_to_machine(pfn, mfn))
+				panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
+				      pfn, mfn);
+
+			pfn++;
+			mfn++;
+			remapped++;
+		}
+
+		remap++;
+	}
+
+	pr_info("Remapped %u non-RAM page(s)\n", remapped);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM
+ * regions into account.
+ * Any attempt to map an area crossing a remap boundary will produce a
+ * WARN() splat.
+ * phys is related to remap->maddr on input and will be rebased to remap->paddr.
+ */
+static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys,
+					 acpi_size size)
+{
+	unsigned int i;
+	const struct nonram_remap *remap = xen_nonram_remap;
+
+	for (i = 0; i < nr_nonram_remap; i++) {
+		if (phys + size > remap->maddr &&
+		    phys < remap->maddr + remap->size) {
+			WARN_ON(phys < remap->maddr ||
+				phys + size > remap->maddr + remap->size);
+			phys += remap->paddr - remap->maddr;
+			break;
+		}
+	}
+
+	return x86_acpi_os_ioremap(phys, size);
+}
+#endif /* CONFIG_ACPI */
+
+/*
+ * Add a new non-RAM remap entry.
+ * In case of no free entry found, just crash the system.
+ */
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+				 unsigned long size)
+{
+	BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK));
+
+	if (nr_nonram_remap == NR_NONRAM_REMAP) {
+		xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n");
+		BUG();
+	}
+
+#ifdef CONFIG_ACPI
+	/* Switch to the Xen acpi_os_ioremap() variant. */
+	if (nr_nonram_remap == 0)
+		acpi_os_ioremap = xen_acpi_os_ioremap;
+#endif
+
+	xen_nonram_remap[nr_nonram_remap].maddr = maddr;
+	xen_nonram_remap[nr_nonram_remap].paddr = paddr;
+	xen_nonram_remap[nr_nonram_remap].size = size;
+
+	nr_nonram_remap++;
+}
+
 #ifdef CONFIG_XEN_DEBUG_FS
 #include <linux/debugfs.h>
 static int p2m_dump_show(struct seq_file *m, void *v)
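
To make the new bookkeeping easier to follow, here is a hedged user-space model of the remap table and the P2M walk above; types are simplified and set_phys_to_machine() is stubbed to print instead of touching a real P2M:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define MODEL_PAGE_SHIFT 12
#define NR_NONRAM_REMAP  4

typedef unsigned long long phys_addr_t;

static struct nonram_remap {
	phys_addr_t maddr;	/* machine address of the original location */
	phys_addr_t paddr;	/* guest physical address it moves to */
	size_t size;
} nonram_remap[NR_NONRAM_REMAP];
static unsigned int nr_nonram_remap;

static bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	printf("p2m[%#lx] = %#lx\n", pfn, mfn);	/* stand-in for the real p2m */
	return true;
}

static void do_remap_nonram(void)
{
	for (unsigned int i = 0; i < nr_nonram_remap; i++) {
		const struct nonram_remap *r = &nonram_remap[i];
		unsigned long pfn = r->paddr >> MODEL_PAGE_SHIFT;
		unsigned long mfn = r->maddr >> MODEL_PAGE_SHIFT;
		unsigned long end_pfn = (r->paddr + r->size +
					 (1UL << MODEL_PAGE_SHIFT) - 1) >> MODEL_PAGE_SHIFT;

		while (pfn < end_pfn)
			set_phys_to_machine(pfn++, mfn++);
	}
}

int main(void)
{
	/* One 8 KiB chunk moved from machine 1 MiB to guest physical 2 MiB. */
	nonram_remap[nr_nonram_remap++] = (struct nonram_remap){
		.maddr = 0x100000, .paddr = 0x200000, .size = 0x2000,
	};
	do_remap_nonram();
	return 0;
}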

arch/x86/xen/setup.c
@@ -15,12 +15,12 @@
 #include <linux/cpuidle.h>
 #include <linux/cpufreq.h>
 #include <linux/memory_hotplug.h>
+#include <linux/acpi.h>
 
 #include <asm/elf.h>
 #include <asm/vdso.h>
 #include <asm/e820/api.h>
 #include <asm/setup.h>
-#include <asm/acpi.h>
 #include <asm/numa.h>
 #include <asm/idtentry.h>
 #include <asm/xen/hypervisor.h>
@@ -46,6 +46,9 @@ bool xen_pv_pci_possible;
 /* E820 map used during setting up memory. */
 static struct e820_table xen_e820_table __initdata;
 
+/* Number of initially usable memory pages. */
+static unsigned long ini_nr_pages __initdata;
+
 /*
  * Buffer used to remap identity mapped pages. We only need the virtual space.
  * The physical page behind this address is remapped as needed to different
@@ -212,7 +215,7 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-			unsigned long end_pfn, unsigned long nr_pages)
+			unsigned long end_pfn)
 {
 	unsigned long pfn, end;
 	int ret;
@@ -220,7 +223,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
 	WARN_ON(start_pfn > end_pfn);
 
 	/* Release pages first. */
-	end = min(end_pfn, nr_pages);
+	end = min(end_pfn, ini_nr_pages);
 	for (pfn = start_pfn; pfn < end; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);
 
@@ -341,15 +344,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
  * to Xen and not remapped.
  */
 static unsigned long __init xen_set_identity_and_remap_chunk(
-	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
-	unsigned long remap_pfn)
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
 {
 	unsigned long pfn;
 	unsigned long i = 0;
 	unsigned long n = end_pfn - start_pfn;
 
 	if (remap_pfn == 0)
-		remap_pfn = nr_pages;
+		remap_pfn = ini_nr_pages;
 
 	while (i < n) {
 		unsigned long cur_pfn = start_pfn + i;
@@ -358,19 +360,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 		unsigned long remap_range_size;
 
 		/* Do not remap pages beyond the current allocation */
-		if (cur_pfn >= nr_pages) {
+		if (cur_pfn >= ini_nr_pages) {
 			/* Identity map remaining pages */
 			set_phys_range_identity(cur_pfn, cur_pfn + size);
 			break;
 		}
-		if (cur_pfn + size > nr_pages)
-			size = nr_pages - cur_pfn;
+		if (cur_pfn + size > ini_nr_pages)
+			size = ini_nr_pages - cur_pfn;
 
 		remap_range_size = xen_find_pfn_range(&remap_pfn);
 		if (!remap_range_size) {
 			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
 			xen_set_identity_and_release_chunk(cur_pfn,
-						cur_pfn + left, nr_pages);
+						cur_pfn + left);
 			break;
 		}
 		/* Adjust size to fit in current e820 RAM region */
@@ -397,18 +399,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 }
 
 static unsigned long __init xen_count_remap_pages(
-	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long start_pfn, unsigned long end_pfn,
 	unsigned long remap_pages)
 {
-	if (start_pfn >= nr_pages)
+	if (start_pfn >= ini_nr_pages)
 		return remap_pages;
 
-	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+	return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
 }
 
-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+static unsigned long __init xen_foreach_remap_area(
 	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
-			      unsigned long nr_pages, unsigned long last_val))
+			      unsigned long last_val))
 {
 	phys_addr_t start = 0;
 	unsigned long ret_val = 0;
@@ -436,8 +438,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
 			end_pfn = PFN_UP(entry->addr);
 
 			if (start_pfn < end_pfn)
-				ret_val = func(start_pfn, end_pfn, nr_pages,
-					       ret_val);
+				ret_val = func(start_pfn, end_pfn, ret_val);
 			start = end;
 		}
 	}
@@ -494,6 +495,8 @@ void __init xen_remap_memory(void)
 	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
 
 	pr_info("Remapped %ld page(s)\n", remapped);
+
+	xen_do_remap_nonram();
 }
 
 static unsigned long __init xen_get_pages_limit(void)
@@ -567,7 +570,7 @@ static void __init xen_ignore_unusable(void)
 	}
 }
 
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
 {
 	struct e820_entry *entry;
 	unsigned mapcnt;
@@ -624,6 +627,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
 	return 0;
 }
 
+/*
+ * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
+ * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
+ * The adaption of the P2M must be deferred until page allocation is possible.
+ */
+static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
+{
+	struct e820_entry *entry;
+	unsigned int mapcnt;
+	phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
+	phys_addr_t swap_addr, swap_size, entry_end;
+
+	swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
+	swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
+	entry = xen_e820_table.entries;
+
+	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
+		entry_end = entry->addr + entry->size;
+		if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
+		    entry_end - swap_size >= mem_end) {
+			/* Reduce RAM entry by needed space (whole pages). */
+			entry->size -= swap_size;
+
+			/* Add new entry at the end of E820 map. */
+			entry = xen_e820_table.entries +
+				xen_e820_table.nr_entries;
+			xen_e820_table.nr_entries++;
+
+			/* Fill new entry (keep size and page offset). */
+			entry->type = swap_entry->type;
+			entry->addr = entry_end - swap_size +
+				      swap_addr - swap_entry->addr;
+			entry->size = swap_entry->size;
+
+			/* Convert old entry to RAM, align to pages. */
+			swap_entry->type = E820_TYPE_RAM;
+			swap_entry->addr = swap_addr;
+			swap_entry->size = swap_size;
+
+			/* Remember PFN<->MFN relation for P2M update. */
+			xen_add_remap_nonram(swap_addr, entry_end - swap_size,
+					     swap_size);
+
+			/* Order E820 table and merge entries. */
+			e820__update_table(&xen_e820_table);
+
+			return;
+		}
+
+		entry++;
+	}
+
+	xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
+	BUG();
+}
+
+/*
+ * Look for non-RAM memory types in a specific guest physical area and move
+ * those away if possible (ACPI NVS only for now).
+ */
+static void __init xen_e820_resolve_conflicts(phys_addr_t start,
+					      phys_addr_t size)
+{
+	struct e820_entry *entry;
+	unsigned int mapcnt;
+	phys_addr_t end;
+
+	if (!size)
+		return;
+
+	end = start + size;
+	entry = xen_e820_table.entries;
+
+	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
+		if (entry->addr >= end)
+			return;
+
+		if (entry->addr + entry->size > start &&
+		    entry->type == E820_TYPE_NVS)
+			xen_e820_swap_entry_with_ram(entry);
+
+		entry++;
+	}
+}
+
+/*
+ * Check for an area in physical memory to be usable for non-movable purposes.
+ * An area is considered to usable if the used E820 map lists it to be RAM or
+ * some other type which can be moved to higher PFNs while keeping the MFNs.
+ * In case the area is not usable, crash the system with an error message.
+ */
+void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+				   const char *component)
+{
+	xen_e820_resolve_conflicts(start, size);
+
+	if (!xen_is_e820_reserved(start, size))
+		return;
+
+	xen_raw_console_write("Xen hypervisor allocated ");
+	xen_raw_console_write(component);
+	xen_raw_console_write(" memory conflicts with E820 map\n");
+	BUG();
+}
+
 /*
  * Like memcpy, but with physical addresses for dest and src.
  */
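
A worked example with made-up, page-aligned numbers may help here: assume an ACPI NVS entry at 0x7f800000 with size 0x4000 conflicts with Xen-allocated memory, ini_nr_pages corresponds to mem_end = 0x100000000, and a RAM entry covers 0x100000000-0x140000000. Then swap_addr = 0x7f800000 and swap_size = 0x4000; the RAM entry shrinks by 0x4000, a new NVS entry is created at entry_end - swap_size = 0x13fffc000, the old NVS range is converted to RAM, and xen_add_remap_nonram(0x7f800000, 0x13fffc000, 0x4000) records that the machine frames backing the old location now appear at the new guest physical address, to be applied to the P2M later by xen_do_remap_nonram().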
@@ -683,7 +791,7 @@ static void __init xen_reserve_xen_mfnlist(void)
  **/
 char * __init xen_memory_setup(void)
 {
-	unsigned long max_pfn, pfn_s, n_pfns;
+	unsigned long pfn_s, n_pfns;
 	phys_addr_t mem_end, addr, size, chunk_size;
 	u32 type;
 	int rc;
@@ -695,9 +803,8 @@ char * __init xen_memory_setup(void)
 	int op;
 
 	xen_parse_512gb();
-	max_pfn = xen_get_pages_limit();
-	max_pfn = min(max_pfn, xen_start_info->nr_pages);
-	mem_end = PFN_PHYS(max_pfn);
+	ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
+	mem_end = PFN_PHYS(ini_nr_pages);
 
 	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
 	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
@@ -747,13 +854,35 @@ char * __init xen_memory_setup(void)
 	/* Make sure the Xen-supplied memory map is well-ordered. */
 	e820__update_table(&xen_e820_table);
 
+	/*
+	 * Check whether the kernel itself conflicts with the target E820 map.
+	 * Failing now is better than running into weird problems later due
+	 * to relocating (and even reusing) pages with kernel text or data.
+	 */
+	xen_chk_is_e820_usable(__pa_symbol(_text),
+			       __pa_symbol(_end) - __pa_symbol(_text),
+			       "kernel");
+
+	/*
+	 * Check for a conflict of the xen_start_info memory with the target
+	 * E820 map.
+	 */
+	xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
+			       "xen_start_info");
+
+	/*
+	 * Check for a conflict of the hypervisor supplied page tables with
+	 * the target E820 map.
+	 */
+	xen_pt_check_e820();
+
 	max_pages = xen_get_max_pages();
 
 	/* How many extra pages do we need due to remapping? */
-	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
+	max_pages += xen_foreach_remap_area(xen_count_remap_pages);
 
-	if (max_pages > max_pfn)
-		extra_pages += max_pages - max_pfn;
+	if (max_pages > ini_nr_pages)
+		extra_pages += max_pages - ini_nr_pages;
 
 	/*
 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
@@ -762,8 +891,8 @@ char * __init xen_memory_setup(void)
 	 * Make sure we have no memory above max_pages, as this area
 	 * isn't handled by the p2m management.
 	 */
-	maxmem_pages = EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM));
-	extra_pages = min3(maxmem_pages, extra_pages, max_pages - max_pfn);
+	maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
+	extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
 	i = 0;
 	addr = xen_e820_table.entries[0].addr;
 	size = xen_e820_table.entries[0].size;
@@ -819,23 +948,6 @@ char * __init xen_memory_setup(void)
 
 	e820__update_table(e820_table);
 
-	/*
-	 * Check whether the kernel itself conflicts with the target E820 map.
-	 * Failing now is better than running into weird problems later due
-	 * to relocating (and even reusing) pages with kernel text or data.
-	 */
-	if (xen_is_e820_reserved(__pa_symbol(_text),
-			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
-		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
-		BUG();
-	}
-
-	/*
-	 * Check for a conflict of the hypervisor supplied page tables with
-	 * the target E820 map.
-	 */
-	xen_pt_check_e820();
-
 	xen_reserve_xen_mfnlist();
 
 	/* Check for a conflict of the initrd with the target E820 map. */
@@ -863,7 +975,7 @@ char * __init xen_memory_setup(void)
 	 * Set identity map on non-RAM pages and prepare remapping the
 	 * underlying RAM.
 	 */
-	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+	xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
 
 	pr_info("Released %ld page(s)\n", xen_released_pages);
 

arch/x86/xen/xen-ops.h
@@ -47,8 +47,12 @@ void xen_mm_unpin_all(void);
 #ifdef CONFIG_X86_64
 void __init xen_relocate_p2m(void);
 #endif
+void __init xen_do_remap_nonram(void);
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+				 unsigned long size);
 
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
+void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+				   const char *component);
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
 void __init xen_inv_extra_mem(void);
 void __init xen_remap_memory(void);

drivers/xen/pci.c
@@ -44,15 +44,11 @@ static int xen_add_device(struct device *dev)
 	}
 #endif
 	if (pci_seg_supported) {
-		struct {
-			struct physdev_pci_device_add add;
-			uint32_t pxm;
-		} add_ext = {
-			.add.seg = pci_domain_nr(pci_dev->bus),
-			.add.bus = pci_dev->bus->number,
-			.add.devfn = pci_dev->devfn
-		};
-		struct physdev_pci_device_add *add = &add_ext.add;
+		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
+
+		add->seg = pci_domain_nr(pci_dev->bus);
+		add->bus = pci_dev->bus->number;
+		add->devfn = pci_dev->devfn;
 
 #ifdef CONFIG_ACPI
 		acpi_handle handle;
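
DEFINE_RAW_FLEX() comes from <linux/overflow.h>; it declares suitably sized on-stack storage for a structure ending in a flexible array member plus a fixed number of trailing elements, which avoids wrapping the struct inside another struct (the pattern that triggered -Wflex-array-member-not-at-end). A hedged stand-alone sketch of what the macro provides; the stand-in type below is simplified, the real struct physdev_pci_device_add lives in the Xen interface headers:

struct pci_add_like {
	unsigned short seg;
	unsigned char bus;
	unsigned char devfn;
	unsigned int optarr[];		/* flexible array member */
};

void pci_add_example(void)
{
	/* Roughly what DEFINE_RAW_FLEX(struct pci_add_like, add, optarr, 1)
	 * expands to: aligned, zero-initialized stack storage for the struct
	 * plus one optarr element, with 'add' pointing at it. */
	_Alignas(struct pci_add_like) unsigned char storage[
		sizeof(struct pci_add_like) + sizeof(unsigned int)] = { 0 };
	struct pci_add_like *add = (struct pci_add_like *)storage;

	add->seg = 0;
	add->bus = 1;
	add->devfn = 0x08;
	add->optarr[0] = 2;	/* e.g. a NUMA proximity domain (pxm) */
}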

drivers/xen/swiotlb-xen.c
@@ -78,9 +78,15 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
 	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
+	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
 
 	next_bfn = pfn_to_bfn(xen_pfn);
 
+	/* If buffer is physically aligned, ensure DMA alignment. */
+	if (IS_ALIGNED(p, algn) &&
+	    !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
+		return 1;
+
 	for (i = 1; i < nr_pages; i++)
 		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
 			return 1;
@@ -141,7 +147,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
 	void *ret;
 
 	/* Align the allocation to the Xen page size */
-	size = 1UL << (order + XEN_PAGE_SHIFT);
+	size = ALIGN(size, XEN_PAGE_SIZE);
 
 	ret = (void *)__get_free_pages(flags, get_order(size));
 	if (!ret)
@@ -173,7 +179,7 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
 	int order = get_order(size);
 
 	/* Convert the size to actually allocated. */
-	size = 1UL << (order + XEN_PAGE_SHIFT);
+	size = ALIGN(size, XEN_PAGE_SIZE);
 
 	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
 	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
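
The size fix is easiest to see with numbers (on x86, where PAGE_SIZE == XEN_PAGE_SIZE == 4 KiB): for a 12 KiB request, get_order() yields 2, so the old formula 1UL << (2 + XEN_PAGE_SHIFT) rewrote size as 16 KiB although only 12 KiB was requested; ALIGN(12 KiB, XEN_PAGE_SIZE) leaves it at 12 KiB, so the coherent_dma_mask and page-boundary checks now operate on the range actually in use.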

drivers/xen/xenbus/xenbus_xs.c
@@ -427,12 +427,12 @@ char **xenbus_directory(struct xenbus_transaction t,
 
 	path = join(dir, node);
 	if (IS_ERR(path))
-		return (char **)path;
+		return ERR_CAST(path);
 
 	strings = xs_single(t, XS_DIRECTORY, path, &len);
 	kfree(path);
 	if (IS_ERR(strings))
-		return (char **)strings;
+		return ERR_CAST(strings);
 
 	return split(strings, len, num);
 }
@@ -465,7 +465,7 @@ void *xenbus_read(struct xenbus_transaction t,
 
 	path = join(dir, node);
 	if (IS_ERR(path))
-		return (void *)path;
+		return ERR_CAST(path);
 
 	ret = xs_single(t, XS_READ, path, len);
 	kfree(path);
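
For reference, ERR_CAST() is defined in include/linux/err.h; quoted from memory, so treat the exact wording as a sketch. It performs the same cast the old code did, but documents the intent and centralizes the sparse __force handling:

/**
 * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
 * @ptr: The pointer to cast.
 *
 * Explicitly cast an error-valued pointer to another pointer type in such a
 * way as to make it clear that's what's going on.
 */
static inline void * __must_check ERR_CAST(__force const void *ptr)
{
	/* cast away the const */
	return (void *) ptr;
}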