xen/swiotlb: Use the swiotlb_late_init_with_tbl to init Xen-SWIOTLB late when PV PCI is used.
With this patch we provide the functionality to initialize the Xen-SWIOTLB
late in the bootup cycle - specifically for Xen PCI-frontend. We still will
work if the user had supplied 'iommu=soft' on the Linux command line.

Note: We cannot depend on after_bootmem to automatically determine whether
this is early or not. This is because when PCI IOMMUs are initialized it is
after after_bootmem but before a lot of "other" subsystems are initialized.

CC: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
[v1: Fix smatch warnings]
[v2: Added check for xen_swiotlb]
[v3: Rebased with new xen-swiotlb changes]
[v4: squashed xen/swiotlb: Depending on after_bootmem is not correct in]
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
commit b827760053 (parent 5bab7864b1)
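The point of the new export is that the Xen PCI frontend can bring up the
bounce buffer only when a passthrough device actually appears, long after
bootmem is gone. A minimal sketch of such a caller - every name here except
pci_xen_swiotlb_init_late() is hypothetical, invented for illustration:

/* Illustrative only - not part of this commit. */
#include <linux/pci.h>
#include <asm/xen/swiotlb-xen.h>

static int pcifront_example_enable_swiotlb(struct pci_dev *dev)
{
	int rc;

	/*
	 * Safe to call long after boot: the late path allocates the bounce
	 * buffer with __get_free_pages() instead of the boot allocator.
	 * If "iommu=soft" already set things up at boot, this returns 0
	 * without doing any work; if Xen-SWIOTLB is compiled out, the
	 * static inline stub returns -ENXIO.
	 */
	rc = pci_xen_swiotlb_init_late();
	if (rc) {
		dev_err(&dev->dev, "could not set up Xen-SWIOTLB: %d\n", rc);
		return rc;
	}

	/* dma_map_*() on this device now bounces through Xen-SWIOTLB. */
	return 0;
}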
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -5,10 +5,12 @@
 extern int xen_swiotlb;
 extern int __init pci_xen_swiotlb_detect(void);
 extern void __init pci_xen_swiotlb_init(void);
+extern int pci_xen_swiotlb_init_late(void);
 #else
 #define xen_swiotlb (0)
 static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
 static inline void __init pci_xen_swiotlb_init(void) { }
+static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
 #endif
 
 #endif /* _ASM_X86_SWIOTLB_XEN_H */
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -12,7 +12,7 @@
 #include <asm/iommu.h>
 #include <asm/dma.h>
 #endif
-
+#include <linux/export.h>
 int xen_swiotlb __read_mostly;
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
@@ -69,13 +69,33 @@ int __init pci_xen_swiotlb_detect(void)
 void __init pci_xen_swiotlb_init(void)
 {
 	if (xen_swiotlb) {
-		xen_swiotlb_init(1);
+		xen_swiotlb_init(1, true /* early */);
 		dma_ops = &xen_swiotlb_dma_ops;
 
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
 	}
 }
+
+int pci_xen_swiotlb_init_late(void)
+{
+	int rc;
+
+	if (xen_swiotlb)
+		return 0;
+
+	rc = xen_swiotlb_init(1, false /* late */);
+	if (rc)
+		return rc;
+
+	dma_ops = &xen_swiotlb_dma_ops;
+	/* Make sure ACS will be enabled */
+	pci_request_acs();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
+
 IOMMU_INIT_FINISH(pci_xen_swiotlb_detect,
 		  0,
 		  pci_xen_swiotlb_init,
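For reference, the two orderings the code above now has to cope with,
summarised as a comment (this summary is mine, not text from the patch):

/*
 * Boot with "iommu=soft" (or as the initial domain):
 *   pci_xen_swiotlb_detect()     -> xen_swiotlb = 1
 *   pci_xen_swiotlb_init()       -> xen_swiotlb_init(1, true), bootmem buffer
 *   pci_xen_swiotlb_init_late()  -> xen_swiotlb already set, returns 0
 *
 * Boot without it, PV PCI device shows up later:
 *   pci_xen_swiotlb_detect()     -> xen_swiotlb stays 0
 *   pci_xen_swiotlb_init()       -> does nothing
 *   pci_xen_swiotlb_init_late()  -> xen_swiotlb_init(1, false), page allocator
 */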
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -176,9 +176,9 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 	}
 	return "";
 }
-void __init xen_swiotlb_init(int verbose)
+int __ref xen_swiotlb_init(int verbose, bool early)
 {
-	unsigned long bytes;
+	unsigned long bytes, order;
 	int rc = -ENOMEM;
 	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
 	unsigned int repeat = 3;
@@ -186,10 +186,28 @@ void __init xen_swiotlb_init(int verbose)
 	xen_io_tlb_nslabs = swiotlb_nr_tbl();
 retry:
 	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
+	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+	if (early)
+		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+	else {
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+			xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+			if (xen_io_tlb_start)
+				break;
+			order--;
+		}
+		if (order != get_order(bytes)) {
+			pr_warn("Warning: only able to allocate %ld MB "
+				"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
+			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+		}
+	}
 	if (!xen_io_tlb_start) {
 		m_ret = XEN_SWIOTLB_ENOMEM;
 		goto error;
@@ -202,14 +220,21 @@ retry:
 			       bytes,
 			       xen_io_tlb_nslabs);
 	if (rc) {
-		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+		if (early)
+			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+		else {
+			free_pages((unsigned long)xen_io_tlb_start, order);
+			xen_io_tlb_start = NULL;
+		}
 		m_ret = XEN_SWIOTLB_EFIXUP;
 		goto error;
 	}
 	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
-	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
-
-	return;
+	if (early)
+		swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+	else
+		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+	return rc;
 error:
 	if (repeat--) {
 		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
@@ -218,10 +243,13 @@ error:
 			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
 		goto retry;
 	}
-	xen_raw_printk("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
-	panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	if (early)
+		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+	else
+		free_pages((unsigned long)xen_io_tlb_start, order);
+	return rc;
 }
 
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags,
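The late path cannot panic, so it shrinks the request instead of failing
outright. A quick userspace back-of-the-envelope of the sizes involved,
assuming the IO_TLB_SHIFT/PAGE_SHIFT values of that era (my own sketch, not
code from the patch):

#include <stdio.h>

#define IO_TLB_SHIFT	11			/* one slab = 2 KiB */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLABS_PER_PAGE	(1 << (PAGE_SHIFT - IO_TLB_SHIFT))	/* 2 on x86 */
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)		/* 512, i.e. 1 MiB */

int main(void)
{
	unsigned long nslabs = 32768;	/* 64 MB / 2 KiB, the usual default */
	unsigned long bytes  = nslabs << IO_TLB_SHIFT;
	unsigned int order = 0, min_order = 0;

	/* open-coded get_order(): smallest order whose size covers bytes */
	while ((PAGE_SIZE << order) < bytes)
		order++;

	/* smallest order the fallback loop will still attempt: the loop
	 * condition is (SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS */
	while (!((SLABS_PER_PAGE << min_order) > IO_TLB_MIN_SLABS))
		min_order++;

	printf("full request : %lu MB (order %u)\n", bytes >> 20, order);
	printf("smallest try : %lu MB (order %u)\n",
	       (PAGE_SIZE << min_order) >> 20, min_order);
	return 0;
}

With the strict '>' in the loop condition this prints order 14 (64 MB) down
to order 9 (2 MB), which lines up with the "Min is 2MB" comment in the retry
logic above.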
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -3,7 +3,7 @@
 
 #include <linux/swiotlb.h>
 
-extern void xen_swiotlb_init(int verbose);
+extern int xen_swiotlb_init(int verbose, bool early);
 
 extern void
 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,