IOMMU Updates for Linux v3.15
This time a few more updates queued up.

* Rework VT-d code to support ACPI devices
* Improvements for memory and PCI hotplug support in the VT-d driver
* Device-tree support for OMAP IOMMU
* Convert OMAP IOMMU to use devm_* interfaces
* Fixed PASID support for AMD IOMMU
* Other random cleanups and fixes for OMAP, ARM-SMMU and SHMOBILE IOMMU

Most of the changes are in the VT-d driver because some rework was
necessary for better hotplug and ACPI device support.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIcBAABAgAGBQJTPn8aAAoJECvwRC2XARrj66UQAICYhJKrErry3V8tGxzJ6r/3
jEWCmwd//FmV7rHcdRqckrv8vn9n9wf0lhRoGpbo9iiUrO8ikTdNltccKr1t0cAf
yPabayk1PbbZyc1Dv1hGKbIH52lHfVjbqesQ9Z2gndgVJuXWz5tdGDInXIvuYYEP
vWJFS2D5pfAj/lokvcJ1LcwLoBgDdsKM6dbGb2gOxxHm3gIUFvkZktMWGKgKeEGF
HL8R78tfGj6LRfLILT3RcxW4LfhnM2r1ZSeOm1lCdy7CISptqg6COWdX+gw/UToJ
lQeuPXv/AUaLTFRL+v07qus8iVsvr6E+Fx3ppmi+mkvgoSD0spWPKnAvrAkFDij3
ygXFKumTHqGTIHJQTVNWXrHkYFeM/XaIX7lZv0UwhE2RuLTRjFaIdBo2Wckmu9T3
Un2mZ6NshyI2IQ6NC9ytxHP8BCAP/rhjirKOpegYlKfbU5AHjkBQHHNjL7hcSIH5
g3ZXIV/FOgfNWhv4cdBR8go1N/PO+VMMYUfc1MC0sYi9MeiOnmsq5Y3LhXQWDgPE
eogQiz5j87ciNcA4IMO/tUJ2xBSvU6g2ESSCLZxZ33F6uD6X+VVgvinOeFBncbCS
fD0QUBo27tXUclWJHe7Z2yT4X2Xn4RohGEohqSpD0iNa1RUaNyBf7EzoEkZihxSZ
J7OuEnbE1kVQLh8y5ZdE
=uy/Z
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This time a few more updates queued up.

   - Rework VT-d code to support ACPI devices

   - Improvements for memory and PCI hotplug support in the VT-d driver

   - Device-tree support for OMAP IOMMU

   - Convert OMAP IOMMU to use devm_* interfaces

   - Fixed PASID support for AMD IOMMU

   - Other random cleanups and fixes for OMAP, ARM-SMMU and SHMOBILE
     IOMMU

  Most of the changes are in the VT-d driver because some rework was
  necessary for better hotplug and ACPI device support"

* tag 'iommu-updates-v3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (75 commits)
  iommu/vt-d: Fix error handling in ANDD processing
  iommu/vt-d: returning free pointer in get_domain_for_dev()
  iommu/vt-d: Only call dmar_acpi_dev_scope_init() if DRHD units present
  iommu/vt-d: Check for NULL pointer in dmar_acpi_dev_scope_init()
  iommu/amd: Fix logic to determine and checking max PASID
  iommu/vt-d: Include ACPI devices in iommu=pt
  iommu/vt-d: Finally enable translation for non-PCI devices
  iommu/vt-d: Remove to_pci_dev() in intel_map_page()
  iommu/vt-d: Remove pdev from intel_iommu_attach_device()
  iommu/vt-d: Remove pdev from iommu_no_mapping()
  iommu/vt-d: Make domain_add_dev_info() take struct device
  iommu/vt-d: Make domain_remove_one_dev_info() take struct device
  iommu/vt-d: Rename 'hwdev' variables to 'dev' now that that's the norm
  iommu/vt-d: Remove some pointless to_pci_dev() calls
  iommu/vt-d: Make get_valid_domain_for_dev() take struct device
  iommu/vt-d: Make iommu_should_identity_map() take struct device
  iommu/vt-d: Handle RMRRs for non-PCI devices
  iommu/vt-d: Make get_domain_for_dev() take struct device
  iommu/vt-d: Make domain_context_mapp{ed,ing}() take struct device
  iommu/vt-d: Make device_to_iommu() cope with non-PCI devices
  ...
commit 3f583bc219
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -48,6 +48,12 @@ conditions.
                   from the mmu-masters towards memory) node for this
                   SMMU.
 
+- calxeda,smmu-secure-config-access : Enable proper handling of buggy
+                  implementations that always use secure access to
+                  SMMU configuration registers. In this case non-secure
+                  aliases of secure registers have to be used during
+                  SMMU configuration.
+
 Example:
 
         smmu {
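The property added above is a simple boolean flag on the SMMU node. A rough sketch of a node that opts in (not from this patch set; the node name, unit address, compatible string and other properties here are illustrative only):

	smmu@fff00000 {
		compatible = "arm,smmu-v1";
		reg = <0xfff00000 0x10000>;
		#global-interrupts = <1>;
		/* board exposes only secure config; use the non-secure aliases */
		calxeda,smmu-secure-config-access;
	};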
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
@@ -0,0 +1,26 @@
+OMAP2+ IOMMU
+
+Required properties:
+- compatible : Should be one of,
+		"ti,omap2-iommu" for OMAP2/OMAP3 IOMMU instances
+		"ti,omap4-iommu" for OMAP4/OMAP5 IOMMU instances
+		"ti,dra7-iommu" for DRA7xx IOMMU instances
+- ti,hwmods : Name of the hwmod associated with the IOMMU instance
+- reg : Address space for the configuration registers
+- interrupts : Interrupt specifier for the IOMMU instance
+
+Optional properties:
+- ti,#tlb-entries : Number of entries in the translation look-aside buffer.
+		    Should be either 8 or 32 (default: 32)
+- ti,iommu-bus-err-back : Indicates the IOMMU instance supports throwing
+			  back a bus error response on MMU faults.
+
+Example:
+	/* OMAP3 ISP MMU */
+	mmu_isp: mmu@480bd400 {
+		compatible = "ti,omap2-iommu";
+		reg = <0x480bd400 0x80>;
+		interrupts = <24>;
+		ti,hwmods = "mmu_isp";
+		ti,#tlb-entries = <8>;
+	};
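The optional ti,#tlb-entries property maps onto a simple OF read in the consuming driver. A minimal sketch of how such a consumer might parse it (the actual OMAP IOMMU probe code may differ):

	u32 nr_tlb_entries = 32;	/* binding default */

	of_property_read_u32(pdev->dev.of_node, "ti,#tlb-entries",
			     &nr_tlb_entries);
	if (nr_tlb_entries != 8 && nr_tlb_entries != 32)
		return -EINVAL;	/* binding allows only 8 or 32 */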
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/of.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
@@ -58,6 +59,10 @@ static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused)
 
 static int __init omap_iommu_init(void)
 {
+	/* If dtb is there, the devices will be created dynamically */
+	if (of_have_populated_dt())
+		return -ENODEV;
+
 	return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL);
 }
 /* must be ready before omap3isp is probed */
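The of_have_populated_dt() guard turns the legacy hwmod scan into a no-op on device-tree boots, where the OF core creates the IOMMU platform devices instead. The same early-exit idiom applies to any legacy device-creation initcall; a generic sketch with hypothetical names:

	static int __init my_legacy_devices_init(void)
	{
		/* booted from a device tree: devices come from the OF core */
		if (of_have_populated_dt())
			return -ENODEV;

		return platform_device_register(&my_legacy_pdev);
	}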
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -207,7 +207,7 @@ config SHMOBILE_IOMMU
	bool "IOMMU for Renesas IPMMU/IPMMUI"
	default n
	depends on ARM
-	depends on SH_MOBILE || COMPILE_TEST
+	depends on ARCH_SHMOBILE || COMPILE_TEST
	select IOMMU_API
	select ARM_DMA_USE_IOMMU
	select SHMOBILE_IPMMU
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -963,7 +963,7 @@ static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
 
 	address &= ~(0xfffULL);
 
-	cmd->data[0]  = pasid & PASID_MASK;
+	cmd->data[0]  = pasid;
 	cmd->data[1]  = domid;
 	cmd->data[2]  = lower_32_bits(address);
 	cmd->data[3]  = upper_32_bits(address);
@@ -982,10 +982,10 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
 	address &= ~(0xfffULL);
 
 	cmd->data[0]  = devid;
-	cmd->data[0] |= (pasid & 0xff) << 16;
+	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
 	cmd->data[0] |= (qdep & 0xff) << 24;
 	cmd->data[1]  = devid;
-	cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+	cmd->data[1] |= (pasid & 0xff) << 16;
 	cmd->data[2]  = lower_32_bits(address);
 	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 	cmd->data[3]  = upper_32_bits(address);
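The iotlb fix swaps which PASID byte lands in which command word: per the new code, data[0] bits 23:16 carry PASID[15:8] and data[1] bits 23:16 carry PASID[7:0]. A worked example for pasid = 0xabcd, assuming the field layout the new code implies:

	((0xabcd >> 8) & 0xff) << 16	/* data[0]: 0xab in bits 23:16 */
	( 0xabcd       & 0xff) << 16	/* data[1]: 0xcd in bits 23:16 */

The old code placed the two halves the other way around, so any PASID above 0xff was encoded wrongly.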
@@ -1001,7 +1001,7 @@ static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
 
	cmd->data[0]  = devid;
	if (gn) {
-		cmd->data[1]  = pasid & PASID_MASK;
+		cmd->data[1]  = pasid;
		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3]  = tag & 0x1ff;
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -150,7 +150,7 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;
 
-u32 amd_iommu_max_pasids __read_mostly = ~0;
+u32 amd_iommu_max_pasid __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
 bool amd_iommu_pc_present __read_mostly;
@@ -1231,14 +1231,16 @@ static int iommu_init_pci(struct amd_iommu *iommu)
 
	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
-		u32 pasids;
-		u64 shift;
+		u32 max_pasid;
+		u64 pasmax;
 
-		shift   = iommu->features & FEATURE_PASID_MASK;
-		shift >>= FEATURE_PASID_SHIFT;
-		pasids  = (1 << shift);
+		pasmax = iommu->features & FEATURE_PASID_MASK;
+		pasmax >>= FEATURE_PASID_SHIFT;
+		max_pasid = (1 << (pasmax + 1)) - 1;
 
-		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
+
+		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
 
		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;
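The old computation treated the feature field as a bit count rather than as "number of PASID bits minus one", which is the reading the new code implies. Worked example for pasmax = 15 (16-bit PASIDs):

	max_pasid = (1 << (15 + 1)) - 1;	/* new: 0xffff */
	pasids    = (1 << 15);			/* old: 0x8000, wrong */

The added BUG_ON then asserts that the system-wide value still fits the driver's PASID_MASK.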
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -99,7 +99,12 @@
 #define FEATURE_GLXVAL_SHIFT	14
 #define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
 
-#define PASID_MASK		0x000fffff
+/* Note:
+ * The current driver only support 16-bit PASID.
+ * Currently, hardware only implement upto 16-bit PASID
+ * even though the spec says it could have upto 20 bits.
+ */
+#define PASID_MASK		0x0000ffff
 
 /* MMIO status bits */
 #define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
@@ -697,8 +702,8 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  */
 extern u32 amd_iommu_unmap_flush;
 
-/* Smallest number of PASIDs supported by any IOMMU in the system */
-extern u32 amd_iommu_max_pasids;
+/* Smallest max PASID supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasid;
 
 extern bool amd_iommu_v2_present;
 
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -48,7 +48,7 @@
 #include <asm/pgalloc.h>
 
 /* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS		8
+#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS
 
 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS		128
@@ -60,6 +60,16 @@
 #define ARM_SMMU_GR0(smmu)		((smmu)->base)
 #define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
 
+/*
+ * SMMU global address space with conditional offset to access secure
+ * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
+ * nsGFSYNR0: 0x450)
+ */
+#define ARM_SMMU_GR0_NS(smmu)						\
+	((smmu)->base +							\
+		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
+			? 0x400 : 0))
+
 /* Page table bits */
 #define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
 #define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
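With ARM_SMMU_OPT_SECURE_CFG_ACCESS set, every global-space access made through the new macro lands 0x400 bytes up, in the non-secure alias page; without the option it degenerates to plain ARM_SMMU_GR0(). For example, the global fault status read converted later in this series becomes:

	/* base + 0x400 + sGFSR with the quirk, base + sGFSR without */
	gfsr = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);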
@@ -351,6 +361,9 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
 #define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
	u32				features;
+
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+	u32				options;
	int				version;
 
	u32				num_context_banks;
@@ -401,6 +414,29 @@ struct arm_smmu_domain {
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
+struct arm_smmu_option_prop {
+	u32 opt;
+	const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options [] = {
+	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+	{ 0, NULL},
+};
+
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+	int i = 0;
+	do {
+		if (of_property_read_bool(smmu->dev->of_node,
+					  arm_smmu_options[i].prop)) {
+			smmu->options |= arm_smmu_options[i].opt;
+			dev_notice(smmu->dev, "option %s\n",
+				   arm_smmu_options[i].prop);
+		}
+	} while (arm_smmu_options[++i].opt);
+}
+
 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
 {
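The option table keeps quirk handling declarative: one (flag, DT property) pair per quirk, scanned once at probe time. Extending it would only mean another entry; the second row in this sketch is hypothetical:

	static struct arm_smmu_option_prop arm_smmu_options [] = {
		{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
		/* hypothetical: { ARM_SMMU_OPT_SOME_QUIRK, "vendor,some-quirk" }, */
		{ 0, NULL},
	};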
@@ -614,16 +650,16 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 {
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 
	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-	if (!gfsr)
-		return IRQ_NONE;
-
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
 
+	if (!gfsr)
+		return IRQ_NONE;
+
	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
@@ -642,7 +678,7 @@ static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
 
	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-		dsb();
+		dsb(ishst);
	} else {
		/*
		 * If the SMMU can't walk tables in the CPU caches, treat them
@@ -990,9 +1026,8 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
 
	/*
	 * Recursively free the page tables for this domain. We don't
-	 * care about speculative TLB filling, because the TLB will be
-	 * nuked next time this context bank is re-allocated and no devices
-	 * currently map to these tables.
+	 * care about speculative TLB filling because the tables should
+	 * not be active in any context bank at this point (SCTLR.M is 0).
	 */
	pgd = pgd_base;
	for (i = 0; i < PTRS_PER_PGD; ++i) {
@@ -1218,7 +1253,7 @@ static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
 
 static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
-				   unsigned long pfn, int flags, int stage)
+				   unsigned long pfn, int prot, int stage)
 {
	pte_t *pte, *start;
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
@@ -1240,28 +1275,28 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
	if (stage == 1) {
		pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
-		if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
+		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pteval |= ARM_SMMU_PTE_AP_RDONLY;
 
-		if (flags & IOMMU_CACHE)
+		if (prot & IOMMU_CACHE)
			pteval |= (MAIR_ATTR_IDX_CACHE <<
				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
	} else {
		pteval |= ARM_SMMU_PTE_HAP_FAULT;
-		if (flags & IOMMU_READ)
+		if (prot & IOMMU_READ)
			pteval |= ARM_SMMU_PTE_HAP_READ;
-		if (flags & IOMMU_WRITE)
+		if (prot & IOMMU_WRITE)
			pteval |= ARM_SMMU_PTE_HAP_WRITE;
-		if (flags & IOMMU_CACHE)
+		if (prot & IOMMU_CACHE)
			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
		else
			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
	}
 
	/* If no access, create a faulting entry to avoid TLB fills */
-	if (flags & IOMMU_EXEC)
+	if (prot & IOMMU_EXEC)
		pteval &= ~ARM_SMMU_PTE_XN;
-	else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		pteval &= ~ARM_SMMU_PTE_PAGE;
 
	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1323,7 +1358,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
 static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
				   unsigned long addr, unsigned long end,
-				   phys_addr_t phys, int flags, int stage)
+				   phys_addr_t phys, int prot, int stage)
 {
	int ret;
	pmd_t *pmd;
@@ -1347,7 +1382,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
	do {
		next = pmd_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
-					      flags, stage);
+					      prot, stage);
		phys += next - addr;
	} while (pmd++, addr = next, addr < end);
 
@@ -1356,7 +1391,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 
 static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
-				   phys_addr_t phys, int flags, int stage)
+				   phys_addr_t phys, int prot, int stage)
 {
	int ret = 0;
	pud_t *pud;
@@ -1380,7 +1415,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
	do {
		next = pud_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
-					      flags, stage);
+					      prot, stage);
		phys += next - addr;
	} while (pud++, addr = next, addr < end);
 
@@ -1389,7 +1424,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 
 static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
				   unsigned long iova, phys_addr_t paddr,
-				   size_t size, int flags)
+				   size_t size, int prot)
 {
	int ret, stage;
	unsigned long end;
@@ -1397,7 +1432,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd = root_cfg->pgd;
	struct arm_smmu_device *smmu = root_cfg->smmu;
-	unsigned long irqflags;
+	unsigned long flags;
 
	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
		stage = 2;
@@ -1420,14 +1455,14 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
	if (paddr & ~output_mask)
		return -ERANGE;
 
-	spin_lock_irqsave(&smmu_domain->lock, irqflags);
+	spin_lock_irqsave(&smmu_domain->lock, flags);
	pgd += pgd_index(iova);
	end = iova + size;
	do {
		unsigned long next = pgd_addr_end(iova, end);
 
		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
-					      flags, stage);
+					      prot, stage);
		if (ret)
			goto out_unlock;
 
@@ -1436,13 +1471,13 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
	} while (pgd++, iova != end);
 
 out_unlock:
-	spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
+	spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
	return ret;
 }
 
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
-			phys_addr_t paddr, size_t size, int flags)
+			phys_addr_t paddr, size_t size, int prot)
 {
	struct arm_smmu_domain *smmu_domain = domain->priv;
 
@@ -1453,7 +1488,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
		return -ERANGE;
 
-	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
+	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -1597,9 +1632,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
	int i = 0;
	u32 reg;
 
-	/* Clear Global FSR */
-	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-	writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
+	/* clear global FSR */
+	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 
	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1619,7 +1654,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
-	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 
	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -1638,7 +1673,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 
	/* Push the button */
	arm_smmu_tlb_sync(smmu);
-	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
+	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1885,6 +1920,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
	if (err)
		goto out_put_parent;
 
+	parse_driver_options(smmu);
+
	if (smmu->version > 1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
@@ -1969,7 +2006,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
		free_irq(smmu->irqs[i], smmu);
 
	/* Turn the thing off */
-	writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
 }
 
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -43,14 +43,24 @@
 
 #include "irq_remapping.h"
 
-/* No locks are needed as DMA remapping hardware unit
- * list is constructed at boot time and hotplug of
- * these units are not supported by the architecture.
+/*
+ * Assumptions:
+ * 1) The hotplug framework guarentees that DMAR unit will be hot-added
+ *    before IO devices managed by that unit.
+ * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
+ *    after IO devices managed by that unit.
+ * 3) Hotplug events are rare.
+ *
+ * Locking rules for DMA and interrupt remapping related global data structures:
+ * 1) Use dmar_global_lock in process context
+ * 2) Use RCU in interrupt context
  */
+DECLARE_RWSEM(dmar_global_lock);
 LIST_HEAD(dmar_drhd_units);
 
 struct acpi_table_header * __initdata dmar_tbl;
 static acpi_size dmar_tbl_size;
+static int dmar_dev_scope_status = 1;
 
 static int alloc_iommu(struct dmar_drhd_unit *drhd);
 static void free_iommu(struct intel_iommu *iommu);
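The new rules pair the rwsem with the _rcu list primitives: process-context writers take dmar_global_lock and use the _rcu list helpers, while interrupt-context readers only need an RCU read-side section. A minimal sketch of the two sides, using the names from this file (the consumer call is hypothetical):

	/* process context: mutate the DRHD list */
	down_write(&dmar_global_lock);
	list_add_rcu(&dmaru->list, &dmar_drhd_units);
	up_write(&dmar_global_lock);

	/* interrupt context: walk it under RCU */
	rcu_read_lock();
	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		do_something(dmaru);	/* hypothetical consumer */
	rcu_read_unlock();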
@@ -62,73 +72,20 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
	 * the very end.
	 */
	if (drhd->include_all)
-		list_add_tail(&drhd->list, &dmar_drhd_units);
+		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
-		list_add(&drhd->list, &dmar_drhd_units);
+		list_add_rcu(&drhd->list, &dmar_drhd_units);
 }
 
-static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
-					   struct pci_dev **dev, u16 segment)
-{
-	struct pci_bus *bus;
-	struct pci_dev *pdev = NULL;
-	struct acpi_dmar_pci_path *path;
-	int count;
-
-	bus = pci_find_bus(segment, scope->bus);
-	path = (struct acpi_dmar_pci_path *)(scope + 1);
-	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
-		/ sizeof(struct acpi_dmar_pci_path);
-
-	while (count) {
-		if (pdev)
-			pci_dev_put(pdev);
-		/*
-		 * Some BIOSes list non-exist devices in DMAR table, just
-		 * ignore it
-		 */
-		if (!bus) {
-			pr_warn("Device scope bus [%d] not found\n", scope->bus);
-			break;
-		}
-		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
-		if (!pdev) {
-			/* warning will be printed below */
-			break;
-		}
-		path ++;
-		count --;
-		bus = pdev->subordinate;
-	}
-	if (!pdev) {
-		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
-			segment, scope->bus, path->device, path->function);
-		return 0;
-	}
-	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
-	    pdev->subordinate) || (scope->entry_type == \
-	    ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
-		pci_dev_put(pdev);
-		pr_warn("Device scope type does not match for %s\n",
-			pci_name(pdev));
-		return -EINVAL;
-	}
-	*dev = pdev;
-	return 0;
-}
-
-int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
-				struct pci_dev ***devices, u16 segment)
+void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
 {
	struct acpi_dmar_device_scope *scope;
-	void * tmp = start;
-	int index;
-	int ret;
 
	*cnt = 0;
	while (start < end) {
		scope = start;
-		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
+		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
+		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
@@ -138,43 +95,236 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
		start += scope->length;
	}
	if (*cnt == 0)
+		return NULL;
+
+	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
+}
+
+void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
+{
+	int i;
+	struct device *tmp_dev;
+
+	if (*devices && *cnt) {
+		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
+			put_device(tmp_dev);
+		kfree(*devices);
+	}
+
+	*devices = NULL;
+	*cnt = 0;
+}
+
+/* Optimize out kzalloc()/kfree() for normal cases */
+static char dmar_pci_notify_info_buf[64];
+
+static struct dmar_pci_notify_info *
+dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
+{
+	int level = 0;
+	size_t size;
+	struct pci_dev *tmp;
+	struct dmar_pci_notify_info *info;
+
+	BUG_ON(dev->is_virtfn);
+
+	/* Only generate path[] for device addition event */
+	if (event == BUS_NOTIFY_ADD_DEVICE)
+		for (tmp = dev; tmp; tmp = tmp->bus->self)
+			level++;
+
+	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+	if (size <= sizeof(dmar_pci_notify_info_buf)) {
+		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
+	} else {
+		info = kzalloc(size, GFP_KERNEL);
+		if (!info) {
+			pr_warn("Out of memory when allocating notify_info "
+				"for %s.\n", pci_name(dev));
+			if (dmar_dev_scope_status == 0)
+				dmar_dev_scope_status = -ENOMEM;
+			return NULL;
+		}
+	}
+
+	info->event = event;
+	info->dev = dev;
+	info->seg = pci_domain_nr(dev->bus);
+	info->level = level;
+	if (event == BUS_NOTIFY_ADD_DEVICE) {
+		for (tmp = dev, level--; tmp; tmp = tmp->bus->self) {
+			info->path[level].device = PCI_SLOT(tmp->devfn);
+			info->path[level].function = PCI_FUNC(tmp->devfn);
+			if (pci_is_root_bus(tmp->bus))
+				info->bus = tmp->bus->number;
+		}
+	}
+
+	return info;
+}
+
+static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
+{
+	if ((void *)info != dmar_pci_notify_info_buf)
+		kfree(info);
+}
+
+static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
+				struct acpi_dmar_pci_path *path, int count)
+{
+	int i;
+
+	if (info->bus != bus)
+		return false;
+	if (info->level != count)
+		return false;
+
+	for (i = 0; i < count; i++) {
+		if (path[i].device != info->path[i].device ||
+		    path[i].function != info->path[i].function)
+			return false;
+	}
+
+	return true;
+}
+
+/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
+int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
+			  void *start, void*end, u16 segment,
+			  struct dmar_dev_scope *devices,
+			  int devices_cnt)
+{
+	int i, level;
+	struct device *tmp, *dev = &info->dev->dev;
+	struct acpi_dmar_device_scope *scope;
+	struct acpi_dmar_pci_path *path;
+
+	if (segment != info->seg)
		return 0;
 
-	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
-	if (!*devices)
-		return -ENOMEM;
-
-	start = tmp;
-	index = 0;
-	while (start < end) {
+	for (; start < end; start += scope->length) {
		scope = start;
-		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
-		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
-			ret = dmar_parse_one_dev_scope(scope,
-				&(*devices)[index], segment);
-			if (ret) {
-				dmar_free_dev_scope(devices, cnt);
-				return ret;
-			}
-			index ++;
+		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
+		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
+			continue;
+
+		path = (struct acpi_dmar_pci_path *)(scope + 1);
+		level = (scope->length - sizeof(*scope)) / sizeof(*path);
+		if (!dmar_match_pci_path(info, scope->bus, path, level))
+			continue;
+
+		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
+		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
+			pr_warn("Device scope type does not match for %s\n",
+				pci_name(info->dev));
+			return -EINVAL;
		}
-		start += scope->length;
+
+		for_each_dev_scope(devices, devices_cnt, i, tmp)
+			if (tmp == NULL) {
+				devices[i].bus = info->dev->bus->number;
+				devices[i].devfn = info->dev->devfn;
+				rcu_assign_pointer(devices[i].dev,
+						   get_device(dev));
+				return 1;
+			}
+		BUG_ON(i >= devices_cnt);
	}
 
	return 0;
 }
 
-void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
+			  struct dmar_dev_scope *devices, int count)
 {
-	if (*devices && *cnt) {
-		while (--*cnt >= 0)
-			pci_dev_put((*devices)[*cnt]);
-		kfree(*devices);
-		*devices = NULL;
-		*cnt = 0;
-	}
+	int index;
+	struct device *tmp;
+
+	if (info->seg != segment)
+		return 0;
+
+	for_each_active_dev_scope(devices, count, index, tmp)
+		if (tmp == &info->dev->dev) {
+			rcu_assign_pointer(devices[index].dev, NULL);
+			synchronize_rcu();
+			put_device(tmp);
+			return 1;
+		}
+
+	return 0;
 }
 
+static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
+{
+	int ret = 0;
+	struct dmar_drhd_unit *dmaru;
+	struct acpi_dmar_hardware_unit *drhd;
+
+	for_each_drhd_unit(dmaru) {
+		if (dmaru->include_all)
+			continue;
+
+		drhd = container_of(dmaru->hdr,
+				    struct acpi_dmar_hardware_unit, header);
+		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
+				((void *)drhd) + drhd->header.length,
+				dmaru->segment,
+				dmaru->devices, dmaru->devices_cnt);
+		if (ret != 0)
+			break;
+	}
+	if (ret >= 0)
+		ret = dmar_iommu_notify_scope_dev(info);
+	if (ret < 0 && dmar_dev_scope_status == 0)
+		dmar_dev_scope_status = ret;
+
+	return ret;
+}
+
+static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
+{
+	struct dmar_drhd_unit *dmaru;
+
+	for_each_drhd_unit(dmaru)
+		if (dmar_remove_dev_scope(info, dmaru->segment,
+			dmaru->devices, dmaru->devices_cnt))
+			break;
+	dmar_iommu_notify_scope_dev(info);
+}
+
+static int dmar_pci_bus_notifier(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct pci_dev *pdev = to_pci_dev(data);
+	struct dmar_pci_notify_info *info;
+
+	/* Only care about add/remove events for physical functions */
+	if (pdev->is_virtfn)
+		return NOTIFY_DONE;
+	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+		return NOTIFY_DONE;
+
+	info = dmar_alloc_pci_notify_info(pdev, action);
+	if (!info)
+		return NOTIFY_DONE;
+
+	down_write(&dmar_global_lock);
+	if (action == BUS_NOTIFY_ADD_DEVICE)
+		dmar_pci_bus_add_dev(info);
+	else if (action == BUS_NOTIFY_DEL_DEVICE)
+		dmar_pci_bus_del_dev(info);
+	up_write(&dmar_global_lock);
+
+	dmar_free_pci_notify_info(info);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block dmar_pci_bus_nb = {
+	.notifier_call = dmar_pci_bus_notifier,
+	.priority = INT_MIN,
+};
+
 /**
  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
  * structure which uniquely represent one DMA remapping hardware unit
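The 64-byte static buffer makes the hotplug notifier allocation-free in the common case. The needed size is sizeof(*info) plus one two-byte acpi_dmar_pci_path entry per level of the PCI hierarchy, so ordinary topologies fit easily and only very deep ones hit the kzalloc() fallback. Rough arithmetic (exact struct sizes depend on the build):

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	/* e.g. a function behind three bridges: level = 4,
	   so size is sizeof(*info) + 8 bytes, comfortably under 64 */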
@@ -196,9 +346,18 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
+	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
+					      ((void *)drhd) + drhd->header.length,
+					      &dmaru->devices_cnt);
+	if (dmaru->devices_cnt && dmaru->devices == NULL) {
+		kfree(dmaru);
+		return -ENOMEM;
+	}
 
	ret = alloc_iommu(dmaru);
	if (ret) {
+		dmar_free_dev_scope(&dmaru->devices,
+				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
@@ -215,19 +374,24 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
	kfree(dmaru);
 }
 
-static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
 {
-	struct acpi_dmar_hardware_unit *drhd;
-
-	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
+	struct acpi_dmar_andd *andd = (void *)header;
 
-	if (dmaru->include_all)
-		return 0;
+	/* Check for NUL termination within the designated length */
+	if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
+		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
+			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+			   dmi_get_system_info(DMI_BIOS_VENDOR),
+			   dmi_get_system_info(DMI_BIOS_VERSION),
+			   dmi_get_system_info(DMI_PRODUCT_VERSION));
+		return -EINVAL;
+	}
+	pr_info("ANDD device: %x name: %s\n", andd->device_number,
+		andd->object_name);
 
-	return dmar_parse_dev_scope((void *)(drhd + 1),
-				    ((void *)drhd) + drhd->header.length,
-				    &dmaru->devices_cnt, &dmaru->devices,
-				    drhd->segment);
+	return 0;
 }
 
 #ifdef CONFIG_ACPI_NUMA
@@ -293,6 +457,10 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
+	case ACPI_DMAR_TYPE_ANDD:
+		/* We don't print this here because we need to sanity-check
+		   it first. So print it in dmar_parse_one_andd() instead. */
+		break;
	}
 }
 
@@ -378,6 +546,9 @@ parse_dmar_table(void)
			ret = dmar_parse_one_rhsa(entry_header);
 #endif
			break;
+		case ACPI_DMAR_TYPE_ANDD:
+			ret = dmar_parse_one_andd(entry_header);
+			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
@@ -394,14 +565,15 @@ parse_dmar_table(void)
	return ret;
 }
 
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
-				 struct pci_dev *dev)
+static int dmar_pci_device_match(struct dmar_dev_scope devices[],
+				 int cnt, struct pci_dev *dev)
 {
	int index;
+	struct device *tmp;
 
	while (dev) {
-		for (index = 0; index < cnt; index++)
-			if (dev == devices[index])
+		for_each_active_dev_scope(devices, cnt, index, tmp)
+			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;
 
		/* Check our parent */
@@ -414,11 +586,12 @@ static int dmar_pci_device_match(struct dmar_dev_scope devices[],
 struct dmar_drhd_unit *
 dmar_find_matched_drhd_unit(struct pci_dev *dev)
 {
-	struct dmar_drhd_unit *dmaru = NULL;
+	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
 
	dev = pci_physfn(dev);
 
+	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
@@ -426,44 +599,128 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 
		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
-			return dmaru;
+			goto out;
 
		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
-			return dmaru;
+			goto out;
	}
+	dmaru = NULL;
+out:
+	rcu_read_unlock();
 
-	return NULL;
+	return dmaru;
+}
+
+static void __init dmar_acpi_insert_dev_scope(u8 device_number,
+					      struct acpi_device *adev)
+{
+	struct dmar_drhd_unit *dmaru;
+	struct acpi_dmar_hardware_unit *drhd;
+	struct acpi_dmar_device_scope *scope;
+	struct device *tmp;
+	int i;
+	struct acpi_dmar_pci_path *path;
+
+	for_each_drhd_unit(dmaru) {
+		drhd = container_of(dmaru->hdr,
+				    struct acpi_dmar_hardware_unit,
+				    header);
+
+		for (scope = (void *)(drhd + 1);
+		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
+		     scope = ((void *)scope) + scope->length) {
+			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
+				continue;
+			if (scope->enumeration_id != device_number)
+				continue;
+
+			path = (void *)(scope + 1);
+			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
+				dev_name(&adev->dev), dmaru->reg_base_addr,
+				scope->bus, path->device, path->function);
+			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
+				if (tmp == NULL) {
+					dmaru->devices[i].bus = scope->bus;
+					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
+									    path->function);
+					rcu_assign_pointer(dmaru->devices[i].dev,
+							   get_device(&adev->dev));
+					return;
+				}
+			BUG_ON(i >= dmaru->devices_cnt);
+		}
+	}
+	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
+		device_number, dev_name(&adev->dev));
+}
+
+static int __init dmar_acpi_dev_scope_init(void)
+{
+	struct acpi_dmar_andd *andd;
+
+	if (dmar_tbl == NULL)
+		return -ENODEV;
+
+	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
+	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
+	     andd = ((void *)andd) + andd->header.length) {
+		if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
+			acpi_handle h;
+			struct acpi_device *adev;
+
+			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
+							  andd->object_name,
+							  &h))) {
+				pr_err("Failed to find handle for ACPI object %s\n",
+				       andd->object_name);
+				continue;
+			}
+			acpi_bus_get_device(h, &adev);
+			if (!adev) {
+				pr_err("Failed to get device for ACPI object %s\n",
+				       andd->object_name);
+				continue;
+			}
+			dmar_acpi_insert_dev_scope(andd->device_number, adev);
+		}
+	}
+	return 0;
 }
 
 int __init dmar_dev_scope_init(void)
 {
-	static int dmar_dev_scope_initialized;
-	struct dmar_drhd_unit *drhd;
-	int ret = -ENODEV;
+	struct pci_dev *dev = NULL;
+	struct dmar_pci_notify_info *info;
 
-	if (dmar_dev_scope_initialized)
-		return dmar_dev_scope_initialized;
+	if (dmar_dev_scope_status != 1)
+		return dmar_dev_scope_status;
 
-	if (list_empty(&dmar_drhd_units))
-		goto fail;
+	if (list_empty(&dmar_drhd_units)) {
+		dmar_dev_scope_status = -ENODEV;
+	} else {
+		dmar_dev_scope_status = 0;
+
+		dmar_acpi_dev_scope_init();
 
-	list_for_each_entry(drhd, &dmar_drhd_units, list) {
-		ret = dmar_parse_dev(drhd);
-		if (ret)
-			goto fail;
+		for_each_pci_dev(dev) {
+			if (dev->is_virtfn)
+				continue;
+
+			info = dmar_alloc_pci_notify_info(dev,
+					BUS_NOTIFY_ADD_DEVICE);
+			if (!info) {
+				return dmar_dev_scope_status;
+			} else {
+				dmar_pci_bus_add_dev(info);
+				dmar_free_pci_notify_info(info);
+			}
+		}
+
+		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}
 
-	ret = dmar_parse_rmrr_atsr_dev();
-	if (ret)
-		goto fail;
-
-	dmar_dev_scope_initialized = 1;
-	return 0;
-
-fail:
-	dmar_dev_scope_initialized = ret;
-	return ret;
+	return dmar_dev_scope_status;
 }
@@ -557,6 +814,7 @@ int __init detect_intel_iommu(void)
 {
	int ret;
 
+	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
@@ -574,6 +832,7 @@ int __init detect_intel_iommu(void)
	}
	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
+	up_write(&dmar_global_lock);
 
	return ret ? 1 : -ENODEV;
 }
@@ -696,6 +955,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
+	iommu->segment = drhd->segment;
 
	iommu->node = -1;
 
@@ -1386,10 +1646,15 @@ static int __init dmar_free_unused_resources(void)
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;
 
+	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
+		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+
+	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
+	up_write(&dmar_global_lock);
 
	return 0;
 }
(diff for one file suppressed because it is too large)
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -38,6 +38,17 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
 static int ir_ioapic_num, ir_hpet_num;
 
+/*
+ * Lock ordering:
+ * ->dmar_global_lock
+ *	->irq_2_ir_lock
+ *		->qi->q_lock
+ *	->iommu->register_lock
+ * Note:
+ * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
+ * in single-threaded environment with interrupt disabled, so no need to tabke
+ * the dmar_global_lock.
+ */
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
 static int __init parse_ioapics_under_ir(void);
@@ -307,12 +318,14 @@ static int set_ioapic_sid(struct irte *irte, int apic)
	if (!irte)
		return -1;
 
+	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
+	up_read(&dmar_global_lock);
 
	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
@@ -332,12 +345,14 @@ static int set_hpet_sid(struct irte *irte, u8 id)
	if (!irte)
		return -1;
 
+	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
+	up_read(&dmar_global_lock);
 
	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
@@ -794,10 +809,16 @@ static int __init parse_ioapics_under_ir(void)
 
 static int __init ir_dev_scope_init(void)
 {
+	int ret;
+
	if (!irq_remapping_enabled)
		return 0;
 
-	return dmar_dev_scope_init();
+	down_write(&dmar_global_lock);
+	ret = dmar_dev_scope_init();
+	up_write(&dmar_global_lock);
+
+	return ret;
 }
 rootfs_initcall(ir_dev_scope_init);
 
@ -878,23 +899,27 @@ static int intel_setup_ioapic_entry(int irq,
|
|||||||
struct io_apic_irq_attr *attr)
|
struct io_apic_irq_attr *attr)
|
||||||
{
|
{
|
||||||
int ioapic_id = mpc_ioapic_id(attr->ioapic);
|
int ioapic_id = mpc_ioapic_id(attr->ioapic);
|
||||||
struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
|
struct intel_iommu *iommu;
|
||||||
struct IR_IO_APIC_route_entry *entry;
|
struct IR_IO_APIC_route_entry *entry;
|
||||||
struct irte irte;
|
struct irte irte;
|
||||||
int index;
|
int index;
|
||||||
|
|
||||||
|
down_read(&dmar_global_lock);
|
||||||
|
iommu = map_ioapic_to_ir(ioapic_id);
|
||||||
if (!iommu) {
|
if (!iommu) {
|
||||||
pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
|
pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
|
||||||
return -ENODEV;
|
index = -ENODEV;
|
||||||
}
|
} else {
|
||||||
|
index = alloc_irte(iommu, irq, 1);
|
||||||
entry = (struct IR_IO_APIC_route_entry *)route_entry;
|
if (index < 0) {
|
||||||
|
pr_warn("Failed to allocate IRTE for ioapic %d\n",
|
||||||
index = alloc_irte(iommu, irq, 1);
|
ioapic_id);
|
||||||
if (index < 0) {
|
index = -ENOMEM;
|
||||||
pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
|
}
|
||||||
return -ENOMEM;
|
|
||||||
}
|
}
|
||||||
|
up_read(&dmar_global_lock);
|
||||||
|
if (index < 0)
|
||||||
|
return index;
|
||||||
|
|
||||||
prepare_irte(&irte, vector, destination);
|
prepare_irte(&irte, vector, destination);
|
||||||
|
|
||||||
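The rewrite above replaces early returns with a single status variable so the function reaches exactly one up_read() no matter which branch runs. The idiom, distilled (sketch only; some_lock, resource_present and try_allocate are placeholders, not names from this series):

    int do_work(void)
    {
        int ret = -ENODEV;

        down_read(&some_lock);
        if (resource_present())
            ret = try_allocate();  /* may still fail */
        up_read(&some_lock);       /* single unlock site */

        return ret;                /* never returns with the lock held */
    }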
@@ -913,6 +938,7 @@ static int intel_setup_ioapic_entry(int irq,
             irte.avail, irte.vector, irte.dest_id,
             irte.sid, irte.sq, irte.svt);
 
+    entry = (struct IR_IO_APIC_route_entry *)route_entry;
     memset(entry, 0, sizeof(*entry));
 
     entry->index2 = (index >> 15) & 0x1;
@@ -1043,20 +1069,23 @@ static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
     struct intel_iommu *iommu;
     int index;
 
+    down_read(&dmar_global_lock);
     iommu = map_dev_to_ir(dev);
     if (!iommu) {
         printk(KERN_ERR
                "Unable to map PCI %s to iommu\n", pci_name(dev));
-        return -ENOENT;
+        index = -ENOENT;
+    } else {
+        index = alloc_irte(iommu, irq, nvec);
+        if (index < 0) {
+            printk(KERN_ERR
+                   "Unable to allocate %d IRTE for PCI %s\n",
+                   nvec, pci_name(dev));
+            index = -ENOSPC;
+        }
     }
+    up_read(&dmar_global_lock);
 
-    index = alloc_irte(iommu, irq, nvec);
-    if (index < 0) {
-        printk(KERN_ERR
-               "Unable to allocate %d IRTE for PCI %s\n", nvec,
-               pci_name(dev));
-        return -ENOSPC;
-    }
     return index;
 }
 
@@ -1064,33 +1093,40 @@ static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
                    int index, int sub_handle)
 {
     struct intel_iommu *iommu;
+    int ret = -ENOENT;
 
+    down_read(&dmar_global_lock);
     iommu = map_dev_to_ir(pdev);
-    if (!iommu)
-        return -ENOENT;
-    /*
-     * setup the mapping between the irq and the IRTE
-     * base index, the sub_handle pointing to the
-     * appropriate interrupt remap table entry.
-     */
-    set_irte_irq(irq, iommu, index, sub_handle);
+    if (iommu) {
+        /*
+         * setup the mapping between the irq and the IRTE
+         * base index, the sub_handle pointing to the
+         * appropriate interrupt remap table entry.
+         */
+        set_irte_irq(irq, iommu, index, sub_handle);
+        ret = 0;
+    }
+    up_read(&dmar_global_lock);
 
-    return 0;
+    return ret;
 }
 
 static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
-    struct intel_iommu *iommu = map_hpet_to_ir(id);
+    int ret = -1;
+    struct intel_iommu *iommu;
     int index;
 
-    if (!iommu)
-        return -1;
-
-    index = alloc_irte(iommu, irq, 1);
-    if (index < 0)
-        return -1;
-
-    return 0;
+    down_read(&dmar_global_lock);
+    iommu = map_hpet_to_ir(id);
+    if (iommu) {
+        index = alloc_irte(iommu, irq, 1);
+        if (index >= 0)
+            ret = 0;
+    }
+    up_read(&dmar_global_lock);
+
+    return ret;
 }
 
 struct irq_remap_ops intel_irq_remap_ops = {
@@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node,
     return 0;
 }
 
+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+    struct iova *iova;
+
+    iova = alloc_iova_mem();
+    if (iova) {
+        iova->pfn_lo = pfn_lo;
+        iova->pfn_hi = pfn_hi;
+    }
+
+    return iova;
+}
+
 static struct iova *
 __insert_new_range(struct iova_domain *iovad,
     unsigned long pfn_lo, unsigned long pfn_hi)
 {
     struct iova *iova;
 
-    iova = alloc_iova_mem();
-    if (!iova)
-        return iova;
-
-    iova->pfn_hi = pfn_hi;
-    iova->pfn_lo = pfn_lo;
-    iova_insert_rbtree(&iovad->rbroot, iova);
+    iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+    if (iova)
+        iova_insert_rbtree(&iovad->rbroot, iova);
 
     return iova;
 }
 
@@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
     }
     spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+              unsigned long pfn_lo, unsigned long pfn_hi)
+{
+    unsigned long flags;
+    struct iova *prev = NULL, *next = NULL;
+
+    spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+    if (iova->pfn_lo < pfn_lo) {
+        prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+        if (prev == NULL)
+            goto error;
+    }
+    if (iova->pfn_hi > pfn_hi) {
+        next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+        if (next == NULL)
+            goto error;
+    }
+
+    __cached_rbnode_delete_update(iovad, iova);
+    rb_erase(&iova->node, &iovad->rbroot);
+
+    if (prev) {
+        iova_insert_rbtree(&iovad->rbroot, prev);
+        iova->pfn_lo = pfn_lo;
+    }
+    if (next) {
+        iova_insert_rbtree(&iovad->rbroot, next);
+        iova->pfn_hi = pfn_hi;
+    }
+    spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+    return iova;
+
+error:
+    spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+    if (prev)
+        free_iova_mem(prev);
+    return NULL;
+}
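split_and_remove_iova() carves the range [pfn_lo, pfn_hi] out of an existing iova, reinserting whatever survives below and above the cut. The interval arithmetic in isolation, as a runnable userspace demo (values are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned long lo = 100, hi = 200;         /* existing iova      */
        unsigned long cut_lo = 120, cut_hi = 150; /* range to carve out */

        if (lo < cut_lo)  /* leftover below the cut ("prev") */
            printf("prev: [%lu, %lu]\n", lo, cut_lo - 1);  /* [100,119] */
        if (hi > cut_hi)  /* leftover above the cut ("next") */
            printf("next: [%lu, %lu]\n", cut_hi + 1, hi);  /* [151,200] */
        return 0;
    }

If allocating either leftover fails, the function unwinds and returns NULL without touching the rbtree, which is why both pieces are allocated before rb_erase() runs.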
@@ -23,6 +23,9 @@
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_iommu.h>
+#include <linux/of_irq.h>
 
 #include <asm/cacheflush.h>
 
@@ -146,13 +149,10 @@ static int iommu_enable(struct omap_iommu *obj)
     struct platform_device *pdev = to_platform_device(obj->dev);
     struct iommu_platform_data *pdata = pdev->dev.platform_data;
 
-    if (!pdata)
-        return -EINVAL;
-
     if (!arch_iommu)
         return -ENODEV;
 
-    if (pdata->deassert_reset) {
+    if (pdata && pdata->deassert_reset) {
         err = pdata->deassert_reset(pdev, pdata->reset_name);
         if (err) {
             dev_err(obj->dev, "deassert_reset failed: %d\n", err);
@@ -172,14 +172,11 @@ static void iommu_disable(struct omap_iommu *obj)
     struct platform_device *pdev = to_platform_device(obj->dev);
     struct iommu_platform_data *pdata = pdev->dev.platform_data;
 
-    if (!pdata)
-        return;
-
     arch_iommu->disable(obj);
 
     pm_runtime_put_sync(obj->dev);
 
-    if (pdata->assert_reset)
+    if (pdata && pdata->assert_reset)
         pdata->assert_reset(pdev, pdata->reset_name);
 }
 
@@ -523,7 +520,8 @@ static void flush_iopte_range(u32 *first, u32 *last)
 static void iopte_free(u32 *iopte)
 {
     /* Note: freed iopte's must be clean ready for re-use */
-    kmem_cache_free(iopte_cachep, iopte);
+    if (iopte)
+        kmem_cache_free(iopte_cachep, iopte);
 }
 
 static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
@@ -863,7 +861,7 @@ static int device_match_by_alias(struct device *dev, void *data)
 **/
 static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
 {
-    int err = -ENOMEM;
+    int err;
     struct device *dev;
     struct omap_iommu *obj;
 
@@ -871,7 +869,7 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
                 (void *)name,
                 device_match_by_alias);
     if (!dev)
-        return NULL;
+        return ERR_PTR(-ENODEV);
 
     obj = to_iommu(dev);
 
@@ -890,8 +888,10 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
         goto err_enable;
     flush_iotlb_all(obj);
 
-    if (!try_module_get(obj->owner))
+    if (!try_module_get(obj->owner)) {
+        err = -ENODEV;
         goto err_module;
+    }
 
     spin_unlock(&obj->iommu_lock);
 
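Returning ERR_PTR(-ENODEV) instead of NULL lets callers of omap_iommu_attach() propagate a real error code instead of guessing. A caller now checks it roughly like this (sketch, assuming a struct device *dev is in scope for the message):

    struct omap_iommu *oiommu = omap_iommu_attach(name, iopgd);

    if (IS_ERR(oiommu)) {
        dev_err(dev, "can't get omap_iommu: %ld\n", PTR_ERR(oiommu));
        return PTR_ERR(oiommu);
    }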
@@ -940,17 +940,41 @@ static int omap_iommu_probe(struct platform_device *pdev)
     struct omap_iommu *obj;
     struct resource *res;
     struct iommu_platform_data *pdata = pdev->dev.platform_data;
+    struct device_node *of = pdev->dev.of_node;
 
-    obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
+    obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
     if (!obj)
         return -ENOMEM;
 
-    obj->nr_tlb_entries = pdata->nr_tlb_entries;
-    obj->name = pdata->name;
+    if (of) {
+        obj->name = dev_name(&pdev->dev);
+        obj->nr_tlb_entries = 32;
+        err = of_property_read_u32(of, "ti,#tlb-entries",
+                       &obj->nr_tlb_entries);
+        if (err && err != -EINVAL)
+            return err;
+        if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
+            return -EINVAL;
+        /*
+         * da_start and da_end are needed for omap-iovmm, so hardcode
+         * these values as used by OMAP3 ISP - the only user for
+         * omap-iovmm
+         */
+        obj->da_start = 0;
+        obj->da_end = 0xfffff000;
+        if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
+            obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
+    } else {
+        obj->nr_tlb_entries = pdata->nr_tlb_entries;
+        obj->name = pdata->name;
+        obj->da_start = pdata->da_start;
+        obj->da_end = pdata->da_end;
+    }
+    if (obj->da_end <= obj->da_start)
+        return -EINVAL;
+
     obj->dev = &pdev->dev;
     obj->ctx = (void *)obj + sizeof(*obj);
-    obj->da_start = pdata->da_start;
-    obj->da_end = pdata->da_end;
 
     spin_lock_init(&obj->iommu_lock);
     mutex_init(&obj->mmap_lock);
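With the DT branch above, an OMAP IOMMU can be probed from the device tree: "ti,#tlb-entries" overrides the default of 32 (only 8 or 32 are accepted), and the bare "ti,iommu-bus-err-back" property enables bus-error back-propagation. An illustrative node (the unit address, reg and interrupt values below are invented for the example, not taken from a real board file):

    mmu_isp: mmu@480bd400 {
        compatible = "ti,omap4-iommu";
        reg = <0x480bd400 0x100>;
        interrupts = <24>;
        ti,#tlb-entries = <8>;
        ti,iommu-bus-err-back;
    };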
@@ -958,33 +982,18 @@ static int omap_iommu_probe(struct platform_device *pdev)
     INIT_LIST_HEAD(&obj->mmap);
 
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-    if (!res) {
-        err = -ENODEV;
-        goto err_mem;
-    }
-
-    res = request_mem_region(res->start, resource_size(res),
-                 dev_name(&pdev->dev));
-    if (!res) {
-        err = -EIO;
-        goto err_mem;
-    }
-
-    obj->regbase = ioremap(res->start, resource_size(res));
-    if (!obj->regbase) {
-        err = -ENOMEM;
-        goto err_ioremap;
-    }
+    obj->regbase = devm_ioremap_resource(obj->dev, res);
+    if (IS_ERR(obj->regbase))
+        return PTR_ERR(obj->regbase);
 
     irq = platform_get_irq(pdev, 0);
-    if (irq < 0) {
-        err = -ENODEV;
-        goto err_irq;
-    }
-    err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
-              dev_name(&pdev->dev), obj);
+    if (irq < 0)
+        return -ENODEV;
+
+    err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
+                   dev_name(obj->dev), obj);
     if (err < 0)
-        goto err_irq;
+        return err;
     platform_set_drvdata(pdev, obj);
 
     pm_runtime_irq_safe(obj->dev);
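The probe path now uses device-managed (devm_*) resources, which is what lets the error labels here and most of omap_iommu_remove() below disappear: everything obtained through devm_* is released automatically on probe failure or driver unbind. The shape of the pattern, as a minimal sketch (foo_probe and foo_handler are hypothetical names):

    static int foo_probe(struct platform_device *pdev)
    {
        struct resource *res;
        void __iomem *base;
        int irq;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res); /* requests + maps */
        if (IS_ERR(base))
            return PTR_ERR(base);  /* nothing to unwind by hand */

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
            return irq;

        return devm_request_irq(&pdev->dev, irq, foo_handler, IRQF_SHARED,
                                dev_name(&pdev->dev), NULL);
    }

Note that devm_ioremap_resource() also validates res and requests the memory region itself, which is why the explicit request_mem_region()/ioremap() pair could be dropped.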
@@ -992,42 +1001,34 @@ static int omap_iommu_probe(struct platform_device *pdev)
 
     dev_info(&pdev->dev, "%s registered\n", obj->name);
     return 0;
-
-err_irq:
-    iounmap(obj->regbase);
-err_ioremap:
-    release_mem_region(res->start, resource_size(res));
-err_mem:
-    kfree(obj);
-    return err;
 }
 
 static int omap_iommu_remove(struct platform_device *pdev)
 {
-    int irq;
-    struct resource *res;
     struct omap_iommu *obj = platform_get_drvdata(pdev);
 
     iopgtable_clear_entry_all(obj);
 
-    irq = platform_get_irq(pdev, 0);
-    free_irq(irq, obj);
-    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-    release_mem_region(res->start, resource_size(res));
-    iounmap(obj->regbase);
-
     pm_runtime_disable(obj->dev);
 
     dev_info(&pdev->dev, "%s removed\n", obj->name);
-    kfree(obj);
     return 0;
 }
 
+static struct of_device_id omap_iommu_of_match[] = {
+    { .compatible = "ti,omap2-iommu" },
+    { .compatible = "ti,omap4-iommu" },
+    { .compatible = "ti,dra7-iommu" },
+    {},
+};
+MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
+
 static struct platform_driver omap_iommu_driver = {
     .probe  = omap_iommu_probe,
     .remove = omap_iommu_remove,
     .driver = {
         .name   = "omap-iommu",
+        .of_match_table = of_match_ptr(omap_iommu_of_match),
     },
 };
 
@@ -1253,6 +1254,49 @@ static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
     return 0;
 }
 
+static int omap_iommu_add_device(struct device *dev)
+{
+    struct omap_iommu_arch_data *arch_data;
+    struct device_node *np;
+
+    /*
+     * Allocate the archdata iommu structure for DT-based devices.
+     *
+     * TODO: Simplify this when removing non-DT support completely from the
+     * IOMMU users.
+     */
+    if (!dev->of_node)
+        return 0;
+
+    np = of_parse_phandle(dev->of_node, "iommus", 0);
+    if (!np)
+        return 0;
+
+    arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
+    if (!arch_data) {
+        of_node_put(np);
+        return -ENOMEM;
+    }
+
+    arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL);
+    dev->archdata.iommu = arch_data;
+
+    of_node_put(np);
+
+    return 0;
+}
+
+static void omap_iommu_remove_device(struct device *dev)
+{
+    struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+
+    if (!dev->of_node || !arch_data)
+        return;
+
+    kfree(arch_data->name);
+    kfree(arch_data);
+}
+
 static struct iommu_ops omap_iommu_ops = {
     .domain_init    = omap_iommu_domain_init,
     .domain_destroy = omap_iommu_domain_destroy,
@@ -1262,6 +1306,8 @@ static struct iommu_ops omap_iommu_ops = {
     .unmap      = omap_iommu_unmap,
     .iova_to_phys   = omap_iommu_iova_to_phys,
     .domain_has_cap = omap_iommu_domain_has_cap,
+    .add_device = omap_iommu_add_device,
+    .remove_device  = omap_iommu_remove_device,
     .pgsize_bitmap  = OMAP_IOMMU_PGSIZES,
 };
 
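The new .add_device/.remove_device callbacks are invoked by the IOMMU core for each device on a bus once the driver registers its ops there; for this driver that happens through bus_set_iommu() on the platform bus. Roughly (a simplified sketch of the registration; the driver's real init also creates its page-table cache and registers the platform driver):

    static int __init omap_iommu_init_sketch(void)
    {
        /* core will then call omap_iommu_add_device() per platform device */
        return bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
    }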
@@ -52,6 +52,8 @@ struct omap_iommu {
     void *ctx; /* iommu context: registers saved area */
     u32 da_start;
     u32 da_end;
+
+    int has_bus_err_back;
 };
 
 struct cr_regs {
@@ -130,6 +132,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 #define MMU_READ_CAM        0x68
 #define MMU_READ_RAM        0x6c
 #define MMU_EMU_FAULT_AD    0x70
+#define MMU_GP_REG          0x88
 
 #define MMU_REG_SIZE        256
 
@@ -163,6 +166,8 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 #define MMU_RAM_MIXED_MASK  (1 << MMU_RAM_MIXED_SHIFT)
 #define MMU_RAM_MIXED       MMU_RAM_MIXED_MASK
 
+#define MMU_GP_REG_BUS_ERR_BACK_EN  0x1
+
 /*
  * utilities for super page(16MB, 1MB, 64KB and 4KB)
  */
@@ -98,6 +98,9 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
 
     iommu_write_reg(obj, pa, MMU_TTB);
 
+    if (obj->has_bus_err_back)
+        iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
+
     __iommu_set_twl(obj, true);
 
     return 0;
@@ -424,7 +424,8 @@ enum acpi_dmar_type {
     ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
     ACPI_DMAR_TYPE_ATSR = 2,
     ACPI_DMAR_HARDWARE_AFFINITY = 3,
-    ACPI_DMAR_TYPE_RESERVED = 4 /* 4 and greater are reserved */
+    ACPI_DMAR_TYPE_ANDD = 4,
+    ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */
 };
 
 /* DMAR Device Scope structure */
@@ -445,7 +446,8 @@ enum acpi_dmar_scope_type {
     ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
     ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
     ACPI_DMAR_SCOPE_TYPE_HPET = 4,
-    ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+    ACPI_DMAR_SCOPE_TYPE_ACPI = 5,
+    ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */
 };
 
 struct acpi_dmar_pci_path {
@@ -507,6 +509,15 @@ struct acpi_dmar_rhsa {
     u32 proximity_domain;
 };
 
+/* 4: ACPI Namespace Device Declaration Structure */
+
+struct acpi_dmar_andd {
+    struct acpi_dmar_header header;
+    u8 reserved[3];
+    u8 device_number;
+    u8 object_name[];
+};
+
 /*******************************************************************************
  *
  * HPET - High Precision Event Timer table
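The new ANDD entry ends in a variable-length ACPI object name (a NUL-terminated string per the DMAR spec); its extent is implied by header.length rather than a separate size field. A hedged sketch of recovering it from an already-validated entry (andd_object_name is an illustrative helper, not part of this series):

    static const char *andd_object_name(struct acpi_dmar_andd *andd)
    {
        /* header.length covers the whole entry, including the
         * flexible object_name[] array at its tail. */
        if (andd->header.length <= sizeof(*andd))
            return NULL;  /* no name bytes present */
        return (const char *)andd->object_name;
    }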
@@ -25,6 +25,8 @@
 #include <linux/types.h>
 #include <linux/msi.h>
 #include <linux/irqreturn.h>
+#include <linux/rwsem.h>
+#include <linux/rcupdate.h>
 
 struct acpi_dmar_header;
 
@@ -34,13 +36,19 @@ struct acpi_dmar_header;
 
 struct intel_iommu;
 
+struct dmar_dev_scope {
+    struct device __rcu *dev;
+    u8 bus;
+    u8 devfn;
+};
+
 #ifdef CONFIG_DMAR_TABLE
 extern struct acpi_table_header *dmar_tbl;
 struct dmar_drhd_unit {
     struct list_head list;          /* list of drhd units   */
     struct acpi_dmar_header *hdr;   /* ACPI header          */
     u64 reg_base_addr;              /* register base address*/
-    struct pci_dev **devices;       /* target device array  */
+    struct dmar_dev_scope *devices; /* target device array  */
    int devices_cnt;                 /* target device count  */
     u16 segment;                    /* PCI domain           */
     u8 ignored:1;                   /* ignore drhd          */
@@ -48,33 +56,66 @@ struct dmar_drhd_unit {
     struct intel_iommu *iommu;
 };
 
+struct dmar_pci_notify_info {
+    struct pci_dev              *dev;
+    unsigned long               event;
+    int                         bus;
+    u16                         seg;
+    u16                         level;
+    struct acpi_dmar_pci_path   path[];
+}  __attribute__((packed));
+
+extern struct rw_semaphore dmar_global_lock;
 extern struct list_head dmar_drhd_units;
 
 #define for_each_drhd_unit(drhd) \
-    list_for_each_entry(drhd, &dmar_drhd_units, list)
+    list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
 
 #define for_each_active_drhd_unit(drhd)                 \
-    list_for_each_entry(drhd, &dmar_drhd_units, list)       \
+    list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)   \
         if (drhd->ignored) {} else
 
 #define for_each_active_iommu(i, drhd)                  \
-    list_for_each_entry(drhd, &dmar_drhd_units, list)       \
+    list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)   \
         if (i=drhd->iommu, drhd->ignored) {} else
 
 #define for_each_iommu(i, drhd)                     \
-    list_for_each_entry(drhd, &dmar_drhd_units, list)       \
+    list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)   \
        if (i=drhd->iommu, 0) {} else
 
+static inline bool dmar_rcu_check(void)
+{
+    return rwsem_is_locked(&dmar_global_lock) ||
+           system_state == SYSTEM_BOOTING;
+}
+
+#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
+
+#define for_each_dev_scope(a, c, p, d)  \
+    for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
+            NULL, (p) < (c)); (p)++)
+
+#define for_each_active_dev_scope(a, c, p, d)   \
+    for_each_dev_scope((a), (c), (p), (d))  if (!(d)) { continue; } else
+
 extern int dmar_table_init(void);
 extern int dmar_dev_scope_init(void);
 extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
-                struct pci_dev ***devices, u16 segment);
-extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt);
+                struct dmar_dev_scope **devices, u16 segment);
+extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
+extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
+extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
+                 void *start, void*end, u16 segment,
+                 struct dmar_dev_scope *devices,
+                 int devices_cnt);
+extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
+                 u16 segment, struct dmar_dev_scope *devices,
+                 int count);
 /* Intel IOMMU detection */
 extern int detect_intel_iommu(void);
 extern int enable_drhd_fault_handling(void);
 #else
+struct dmar_pci_notify_info;
 static inline int detect_intel_iommu(void)
 {
     return -ENODEV;
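The device-scope arrays are now RCU-protected: for_each_dev_scope() goes through dmar_rcu_dereference(), whose lockdep condition accepts an RCU read-side section, a holder of dmar_global_lock, or single-threaded boot. A hedged sketch of a reader walking every active scope entry:

    struct dmar_drhd_unit *drhd;
    struct device *dev;
    int i;

    rcu_read_lock();
    for_each_drhd_unit(drhd) {
        for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev)
            dev_info(dev, "in scope of DRHD at 0x%llx\n",
                 (unsigned long long)drhd->reg_base_addr);
    }
    rcu_read_unlock();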
@@ -138,30 +179,9 @@ extern int arch_setup_dmar_msi(unsigned int irq);
 
 #ifdef CONFIG_INTEL_IOMMU
 extern int iommu_detected, no_iommu;
-extern struct list_head dmar_rmrr_units;
-struct dmar_rmrr_unit {
-    struct list_head list;          /* list of rmrr units   */
-    struct acpi_dmar_header *hdr;   /* ACPI header          */
-    u64 base_address;               /* reserved base address*/
-    u64 end_address;                /* reserved end address */
-    struct pci_dev **devices;       /* target devices */
-    int devices_cnt;                /* target device count */
-};
-
-#define for_each_rmrr_units(rmrr) \
-    list_for_each_entry(rmrr, &dmar_rmrr_units, list)
-
-struct dmar_atsr_unit {
-    struct list_head list;          /* list of ATSR units */
-    struct acpi_dmar_header *hdr;   /* ACPI header */
-    struct pci_dev **devices;       /* target devices */
-    int devices_cnt;                /* target device count */
-    u8 include_all:1;               /* include all ports */
-};
-
-int dmar_parse_rmrr_atsr_dev(void);
 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
+extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
 extern int intel_iommu_init(void);
 #else /* !CONFIG_INTEL_IOMMU: */
 static inline int intel_iommu_init(void) { return -ENODEV; }
@@ -173,7 +193,7 @@ static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
 {
     return 0;
 }
-static inline int dmar_parse_rmrr_atsr_dev(void)
+static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 {
     return 0;
 }
@@ -319,6 +319,7 @@ struct intel_iommu {
     int         agaw; /* agaw of this iommu */
     int         msagaw; /* max sagaw of this iommu */
     unsigned int    irq;
+    u16         segment;     /* PCI segment# */
     unsigned char   name[13];    /* Device Name */
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -47,5 +47,7 @@ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
+struct iova *split_and_remove_iova(struct iova_domain *iovad,
+    struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
 
 #endif