mirror of https://github.com/linux-apfs/linux-apfs.git
Merge tag 'iommu-updates-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:

 - code optimizations for the Intel VT-d driver

 - ability to switch off a previously enabled Intel IOMMU

 - support for 'struct iommu_device' for OMAP, Rockchip and Mediatek
   IOMMUs

 - header optimizations for IOMMU core code headers and a few fixes
   that became necessary in other parts of the kernel because of that

 - ACPI/IORT updates and fixes

 - Exynos IOMMU optimizations

 - updates for the IOMMU dma-api code to bring it closer to use per-cpu
   iova caches

 - new command-line option to set default domain type allocated by the
   iommu core code

 - another command line option to allow the Intel IOMMU switched off in
   a tboot environment

 - ARM/SMMU: TLB sync optimisations for SMMUv2, Support for using an
   IDENTITY domain in conjunction with DMA ops, Support for SMR masking,
   Support for 16-bit ASIDs (was previously broken)

 - various other small fixes and improvements

* tag 'iommu-updates-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (63 commits)
  soc/qbman: Move dma-mapping.h include to qman_priv.h
  soc/qbman: Fix implicit header dependency now causing build fails
  iommu: Remove trace-events include from iommu.h
  iommu: Remove pci.h include from trace/events/iommu.h
  arm: dma-mapping: Don't override dma_ops in arch_setup_dma_ops()
  ACPI/IORT: Fix CONFIG_IOMMU_API dependency
  iommu/vt-d: Don't print the failure message when booting non-kdump kernel
  iommu: Move report_iommu_fault() to iommu.c
  iommu: Include device.h in iommu.h
  x86, iommu/vt-d: Add an option to disable Intel IOMMU force on
  iommu/arm-smmu: Return IOVA in iova_to_phys when SMMU is bypassed
  iommu/arm-smmu: Correct sid to mask
  iommu/amd: Fix incorrect error handling in amd_iommu_bind_pasid()
  iommu: Make iommu_bus_notifier return NOTIFY_DONE rather than error code
  omap3isp: Remove iommu_group related code
  iommu/omap: Add iommu-group support
  iommu/omap: Make use of 'struct iommu_device'
  iommu/omap: Store iommu_dev pointer in arch_data
  iommu/omap: Move data structures to omap-iommu.h
  iommu/omap: Drop legacy-style device support
  ...
@@ -1578,6 +1578,15 @@
			extended tables themselves, and also PASID support. With
			this option set, extended tables will not be used even
			on hardware which claims to support them.
		tboot_noforce [Default Off]
			Do not force the Intel IOMMU enabled under tboot.
			By default, tboot will force Intel IOMMU on, which
			could harm performance of some high-throughput
			devices like 40GBit network cards, even if identity
			mapping is enabled.
			Note that using this option lowers the security
			provided by tboot because it makes the system
			vulnerable to DMA attacks.

	intel_idle.max_cstate=	[KNL,HW,ACPI,X86]
			0 disables intel_idle and fall back on acpi_idle.
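As the description above implies, this is a suboption of the existing
intel_iommu= parameter; an illustrative boot command-line usage (a sketch
based on the documented syntax, not shown on this page) would be:

	intel_iommu=tboot_noforce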
@@ -1644,6 +1653,12 @@
		nobypass	[PPC/POWERNV]
			Disable IOMMU bypass, using IOMMU for PCI devices.

	iommu.passthrough=
			[ARM64] Configure DMA to bypass the IOMMU by default.
			Format: { "0" | "1" }
			0 - Use IOMMU translation for DMA.
			1 - Bypass the IOMMU for DMA.
			unset - Use IOMMU translation for DMA.

	io7=		[HW] IO7 for Marvel based alpha systems
			See comment before marvel_specify_io7 in
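The IOMMU core change that consumes this option is not among the files shown
on this page; purely as a sketch of how such a boot parameter is typically
wired up with early_param() (the variable and handler names here are
assumptions, not taken from this diff):

	#include <linux/init.h>
	#include <linux/iommu.h>
	#include <linux/string.h>

	/* Hypothetical: the real default-domain variable lives in the IOMMU core. */
	static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;

	static int __init iommu_set_def_domain_type(char *str)
	{
		bool pt;

		if (!str || strtobool(str, &pt))
			return -EINVAL;

		/* "1" selects an identity (bypass) default domain, "0" a DMA domain */
		iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
		return 0;
	}
	early_param("iommu.passthrough", iommu_set_def_domain_type);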
@@ -60,6 +60,17 @@ conditions.
                  aliases of secure registers have to be used during
                  SMMU configuration.

- stream-match-mask : For SMMUs supporting stream matching and using
                  #iommu-cells = <1>, specifies a mask of bits to ignore
                  when matching stream IDs (e.g. this may be programmed
                  into the SMRn.MASK field of every stream match register
                  used). For cases where it is desirable to ignore some
                  portion of every Stream ID (e.g. for certain MMU-500
                  configurations given globally unique input IDs). This
                  property is not valid for SMMUs using stream indexing,
                  or using stream matching with #iommu-cells = <2>, and
                  may be ignored if present in such cases.

** Deprecated properties:

- mmu-masters (deprecated in favour of the generic "iommus" binding) :

@@ -109,3 +120,20 @@ conditions.
        master3 {
                iommus = <&smmu2 1 0x30>;
        };


        /* ARM MMU-500 with 10-bit stream ID input configuration */
        smmu3: iommu {
                compatible = "arm,mmu-500", "arm,smmu-v2";
                ...
                #iommu-cells = <1>;
                /* always ignore appended 5-bit TBU number */
                stream-match-mask = 0x7c00;
        };

        bus {
                /* bus whose child devices emit one unique 10-bit stream
                   ID each, but may master through multiple SMMU TBUs */
                iommu-map = <0 &smmu3 0 0x400>;
                ...
        };
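The driver side that consumes this property is in the arm-smmu.c diff, which
this page suppresses for size; as an illustration only, an optional DT mask
like this one is typically read at probe time along these lines (the helper
function is hypothetical):

	#include <linux/of.h>

	/* Illustrative: read the optional mask, leaving 0 (no masking) if absent. */
	static u32 smmu_read_stream_match_mask(struct device_node *np)
	{
		u32 mask = 0;

		of_property_read_u32(np, "stream-match-mask", &mask);
		return mask;
	}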
@@ -2408,6 +2408,15 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        const struct dma_map_ops *dma_ops;

        dev->archdata.dma_coherent = coherent;

        /*
         * Don't override the dma_ops if they have already been set. Ideally
         * this should be the only location where dma_ops are set, remove this
         * check when all other callers of set_dma_ops will have disappeared.
         */
        if (dev->dma_ops)
                return;

        if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
                dma_ops = arm_get_iommu_dma_map_ops(coherent);
        else
+20 -125
@@ -28,6 +28,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
 #include <linux/pci.h>

 #include <asm/cacheflush.h>
@@ -879,34 +880,26 @@ static const struct dma_map_ops iommu_dma_ops = {
        .mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
        struct list_head list;
        struct device *dev;
        const struct iommu_ops *ops;
        u64 dma_base;
        u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                            u64 dma_base, u64 size)
static int __init __iommu_dma_init(void)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_domain *domain;

        if (!ops)
                return;

        /*
         * If the IOMMU driver has the DMA domain support that we require,
         * then the IOMMU core will have already configured a group for this
         * device, and allocated the default domain for that group.
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

@@ -917,109 +910,11 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                dev->dma_ops = &iommu_dma_ops;
        }

        return true;
        return;

out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
        return false;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                               u64 dma_base, u64 size)
{
        struct iommu_dma_notifier_data *iommudata;

        iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
        if (!iommudata)
                return;

        iommudata->dev = dev;
        iommudata->ops = ops;
        iommudata->dma_base = dma_base;
        iommudata->size = size;

        mutex_lock(&iommu_dma_notifier_lock);
        list_add(&iommudata->list, &iommu_dma_masters);
        mutex_unlock(&iommu_dma_notifier_lock);
}

static int __iommu_attach_notifier(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct iommu_dma_notifier_data *master, *tmp;

        if (action != BUS_NOTIFY_BIND_DRIVER)
                return 0;

        mutex_lock(&iommu_dma_notifier_lock);
        list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
                if (data == master->dev && do_iommu_attach(master->dev,
                                master->ops, master->dma_base, master->size)) {
                        list_del(&master->list);
                        kfree(master);
                        break;
                }
        }
        mutex_unlock(&iommu_dma_notifier_lock);
        return 0;
}

static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
        struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
        int ret;

        if (!nb)
                return -ENOMEM;

        nb->notifier_call = __iommu_attach_notifier;

        ret = bus_register_notifier(bus, nb);
        if (ret) {
                pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
                        bus->name);
                kfree(nb);
        }
        return ret;
}

static int __init __iommu_dma_init(void)
{
        int ret;

        ret = iommu_dma_init();
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&platform_bus_type);
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&amba_bustype);
#ifdef CONFIG_PCI
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
        return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_group *group;

        if (!ops)
                return;
        /*
         * TODO: As a concession to the future, we're ready to handle being
         * called both early and late (i.e. after bus_add_device). Once all
         * the platform bus code is reworked to call us late and the notifier
         * junk above goes away, move the body of do_iommu_attach here.
         */
        group = iommu_group_get(dev);
        if (group) {
                do_iommu_attach(dev, ops, dma_base, size);
                iommu_group_put(group);
        } else {
                queue_iommu_attach(dev, ops, dma_base, size);
        }
}

void arch_teardown_dma_ops(struct device *dev)
@@ -514,6 +514,9 @@ int tboot_force_iommu(void)
        if (!tboot_enabled())
                return 0;

        if (!intel_iommu_tboot_noforce)
                return 1;

        if (no_iommu || swiotlb || dmar_disabled)
                pr_warning("Forcing Intel-IOMMU to enabled\n");
@@ -618,6 +618,46 @@ static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
        return ret;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
        switch (type) {
        case ACPI_IORT_NODE_SMMU_V3:
                return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
        case ACPI_IORT_NODE_SMMU:
                return IS_BUILTIN(CONFIG_ARM_SMMU);
        default:
                pr_warn("IORT node type %u does not describe an SMMU\n", type);
                return false;
        }
}

#ifdef CONFIG_IOMMU_API
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{
        return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{
        int err = 0;

        if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus &&
            !dev->iommu_group)
                err = ops->add_device(dev);

        return err;
}
#else
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{ return NULL; }
static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{ return 0; }
#endif

static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
                                                struct acpi_iort_node *node,
                                                u32 streamid)
@@ -626,14 +666,31 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
        int ret = -ENODEV;
        struct fwnode_handle *iort_fwnode;

        /*
         * If we already translated the fwspec there
         * is nothing left to do, return the iommu_ops.
         */
        ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
        if (ops)
                return ops;

        if (node) {
                iort_fwnode = iort_get_fwnode(node);
                if (!iort_fwnode)
                        return NULL;

                ops = iommu_ops_from_fwnode(iort_fwnode);
                /*
                 * If the ops look-up fails, this means that either
                 * the SMMU drivers have not been probed yet or that
                 * the SMMU drivers are not built in the kernel;
                 * Depending on whether the SMMU drivers are built-in
                 * in the kernel or not, defer the IOMMU configuration
                 * or just abort it.
                 */
                if (!ops)
                        return NULL;
                        return iort_iommu_driver_enabled(node->type) ?
                               ERR_PTR(-EPROBE_DEFER) : NULL;

                ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
        }
@@ -676,6 +733,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
        struct acpi_iort_node *node, *parent;
        const struct iommu_ops *ops = NULL;
        u32 streamid = 0;
        int err;

        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;
@@ -707,6 +765,8 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)

                while (parent) {
                        ops = iort_iommu_xlate(dev, parent, streamid);
                        if (IS_ERR_OR_NULL(ops))
                                return ops;

                        parent = iort_node_map_platform_id(node, &streamid,
                                                           IORT_IOMMU_TYPE,
@@ -714,6 +774,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
                }
        }

        /*
         * If we have reason to believe the IOMMU driver missed the initial
         * add_device callback for dev, replay it to get things in order.
         */
        err = iort_add_device_replay(ops, dev);
        if (err)
                ops = ERR_PTR(err);

        return ops;
}

@@ -1052,6 +1120,4 @@ void __init acpi_iort_init(void)
        }

        iort_init_platform_devices();

        acpi_probe_device_table(iort);
}
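Callers have to tell the three possible results of iort_iommu_configure()
apart; a minimal consumer sketch (the wrapper function is hypothetical;
acpi_dma_configure() later in this diff is the real caller):

	#include <linux/err.h>

	static int example_attach_iommu(struct device *dev)
	{
		const struct iommu_ops *ops = iort_iommu_configure(dev);

		if (IS_ERR(ops))	/* -EPROBE_DEFER: built-in SMMU not probed yet */
			return PTR_ERR(ops);

		/* NULL: no translation; valid ops: proceed with IOMMU DMA setup */
		return 0;
	}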
@@ -179,7 +179,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
        struct list_head *physnode_list;
        unsigned int node_id;
        int retval = -EINVAL;
        enum dev_dma_attr attr;

        if (has_acpi_companion(dev)) {
                if (acpi_dev) {
@@ -236,10 +235,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
        if (!has_acpi_companion(dev))
                ACPI_COMPANION_SET(dev, acpi_dev);

        attr = acpi_get_dma_attr(acpi_dev);
        if (attr != DEV_DMA_NOT_SUPPORTED)
                acpi_dma_configure(dev, attr);

        acpi_physnode_link_name(physical_node_name, node_id);
        retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
                                   physical_node_name);
+8 -3
@@ -1363,20 +1363,25 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
 * @dev: The pointer to the device
 * @attr: device dma attributes
 */
void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
        const struct iommu_ops *iommu;
        u64 size;

        iort_set_dma_mask(dev);

        iommu = iort_iommu_configure(dev);
        if (IS_ERR(iommu))
                return PTR_ERR(iommu);

        size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
        /*
         * Assume dma valid range starts at 0 and covers the whole
         * coherent_dma_mask.
         */
        arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
                           attr == DEV_DMA_COHERENT);
        arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);

        return 0;
}
EXPORT_SYMBOL_GPL(acpi_dma_configure);
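The max() expression guards a wraparound worth spelling out: the old call
passed coherent_dma_mask + 1 directly, which overflows to zero for a full
64-bit mask. A worked illustration:

	u64 mask = DMA_BIT_MASK(64);	/* 0xffffffffffffffff */
	u64 size = max(mask, mask + 1);	/* mask + 1 wraps to 0, so size stays ~0ULL */

For any narrower mask, mask + 1 is the larger value and the size is exact.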
@@ -19,6 +19,7 @@

 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/wait.h>
@@ -356,6 +357,10 @@ re_probe:
        if (ret)
                goto pinctrl_bind_failed;

        ret = dma_configure(dev);
        if (ret)
                goto dma_failed;

        if (driver_sysfs_add(dev)) {
                printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
                       __func__, dev_name(dev));
@@ -417,6 +422,8 @@ re_probe:
        goto done;

probe_failed:
        dma_deconfigure(dev);
dma_failed:
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -826,6 +833,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
                drv->remove(dev);

        device_links_driver_cleanup(dev);
        dma_deconfigure(dev);

        devres_release_all(dev);
        dev->driver = NULL;
        dev_set_drvdata(dev, NULL);
@@ -7,9 +7,11 @@
 * This file is released under the GPLv2.
 */

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

@@ -340,3 +342,42 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
        vunmap(cpu_addr);
}
#endif

/*
 * Common configuration to enable DMA API use for a device
 */
#include <linux/pci.h>

int dma_configure(struct device *dev)
{
        struct device *bridge = NULL, *dma_dev = dev;
        enum dev_dma_attr attr;
        int ret = 0;

        if (dev_is_pci(dev)) {
                bridge = pci_get_host_bridge_device(to_pci_dev(dev));
                dma_dev = bridge;
                if (IS_ENABLED(CONFIG_OF) && dma_dev->parent &&
                    dma_dev->parent->of_node)
                        dma_dev = dma_dev->parent;
        }

        if (dma_dev->of_node) {
                ret = of_dma_configure(dev, dma_dev->of_node);
        } else if (has_acpi_companion(dma_dev)) {
                attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
                if (attr != DEV_DMA_NOT_SUPPORTED)
                        ret = acpi_dma_configure(dev, attr);
        }

        if (bridge)
                pci_put_host_bridge_device(bridge);

        return ret;
}

void dma_deconfigure(struct device *dev)
{
        of_dma_deconfigure(dev);
        acpi_dma_deconfigure(dev);
}
@@ -35,6 +35,7 @@
 #include <rdma/ib_user_verbs.h>
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
 #include <linux/pci.h>
 #include <net/addrconf.h>
 #include <linux/qed/qede_roce.h>
 #include <linux/qed/qed_chain.h>
@@ -696,9 +696,9 @@ out_clear_state:

out_unregister:
        mmu_notifier_unregister(&pasid_state->mn, mm);
        mmput(mm);

out_free:
        mmput(mm);
        free_pasid_state(pasid_state);

out:
+53 -74
@@ -554,9 +554,14 @@ struct arm_smmu_s2_cfg {
};

struct arm_smmu_strtab_ent {
        bool valid;

        bool bypass; /* Overrides s1/s2 config */
        /*
         * An STE is "assigned" if the master emitting the corresponding SID
         * is attached to a domain. The behaviour of an unassigned STE is
         * determined by the disable_bypass parameter, whereas an assigned
         * STE behaves according to s1_cfg/s2_cfg, which themselves are
         * configured according to the domain type.
         */
        bool assigned;
        struct arm_smmu_s1_cfg *s1_cfg;
        struct arm_smmu_s2_cfg *s2_cfg;
};
@@ -632,6 +637,7 @@ enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
        ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
@@ -1005,9 +1011,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
         * This is hideously complicated, but we only really care about
         * three cases at the moment:
         *
         * 1. Invalid (all zero) -> bypass (init)
         * 2. Bypass -> translation (attach)
         * 3. Translation -> bypass (detach)
         * 1. Invalid (all zero) -> bypass/fault (init)
         * 2. Bypass/fault -> translation/bypass (attach)
         * 3. Translation/bypass -> bypass/fault (detach)
         *
         * Given that we can't update the STE atomically and the SMMU
         * doesn't read the thing in a defined order, that leaves us
@@ -1046,11 +1052,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
        }

        /* Nuke the existing STE_0 value, as we're going to rewrite it */
        val = ste->valid ? STRTAB_STE_0_V : 0;
        val = STRTAB_STE_0_V;

        /* Bypass/fault */
        if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
                if (!ste->assigned && disable_bypass)
                        val |= STRTAB_STE_0_CFG_ABORT;
                else
                        val |= STRTAB_STE_0_CFG_BYPASS;

        if (ste->bypass) {
                val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
                                      : STRTAB_STE_0_CFG_BYPASS;
                dst[0] = cpu_to_le64(val);
                dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
                         << STRTAB_STE_1_SHCFG_SHIFT);
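Taken together with the new assigned flag, the bypass/fault decision above
reduces to a small table (derived directly from the code in this hunk):

	unassigned, disable_bypass clear  -> STRTAB_STE_0_CFG_BYPASS
	unassigned, disable_bypass set    -> STRTAB_STE_0_CFG_ABORT (fault)
	assigned, no s1_cfg/s2_cfg        -> STRTAB_STE_0_CFG_BYPASS (identity domain)
	assigned, s1_cfg or s2_cfg set    -> full translation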
@@ -1111,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
        unsigned int i;
        struct arm_smmu_strtab_ent ste = {
                .valid = true,
                .bypass = true,
        };
        struct arm_smmu_strtab_ent ste = { .assigned = false };

        for (i = 0; i < nent; ++i) {
                arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
@@ -1378,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
        struct arm_smmu_domain *smmu_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
        if (type != IOMMU_DOMAIN_UNMANAGED &&
            type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_IDENTITY)
                return NULL;

        /*
@@ -1509,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;

        if (domain->type == IOMMU_DOMAIN_IDENTITY) {
                smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
                return 0;
        }

        /* Restrict the stage to what we can actually support */
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
@@ -1579,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
        return step;
}

static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
        int i;
        struct arm_smmu_master_data *master = fwspec->iommu_priv;
@@ -1591,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)

                arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
        }

        return 0;
}

static void arm_smmu_detach_dev(struct device *dev)
{
        struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

        master->ste.bypass = true;
        if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
                dev_warn(dev, "failed to install bypass STE\n");
        master->ste.assigned = false;
        arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1620,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        ste = &master->ste;

        /* Already attached to a different domain? */
        if (!ste->bypass)
        if (ste->assigned)
                arm_smmu_detach_dev(dev);

        mutex_lock(&smmu_domain->init_mutex);
@@ -1641,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                goto out_unlock;
        }

        ste->bypass = false;
        ste->valid = true;
        ste->assigned = true;

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
                ste->s1_cfg = NULL;
                ste->s2_cfg = NULL;
        } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                ste->s1_cfg = &smmu_domain->s1_cfg;
                ste->s2_cfg = NULL;
                arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
@@ -1653,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                ste->s2_cfg = &smmu_domain->s2_cfg;
        }

        ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
        if (ret < 0)
                ste->valid = false;

        arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
@@ -1704,6 +1714,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

        if (domain->type == IOMMU_DOMAIN_IDENTITY)
                return iova;

        if (!ops)
                return 0;

@@ -1807,7 +1820,7 @@ static void arm_smmu_remove_device(struct device *dev)

        master = fwspec->iommu_priv;
        smmu = master->smmu;
        if (master && master->ste.valid)
        if (master && master->ste.assigned)
                arm_smmu_detach_dev(dev);
        iommu_group_remove_device(dev);
        iommu_device_unlink(&smmu->iommu, dev);
@@ -1837,6 +1850,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        switch (attr) {
        case DOMAIN_ATTR_NESTING:
                *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1852,6 +1868,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
        int ret = 0;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        mutex_lock(&smmu_domain->init_mutex);

        switch (attr) {
@@ -1893,6 +1912,8 @@ static void arm_smmu_get_resv_regions(struct device *dev,
                return;

        list_add_tail(&region->list, head);

        iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
@@ -2761,51 +2782,9 @@ static struct platform_driver arm_smmu_driver = {
        .probe  = arm_smmu_device_probe,
        .remove = arm_smmu_device_remove,
};
module_platform_driver(arm_smmu_driver);

static int __init arm_smmu_init(void)
{
        static bool registered;
        int ret = 0;

        if (!registered) {
                ret = platform_driver_register(&arm_smmu_driver);
                registered = !ret;
        }
        return ret;
}

static void __exit arm_smmu_exit(void)
{
        return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

static int __init arm_smmu_of_init(struct device_node *np)
{
        int ret = arm_smmu_init();

        if (ret)
                return ret;

        if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
                return -ENODEV;

        return 0;
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);

#ifdef CONFIG_ACPI
static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
{
        if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
                return arm_smmu_init();

        return 0;
}
IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
#endif
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
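With probe deferral now handled by the core, the driver no longer needs its
early-init bootstrapping: IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL)
keeps only the compatible-string registration, and a plain
module_platform_driver() replaces the subsys_initcall() and
of_platform_device_create() machinery removed above.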
+219 -153
File diff suppressed because it is too large
+185 -102
@@ -61,15 +61,6 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
        return PAGE_SIZE;
}

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return &cookie->iovad;
        return NULL;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;
@@ -167,23 +158,100 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

static void iova_reserve_pci_windows(struct pci_dev *dev,
                                     struct iova_domain *iovad)
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers host
 * bridge windows for PCI devices.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct pci_host_bridge *bridge;
        struct resource_entry *window;
        unsigned long lo, hi;

        if (!dev_is_pci(dev))
                return;

        bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM &&
                    resource_type(window->res) != IORESOURCE_IO)
                struct iommu_resv_region *region;
                phys_addr_t start;
                size_t length;

                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
                start = window->res->start - window->offset;
                length = window->res->end - window->res->start + 1;
                region = iommu_alloc_resv_region(start, length, 0,
                                                 IOMMU_RESV_RESERVED);
                if (!region)
                        return;

                list_add_tail(&region->list, list);
        }
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                                     phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return -ENOMEM;

        for (i = 0; i < num_pages; i++) {
                msi_page[i].phys = start;
                msi_page[i].iova = start;
                INIT_LIST_HEAD(&msi_page[i].list);
                list_add(&msi_page[i].list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
                                      struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
struct iova_domain *iovad = &cookie->iovad;
|
||||
unsigned long order, base_pfn, end_pfn;
|
||||
bool pci = dev && dev_is_pci(dev);
|
||||
|
||||
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
|
||||
return -EINVAL;
|
||||
@@ -233,7 +300,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
||||
* leave the cache limit at the top of their range to save an rb_last()
|
||||
* traversal on every allocation.
|
||||
*/
|
||||
if (pci)
|
||||
if (dev && dev_is_pci(dev))
|
||||
end_pfn &= DMA_BIT_MASK(32) >> order;
|
||||
|
||||
/* start_pfn is always nonzero for an already-initialised domain */
|
||||
@@ -248,12 +315,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
||||
* area cache limit down for the benefit of the smaller one.
|
||||
*/
|
||||
iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
|
||||
} else {
|
||||
init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
|
||||
if (pci)
|
||||
iova_reserve_pci_windows(to_pci_dev(dev), iovad);
|
||||
|
||||
return 0;
|
||||
}
|
||||
return 0;
|
||||
|
||||
init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
|
||||
if (!dev)
|
||||
return 0;
|
||||
|
||||
return iova_reserve_iommu_regions(dev, domain);
|
||||
}
|
||||
EXPORT_SYMBOL(iommu_dma_init_domain);
|
||||
|
||||
@@ -286,48 +356,67 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
        }
}

static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
                dma_addr_t dma_limit, struct device *dev)
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, dma_addr_t dma_limit, struct device *dev)
{
        struct iova_domain *iovad = cookie_iovad(domain);
        unsigned long shift = iova_shift(iovad);
        unsigned long length = iova_align(iovad, size) >> shift;
        struct iova *iova = NULL;
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift, iova_len, iova = 0;

        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
                cookie->msi_iova += size;
                return cookie->msi_iova - size;
        }

        shift = iova_shift(iovad);
        iova_len = size >> shift;
        /*
         * Freeing non-power-of-two-sized allocations back into the IOVA caches
         * will come back to bite us badly, so we have to waste a bit of space
         * rounding up anything cacheable to make sure that can't happen. The
         * order of the unadjusted size will still match upon freeing.
         */
        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
                iova_len = roundup_pow_of_two(iova_len);

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
                                  true);
        /*
         * Enforce size-alignment to be safe - there could perhaps be an
         * attribute to control this per-device, or at least per-domain...
         */
        if (!iova)
                iova = alloc_iova(iovad, length, dma_limit >> shift, true);
                iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);

        return iova;
        if (!iova)
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);

        return (dma_addr_t)iova << shift;
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size)
{
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift = iova_shift(iovad);
        unsigned long pfn = dma_addr >> shift;
        struct iova *iova = find_iova(iovad, pfn);
        size_t size;

        if (WARN_ON(!iova))
                return;
        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;
        else
                free_iova_fast(iovad, iova >> shift, size >> shift);
}

        size = iova_size(iova) << shift;
        size -= iommu_unmap(domain, pfn << shift, size);
        /* ...and if we can't, then something is horribly, horribly wrong */
        WARN_ON(size > 0);
        __free_iova(iovad, iova);
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
                size_t size)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);

        WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
        iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
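The rounding comment above is easiest to check with a number (a worked
illustration; IOVA_RANGE_CACHE_MAX_SIZE is assumed large enough to cover the
request):

	iova_len = 5;				/* pages requested */
	iova_len = roundup_pow_of_two(5);	/* allocated as 8 pages */

The later free passes the unadjusted size, but its power-of-two order is
still 3 (2^3 = 8), so the range returns to the same per-CPU cache bucket it
was allocated from.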
@@ -409,7 +498,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
                dma_addr_t *handle)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        *handle = DMA_ERROR_CODE;
}
@@ -437,11 +526,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                void (*flush_page)(struct device *, const void *, phys_addr_t))
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t dma_addr;
        dma_addr_t iova;
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

        *handle = DMA_ERROR_CODE;
@@ -461,11 +550,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
        if (!pages)
                return NULL;

        iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        size = iova_align(iovad, size);
        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        size = iova_align(iovad, size);
        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

@@ -481,19 +570,18 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                sg_miter_stop(&miter);
        }

        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
        if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
                        < size)
                goto out_free_sg;

        *handle = dma_addr;
        *handle = iova;
        sg_free_table(&sgt);
        return pages;

out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        __free_iova(iovad, iova);
        iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
@@ -527,22 +615,22 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot)
{
        dma_addr_t dma_addr;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, phys);
        size_t len = iova_align(iovad, size + iova_off);
        struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
        dma_addr_t iova;

        size = iova_align(iovad, size + iova_off);
        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
                return DMA_ERROR_CODE;

        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
                __free_iova(iovad, iova);
        if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
                return DMA_ERROR_CODE;
        }
        return dma_addr + iova_off;
        return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -554,7 +642,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
@@ -643,10 +731,10 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
        dma_addr_t dma_addr;
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;
@@ -690,7 +778,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                prev = s;
        }

        iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;

@@ -698,14 +786,13 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
        if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, dma_addr);
        return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
        __free_iova(iovad, iova);
        iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
        __invalidate_sg(sg, nents);
        return 0;
@@ -714,11 +801,21 @@ out_restore_sg:
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t start, end;
        struct scatterlist *tmp;
        int i;
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
        start = sg_dma_address(sg);
        for_each_sg(sg_next(sg), tmp, nents - 1, i) {
                if (sg_dma_len(tmp) == 0)
                        break;
                sg = tmp;
        }
        end = sg_dma_address(sg) + sg_dma_len(sg);
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -731,7 +828,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -744,8 +841,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        dma_addr_t iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

@@ -758,29 +854,16 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
        if (!msi_page)
                return NULL;

        msi_page->phys = msi_addr;
        if (iovad) {
                iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
                if (!iova)
                        goto out_free_page;
                msi_page->iova = iova_dma_addr(iovad, iova);
        } else {
                msi_page->iova = cookie->msi_iova;
                cookie->msi_iova += size;
        }

        if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
                goto out_free_iova;
        iova = __iommu_dma_map(dev, msi_addr, size, prot);
        if (iommu_dma_mapping_error(dev, iova))
                goto out_free_page;

        INIT_LIST_HEAD(&msi_page->list);
        msi_page->phys = msi_addr;
        msi_page->iova = iova;
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_iova:
        if (iovad)
                __free_iova(iovad, iova);
        else
                cookie->msi_iova -= size;
out_free_page:
        kfree(msi_page);
        return NULL;
+18 -17
@@ -311,7 +311,7 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
                                ((void *)drhd) + drhd->header.length,
                                dmaru->segment,
                                dmaru->devices, dmaru->devices_cnt);
                if (ret != 0)
                if (ret)
                        break;
        }
        if (ret >= 0)
@@ -391,7 +391,7 @@ static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;
        int ret;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = dmar_find_dmaru(drhd);
@@ -551,17 +551,16 @@ static int __init dmar_table_detect(void)
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
        return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
                                       size_t len, struct dmar_res_callback *cb)
{
        int ret = 0;
        struct acpi_dmar_header *iter, *next;
        struct acpi_dmar_header *end = ((void *)start) + len;

        for (iter = start; iter < end && ret == 0; iter = next) {
        for (iter = start; iter < end; iter = next) {
                next = (void *)iter + iter->length;
                if (iter->length == 0) {
                        /* Avoid looping forever on bad ACPI tables */
@@ -570,8 +569,7 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
                } else if (next > end) {
                        /* Avoid passing table end */
                        pr_warn(FW_BUG "Record passes table end\n");
                        ret = -EINVAL;
                        break;
                        return -EINVAL;
                }

                if (cb->print_entry)
@@ -582,15 +580,19 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
                        pr_debug("Unknown DMAR structure type %d\n",
                                 iter->type);
                } else if (cb->cb[iter->type]) {
                        int ret;

                        ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
                        if (ret)
                                return ret;
                } else if (!cb->ignore_unhandled) {
                        pr_warn("No handler for DMAR structure type %d\n",
                                iter->type);
                        ret = -EINVAL;
                        return -EINVAL;
                }
        }

        return ret;
        return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
@@ -607,8 +609,8 @@ static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        int ret = 0;
        int drhd_count = 0;
        int ret;
        struct dmar_res_callback cb = {
                .print_entry = true,
                .ignore_unhandled = true,
@@ -891,17 +893,17 @@ int __init detect_intel_iommu(void)

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (ret)
                ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
                                            &validate_drhd_cb);
        if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
        if (!ret)
                ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
                                           &validate_drhd_cb);
        if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (ret)
        if (!ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

@@ -911,10 +913,9 @@ int __init detect_intel_iommu(void)
        }
        up_write(&dmar_global_lock);

        return ret ? 1 : -ENODEV;
        return ret ? ret : 1;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
@@ -171,6 +171,9 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define REG_V5_PT_BASE_PFN      0x00C
#define REG_V5_MMU_FLUSH_ALL    0x010
#define REG_V5_MMU_FLUSH_ENTRY  0x014
#define REG_V5_MMU_FLUSH_RANGE  0x018
#define REG_V5_MMU_FLUSH_START  0x020
#define REG_V5_MMU_FLUSH_END    0x024
#define REG_V5_INT_STATUS       0x060
#define REG_V5_INT_CLEAR        0x064
#define REG_V5_FAULT_AR_VA      0x070
@@ -319,14 +322,23 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
{
        unsigned int i;

        for (i = 0; i < num_inv; i++) {
                if (MMU_MAJ_VER(data->version) < 5)
        if (MMU_MAJ_VER(data->version) < 5) {
                for (i = 0; i < num_inv; i++) {
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_MMU_FLUSH_ENTRY);
                else
                        iova += SPAGE_SIZE;
                }
        } else {
                if (num_inv == 1) {
                        writel((iova & SPAGE_MASK) | 1,
                               data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
                        iova += SPAGE_SIZE;
                } else {
                        writel((iova & SPAGE_MASK),
                               data->sfrbase + REG_V5_MMU_FLUSH_START);
                        writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
                               data->sfrbase + REG_V5_MMU_FLUSH_END);
                        writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
                }
        }
}
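A quick worked example of the new range flush (SPAGE_SIZE is this driver's
4 KiB small-page size): invalidating num_inv = 4 entries starting at
iova = 0x10000000 writes 0x10000000 to REG_V5_MMU_FLUSH_START and
0x10003000 (start + 3 * SPAGE_SIZE) to REG_V5_MMU_FLUSH_END, then kicks
REG_V5_MMU_FLUSH_RANGE with 1; a single register sequence replaces four
per-entry writes.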
@@ -747,16 +759,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
                goto err_counter;

        /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
        for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
                domain->pgtable[i + 0] = ZERO_LV2LINK;
                domain->pgtable[i + 1] = ZERO_LV2LINK;
                domain->pgtable[i + 2] = ZERO_LV2LINK;
                domain->pgtable[i + 3] = ZERO_LV2LINK;
                domain->pgtable[i + 4] = ZERO_LV2LINK;
                domain->pgtable[i + 5] = ZERO_LV2LINK;
                domain->pgtable[i + 6] = ZERO_LV2LINK;
                domain->pgtable[i + 7] = ZERO_LV2LINK;
        }
        for (i = 0; i < NUM_LV1ENTRIES; i++)
                domain->pgtable[i] = ZERO_LV2LINK;

        handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
                                DMA_TO_DEVICE);
@@ -20,6 +20,7 @@
#define __FSL_PAMU_H

#include <linux/iommu.h>
#include <linux/pci.h>

#include <asm/fsl_pamu_stash.h>
@@ -183,6 +183,7 @@ static int rwbf_quirk;
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
int intel_iommu_tboot_noforce;

/*
 * 0: Present
@@ -607,6 +608,10 @@ static int __init intel_iommu_setup(char *str)
                                "Intel-IOMMU: enable pre-production PASID support\n");
                        intel_iommu_pasid28 = 1;
                        iommu_identity_mapping |= IDENTMAP_GFX;
                } else if (!strncmp(str, "tboot_noforce", 13)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
                        intel_iommu_tboot_noforce = 1;
                }

                str += strcspn(str, ",");
@@ -4730,6 +4735,15 @@ static int intel_iommu_cpu_dead(unsigned int cpu)
        return 0;
}

static void intel_disable_iommus(void)
{
        struct intel_iommu *iommu = NULL;
        struct dmar_drhd_unit *drhd;

        for_each_iommu(iommu, drhd)
                iommu_disable_translation(iommu);
}

static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
        return container_of(dev, struct intel_iommu, iommu.dev);
@@ -4840,8 +4854,28 @@ int __init intel_iommu_init(void)
                goto out_free_dmar;
        }

        if (no_iommu || dmar_disabled)
        if (no_iommu || dmar_disabled) {
                /*
                 * We exit the function here to ensure IOMMU's remapping and
                 * mempool aren't setup, which means that the IOMMU's PMRs
                 * won't be disabled via the call to init_dmars(). So disable
                 * it explicitly here. The PMRs were setup by tboot prior to
                 * calling SENTER, but the kernel is expected to reset/tear
                 * down the PMRs.
                 */
                if (intel_iommu_tboot_noforce) {
                        for_each_iommu(iommu, drhd)
                                iommu_disable_protect_mem_regions(iommu);
                }

                /*
                 * Make sure the IOMMUs are switched off, even when we
                 * boot into a kexec kernel and the previous kernel left
                 * them enabled
                 */
                intel_disable_iommus();
                goto out_free_dmar;
        }

        if (list_empty(&dmar_rmrr_units))
                pr_info("No RMRR found\n");
@@ -408,14 +408,6 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
        size_t size;
        u64 irta;

        if (!is_kdump_kernel()) {
                pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
                        iommu->name);
                clear_ir_pre_enabled(iommu);
                iommu_disable_irq_remapping(iommu);
                return -EINVAL;
        }

        /* Check whether the old ir-table has the same size as ours */
        irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
        if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
@@ -567,7 +559,12 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
        init_ir_status(iommu);

        if (ir_pre_enabled(iommu)) {
                if (iommu_load_old_irte(iommu))
                if (!is_kdump_kernel()) {
                        pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
                                iommu->name);
                        clear_ir_pre_enabled(iommu);
                        iommu_disable_irq_remapping(iommu);
                } else if (iommu_load_old_irte(iommu))
                        pr_err("Failed to copy IR table for %s from previous kernel\n",
                               iommu->name);
                else
Some files were not shown because too many files have changed in this diff