Merge branches 'arm/renesas', 'arm/smmu', 'ppc/pamu', 'x86/vt-d', 'x86/amd' and 'core' into next
Documentation/admin-guide/kernel-parameters.txt:
@@ -1749,6 +1749,18 @@
     nobypass        [PPC/POWERNV]
                     Disable IOMMU bypass, using IOMMU for PCI devices.

+    iommu.strict=   [ARM64] Configure TLB invalidation behaviour
+                    Format: { "0" | "1" }
+                    0 - Lazy mode.
+                      Request that DMA unmap operations use deferred
+                      invalidation of hardware TLBs, for increased
+                      throughput at the cost of reduced device isolation.
+                      Will fall back to strict mode if not supported by
+                      the relevant IOMMU driver.
+                    1 - Strict mode (default).
+                      DMA unmap operations invalidate IOMMU hardware TLBs
+                      synchronously.
+
     iommu.passthrough=
                     [ARM64] Configure DMA to bypass the IOMMU by default.
                     Format: { "0" | "1" }
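Inside the kernel, the lazy/strict choice surfaces as the DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE domain attribute introduced later in this merge. As a minimal sketch (assuming a domain already obtained via iommu_get_dma_domain(); the helper name here is ours, not a kernel API), a caller could query the resulting mode like this:

#include <linux/iommu.h>

/*
 * Sketch only: report whether @domain ended up in lazy (flush-queue)
 * mode. Returns false when the driver does not support the attribute,
 * which matches the documented fallback to strict mode.
 */
static bool domain_uses_lazy_unmap(struct iommu_domain *domain)
{
    int attr = 0;

    if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
                              &attr))
        return false;   /* attribute unsupported: strict mode */

    return attr == 1;
}

In practice only iommu_dma_init_domain() needs this check; it appears verbatim in the dma-iommu.c hunk further down.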
Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt:
@@ -12,6 +12,7 @@ Required Properties:

     - "renesas,ipmmu-r8a73a4" for the R8A73A4 (R-Mobile APE6) IPMMU.
     - "renesas,ipmmu-r8a7743" for the R8A7743 (RZ/G1M) IPMMU.
+    - "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU.
     - "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
     - "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
     - "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt:
@@ -9,6 +9,25 @@ blocks that can be used to create functional hardware objects/devices
 such as network interfaces, crypto accelerator instances, L2 switches,
 etc.

+For an overview of the DPAA2 architecture and fsl-mc bus see:
+Documentation/networking/dpaa2/overview.rst
+
+As described in the above overview, all DPAA2 objects in a DPRC share the
+same hardware "isolation context" and a 10-bit value called an ICID
+(isolation context id) is expressed by the hardware to identify
+the requester.
+
+The generic 'iommus' property is insufficient to describe the relationship
+between ICIDs and IOMMUs, so an iommu-map property is used to define
+the set of possible ICIDs under a root DPRC and how they map to
+an IOMMU.
+
+For generic IOMMU bindings, see
+Documentation/devicetree/bindings/iommu/iommu.txt.
+
+For arm-smmu binding, see:
+Documentation/devicetree/bindings/iommu/arm,smmu.txt.
+
 Required properties:

     - compatible

@@ -88,14 +107,34 @@ Sub-nodes:
           Value type: <phandle>
           Definition: Specifies the phandle to the PHY device node associated
                       with this dpmac.
 Optional properties:

+- iommu-map: Maps an ICID to an IOMMU and associated iommu-specifier
+  data.
+
+  The property is an arbitrary number of tuples of
+  (icid-base,iommu,iommu-base,length).
+
+  Any ICID i in the interval [icid-base, icid-base + length) is
+  associated with the listed IOMMU, with the iommu-specifier
+  (i - icid-base + iommu-base).
+
+Example:
+
+        smmu: iommu@5000000 {
+               compatible = "arm,mmu-500";
+               #iommu-cells = <1>;
+               stream-match-mask = <0x7C00>;
+               ...
+        };
+
+        fsl_mc: fsl-mc@80c000000 {
+               compatible = "fsl,qoriq-mc";
+               reg = <0x00000008 0x0c000000 0 0x40>,    /* MC portal base */
+                     <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
+               msi-parent = <&its>;
+               /* define map for ICIDs 23-64 */
+               iommu-map = <23 &smmu 23 41>;
+               #address-cells = <3>;
+               #size-cells = <1>;
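The iommu-map lookup is plain interval arithmetic, easy to check in isolation. A self-contained sketch (hypothetical struct and helper names, not the kernel's actual resolver, which lives in the OF core) that reproduces the iommu-map = <23 &smmu 23 41> example above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One (icid-base, iommu, iommu-base, length) tuple from an iommu-map. */
struct iommu_map_entry {
    uint32_t icid_base;
    uint32_t iommu_base;
    uint32_t length;
};

/* Hypothetical helper: translate an ICID into an iommu-specifier. */
static bool icid_to_specifier(const struct iommu_map_entry *e,
                              uint32_t icid, uint32_t *spec)
{
    if (icid < e->icid_base || icid >= e->icid_base + e->length)
        return false;           /* ICID not covered by this tuple */

    *spec = icid - e->icid_base + e->iommu_base;
    return true;
}

int main(void)
{
    /* The binding example above: iommu-map = <23 &smmu 23 41>; */
    struct iommu_map_entry e = { .icid_base = 23, .iommu_base = 23,
                                 .length = 41 };
    uint32_t spec;

    if (icid_to_specifier(&e, 30, &spec))   /* 30 is in [23, 64) */
        printf("ICID 30 -> SMMU stream ID %u\n", spec);  /* prints 30 */

    if (!icid_to_specifier(&e, 64, &spec))  /* 64 is out of range */
        printf("ICID 64 is not mapped\n");
    return 0;
}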
arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi:
@@ -148,6 +148,7 @@
        #address-cells = <2>;
        #size-cells = <2>;
        ranges;
+       dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;

        clockgen: clocking@1300000 {
            compatible = "fsl,ls2080a-clockgen";

@@ -321,6 +322,8 @@
            reg = <0x00000008 0x0c000000 0 0x40>,    /* MC portal base */
                  <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
            msi-parent = <&its>;
+           iommu-map = <0 &smmu 0 0>;  /* This is fixed-up by u-boot */
+           dma-coherent;
            #address-cells = <3>;
            #size-cells = <1>;

@@ -424,6 +427,9 @@
            compatible = "arm,mmu-500";
            reg = <0 0x5000000 0 0x800000>;
            #global-interrupts = <12>;
+           #iommu-cells = <1>;
+           stream-match-mask = <0x7C00>;
+           dma-coherent;
            interrupts = <0 13 4>, /* global secure fault */
                         <0 14 4>, /* combined secure interrupt */
                         <0 15 4>, /* global non-secure fault */

@@ -466,7 +472,6 @@
                         <0 204 4>, <0 205 4>,
                         <0 206 4>, <0 207 4>,
                         <0 208 4>, <0 209 4>;
-           mmu-masters = <&fsl_mc 0x300 0>;
        };

        dspi: dspi@2100000 {
arch/arm64/mm/dma-mapping.c:
@@ -712,7 +712,7 @@ static void __iommu_sync_single_for_cpu(struct device *dev,
    if (is_device_dma_coherent(dev))
        return;

-   phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
+   phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
    __dma_unmap_area(phys_to_virt(phys), size, dir);
 }

@@ -725,7 +725,7 @@ static void __iommu_sync_single_for_device(struct device *dev,
    if (is_device_dma_coherent(dev))
        return;

-   phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
+   phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
    __dma_map_area(phys_to_virt(phys), size, dir);
 }

@@ -738,9 +738,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
    int prot = dma_info_to_prot(dir, coherent, attrs);
    dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

-   if (!iommu_dma_mapping_error(dev, dev_addr) &&
-       (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-       __iommu_sync_single_for_device(dev, dev_addr, size, dir);
+   if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+       !iommu_dma_mapping_error(dev, dev_addr))
+       __dma_map_area(page_address(page) + offset, size, dir);

    return dev_addr;
 }
arch/x86/include/asm/irq_remapping.h:
@@ -45,6 +45,8 @@ struct vcpu_data {

 #ifdef CONFIG_IRQ_REMAP

+extern raw_spinlock_t irq_2_ir_lock;
+
 extern bool irq_remapping_cap(enum irq_remap_cap cap);
 extern void set_irq_remapping_broken(void);
 extern int irq_remapping_prepare(void);
drivers/bus/fsl-mc/fsl-mc-bus.c:
@@ -127,6 +127,16 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
    return 0;
 }

+static int fsl_mc_dma_configure(struct device *dev)
+{
+   struct device *dma_dev = dev;
+
+   while (dev_is_fsl_mc(dma_dev))
+       dma_dev = dma_dev->parent;
+
+   return of_dma_configure(dev, dma_dev->of_node, 0);
+}
+
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                 char *buf)
 {

@@ -148,6 +158,7 @@ struct bus_type fsl_mc_bus_type = {
    .name = "fsl-mc",
    .match = fsl_mc_bus_match,
    .uevent = fsl_mc_bus_uevent,
+   .dma_configure = fsl_mc_dma_configure,
    .dev_groups = fsl_mc_dev_groups,
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_type);

@@ -616,6 +627,7 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
        mc_dev->icid = parent_mc_dev->icid;
        mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
        mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+       mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
        dev_set_msi_domain(&mc_dev->dev,
                   dev_get_msi_domain(&parent_mc_dev->dev));
    }

@@ -633,10 +645,6 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
        goto error_cleanup_dev;
    }

-   /* Objects are coherent, unless 'no shareability' flag set. */
-   if (!(obj_desc->flags & FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
-       arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
-
    /*
     * The device-specific probe callback will get invoked by device_add()
     */
drivers/iommu/Kconfig:
@@ -186,6 +186,19 @@ config INTEL_IOMMU
      and include PCI device scope covered by these DMA
      remapping devices.

+config INTEL_IOMMU_DEBUGFS
+   bool "Export Intel IOMMU internals in Debugfs"
+   depends on INTEL_IOMMU && IOMMU_DEBUGFS
+   help
+     !!!WARNING!!!
+
+     DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
+
+     Expose Intel IOMMU internals in Debugfs.
+
+     This option is -NOT- intended for production environments, and should
+     only be enabled for debugging Intel IOMMU.
+
 config INTEL_IOMMU_SVM
    bool "Support for Shared Virtual Memory with Intel IOMMU"
    depends on INTEL_IOMMU && X86
drivers/iommu/Makefile:
@@ -17,6 +17,7 @@ obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
+obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
 obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
 obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
drivers/iommu/amd_iommu.c:
@@ -3083,6 +3083,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
        return (irq_remapping_enabled == 1);
    case IOMMU_CAP_NOEXEC:
        return false;
+   default:
+       break;
    }

    return false;

drivers/iommu/amd_iommu_init.c:
@@ -1709,7 +1709,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
    NULL,
 };

-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
    int cap_ptr = iommu->cap_ptr;
    u32 range, misc, low, high;
drivers/iommu/arm-smmu-v3.c:
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * IOMMU API for ARM architected SMMUv3 implementations.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * Copyright (C) 2015 ARM Limited
  *
  * Author: Will Deacon <will.deacon@arm.com>

@@ -567,7 +556,8 @@ struct arm_smmu_device {

    int             gerr_irq;
    int             combined_irq;
-   atomic_t        sync_nr;
+   u32             sync_nr;
+   u8              prev_cmd_opcode;

    unsigned long   ias; /* IPA */
    unsigned long   oas; /* PA */

@@ -611,6 +601,7 @@ struct arm_smmu_domain {
    struct mutex            init_mutex; /* Protects smmu pointer */

    struct io_pgtable_ops   *pgtbl_ops;
+   bool                    non_strict;

    enum arm_smmu_domain_stage stage;
    union {

@@ -708,7 +699,7 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
 }

 /*
- * Wait for the SMMU to consume items. If drain is true, wait until the queue
+ * Wait for the SMMU to consume items. If sync is true, wait until the queue
  * is empty. Otherwise, wait until there is at least one free slot.
  */
 static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)

@@ -901,6 +892,8 @@ static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
    struct arm_smmu_queue *q = &smmu->cmdq.q;
    bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);

+   smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]);
+
    while (queue_insert_raw(q, cmd) == -ENOSPC) {
        if (queue_poll_cons(q, false, wfe))
            dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");

@@ -948,15 +941,21 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
    struct arm_smmu_cmdq_ent ent = {
        .opcode = CMDQ_OP_CMD_SYNC,
        .sync   = {
-           .msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
            .msiaddr = virt_to_phys(&smmu->sync_count),
        },
    };

-   arm_smmu_cmdq_build_cmd(cmd, &ent);
-
    spin_lock_irqsave(&smmu->cmdq.lock, flags);
-   arm_smmu_cmdq_insert_cmd(smmu, cmd);
+
+   /* Piggy-back on the previous command if it's a SYNC */
+   if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
+       ent.sync.msidata = smmu->sync_nr;
+   } else {
+       ent.sync.msidata = ++smmu->sync_nr;
+       arm_smmu_cmdq_build_cmd(cmd, &ent);
+       arm_smmu_cmdq_insert_cmd(smmu, cmd);
+   }
+
    spin_unlock_irqrestore(&smmu->cmdq.lock, flags);

    return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);

@@ -1372,15 +1371,11 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
 }

 /* IO_PGTABLE API */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
-{
-   arm_smmu_cmdq_issue_sync(smmu);
-}
-
 static void arm_smmu_tlb_sync(void *cookie)
 {
    struct arm_smmu_domain *smmu_domain = cookie;
-   __arm_smmu_tlb_sync(smmu_domain->smmu);
+
+   arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
 }

 static void arm_smmu_tlb_inv_context(void *cookie)

@@ -1398,8 +1393,14 @@ static void arm_smmu_tlb_inv_context(void *cookie)
        cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
    }

+   /*
+    * NOTE: when io-pgtable is in non-strict mode, we may get here with
+    * PTEs previously cleared by unmaps on the current CPU not yet visible
+    * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
+    * to guarantee those are observed before the TLBI. Do be careful, 007.
+    */
    arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-   __arm_smmu_tlb_sync(smmu);
+   arm_smmu_cmdq_issue_sync(smmu);
 }

 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,

@@ -1624,6 +1625,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
    if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
        pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

+   if (smmu_domain->non_strict)
+       pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
    pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
    if (!pgtbl_ops)
        return -ENOMEM;

@@ -1772,12 +1776,20 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
    return ops->unmap(ops, iova, size);
 }

+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+   struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+   if (smmu_domain->smmu)
+       arm_smmu_tlb_inv_context(smmu_domain);
+}
+
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
    struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;

    if (smmu)
-       __arm_smmu_tlb_sync(smmu);
+       arm_smmu_cmdq_issue_sync(smmu);
 }

 static phys_addr_t

@@ -1917,15 +1929,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 {
    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

-   if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-       return -EINVAL;
-
-   switch (attr) {
-   case DOMAIN_ATTR_NESTING:
-       *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-       return 0;
+   switch (domain->type) {
+   case IOMMU_DOMAIN_UNMANAGED:
+       switch (attr) {
+       case DOMAIN_ATTR_NESTING:
+           *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+           return 0;
+       default:
+           return -ENODEV;
+       }
+       break;
+   case IOMMU_DOMAIN_DMA:
+       switch (attr) {
+       case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+           *(int *)data = smmu_domain->non_strict;
+           return 0;
+       default:
+           return -ENODEV;
+       }
+       break;
    default:
-       return -ENODEV;
+       return -EINVAL;
    }
 }

@@ -1935,26 +1959,37 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
    int ret = 0;
    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

-   if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-       return -EINVAL;
-
    mutex_lock(&smmu_domain->init_mutex);

-   switch (attr) {
-   case DOMAIN_ATTR_NESTING:
-       if (smmu_domain->smmu) {
-           ret = -EPERM;
-           goto out_unlock;
+   switch (domain->type) {
+   case IOMMU_DOMAIN_UNMANAGED:
+       switch (attr) {
+       case DOMAIN_ATTR_NESTING:
+           if (smmu_domain->smmu) {
+               ret = -EPERM;
+               goto out_unlock;
+           }
+
+           if (*(int *)data)
+               smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+           else
+               smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+           break;
+       default:
+           ret = -ENODEV;
+       }
+       break;
+   case IOMMU_DOMAIN_DMA:
+       switch(attr) {
+       case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+           smmu_domain->non_strict = *(int *)data;
+           break;
+       default:
+           ret = -ENODEV;
        }
-
-       if (*(int *)data)
-           smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-       else
-           smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
        break;
    default:
-       ret = -ENODEV;
+       ret = -EINVAL;
    }

 out_unlock:

@@ -1999,7 +2034,7 @@ static struct iommu_ops arm_smmu_ops = {
    .attach_dev         = arm_smmu_attach_dev,
    .map                = arm_smmu_map,
    .unmap              = arm_smmu_unmap,
-   .flush_iotlb_all    = arm_smmu_iotlb_sync,
+   .flush_iotlb_all    = arm_smmu_flush_iotlb_all,
    .iotlb_sync         = arm_smmu_iotlb_sync,
    .iova_to_phys       = arm_smmu_iova_to_phys,
    .add_device         = arm_smmu_add_device,

@@ -2180,7 +2215,6 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
 {
    int ret;

-   atomic_set(&smmu->sync_nr, 0);
    ret = arm_smmu_init_queues(smmu);
    if (ret)
        return ret;

@@ -2353,8 +2387,8 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
    irq = smmu->combined_irq;
    if (irq) {
        /*
-        * Cavium ThunderX2 implementation doesn't not support unique
-        * irq lines. Use single irq line for all the SMMUv3 interrupts.
+        * Cavium ThunderX2 implementation doesn't support unique irq
+        * lines. Use a single irq line for all the SMMUv3 interrupts.
         */
        ret = devm_request_threaded_irq(smmu->dev, irq,
                        arm_smmu_combined_irq_handler,
drivers/iommu/arm-smmu.c:
@@ -52,6 +52,7 @@
 #include <linux/spinlock.h>

 #include <linux/amba/bus.h>
+#include <linux/fsl/mc.h>

 #include "io-pgtable.h"
 #include "arm-smmu-regs.h"

@@ -246,6 +247,7 @@ struct arm_smmu_domain {
    const struct iommu_gather_ops   *tlb_ops;
    struct arm_smmu_cfg             cfg;
    enum arm_smmu_domain_stage      stage;
+   bool                            non_strict;
    struct mutex                    init_mutex; /* Protects smmu pointer */
    spinlock_t                      cb_lock; /* Serialises ATS1* ops and TLB syncs */
    struct iommu_domain             domain;

@@ -447,7 +449,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
    struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
    void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

-   writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+   /*
+    * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
+    * cleared by the current CPU are visible to the SMMU before the TLBI.
+    */
+   writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
    arm_smmu_tlb_sync_context(cookie);
 }

@@ -457,7 +463,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
    struct arm_smmu_device *smmu = smmu_domain->smmu;
    void __iomem *base = ARM_SMMU_GR0(smmu);

-   writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+   /* NOTE: see above */
+   writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
    arm_smmu_tlb_sync_global(smmu);
 }

@@ -469,6 +476,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
    bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
    void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

+   if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+       wmb();
+
    if (stage1) {
        reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

@@ -510,6 +520,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
    struct arm_smmu_domain *smmu_domain = cookie;
    void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

+   if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+       wmb();
+
    writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 }

@@ -863,6 +876,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
    if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
        pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

+   if (smmu_domain->non_strict)
+       pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
    smmu_domain->smmu = smmu;
    pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
    if (!pgtbl_ops) {

@@ -1252,6 +1268,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
    return ops->unmap(ops, iova, size);
 }

+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+   struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+   if (smmu_domain->tlb_ops)
+       smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+}
+
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

@@ -1459,6 +1483,8 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)

    if (dev_is_pci(dev))
        group = pci_device_group(dev);
+   else if (dev_is_fsl_mc(dev))
+       group = fsl_mc_device_group(dev);
    else
        group = generic_device_group(dev);

@@ -1470,15 +1496,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 {
    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

-   if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-       return -EINVAL;
-
-   switch (attr) {
-   case DOMAIN_ATTR_NESTING:
-       *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-       return 0;
+   switch(domain->type) {
+   case IOMMU_DOMAIN_UNMANAGED:
+       switch (attr) {
+       case DOMAIN_ATTR_NESTING:
+           *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+           return 0;
+       default:
+           return -ENODEV;
+       }
+       break;
+   case IOMMU_DOMAIN_DMA:
+       switch (attr) {
+       case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+           *(int *)data = smmu_domain->non_strict;
+           return 0;
+       default:
+           return -ENODEV;
+       }
+       break;
    default:
-       return -ENODEV;
+       return -EINVAL;
    }
 }

@@ -1488,28 +1526,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
    int ret = 0;
    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

-   if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-       return -EINVAL;
-
    mutex_lock(&smmu_domain->init_mutex);

-   switch (attr) {
-   case DOMAIN_ATTR_NESTING:
-       if (smmu_domain->smmu) {
-           ret = -EPERM;
-           goto out_unlock;
+   switch(domain->type) {
+   case IOMMU_DOMAIN_UNMANAGED:
+       switch (attr) {
+       case DOMAIN_ATTR_NESTING:
+           if (smmu_domain->smmu) {
+               ret = -EPERM;
+               goto out_unlock;
+           }
+
+           if (*(int *)data)
+               smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+           else
+               smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+           break;
+       default:
+           ret = -ENODEV;
+       }
+       break;
+   case IOMMU_DOMAIN_DMA:
+       switch (attr) {
+       case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+           smmu_domain->non_strict = *(int *)data;
+           break;
+       default:
+           ret = -ENODEV;
        }
-
-       if (*(int *)data)
-           smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-       else
-           smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
        break;
    default:
-       ret = -ENODEV;
+       ret = -EINVAL;
    }

 out_unlock:
    mutex_unlock(&smmu_domain->init_mutex);
    return ret;

@@ -1562,7 +1610,7 @@ static struct iommu_ops arm_smmu_ops = {
    .attach_dev         = arm_smmu_attach_dev,
    .map                = arm_smmu_map,
    .unmap              = arm_smmu_unmap,
-   .flush_iotlb_all    = arm_smmu_iotlb_sync,
+   .flush_iotlb_all    = arm_smmu_flush_iotlb_all,
    .iotlb_sync         = arm_smmu_iotlb_sync,
    .iova_to_phys       = arm_smmu_iova_to_phys,
    .add_device         = arm_smmu_add_device,

@@ -2036,6 +2084,10 @@ static void arm_smmu_bus_init(void)
        bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
    }
 #endif
+#ifdef CONFIG_FSL_MC_BUS
+   if (!iommu_present(&fsl_mc_bus_type))
+       bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+#endif
 }

 static int arm_smmu_device_probe(struct platform_device *pdev)
drivers/iommu/dma-iommu.c:
@@ -55,6 +55,9 @@ struct iommu_dma_cookie {
    };
    struct list_head        msi_page_list;
    spinlock_t              msi_lock;
+
+   /* Domain for flush queue callback; NULL if flush queue not in use */
+   struct iommu_domain     *fq_domain;
 };

 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)

@@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev,
    return ret;
 }

+static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+{
+   struct iommu_dma_cookie *cookie;
+   struct iommu_domain *domain;
+
+   cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+   domain = cookie->fq_domain;
+   /*
+    * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
+    * implies that ops->flush_iotlb_all must be non-NULL.
+    */
+   domain->ops->flush_iotlb_all(domain);
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()

@@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    unsigned long order, base_pfn, end_pfn;
+   int attr;

    if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
        return -EINVAL;

@@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
    }

    init_iova_domain(iovad, 1UL << order, base_pfn);
+
+   if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+           DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+       cookie->fq_domain = domain;
+       init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+   }
+
    if (!dev)
        return 0;

@@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
    /* The MSI case is only ever cleaning up its most recent allocation */
    if (cookie->type == IOMMU_DMA_MSI_COOKIE)
        cookie->msi_iova -= size;
+   else if (cookie->fq_domain)     /* non-strict mode */
+       queue_iova(iovad, iova_pfn(iovad, iova),
+               size >> iova_shift(iovad), 0);
    else
        free_iova_fast(iovad, iova_pfn(iovad, iova),
                size >> iova_shift(iovad));

@@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
    dma_addr -= iova_off;
    size = iova_align(iovad, size + iova_off);

-   WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+   WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
+   if (!cookie->fq_domain)
+       iommu_tlb_sync(domain);
    iommu_dma_free_iova(cookie, dma_addr, size);
 }

@@ -491,7 +521,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
 void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
        dma_addr_t *handle)
 {
-   __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
+   __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
    __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
    *handle = IOMMU_MAPPING_ERROR;
 }

@@ -518,7 +548,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
        unsigned long attrs, int prot, dma_addr_t *handle,
        void (*flush_page)(struct device *, const void *, phys_addr_t))
 {
-   struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+   struct iommu_domain *domain = iommu_get_dma_domain(dev);
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    struct page **pages;

@@ -606,9 +636,8 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 }

 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-       size_t size, int prot)
+       size_t size, int prot, struct iommu_domain *domain)
 {
-   struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    size_t iova_off = 0;
    dma_addr_t iova;

@@ -632,13 +661,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, int prot)
 {
-   return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+   return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
+           iommu_get_dma_domain(dev));
 }

 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
        enum dma_data_direction dir, unsigned long attrs)
 {
-   __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+   __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
 }

 /*

@@ -726,7 +756,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
        int nents, int prot)
 {
-   struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+   struct iommu_domain *domain = iommu_get_dma_domain(dev);
    struct iommu_dma_cookie *cookie = domain->iova_cookie;
    struct iova_domain *iovad = &cookie->iovad;
    struct scatterlist *s, *prev = NULL;

@@ -811,20 +841,21 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
        sg = tmp;
    }
    end = sg_dma_address(sg) + sg_dma_len(sg);
-   __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
+   __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
 }

 dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
        size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
    return __iommu_dma_map(dev, phys, size,
-           dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+           dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+           iommu_get_dma_domain(dev));
 }

 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-   __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+   __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
 }

 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)

@@ -850,7 +881,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
    if (!msi_page)
        return NULL;

-   iova = __iommu_dma_map(dev, msi_addr, size, prot);
+   iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
    if (iommu_dma_mapping_error(dev, iova))
        goto out_free_page;
drivers/iommu/fsl_pamu_domain.c:
@@ -814,6 +814,55 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
    return 0;
 }

+static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
+{
+   struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
+   unsigned long flags;
+   int ret;
+
+   spin_lock_irqsave(&dma_domain->domain_lock, flags);
+   /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
+   if (dma_domain->enabled) {
+       pr_debug("Can't set geometry attributes as domain is active\n");
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+       return -EBUSY;
+   }
+
+   /* Ensure that the geometry has been set for the domain */
+   if (!dma_domain->geom_size) {
+       pr_debug("Please configure geometry before setting the number of windows\n");
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+       return -EINVAL;
+   }
+
+   /*
+    * Ensure we have valid window count i.e. it should be less than
+    * maximum permissible limit and should be a power of two.
+    */
+   if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
+       pr_debug("Invalid window count\n");
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+       return -EINVAL;
+   }
+
+   ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
+                                  w_count > 1 ? w_count : 0);
+   if (!ret) {
+       kfree(dma_domain->win_arr);
+       dma_domain->win_arr = kcalloc(w_count,
+                                     sizeof(*dma_domain->win_arr),
+                                     GFP_ATOMIC);
+       if (!dma_domain->win_arr) {
+           spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+           return -ENOMEM;
+       }
+       dma_domain->win_cnt = w_count;
+   }
+   spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+   return ret;
+}
+
 static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
                    enum iommu_attr attr_type, void *data)
 {

@@ -830,6 +879,9 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
    case DOMAIN_ATTR_FSL_PAMU_ENABLE:
        ret = configure_domain_dma_state(dma_domain, *(int *)data);
        break;
+   case DOMAIN_ATTR_WINDOWS:
+       ret = fsl_pamu_set_windows(domain, *(u32 *)data);
+       break;
    default:
        pr_debug("Unsupported attribute type\n");
        ret = -EINVAL;

@@ -856,6 +908,9 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
    case DOMAIN_ATTR_FSL_PAMUV1:
        *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
        break;
+   case DOMAIN_ATTR_WINDOWS:
+       *(u32 *)data = dma_domain->win_cnt;
+       break;
    default:
        pr_debug("Unsupported attribute type\n");
        ret = -EINVAL;

@@ -916,13 +971,13 @@ static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
 static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
 {
    struct pci_controller *pci_ctl;
-   bool pci_endpt_partioning;
+   bool pci_endpt_partitioning;
    struct iommu_group *group = NULL;

    pci_ctl = pci_bus_to_host(pdev->bus);
-   pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
+   pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
    /* We can partition PCIe devices so assign device group to the device */
-   if (pci_endpt_partioning) {
+   if (pci_endpt_partitioning) {
        group = pci_device_group(&pdev->dev);

        /*

@@ -994,62 +1049,6 @@ static void fsl_pamu_remove_device(struct device *dev)
    iommu_group_remove_device(dev);
 }

-static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
-{
-   /* (body identical to the copy added above in the @@ -814 hunk) */
-}
-
-static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
-{
-   struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
-
-   return dma_domain->win_cnt;
-}
-
 static const struct iommu_ops fsl_pamu_ops = {
    .capable        = fsl_pamu_capable,
    .domain_alloc   = fsl_pamu_domain_alloc,

@@ -1058,8 +1057,6 @@ static const struct iommu_ops fsl_pamu_ops = {
    .detach_dev     = fsl_pamu_detach_device,
    .domain_window_enable = fsl_pamu_window_enable,
    .domain_window_disable = fsl_pamu_window_disable,
-   .domain_get_windows = fsl_pamu_get_windows,
-   .domain_set_windows = fsl_pamu_set_windows,
    .iova_to_phys   = fsl_pamu_iova_to_phys,
    .domain_set_attr = fsl_pamu_set_domain_attr,
    .domain_get_attr = fsl_pamu_get_domain_attr,
drivers/iommu/intel-iommu-debugfs.c (new file, 314 lines):
@@ -0,0 +1,314 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Intel Corporation.
 *
 * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
 *          Sohil Mehta <sohil.mehta@intel.com>
 *          Jacob Pan <jacob.jun.pan@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/pci.h>

#include <asm/irq_remapping.h>

struct iommu_regset {
    int         offset;
    const char  *regs;
};

#define IOMMU_REGSET_ENTRY(_reg_)                   \
    { DMAR_##_reg_##_REG, __stringify(_reg_) }
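/*
 * For illustration (this comment is not in the original file):
 * IOMMU_REGSET_ENTRY(VER) expands to { DMAR_VER_REG, "VER" } - the
 * token-pasted register-offset constant paired with the stringified
 * register name, consumed by the dump loop in iommu_regset_show().
 */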
static const struct iommu_regset iommu_regs[] = {
    IOMMU_REGSET_ENTRY(VER),
    IOMMU_REGSET_ENTRY(CAP),
    IOMMU_REGSET_ENTRY(ECAP),
    IOMMU_REGSET_ENTRY(GCMD),
    IOMMU_REGSET_ENTRY(GSTS),
    IOMMU_REGSET_ENTRY(RTADDR),
    IOMMU_REGSET_ENTRY(CCMD),
    IOMMU_REGSET_ENTRY(FSTS),
    IOMMU_REGSET_ENTRY(FECTL),
    IOMMU_REGSET_ENTRY(FEDATA),
    IOMMU_REGSET_ENTRY(FEADDR),
    IOMMU_REGSET_ENTRY(FEUADDR),
    IOMMU_REGSET_ENTRY(AFLOG),
    IOMMU_REGSET_ENTRY(PMEN),
    IOMMU_REGSET_ENTRY(PLMBASE),
    IOMMU_REGSET_ENTRY(PLMLIMIT),
    IOMMU_REGSET_ENTRY(PHMBASE),
    IOMMU_REGSET_ENTRY(PHMLIMIT),
    IOMMU_REGSET_ENTRY(IQH),
    IOMMU_REGSET_ENTRY(IQT),
    IOMMU_REGSET_ENTRY(IQA),
    IOMMU_REGSET_ENTRY(ICS),
    IOMMU_REGSET_ENTRY(IRTA),
    IOMMU_REGSET_ENTRY(PQH),
    IOMMU_REGSET_ENTRY(PQT),
    IOMMU_REGSET_ENTRY(PQA),
    IOMMU_REGSET_ENTRY(PRS),
    IOMMU_REGSET_ENTRY(PECTL),
    IOMMU_REGSET_ENTRY(PEDATA),
    IOMMU_REGSET_ENTRY(PEADDR),
    IOMMU_REGSET_ENTRY(PEUADDR),
    IOMMU_REGSET_ENTRY(MTRRCAP),
    IOMMU_REGSET_ENTRY(MTRRDEF),
    IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
    IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
    IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
    IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
    IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
    IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
    IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
    IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
    IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
    IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
    IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
    IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
    IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
    IOMMU_REGSET_ENTRY(VCCAP),
    IOMMU_REGSET_ENTRY(VCMD),
    IOMMU_REGSET_ENTRY(VCRSP),
};

static int iommu_regset_show(struct seq_file *m, void *unused)
{
    struct dmar_drhd_unit *drhd;
    struct intel_iommu *iommu;
    unsigned long flag;
    int i, ret = 0;
    u64 value;

    rcu_read_lock();
    for_each_active_iommu(iommu, drhd) {
        if (!drhd->reg_base_addr) {
            seq_puts(m, "IOMMU: Invalid base address\n");
            ret = -EINVAL;
            goto out;
        }

        seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
                   iommu->name, drhd->reg_base_addr);
        seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
        /*
         * Publish the contents of the 64-bit hardware registers
         * by adding the offset to the pointer (virtual address).
         */
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
            value = dmar_readq(iommu->reg + iommu_regs[i].offset);
            seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
                       iommu_regs[i].regs, iommu_regs[i].offset,
                       value);
        }
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
        seq_putc(m, '\n');
    }
out:
    rcu_read_unlock();

    return ret;
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);

static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
                               int bus)
{
    struct context_entry *context;
    int devfn;

    seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
    seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n");

    for (devfn = 0; devfn < 256; devfn++) {
        context = iommu_context_addr(iommu, bus, devfn, 0);
        if (!context)
            return;

        if (!context_present(context))
            continue;

        seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
                   bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                   context[0].hi, context[0].lo);
    }
}

static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
{
    unsigned long flags;
    int bus;

    spin_lock_irqsave(&iommu->lock, flags);
    seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
               (u64)virt_to_phys(iommu->root_entry));
    seq_puts(m, "Root Table Entries:\n");

    for (bus = 0; bus < 256; bus++) {
        if (!(iommu->root_entry[bus].lo & 1))
            continue;

        seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
                   iommu->root_entry[bus].hi,
                   iommu->root_entry[bus].lo);

        ctx_tbl_entry_show(m, iommu, bus);
        seq_putc(m, '\n');
    }
    spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dmar_translation_struct_show(struct seq_file *m, void *unused)
{
    struct dmar_drhd_unit *drhd;
    struct intel_iommu *iommu;

    rcu_read_lock();
    for_each_active_iommu(iommu, drhd) {
        root_tbl_entry_show(m, iommu);
        seq_putc(m, '\n');
    }
    rcu_read_unlock();

    return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);

#ifdef CONFIG_IRQ_REMAP
static void ir_tbl_remap_entry_show(struct seq_file *m,
                                    struct intel_iommu *iommu)
{
    struct irte *ri_entry;
    unsigned long flags;
    int idx;

    seq_puts(m, " Entry SrcID   DstID    Vct IRTE_high\t\tIRTE_low\n");

    raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
    for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
        ri_entry = &iommu->ir_table->base[idx];
        if (!ri_entry->present || ri_entry->p_pst)
            continue;

        seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x  %016llx\t%016llx\n",
                   idx, PCI_BUS_NUM(ri_entry->sid),
                   PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
                   ri_entry->dest_id, ri_entry->vector,
                   ri_entry->high, ri_entry->low);
    }
    raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

static void ir_tbl_posted_entry_show(struct seq_file *m,
                                     struct intel_iommu *iommu)
{
    struct irte *pi_entry;
    unsigned long flags;
    int idx;

    seq_puts(m, " Entry SrcID   PDA_high PDA_low  Vct IRTE_high\t\tIRTE_low\n");

    raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
    for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
        pi_entry = &iommu->ir_table->base[idx];
        if (!pi_entry->present || !pi_entry->p_pst)
            continue;

        seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x  %016llx\t%016llx\n",
                   idx, PCI_BUS_NUM(pi_entry->sid),
                   PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
                   pi_entry->pda_h, pi_entry->pda_l << 6,
                   pi_entry->vector, pi_entry->high,
                   pi_entry->low);
    }
    raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

/*
 * For active IOMMUs go through the Interrupt remapping
 * table and print valid entries in a table format for
 * Remapped and Posted Interrupts.
 */
static int ir_translation_struct_show(struct seq_file *m, void *unused)
{
    struct dmar_drhd_unit *drhd;
    struct intel_iommu *iommu;
    u64 irta;

    rcu_read_lock();
    for_each_active_iommu(iommu, drhd) {
        if (!ecap_ir_support(iommu->ecap))
            continue;

        seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
                   iommu->name);

        if (iommu->ir_table) {
            irta = virt_to_phys(iommu->ir_table->base);
            seq_printf(m, " IR table address:%llx\n", irta);
            ir_tbl_remap_entry_show(m, iommu);
        } else {
            seq_puts(m, "Interrupt Remapping is not enabled\n");
        }
        seq_putc(m, '\n');
    }

    seq_puts(m, "****\n\n");

    for_each_active_iommu(iommu, drhd) {
        if (!cap_pi_support(iommu->cap))
            continue;

        seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
                   iommu->name);

        if (iommu->ir_table) {
            irta = virt_to_phys(iommu->ir_table->base);
            seq_printf(m, " IR table address:%llx\n", irta);
            ir_tbl_posted_entry_show(m, iommu);
        } else {
            seq_puts(m, "Interrupt Remapping is not enabled\n");
        }
        seq_putc(m, '\n');
    }
    rcu_read_unlock();

    return 0;
}
DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
#endif

void __init intel_iommu_debugfs_init(void)
{
    struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
                                                iommu_debugfs_dir);

    debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
                        &iommu_regset_fops);
    debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
                        NULL, &dmar_translation_struct_fops);
#ifdef CONFIG_IRQ_REMAP
    debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
                        NULL, &ir_translation_struct_fops);
#endif
}
drivers/iommu/intel-iommu.c:
@@ -185,16 +185,6 @@ static int rwbf_quirk;
 static int force_on = 0;
 int intel_iommu_tboot_noforce;

-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
-   u64 lo;
-   u64 hi;
-};
 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

 /*

@@ -220,21 +210,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)

    return re->hi & VTD_PAGE_MASK;
 }
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
-   u64 lo;
-   u64 hi;
-};

 static inline void context_clear_pasid_enable(struct context_entry *context)
 {

@@ -261,7 +236,7 @@ static inline bool __context_present(struct context_entry *context)
    return (context->lo & 1);
 }

-static inline bool context_present(struct context_entry *context)
+bool context_present(struct context_entry *context)
 {
    return context_pasid_enabled(context) ?
         __context_present(context) :

@@ -788,8 +763,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
    domain->iommu_superpage = domain_update_iommu_superpage(NULL);
 }

-static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
-                                                       u8 bus, u8 devfn, int alloc)
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+                                         u8 devfn, int alloc)
 {
    struct root_entry *root = &iommu->root_entry[bus];
    struct context_entry *context;

@@ -4862,6 +4837,7 @@ int __init intel_iommu_init(void)
    cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
                      intel_iommu_cpu_dead);
    intel_iommu_enabled = 1;
+   intel_iommu_debugfs_init();

    return 0;
drivers/iommu/intel_irq_remapping.c:
@@ -76,7 +76,7 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS];
  * in single-threaded environment with interrupt disabled, so no need to take
  * the dmar_global_lock.
  */
-static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 static const struct irq_domain_ops intel_ir_domain_ops;

 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
drivers/iommu/io-pgtable-arm-v7s.c:
@@ -587,6 +587,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
    }

    io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+   io_pgtable_tlb_sync(&data->iop);
    return size;
 }

@@ -642,6 +643,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
            io_pgtable_tlb_sync(iop);
            ptep = iopte_deref(pte[i], lvl);
            __arm_v7s_free_table(ptep, lvl + 1, data);
+       } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+           /*
+            * Order the PTE update against queueing the IOVA, to
+            * guarantee that a flush callback from a different CPU
+            * has observed it before the TLBIALL can be issued.
+            */
+           smp_wmb();
        } else {
            io_pgtable_tlb_add_flush(iop, iova, blk_size,
                                     blk_size, true);

@@ -712,7 +720,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
                IO_PGTABLE_QUIRK_NO_PERMS |
                IO_PGTABLE_QUIRK_TLBI_ON_MAP |
                IO_PGTABLE_QUIRK_ARM_MTK_4GB |
-               IO_PGTABLE_QUIRK_NO_DMA))
+               IO_PGTABLE_QUIRK_NO_DMA |
+               IO_PGTABLE_QUIRK_NON_STRICT))
        return NULL;

    /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
drivers/iommu/io-pgtable-arm.c:
@@ -574,13 +574,13 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
            return 0;

        tablep = iopte_deref(pte, data);
+   } else if (unmap_idx >= 0) {
+       io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+       io_pgtable_tlb_sync(&data->iop);
+       return size;
    }

-   if (unmap_idx < 0)
-       return __arm_lpae_unmap(data, iova, size, lvl, tablep);
-
-   io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
-   return size;
+   return __arm_lpae_unmap(data, iova, size, lvl, tablep);
 }

@@ -610,6 +610,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
            io_pgtable_tlb_sync(iop);
            ptep = iopte_deref(pte, data);
            __arm_lpae_free_pgtable(data, lvl + 1, ptep);
+       } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+           /*
+            * Order the PTE update against queueing the IOVA, to
+            * guarantee that a flush callback from a different CPU
+            * has observed it before the TLBIALL can be issued.
+            */
+           smp_wmb();
        } else {
            io_pgtable_tlb_add_flush(iop, iova, size, size, true);
        }

@@ -772,7 +779,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
    u64 reg;
    struct arm_lpae_io_pgtable *data;

-   if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
+   if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+               IO_PGTABLE_QUIRK_NON_STRICT))
        return NULL;

    data = arm_lpae_alloc_pgtable(cfg);

@@ -864,7 +872,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
    struct arm_lpae_io_pgtable *data;

    /* The NS quirk doesn't apply at stage 2 */
-   if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
+   if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
+               IO_PGTABLE_QUIRK_NON_STRICT))
        return NULL;

    data = arm_lpae_alloc_pgtable(cfg);
Some files were not shown because too many files have changed in this diff.