mirror of
https://github.com/Dasharo/linux.git
synced 2026-03-06 15:25:10 -08:00
Merge tag 'iommu-updates-v5.20-or-v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
- The most intrusive patch is small and changes the default allocation
policy for DMA addresses.
Before the change the allocator tried its best to find an address in
the first 4GB. But that led to performance problems when that space
gets exhausted, and since most devices are capable of 64-bit DMA these
days, we changed it to search in the full DMA-mask range from the
beginning.
This change has the potential to uncover bugs elsewhere, in the
kernel or the hardware. There is a Kconfig option and a command line
option to restore the old behavior, but none of them is enabled by
default.
- Add Robin Murphy as reviewer of IOMMU code and maintainer for the
dma-iommu and iova code
- Changing IOVA magazine size from 1032 to 1024 bytes to save memory
- Some core code cleanups and dead-code removal
- Support for ACPI IORT RMR node
- Support for multiple PCI domains in the AMD-Vi driver
- ARM SMMU changes from Will Deacon:
- Add even more Qualcomm device-tree compatible strings
- Support dumping of IMP DEF Qualcomm registers on TLB sync
timeout
- Fix reference count leak on device tree node in Qualcomm driver
- Intel VT-d driver updates from Lu Baolu:
- Make intel-iommu.h private
- Optimize the use of two locks
- Extend the driver to support large-scale platforms
- Cleanup some dead code
- MediaTek IOMMU refactoring and support for TTBR up to 35bit
- Basic support for Exynos SysMMU v7
- VirtIO IOMMU driver gets a map/unmap_pages() implementation
- Other smaller cleanups and fixes
* tag 'iommu-updates-v5.20-or-v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (116 commits)
iommu/amd: Fix compile warning in init code
iommu/amd: Add support for AVIC when SNP is enabled
iommu/amd: Simplify and Consolidate Virtual APIC (AVIC) Enablement
ACPI/IORT: Fix build error implicit-function-declaration
drivers: iommu: fix clang -wformat warning
iommu/arm-smmu: qcom_iommu: Add of_node_put() when breaking out of loop
iommu/arm-smmu-qcom: Add SM6375 SMMU compatible
dt-bindings: arm-smmu: Add compatible for Qualcomm SM6375
MAINTAINERS: Add Robin Murphy as IOMMU SUBSYTEM reviewer
iommu/amd: Do not support IOMMUv2 APIs when SNP is enabled
iommu/amd: Do not support IOMMU_DOMAIN_IDENTITY after SNP is enabled
iommu/amd: Set translation valid bit only when IO page tables are in use
iommu/amd: Introduce function to check and enable SNP
iommu/amd: Globally detect SNP support
iommu/amd: Process all IVHDs before enabling IOMMU features
iommu/amd: Introduce global variable for storing common EFR and EFR2
iommu/amd: Introduce Support for Extended Feature 2 Register
iommu/amd: Change macro for IOMMU control register bit shift to decimal value
iommu/exynos: Enable default VM instance on SysMMU v7
iommu/exynos: Add SysMMU v7 register set
...
This commit is contained in:
@@ -2274,23 +2274,39 @@
|
||||
|
||||
ivrs_ioapic [HW,X86-64]
|
||||
Provide an override to the IOAPIC-ID<->DEVICE-ID
|
||||
mapping provided in the IVRS ACPI table. For
|
||||
example, to map IOAPIC-ID decimal 10 to
|
||||
PCI device 00:14.0 write the parameter as:
|
||||
mapping provided in the IVRS ACPI table.
|
||||
By default, PCI segment is 0, and can be omitted.
|
||||
For example:
|
||||
* To map IOAPIC-ID decimal 10 to PCI device 00:14.0
|
||||
write the parameter as:
|
||||
ivrs_ioapic[10]=00:14.0
|
||||
* To map IOAPIC-ID decimal 10 to PCI segment 0x1 and
|
||||
PCI device 00:14.0 write the parameter as:
|
||||
ivrs_ioapic[10]=0001:00:14.0
|
||||
|
||||
ivrs_hpet [HW,X86-64]
|
||||
Provide an override to the HPET-ID<->DEVICE-ID
|
||||
mapping provided in the IVRS ACPI table. For
|
||||
example, to map HPET-ID decimal 0 to
|
||||
PCI device 00:14.0 write the parameter as:
|
||||
mapping provided in the IVRS ACPI table.
|
||||
By default, PCI segment is 0, and can be omitted.
|
||||
For example:
|
||||
* To map HPET-ID decimal 0 to PCI device 00:14.0
|
||||
write the parameter as:
|
||||
ivrs_hpet[0]=00:14.0
|
||||
* To map HPET-ID decimal 10 to PCI segment 0x1 and
|
||||
PCI device 00:14.0 write the parameter as:
|
||||
ivrs_hpet[10]=0001:00:14.0
|
||||
|
||||
ivrs_acpihid [HW,X86-64]
|
||||
Provide an override to the ACPI-HID:UID<->DEVICE-ID
|
||||
mapping provided in the IVRS ACPI table. For
|
||||
example, to map UART-HID:UID AMD0020:0 to
|
||||
PCI device 00:14.5 write the parameter as:
|
||||
mapping provided in the IVRS ACPI table.
|
||||
|
||||
For example, to map UART-HID:UID AMD0020:0 to
|
||||
PCI segment 0x1 and PCI device ID 00:14.5,
|
||||
write the parameter as:
|
||||
ivrs_acpihid[0001:00:14.5]=AMD0020:0
|
||||
|
||||
By default, PCI segment is 0, and can be omitted.
|
||||
For example, PCI device 00:14.5 write the parameter as:
|
||||
ivrs_acpihid[00:14.5]=AMD0020:0
|
||||
|
||||
js= [HW,JOY] Analog joystick
|
||||
|
||||
@@ -42,6 +42,7 @@ properties:
|
||||
- qcom,sdx55-smmu-500
|
||||
- qcom,sdx65-smmu-500
|
||||
- qcom,sm6350-smmu-500
|
||||
- qcom,sm6375-smmu-500
|
||||
- qcom,sm8150-smmu-500
|
||||
- qcom,sm8250-smmu-500
|
||||
- qcom,sm8350-smmu-500
|
||||
|
||||
@@ -101,6 +101,10 @@ properties:
|
||||
items:
|
||||
- const: bclk
|
||||
|
||||
mediatek,infracfg:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle
|
||||
description: The phandle to the mediatek infracfg syscon
|
||||
|
||||
mediatek,larbs:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle-array
|
||||
minItems: 1
|
||||
@@ -167,6 +171,18 @@ allOf:
|
||||
required:
|
||||
- power-domains
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- mediatek,mt2712-m4u
|
||||
- mediatek,mt8173-m4u
|
||||
|
||||
then:
|
||||
required:
|
||||
- mediatek,infracfg
|
||||
|
||||
- if: # The IOMMUs don't have larbs.
|
||||
not:
|
||||
properties:
|
||||
@@ -191,6 +207,7 @@ examples:
|
||||
interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_LOW>;
|
||||
clocks = <&infracfg CLK_INFRA_M4U>;
|
||||
clock-names = "bclk";
|
||||
mediatek,infracfg = <&infracfg>;
|
||||
mediatek,larbs = <&larb0>, <&larb1>, <&larb2>,
|
||||
<&larb3>, <&larb4>, <&larb5>;
|
||||
#iommu-cells = <1>;
|
||||
|
||||
14
MAINTAINERS
14
MAINTAINERS
@@ -10233,7 +10233,6 @@ L: iommu@lists.linux.dev
|
||||
S: Supported
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
|
||||
F: drivers/iommu/intel/
|
||||
F: include/linux/intel-iommu.h
|
||||
F: include/linux/intel-svm.h
|
||||
|
||||
INTEL IOP-ADMA DMA DRIVER
|
||||
@@ -10605,9 +10604,20 @@ T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
|
||||
F: fs/iomap/
|
||||
F: include/linux/iomap.h
|
||||
|
||||
IOMMU DRIVERS
|
||||
IOMMU DMA-API LAYER
|
||||
M: Robin Murphy <robin.murphy@arm.com>
|
||||
L: iommu@lists.linux.dev
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
|
||||
F: drivers/iommu/dma-iommu.c
|
||||
F: drivers/iommu/iova.c
|
||||
F: include/linux/dma-iommu.h
|
||||
F: include/linux/iova.h
|
||||
|
||||
IOMMU SUBSYSTEM
|
||||
M: Joerg Roedel <joro@8bytes.org>
|
||||
M: Will Deacon <will@kernel.org>
|
||||
R: Robin Murphy <robin.murphy@arm.com>
|
||||
L: iommu@lists.linux.dev
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
* Copyright (c) 2006-2009, Intel Corporation
|
||||
*/
|
||||
|
||||
#include <linux/intel-iommu.h>
|
||||
#include <linux/init_task.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/export.h>
|
||||
@@ -516,17 +515,3 @@ struct acpi_table_header *tboot_get_dmar_table(struct acpi_table_header *dmar_tb
|
||||
|
||||
return dmar_tbl;
|
||||
}
|
||||
|
||||
int tboot_force_iommu(void)
|
||||
{
|
||||
if (!tboot_enabled())
|
||||
return 0;
|
||||
|
||||
if (no_iommu || dmar_disabled)
|
||||
pr_warn("Forcing Intel-IOMMU to enabled\n");
|
||||
|
||||
dmar_disabled = 0;
|
||||
no_iommu = 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -41,7 +41,6 @@
|
||||
#include <linux/mman.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/intel-iommu.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/user-return-notifier.h>
|
||||
#include <linux/srcu.h>
|
||||
|
||||
@@ -788,6 +788,294 @@ void acpi_configure_pmsi_domain(struct device *dev)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
static void iort_rmr_free(struct device *dev,
|
||||
struct iommu_resv_region *region)
|
||||
{
|
||||
struct iommu_iort_rmr_data *rmr_data;
|
||||
|
||||
rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
|
||||
kfree(rmr_data->sids);
|
||||
kfree(rmr_data);
|
||||
}
|
||||
|
||||
static struct iommu_iort_rmr_data *iort_rmr_alloc(
|
||||
struct acpi_iort_rmr_desc *rmr_desc,
|
||||
int prot, enum iommu_resv_type type,
|
||||
u32 *sids, u32 num_sids)
|
||||
{
|
||||
struct iommu_iort_rmr_data *rmr_data;
|
||||
struct iommu_resv_region *region;
|
||||
u32 *sids_copy;
|
||||
u64 addr = rmr_desc->base_address, size = rmr_desc->length;
|
||||
|
||||
rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
|
||||
if (!rmr_data)
|
||||
return NULL;
|
||||
|
||||
/* Create a copy of SIDs array to associate with this rmr_data */
|
||||
sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
|
||||
if (!sids_copy) {
|
||||
kfree(rmr_data);
|
||||
return NULL;
|
||||
}
|
||||
rmr_data->sids = sids_copy;
|
||||
rmr_data->num_sids = num_sids;
|
||||
|
||||
if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
|
||||
/* PAGE align base addr and size */
|
||||
addr &= PAGE_MASK;
|
||||
size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
|
||||
|
||||
pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
|
||||
rmr_desc->base_address,
|
||||
rmr_desc->base_address + rmr_desc->length - 1,
|
||||
addr, addr + size - 1);
|
||||
}
|
||||
|
||||
region = &rmr_data->rr;
|
||||
INIT_LIST_HEAD(®ion->list);
|
||||
region->start = addr;
|
||||
region->length = size;
|
||||
region->prot = prot;
|
||||
region->type = type;
|
||||
region->free = iort_rmr_free;
|
||||
|
||||
return rmr_data;
|
||||
}
|
||||
|
||||
static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
|
||||
u32 count)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
u64 end, start = desc[i].base_address, length = desc[i].length;
|
||||
|
||||
if (!length) {
|
||||
pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
|
||||
start);
|
||||
continue;
|
||||
}
|
||||
|
||||
end = start + length - 1;
|
||||
|
||||
/* Check for address overlap */
|
||||
for (j = i + 1; j < count; j++) {
|
||||
u64 e_start = desc[j].base_address;
|
||||
u64 e_end = e_start + desc[j].length - 1;
|
||||
|
||||
if (start <= e_end && end >= e_start)
|
||||
pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
|
||||
start, end);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Please note, we will keep the already allocated RMR reserve
|
||||
* regions in case of a memory allocation failure.
|
||||
*/
|
||||
static void iort_get_rmrs(struct acpi_iort_node *node,
|
||||
struct acpi_iort_node *smmu,
|
||||
u32 *sids, u32 num_sids,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
|
||||
struct acpi_iort_rmr_desc *rmr_desc;
|
||||
int i;
|
||||
|
||||
rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
|
||||
rmr->rmr_offset);
|
||||
|
||||
iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
|
||||
|
||||
for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
|
||||
struct iommu_iort_rmr_data *rmr_data;
|
||||
enum iommu_resv_type type;
|
||||
int prot = IOMMU_READ | IOMMU_WRITE;
|
||||
|
||||
if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
|
||||
type = IOMMU_RESV_DIRECT_RELAXABLE;
|
||||
else
|
||||
type = IOMMU_RESV_DIRECT;
|
||||
|
||||
if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
|
||||
prot |= IOMMU_PRIV;
|
||||
|
||||
/* Attributes 0x00 - 0x03 represents device memory */
|
||||
if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
|
||||
ACPI_IORT_RMR_ATTR_DEVICE_GRE)
|
||||
prot |= IOMMU_MMIO;
|
||||
else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
|
||||
ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
|
||||
prot |= IOMMU_CACHE;
|
||||
|
||||
rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
|
||||
sids, num_sids);
|
||||
if (!rmr_data)
|
||||
return;
|
||||
|
||||
list_add_tail(&rmr_data->rr.list, head);
|
||||
}
|
||||
}
|
||||
|
||||
static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
|
||||
u32 new_count)
|
||||
{
|
||||
u32 *new_sids;
|
||||
u32 total_count = count + new_count;
|
||||
int i;
|
||||
|
||||
new_sids = krealloc_array(sids, count + new_count,
|
||||
sizeof(*new_sids), GFP_KERNEL);
|
||||
if (!new_sids)
|
||||
return NULL;
|
||||
|
||||
for (i = count; i < total_count; i++)
|
||||
new_sids[i] = id_start++;
|
||||
|
||||
return new_sids;
|
||||
}
|
||||
|
||||
static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
|
||||
u32 id_count)
|
||||
{
|
||||
int i;
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
|
||||
/*
|
||||
* Make sure the kernel has preserved the boot firmware PCIe
|
||||
* configuration. This is required to ensure that the RMR PCIe
|
||||
* StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
|
||||
*/
|
||||
if (dev_is_pci(dev)) {
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
|
||||
|
||||
if (!host->preserve_config)
|
||||
return false;
|
||||
}
|
||||
|
||||
for (i = 0; i < fwspec->num_ids; i++) {
|
||||
if (fwspec->ids[i] >= id_start &&
|
||||
fwspec->ids[i] <= id_start + id_count)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void iort_node_get_rmr_info(struct acpi_iort_node *node,
|
||||
struct acpi_iort_node *iommu,
|
||||
struct device *dev, struct list_head *head)
|
||||
{
|
||||
struct acpi_iort_node *smmu = NULL;
|
||||
struct acpi_iort_rmr *rmr;
|
||||
struct acpi_iort_id_mapping *map;
|
||||
u32 *sids = NULL;
|
||||
u32 num_sids = 0;
|
||||
int i;
|
||||
|
||||
if (!node->mapping_offset || !node->mapping_count) {
|
||||
pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
|
||||
node);
|
||||
return;
|
||||
}
|
||||
|
||||
rmr = (struct acpi_iort_rmr *)node->node_data;
|
||||
if (!rmr->rmr_offset || !rmr->rmr_count)
|
||||
return;
|
||||
|
||||
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
|
||||
node->mapping_offset);
|
||||
|
||||
/*
|
||||
* Go through the ID mappings and see if we have a match for SMMU
|
||||
* and dev(if !NULL). If found, get the sids for the Node.
|
||||
* Please note, id_count is equal to the number of IDs in the
|
||||
* range minus one.
|
||||
*/
|
||||
for (i = 0; i < node->mapping_count; i++, map++) {
|
||||
struct acpi_iort_node *parent;
|
||||
|
||||
if (!map->id_count)
|
||||
continue;
|
||||
|
||||
parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
|
||||
map->output_reference);
|
||||
if (parent != iommu)
|
||||
continue;
|
||||
|
||||
/* If dev is valid, check RMR node corresponds to the dev SID */
|
||||
if (dev && !iort_rmr_has_dev(dev, map->output_base,
|
||||
map->id_count))
|
||||
continue;
|
||||
|
||||
/* Retrieve SIDs associated with the Node. */
|
||||
sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
|
||||
map->id_count + 1);
|
||||
if (!sids)
|
||||
return;
|
||||
|
||||
num_sids += map->id_count + 1;
|
||||
}
|
||||
|
||||
if (!sids)
|
||||
return;
|
||||
|
||||
iort_get_rmrs(node, smmu, sids, num_sids, head);
|
||||
kfree(sids);
|
||||
}
|
||||
|
||||
static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct acpi_table_iort *iort;
|
||||
struct acpi_iort_node *iort_node, *iort_end;
|
||||
int i;
|
||||
|
||||
/* Only supports ARM DEN 0049E.d onwards */
|
||||
if (iort_table->revision < 5)
|
||||
return;
|
||||
|
||||
iort = (struct acpi_table_iort *)iort_table;
|
||||
|
||||
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
|
||||
iort->node_offset);
|
||||
iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
|
||||
iort_table->length);
|
||||
|
||||
for (i = 0; i < iort->node_count; i++) {
|
||||
if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
|
||||
"IORT node pointer overflows, bad table!\n"))
|
||||
return;
|
||||
|
||||
if (iort_node->type == ACPI_IORT_NODE_RMR)
|
||||
iort_node_get_rmr_info(iort_node, iommu, dev, head);
|
||||
|
||||
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
|
||||
iort_node->length);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Populate the RMR list associated with a given IOMMU and dev(if provided).
|
||||
* If dev is NULL, the function populates all the RMRs associated with the
|
||||
* given IOMMU.
|
||||
*/
|
||||
static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
|
||||
struct device *dev,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct acpi_iort_node *iommu;
|
||||
|
||||
iommu = iort_get_iort_node(iommu_fwnode);
|
||||
if (!iommu)
|
||||
return;
|
||||
|
||||
iort_find_rmrs(iommu, dev, head);
|
||||
}
|
||||
|
||||
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
|
||||
{
|
||||
struct acpi_iort_node *iommu;
|
||||
@@ -806,27 +1094,22 @@ static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* iort_iommu_msi_get_resv_regions - Reserved region driver helper
|
||||
* @dev: Device from iommu_get_resv_regions()
|
||||
* @head: Reserved region list from iommu_get_resv_regions()
|
||||
*
|
||||
* Returns: Number of msi reserved regions on success (0 if platform
|
||||
* doesn't require the reservation or no associated msi regions),
|
||||
* appropriate error value otherwise. The ITS interrupt translation
|
||||
* spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
|
||||
* are the msi reserved regions.
|
||||
/*
|
||||
* Retrieve platform specific HW MSI reserve regions.
|
||||
* The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
|
||||
* associated with the device are the HW MSI reserved regions.
|
||||
*/
|
||||
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
|
||||
static void iort_iommu_msi_get_resv_regions(struct device *dev,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
struct acpi_iort_its_group *its;
|
||||
struct acpi_iort_node *iommu_node, *its_node = NULL;
|
||||
int i, resv = 0;
|
||||
int i;
|
||||
|
||||
iommu_node = iort_get_msi_resv_iommu(dev);
|
||||
if (!iommu_node)
|
||||
return 0;
|
||||
return;
|
||||
|
||||
/*
|
||||
* Current logic to reserve ITS regions relies on HW topologies
|
||||
@@ -846,7 +1129,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
|
||||
}
|
||||
|
||||
if (!its_node)
|
||||
return 0;
|
||||
return;
|
||||
|
||||
/* Move to ITS specific data */
|
||||
its = (struct acpi_iort_its_group *)its_node->node_data;
|
||||
@@ -860,16 +1143,53 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
|
||||
|
||||
region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
|
||||
prot, IOMMU_RESV_MSI);
|
||||
if (region) {
|
||||
if (region)
|
||||
list_add_tail(®ion->list, head);
|
||||
resv++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return (resv == its->its_count) ? resv : -ENODEV;
|
||||
}
|
||||
|
||||
/**
|
||||
* iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
|
||||
* @dev: Device from iommu_get_resv_regions()
|
||||
* @head: Reserved region list from iommu_get_resv_regions()
|
||||
*/
|
||||
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
|
||||
{
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
|
||||
iort_iommu_msi_get_resv_regions(dev, head);
|
||||
iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
|
||||
}
|
||||
|
||||
/**
|
||||
* iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
|
||||
* associated StreamIDs information.
|
||||
* @iommu_fwnode: fwnode associated with IOMMU
|
||||
* @head: Reserved region list
|
||||
*/
|
||||
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
|
||||
struct list_head *head)
|
||||
{
|
||||
iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
|
||||
|
||||
/**
|
||||
* iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
|
||||
* @iommu_fwnode: fwnode associated with IOMMU
|
||||
* @head: Resereved region list
|
||||
*/
|
||||
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct iommu_resv_region *entry, *next;
|
||||
|
||||
list_for_each_entry_safe(entry, next, head, list)
|
||||
entry->free(NULL, entry);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
|
||||
|
||||
static inline bool iort_iommu_driver_enabled(u8 type)
|
||||
{
|
||||
switch (type) {
|
||||
@@ -1034,8 +1354,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
|
||||
}
|
||||
|
||||
#else
|
||||
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
|
||||
{ return 0; }
|
||||
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
|
||||
{ }
|
||||
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
|
||||
{ return -ENODEV; }
|
||||
#endif
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/agp_backend.h>
|
||||
#include <linux/intel-iommu.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/smp.h>
|
||||
#include "agp.h"
|
||||
@@ -573,18 +573,15 @@ static void intel_gtt_cleanup(void)
|
||||
*/
|
||||
static inline int needs_ilk_vtd_wa(void)
|
||||
{
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
const unsigned short gpu_devid = intel_private.pcidev->device;
|
||||
|
||||
/* Query intel_iommu to see if we need the workaround. Presumably that
|
||||
* was loaded first.
|
||||
/*
|
||||
* Query iommu subsystem to see if we need the workaround. Presumably
|
||||
* that was loaded first.
|
||||
*/
|
||||
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
|
||||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
|
||||
intel_iommu_gfx_mapped)
|
||||
return 1;
|
||||
#endif
|
||||
return 0;
|
||||
return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
|
||||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
|
||||
device_iommu_mapped(&intel_private.pcidev->dev));
|
||||
}
|
||||
|
||||
static bool intel_gtt_can_wc(void)
|
||||
|
||||
@@ -27,7 +27,6 @@
|
||||
#include <acpi/video.h>
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/intel-iommu.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/dma-resv.h>
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
|
||||
#include <linux/dma-resv.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/intel-iommu.h>
|
||||
#include <linux/sync_file.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
|
||||
@@ -144,6 +144,32 @@ config IOMMU_DMA
|
||||
select IRQ_MSI_IOMMU
|
||||
select NEED_SG_DMA_LENGTH
|
||||
|
||||
config IOMMU_DMA_PCI_SAC
|
||||
bool "Enable 64-bit legacy PCI optimisation by default"
|
||||
depends on IOMMU_DMA
|
||||
help
|
||||
Enable by default an IOMMU optimisation for 64-bit legacy PCI devices,
|
||||
wherein the DMA API layer will always first try to allocate a 32-bit
|
||||
DMA address suitable for a single address cycle, before falling back
|
||||
to allocating from the device's full usable address range. If your
|
||||
system has 64-bit legacy PCI devices in 32-bit slots where using dual
|
||||
address cycles reduces DMA throughput significantly, this may be
|
||||
beneficial to overall performance.
|
||||
|
||||
If you have a modern PCI Express based system, this feature mostly just
|
||||
represents extra overhead in the allocation path for no practical
|
||||
benefit, and it should usually be preferable to say "n" here.
|
||||
|
||||
However, beware that this feature has also historically papered over
|
||||
bugs where the IOMMU address width and/or device DMA mask is not set
|
||||
correctly. If device DMA problems and IOMMU faults start occurring
|
||||
after disabling this option, it is almost certainly indicative of a
|
||||
latent driver or firmware/BIOS bug, which would previously have only
|
||||
manifested with several gigabytes worth of concurrent DMA mappings.
|
||||
|
||||
If this option is not set, the feature can still be re-enabled at
|
||||
boot time with the "iommu.forcedac=0" command-line argument.
|
||||
|
||||
# Shared Virtual Addressing
|
||||
config IOMMU_SVA
|
||||
bool
|
||||
@@ -363,6 +389,16 @@ config ARM_SMMU_QCOM
|
||||
When running on a Qualcomm platform that has the custom variant
|
||||
of the ARM SMMU, this needs to be built into the SMMU driver.
|
||||
|
||||
config ARM_SMMU_QCOM_DEBUG
|
||||
bool "ARM SMMU QCOM implementation defined debug support"
|
||||
depends on ARM_SMMU_QCOM
|
||||
help
|
||||
Support for implementation specific debug features in ARM SMMU
|
||||
hardware found in QTI platforms.
|
||||
|
||||
Say Y here to enable debug for issues such as TLB sync timeouts
|
||||
which requires implementation defined register dumps.
|
||||
|
||||
config ARM_SMMU_V3
|
||||
tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
|
||||
depends on ARM64
|
||||
|
||||
@@ -13,12 +13,13 @@
|
||||
|
||||
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
|
||||
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
|
||||
extern void amd_iommu_apply_erratum_63(u16 devid);
|
||||
extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
|
||||
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
|
||||
extern int amd_iommu_init_devices(void);
|
||||
extern void amd_iommu_uninit_devices(void);
|
||||
extern void amd_iommu_init_notifier(void);
|
||||
extern int amd_iommu_init_api(void);
|
||||
extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
|
||||
|
||||
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
|
||||
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
|
||||
@@ -114,10 +115,17 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
|
||||
amd_iommu_domain_set_pt_root(domain, 0);
|
||||
}
|
||||
|
||||
static inline int get_pci_sbdf_id(struct pci_dev *pdev)
|
||||
{
|
||||
int seg = pci_domain_nr(pdev->bus);
|
||||
u16 devid = pci_dev_id(pdev);
|
||||
|
||||
return PCI_SEG_DEVID_TO_SBDF(seg, devid);
|
||||
}
|
||||
|
||||
extern bool translation_pre_enabled(struct amd_iommu *iommu);
|
||||
extern bool amd_iommu_is_attach_deferred(struct device *dev);
|
||||
extern int __init add_special_device(u8 type, u8 id, u16 *devid,
|
||||
extern int __init add_special_device(u8 type, u8 id, u32 *devid,
|
||||
bool cmd_line);
|
||||
|
||||
#ifdef CONFIG_DMI
|
||||
@@ -128,4 +136,10 @@ static inline void amd_iommu_apply_ivrs_quirks(void) { }
|
||||
|
||||
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
|
||||
u64 *root, int mode);
|
||||
extern struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
|
||||
|
||||
extern u64 amd_iommu_efr;
|
||||
extern u64 amd_iommu_efr2;
|
||||
|
||||
extern bool amd_iommu_snp_en;
|
||||
#endif
|
||||
|
||||
@@ -67,6 +67,7 @@
|
||||
#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
|
||||
#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
|
||||
#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
|
||||
#define MMIO_EXT_FEATURES2 0x01A0
|
||||
#define MMIO_CMD_HEAD_OFFSET 0x2000
|
||||
#define MMIO_CMD_TAIL_OFFSET 0x2008
|
||||
#define MMIO_EVT_HEAD_OFFSET 0x2010
|
||||
@@ -102,6 +103,12 @@
|
||||
#define FEATURE_GLXVAL_SHIFT 14
|
||||
#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
|
||||
|
||||
/* Extended Feature 2 Bits */
|
||||
#define FEATURE_SNPAVICSUP_SHIFT 5
|
||||
#define FEATURE_SNPAVICSUP_MASK (0x07ULL << FEATURE_SNPAVICSUP_SHIFT)
|
||||
#define FEATURE_SNPAVICSUP_GAM(x) \
|
||||
((x & FEATURE_SNPAVICSUP_MASK) >> FEATURE_SNPAVICSUP_SHIFT == 0x1)
|
||||
|
||||
/* Note:
|
||||
* The current driver only support 16-bit PASID.
|
||||
* Currently, hardware only implement upto 16-bit PASID
|
||||
@@ -143,27 +150,28 @@
|
||||
#define EVENT_FLAG_I 0x008
|
||||
|
||||
/* feature control bits */
|
||||
#define CONTROL_IOMMU_EN 0x00ULL
|
||||
#define CONTROL_HT_TUN_EN 0x01ULL
|
||||
#define CONTROL_EVT_LOG_EN 0x02ULL
|
||||
#define CONTROL_EVT_INT_EN 0x03ULL
|
||||
#define CONTROL_COMWAIT_EN 0x04ULL
|
||||
#define CONTROL_INV_TIMEOUT 0x05ULL
|
||||
#define CONTROL_PASSPW_EN 0x08ULL
|
||||
#define CONTROL_RESPASSPW_EN 0x09ULL
|
||||
#define CONTROL_COHERENT_EN 0x0aULL
|
||||
#define CONTROL_ISOC_EN 0x0bULL
|
||||
#define CONTROL_CMDBUF_EN 0x0cULL
|
||||
#define CONTROL_PPRLOG_EN 0x0dULL
|
||||
#define CONTROL_PPRINT_EN 0x0eULL
|
||||
#define CONTROL_PPR_EN 0x0fULL
|
||||
#define CONTROL_GT_EN 0x10ULL
|
||||
#define CONTROL_GA_EN 0x11ULL
|
||||
#define CONTROL_GAM_EN 0x19ULL
|
||||
#define CONTROL_GALOG_EN 0x1CULL
|
||||
#define CONTROL_GAINT_EN 0x1DULL
|
||||
#define CONTROL_XT_EN 0x32ULL
|
||||
#define CONTROL_INTCAPXT_EN 0x33ULL
|
||||
#define CONTROL_IOMMU_EN 0
|
||||
#define CONTROL_HT_TUN_EN 1
|
||||
#define CONTROL_EVT_LOG_EN 2
|
||||
#define CONTROL_EVT_INT_EN 3
|
||||
#define CONTROL_COMWAIT_EN 4
|
||||
#define CONTROL_INV_TIMEOUT 5
|
||||
#define CONTROL_PASSPW_EN 8
|
||||
#define CONTROL_RESPASSPW_EN 9
|
||||
#define CONTROL_COHERENT_EN 10
|
||||
#define CONTROL_ISOC_EN 11
|
||||
#define CONTROL_CMDBUF_EN 12
|
||||
#define CONTROL_PPRLOG_EN 13
|
||||
#define CONTROL_PPRINT_EN 14
|
||||
#define CONTROL_PPR_EN 15
|
||||
#define CONTROL_GT_EN 16
|
||||
#define CONTROL_GA_EN 17
|
||||
#define CONTROL_GAM_EN 25
|
||||
#define CONTROL_GALOG_EN 28
|
||||
#define CONTROL_GAINT_EN 29
|
||||
#define CONTROL_XT_EN 50
|
||||
#define CONTROL_INTCAPXT_EN 51
|
||||
#define CONTROL_SNPAVIC_EN 61
|
||||
|
||||
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
|
||||
#define CTRL_INV_TO_NONE 0
|
||||
@@ -445,8 +453,6 @@ struct irq_remap_table {
|
||||
u32 *table;
|
||||
};
|
||||
|
||||
extern struct irq_remap_table **irq_lookup_table;
|
||||
|
||||
/* Interrupt remapping feature used? */
|
||||
extern bool amd_iommu_irq_remap;
|
||||
|
||||
@@ -456,6 +462,16 @@ extern bool amdr_ivrs_remap_support;
|
||||
/* kmem_cache to get tables with 128 byte alignement */
|
||||
extern struct kmem_cache *amd_iommu_irq_cache;
|
||||
|
||||
#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
|
||||
#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
|
||||
#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
|
||||
((devid) & 0xffff))
|
||||
|
||||
/* Make iterating over all pci segment easier */
|
||||
#define for_each_pci_segment(pci_seg) \
|
||||
list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
|
||||
#define for_each_pci_segment_safe(pci_seg, next) \
|
||||
list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
|
||||
/*
|
||||
* Make iterating over all IOMMUs easier
|
||||
*/
|
||||
@@ -478,13 +494,14 @@ extern struct kmem_cache *amd_iommu_irq_cache;
|
||||
struct amd_iommu_fault {
|
||||
u64 address; /* IO virtual address of the fault*/
|
||||
u32 pasid; /* Address space identifier */
|
||||
u16 device_id; /* Originating PCI device id */
|
||||
u32 sbdf; /* Originating PCI device id */
|
||||
u16 tag; /* PPR tag */
|
||||
u16 flags; /* Fault flags */
|
||||
|
||||
};
|
||||
|
||||
|
||||
struct amd_iommu;
|
||||
struct iommu_domain;
|
||||
struct irq_domain;
|
||||
struct amd_irte_ops;
|
||||
@@ -530,6 +547,75 @@ struct protection_domain {
|
||||
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
|
||||
};
|
||||
|
||||
/*
|
||||
* This structure contains information about one PCI segment in the system.
|
||||
*/
|
||||
struct amd_iommu_pci_seg {
|
||||
/* List with all PCI segments in the system */
|
||||
struct list_head list;
|
||||
|
||||
/* List of all available dev_data structures */
|
||||
struct llist_head dev_data_list;
|
||||
|
||||
/* PCI segment number */
|
||||
u16 id;
|
||||
|
||||
/* Largest PCI device id we expect translation requests for */
|
||||
u16 last_bdf;
|
||||
|
||||
/* Size of the device table */
|
||||
u32 dev_table_size;
|
||||
|
||||
/* Size of the alias table */
|
||||
u32 alias_table_size;
|
||||
|
||||
/* Size of the rlookup table */
|
||||
u32 rlookup_table_size;
|
||||
|
||||
/*
|
||||
* device table virtual address
|
||||
*
|
||||
* Pointer to the per PCI segment device table.
|
||||
* It is indexed by the PCI device id or the HT unit id and contains
|
||||
* information about the domain the device belongs to as well as the
|
||||
* page table root pointer.
|
||||
*/
|
||||
struct dev_table_entry *dev_table;
|
||||
|
||||
/*
|
||||
* The rlookup iommu table is used to find the IOMMU which is
|
||||
* responsible for a specific device. It is indexed by the PCI
|
||||
* device id.
|
||||
*/
|
||||
struct amd_iommu **rlookup_table;
|
||||
|
||||
/*
|
||||
* This table is used to find the irq remapping table for a given
|
||||
* device id quickly.
|
||||
*/
|
||||
struct irq_remap_table **irq_lookup_table;
|
||||
|
||||
/*
|
||||
* Pointer to a device table which the content of old device table
|
||||
* will be copied to. It's only be used in kdump kernel.
|
||||
*/
|
||||
struct dev_table_entry *old_dev_tbl_cpy;
|
||||
|
||||
/*
|
||||
* The alias table is a driver specific data structure which contains the
|
||||
* mappings of the PCI device ids to the actual requestor ids on the IOMMU.
|
||||
* More than one device can share the same requestor id.
|
||||
*/
|
||||
u16 *alias_table;
|
||||
|
||||
/*
|
||||
* A list of required unity mappings we find in ACPI. It is not locked
|
||||
* because as runtime it is only read. It is created at ACPI table
|
||||
* parsing time.
|
||||
*/
|
||||
struct list_head unity_map;
|
||||
};
|
||||
|
||||
/*
|
||||
* Structure where we save information about one hardware AMD IOMMU in the
|
||||
* system.
|
||||
@@ -567,6 +653,9 @@ struct amd_iommu {
|
||||
/* Extended features */
|
||||
u64 features;
|
||||
|
||||
/* Extended features 2 */
|
||||
u64 features2;
|
||||
|
||||
/* IOMMUv2 */
|
||||
bool is_iommu_v2;
|
||||
|
||||
@@ -581,7 +670,7 @@ struct amd_iommu {
|
||||
u16 cap_ptr;
|
||||
|
||||
/* pci domain of this IOMMU */
|
||||
u16 pci_seg;
|
||||
struct amd_iommu_pci_seg *pci_seg;
|
||||
|
||||
/* start of exclusion range of that IOMMU */
|
||||
u64 exclusion_start;
|
||||
@@ -666,8 +755,8 @@ struct acpihid_map_entry {
|
||||
struct list_head list;
|
||||
u8 uid[ACPIHID_UID_LEN];
|
||||
u8 hid[ACPIHID_HID_LEN];
|
||||
u16 devid;
|
||||
u16 root_devid;
|
||||
u32 devid;
|
||||
u32 root_devid;
|
||||
bool cmd_line;
|
||||
struct iommu_group *group;
|
||||
};
|
||||
@@ -675,7 +764,7 @@ struct acpihid_map_entry {
|
||||
struct devid_map {
|
||||
struct list_head list;
|
||||
u8 id;
|
||||
u16 devid;
|
||||
u32 devid;
|
||||
bool cmd_line;
|
||||
};
|
||||
|
||||
@@ -689,7 +778,7 @@ struct iommu_dev_data {
|
||||
struct list_head list; /* For domain->dev_list */
|
||||
struct llist_node dev_data_list; /* For global dev_data_list */
|
||||
struct protection_domain *domain; /* Domain the device is bound to */
|
||||
struct pci_dev *pdev;
|
||||
struct device *dev;
|
||||
u16 devid; /* PCI Device ID */
|
||||
bool iommu_v2; /* Device can make use of IOMMUv2 */
|
||||
struct {
|
||||
@@ -709,6 +798,12 @@ extern struct list_head ioapic_map;
|
||||
extern struct list_head hpet_map;
|
||||
extern struct list_head acpihid_map;
|
||||
|
||||
/*
|
||||
* List with all PCI segments in the system. This list is not locked because
|
||||
* it is only written at driver initialization time
|
||||
*/
|
||||
extern struct list_head amd_iommu_pci_seg_list;
|
||||
|
||||
/*
|
||||
* List with all IOMMUs in the system. This list is not locked because it is
|
||||
* only written and read at driver initialization or suspend time
|
||||
@@ -748,39 +843,13 @@ struct unity_map_entry {
|
||||
int prot;
|
||||
};
|
||||
|
||||
/*
|
||||
* List of all unity mappings. It is not locked because as runtime it is only
|
||||
* read. It is created at ACPI table parsing time.
|
||||
*/
|
||||
extern struct list_head amd_iommu_unity_map;
|
||||
|
||||
/*
|
||||
* Data structures for device handling
|
||||
*/
|
||||
|
||||
/*
|
||||
* Device table used by hardware. Read and write accesses by software are
|
||||
* locked with the amd_iommu_pd_table lock.
|
||||
*/
|
||||
extern struct dev_table_entry *amd_iommu_dev_table;
|
||||
|
||||
/*
|
||||
* Alias table to find requestor ids to device ids. Not locked because only
|
||||
* read on runtime.
|
||||
*/
|
||||
extern u16 *amd_iommu_alias_table;
|
||||
|
||||
/*
|
||||
* Reverse lookup table to find the IOMMU which translates a specific device.
|
||||
*/
|
||||
extern struct amd_iommu **amd_iommu_rlookup_table;
|
||||
|
||||
/* size of the dma_ops aperture as power of 2 */
|
||||
extern unsigned amd_iommu_aperture_order;
|
||||
|
||||
/* largest PCI device id we expect translation requests for */
|
||||
extern u16 amd_iommu_last_bdf;
|
||||
|
||||
/* allocation bitmap for domain ids */
|
||||
extern unsigned long *amd_iommu_pd_alloc_bitmap;
|
||||
|
||||
@@ -913,6 +982,7 @@ struct irq_2_irte {
|
||||
|
||||
struct amd_ir_data {
|
||||
u32 cached_ga_tag;
|
||||
struct amd_iommu *iommu;
|
||||
struct irq_2_irte irq_2_irte;
|
||||
struct msi_msg msi_entry;
|
||||
void *entry; /* Pointer to union irte or struct irte_ga */
|
||||
@@ -930,9 +1000,9 @@ struct amd_ir_data {
|
||||
|
||||
struct amd_irte_ops {
|
||||
void (*prepare)(void *, u32, bool, u8, u32, int);
|
||||
void (*activate)(void *, u16, u16);
|
||||
void (*deactivate)(void *, u16, u16);
|
||||
void (*set_affinity)(void *, u16, u16, u8, u32);
|
||||
void (*activate)(struct amd_iommu *iommu, void *, u16, u16);
|
||||
void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16);
|
||||
void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32);
|
||||
void *(*get)(struct irq_remap_table *, int);
|
||||
void (*set_allocated)(struct irq_remap_table *, int);
|
||||
bool (*is_allocated)(struct irq_remap_table *, int);
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -258,7 +258,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
|
||||
__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
|
||||
|
||||
/* pte could have been changed somewhere. */
|
||||
if (cmpxchg64(pte, __pte, __npte) != __pte)
|
||||
if (!try_cmpxchg64(pte, &__pte, __npte))
|
||||
free_page((unsigned long)page);
|
||||
else if (IOMMU_PTE_PRESENT(__pte))
|
||||
*updated = true;
|
||||
@@ -341,10 +341,8 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
|
||||
u64 *pt;
|
||||
int mode;
|
||||
|
||||
while (cmpxchg64(pte, pteval, 0) != pteval) {
|
||||
while (!try_cmpxchg64(pte, &pteval, 0))
|
||||
pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
|
||||
pteval = *pte;
|
||||
}
|
||||
|
||||
if (!IOMMU_PTE_PRESENT(pteval))
|
||||
return;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -51,7 +51,7 @@ struct pasid_state {
|
||||
|
||||
struct device_state {
|
||||
struct list_head list;
|
||||
u16 devid;
|
||||
u32 sbdf;
|
||||
atomic_t count;
|
||||
struct pci_dev *pdev;
|
||||
struct pasid_state **states;
|
||||
@@ -83,35 +83,25 @@ static struct workqueue_struct *iommu_wq;
|
||||
|
||||
static void free_pasid_states(struct device_state *dev_state);
|
||||
|
||||
static u16 device_id(struct pci_dev *pdev)
|
||||
{
|
||||
u16 devid;
|
||||
|
||||
devid = pdev->bus->number;
|
||||
devid = (devid << 8) | pdev->devfn;
|
||||
|
||||
return devid;
|
||||
}
|
||||
|
||||
static struct device_state *__get_device_state(u16 devid)
|
||||
static struct device_state *__get_device_state(u32 sbdf)
|
||||
{
|
||||
struct device_state *dev_state;
|
||||
|
||||
list_for_each_entry(dev_state, &state_list, list) {
|
||||
if (dev_state->devid == devid)
|
||||
if (dev_state->sbdf == sbdf)
|
||||
return dev_state;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct device_state *get_device_state(u16 devid)
|
||||
static struct device_state *get_device_state(u32 sbdf)
|
||||
{
|
||||
struct device_state *dev_state;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&state_lock, flags);
|
||||
dev_state = __get_device_state(devid);
|
||||
dev_state = __get_device_state(sbdf);
|
||||
if (dev_state != NULL)
|
||||
atomic_inc(&dev_state->count);
|
||||
spin_unlock_irqrestore(&state_lock, flags);
|
||||
@@ -528,15 +518,16 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
|
||||
unsigned long flags;
|
||||
struct fault *fault;
|
||||
bool finish;
|
||||
u16 tag, devid;
|
||||
u16 tag, devid, seg_id;
|
||||
int ret;
|
||||
|
||||
iommu_fault = data;
|
||||
tag = iommu_fault->tag & 0x1ff;
|
||||
finish = (iommu_fault->tag >> 9) & 1;
|
||||
|
||||
devid = iommu_fault->device_id;
|
||||
pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
|
||||
seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
|
||||
devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
|
||||
pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
|
||||
devid & 0xff);
|
||||
if (!pdev)
|
||||
return -ENODEV;
|
||||
@@ -550,7 +541,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
|
||||
goto out;
|
||||
}
|
||||
|
||||
dev_state = get_device_state(iommu_fault->device_id);
|
||||
dev_state = get_device_state(iommu_fault->sbdf);
|
||||
if (dev_state == NULL)
|
||||
goto out;
|
||||
|
||||
@@ -609,7 +600,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
|
||||
struct pasid_state *pasid_state;
|
||||
struct device_state *dev_state;
|
||||
struct mm_struct *mm;
|
||||
u16 devid;
|
||||
u32 sbdf;
|
||||
int ret;
|
||||
|
||||
might_sleep();
|
||||
@@ -617,8 +608,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
|
||||
if (!amd_iommu_v2_supported())
|
||||
return -ENODEV;
|
||||
|
||||
devid = device_id(pdev);
|
||||
dev_state = get_device_state(devid);
|
||||
sbdf = get_pci_sbdf_id(pdev);
|
||||
dev_state = get_device_state(sbdf);
|
||||
|
||||
if (dev_state == NULL)
|
||||
return -EINVAL;
|
||||
@@ -692,15 +683,15 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
|
||||
{
|
||||
struct pasid_state *pasid_state;
|
||||
struct device_state *dev_state;
|
||||
u16 devid;
|
||||
u32 sbdf;
|
||||
|
||||
might_sleep();
|
||||
|
||||
if (!amd_iommu_v2_supported())
|
||||
return;
|
||||
|
||||
devid = device_id(pdev);
|
||||
dev_state = get_device_state(devid);
|
||||
sbdf = get_pci_sbdf_id(pdev);
|
||||
dev_state = get_device_state(sbdf);
|
||||
if (dev_state == NULL)
|
||||
return;
|
||||
|
||||
@@ -742,7 +733,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
|
||||
struct iommu_group *group;
|
||||
unsigned long flags;
|
||||
int ret, tmp;
|
||||
u16 devid;
|
||||
u32 sbdf;
|
||||
|
||||
might_sleep();
|
||||
|
||||
@@ -759,7 +750,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
|
||||
if (pasids <= 0 || pasids > (PASID_MASK + 1))
|
||||
return -EINVAL;
|
||||
|
||||
devid = device_id(pdev);
|
||||
sbdf = get_pci_sbdf_id(pdev);
|
||||
|
||||
dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
|
||||
if (dev_state == NULL)
|
||||
@@ -768,7 +759,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
|
||||
spin_lock_init(&dev_state->lock);
|
||||
init_waitqueue_head(&dev_state->wq);
|
||||
dev_state->pdev = pdev;
|
||||
dev_state->devid = devid;
|
||||
dev_state->sbdf = sbdf;
|
||||
|
||||
tmp = pasids;
|
||||
for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
|
||||
@@ -806,7 +797,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
|
||||
|
||||
spin_lock_irqsave(&state_lock, flags);
|
||||
|
||||
if (__get_device_state(devid) != NULL) {
|
||||
if (__get_device_state(sbdf) != NULL) {
|
||||
spin_unlock_irqrestore(&state_lock, flags);
|
||||
ret = -EBUSY;
|
||||
goto out_free_domain;
|
||||
@@ -838,16 +829,16 @@ void amd_iommu_free_device(struct pci_dev *pdev)
|
||||
{
|
||||
struct device_state *dev_state;
|
||||
unsigned long flags;
|
||||
u16 devid;
|
||||
u32 sbdf;
|
||||
|
||||
if (!amd_iommu_v2_supported())
|
||||
return;
|
||||
|
||||
devid = device_id(pdev);
|
||||
sbdf = get_pci_sbdf_id(pdev);
|
||||
|
||||
spin_lock_irqsave(&state_lock, flags);
|
||||
|
||||
dev_state = __get_device_state(devid);
|
||||
dev_state = __get_device_state(sbdf);
|
||||
if (dev_state == NULL) {
|
||||
spin_unlock_irqrestore(&state_lock, flags);
|
||||
return;
|
||||
@@ -867,18 +858,18 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
|
||||
{
|
||||
struct device_state *dev_state;
|
||||
unsigned long flags;
|
||||
u16 devid;
|
||||
u32 sbdf;
|
||||
int ret;
|
||||
|
||||
if (!amd_iommu_v2_supported())
|
||||
return -ENODEV;
|
||||
|
||||
devid = device_id(pdev);
|
||||
sbdf = get_pci_sbdf_id(pdev);
|
||||
|
||||
spin_lock_irqsave(&state_lock, flags);
|
||||
|
||||
ret = -EINVAL;
|
||||
dev_state = __get_device_state(devid);
|
||||
dev_state = __get_device_state(sbdf);
|
||||
if (dev_state == NULL)
|
||||
goto out_unlock;
|
||||
|
||||
@@ -898,18 +889,18 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
|
||||
{
|
||||
struct device_state *dev_state;
|
||||
unsigned long flags;
|
||||
u16 devid;
|
||||
u32 sbdf;
|
||||
int ret;
|
||||
|
||||
if (!amd_iommu_v2_supported())
|
||||
return -ENODEV;
|
||||
|
||||
devid = device_id(pdev);
|
||||
sbdf = get_pci_sbdf_id(pdev);
|
||||
|
||||
spin_lock_irqsave(&state_lock, flags);
|
||||
|
||||
ret = -EINVAL;
|
||||
dev_state = __get_device_state(devid);
|
||||
dev_state = __get_device_state(sbdf);
|
||||
if (dev_state == NULL)
|
||||
goto out_unlock;
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
struct ivrs_quirk_entry {
|
||||
u8 id;
|
||||
u16 devid;
|
||||
u32 devid;
|
||||
};
|
||||
|
||||
enum {
|
||||
@@ -49,7 +49,7 @@ static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
|
||||
const struct ivrs_quirk_entry *i;
|
||||
|
||||
for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
|
||||
add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
|
||||
add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u32 *)&i->devid, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -564,9 +564,6 @@ static void apple_dart_release_device(struct device *dev)
|
||||
{
|
||||
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
|
||||
|
||||
if (!cfg)
|
||||
return;
|
||||
|
||||
dev_iommu_priv_set(dev, NULL);
|
||||
kfree(cfg);
|
||||
}
|
||||
@@ -771,7 +768,6 @@ static const struct iommu_ops apple_dart_iommu_ops = {
|
||||
.of_xlate = apple_dart_of_xlate,
|
||||
.def_domain_type = apple_dart_def_domain_type,
|
||||
.get_resv_regions = apple_dart_get_resv_regions,
|
||||
.put_resv_regions = generic_iommu_put_resv_regions,
|
||||
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
|
||||
.owner = THIS_MODULE,
|
||||
.default_domain_ops = &(const struct iommu_domain_ops) {
|
||||
|
||||
@@ -1380,12 +1380,21 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
|
||||
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
|
||||
}
|
||||
|
||||
static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
|
||||
static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
|
||||
{
|
||||
unsigned int i;
|
||||
u64 val = STRTAB_STE_0_V;
|
||||
|
||||
if (disable_bypass && !force)
|
||||
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
|
||||
else
|
||||
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
|
||||
|
||||
for (i = 0; i < nent; ++i) {
|
||||
arm_smmu_write_strtab_ent(NULL, -1, strtab);
|
||||
strtab[0] = cpu_to_le64(val);
|
||||
strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
|
||||
STRTAB_STE_1_SHCFG_INCOMING));
|
||||
strtab[2] = 0;
|
||||
strtab += STRTAB_STE_DWORDS;
|
||||
}
|
||||
}
|
||||
@@ -1413,7 +1422,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
|
||||
arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
|
||||
arm_smmu_write_strtab_l1_desc(strtab, desc);
|
||||
return 0;
|
||||
}
|
||||
@@ -2537,6 +2546,19 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
|
||||
return sid < limit;
|
||||
}
|
||||
|
||||
static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
|
||||
{
|
||||
/* Check the SIDs are in range of the SMMU and our stream table */
|
||||
if (!arm_smmu_sid_in_range(smmu, sid))
|
||||
return -ERANGE;
|
||||
|
||||
/* Ensure l2 strtab is initialised */
|
||||
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
|
||||
return arm_smmu_init_l2_strtab(smmu, sid);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_master *master)
|
||||
{
|
||||
@@ -2560,20 +2582,9 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
|
||||
new_stream->id = sid;
|
||||
new_stream->master = master;
|
||||
|
||||
/*
|
||||
* Check the SIDs are in range of the SMMU and our stream table
|
||||
*/
|
||||
if (!arm_smmu_sid_in_range(smmu, sid)) {
|
||||
ret = -ERANGE;
|
||||
ret = arm_smmu_init_sid_strtab(smmu, sid);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
/* Ensure l2 strtab is initialised */
|
||||
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
|
||||
ret = arm_smmu_init_l2_strtab(smmu, sid);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
/* Insert into SID tree */
|
||||
new_node = &(smmu->streams.rb_node);
|
||||
@@ -2691,20 +2702,14 @@ err_free_master:
|
||||
|
||||
static void arm_smmu_release_device(struct device *dev)
|
||||
{
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
struct arm_smmu_master *master;
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
|
||||
if (!fwspec || fwspec->ops != &arm_smmu_ops)
|
||||
return;
|
||||
|
||||
master = dev_iommu_priv_get(dev);
|
||||
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
|
||||
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
|
||||
arm_smmu_detach_dev(master);
|
||||
arm_smmu_disable_pasid(master);
|
||||
arm_smmu_remove_master(master);
|
||||
kfree(master);
|
||||
iommu_fwspec_free(dev);
|
||||
}
|
||||
|
||||
static struct iommu_group *arm_smmu_device_group(struct device *dev)
|
||||
@@ -2760,58 +2765,27 @@ static void arm_smmu_get_resv_regions(struct device *dev,
|
||||
iommu_dma_get_resv_regions(dev, head);
|
||||
}
|
||||
|
||||
static bool arm_smmu_dev_has_feature(struct device *dev,
|
||||
enum iommu_dev_features feat)
|
||||
{
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
|
||||
if (!master)
|
||||
return false;
|
||||
|
||||
switch (feat) {
|
||||
case IOMMU_DEV_FEAT_IOPF:
|
||||
return arm_smmu_master_iopf_supported(master);
|
||||
case IOMMU_DEV_FEAT_SVA:
|
||||
return arm_smmu_master_sva_supported(master);
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static bool arm_smmu_dev_feature_enabled(struct device *dev,
|
||||
enum iommu_dev_features feat)
|
||||
{
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
|
||||
if (!master)
|
||||
return false;
|
||||
|
||||
switch (feat) {
|
||||
case IOMMU_DEV_FEAT_IOPF:
|
||||
return master->iopf_enabled;
|
||||
case IOMMU_DEV_FEAT_SVA:
|
||||
return arm_smmu_master_sva_enabled(master);
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static int arm_smmu_dev_enable_feature(struct device *dev,
|
||||
enum iommu_dev_features feat)
|
||||
{
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
|
||||
if (!arm_smmu_dev_has_feature(dev, feat))
|
||||
if (!master)
|
||||
return -ENODEV;
|
||||
|
||||
if (arm_smmu_dev_feature_enabled(dev, feat))
|
||||
return -EBUSY;
|
||||
|
||||
switch (feat) {
|
||||
case IOMMU_DEV_FEAT_IOPF:
|
||||
if (!arm_smmu_master_iopf_supported(master))
|
||||
return -EINVAL;
|
||||
if (master->iopf_enabled)
|
||||
return -EBUSY;
|
||||
master->iopf_enabled = true;
|
||||
return 0;
|
||||
case IOMMU_DEV_FEAT_SVA:
|
||||
if (!arm_smmu_master_sva_supported(master))
|
||||
return -EINVAL;
|
||||
if (arm_smmu_master_sva_enabled(master))
|
||||
return -EBUSY;
|
||||
return arm_smmu_master_enable_sva(master);
|
||||
default:
|
||||
return -EINVAL;
|
||||
@@ -2823,16 +2797,20 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
|
||||
{
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
|
||||
if (!arm_smmu_dev_feature_enabled(dev, feat))
|
||||
if (!master)
|
||||
return -EINVAL;
|
||||
|
||||
switch (feat) {
|
||||
case IOMMU_DEV_FEAT_IOPF:
|
||||
if (!master->iopf_enabled)
|
||||
return -EINVAL;
|
||||
if (master->sva_enabled)
|
||||
return -EBUSY;
|
||||
master->iopf_enabled = false;
|
||||
return 0;
|
||||
case IOMMU_DEV_FEAT_SVA:
|
||||
if (!arm_smmu_master_sva_enabled(master))
|
||||
return -EINVAL;
|
||||
return arm_smmu_master_disable_sva(master);
|
||||
default:
|
||||
return -EINVAL;
|
||||
@@ -2847,9 +2825,6 @@ static struct iommu_ops arm_smmu_ops = {
|
||||
.device_group = arm_smmu_device_group,
|
||||
.of_xlate = arm_smmu_of_xlate,
|
||||
.get_resv_regions = arm_smmu_get_resv_regions,
|
||||
.put_resv_regions = generic_iommu_put_resv_regions,
|
||||
.dev_has_feat = arm_smmu_dev_has_feature,
|
||||
.dev_feat_enabled = arm_smmu_dev_feature_enabled,
|
||||
.dev_enable_feat = arm_smmu_dev_enable_feature,
|
||||
.dev_disable_feat = arm_smmu_dev_disable_feature,
|
||||
.sva_bind = arm_smmu_sva_bind,
|
||||
@@ -3049,7 +3024,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
|
||||
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
|
||||
cfg->strtab_base_cfg = reg;
|
||||
|
||||
arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
|
||||
arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3743,6 +3718,36 @@ static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
|
||||
return devm_ioremap_resource(dev, &res);
|
||||
}
|
||||
|
||||
static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
|
||||
{
|
||||
struct list_head rmr_list;
|
||||
struct iommu_resv_region *e;
|
||||
|
||||
INIT_LIST_HEAD(&rmr_list);
|
||||
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
|
||||
|
||||
list_for_each_entry(e, &rmr_list, list) {
|
||||
__le64 *step;
|
||||
struct iommu_iort_rmr_data *rmr;
|
||||
int ret, i;
|
||||
|
||||
rmr = container_of(e, struct iommu_iort_rmr_data, rr);
|
||||
for (i = 0; i < rmr->num_sids; i++) {
|
||||
ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
|
||||
if (ret) {
|
||||
dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
|
||||
rmr->sids[i]);
|
||||
continue;
|
||||
}
|
||||
|
||||
step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
|
||||
arm_smmu_init_bypass_stes(step, 1, true);
|
||||
}
|
||||
}
|
||||
|
||||
iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
|
||||
}
|
||||
|
||||
static int arm_smmu_device_probe(struct platform_device *pdev)
|
||||
{
|
||||
int irq, ret;
|
||||
@@ -3826,6 +3831,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
|
||||
/* Record our private device structure */
|
||||
platform_set_drvdata(pdev, smmu);
|
||||
|
||||
/* Check for RMRs and install bypass STEs if any */
|
||||
arm_smmu_rmr_install_bypass_ste(smmu);
|
||||
|
||||
/* Reset the device */
|
||||
ret = arm_smmu_device_reset(smmu, bypass);
|
||||
if (ret)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user