Merge tag 'iommu-updates-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
"Core:
- IOMMU memory usage observability - This will make the memory used
for IO page tables explicitly visible.
- Simplify arch_setup_dma_ops()
Intel VT-d:
- Consolidate domain cache invalidation
- Remove private data from page fault message
- Allocate DMAR fault interrupts locally
- Cleanup and refactoring
ARM-SMMUv2:
- Support for fault debugging hardware on Qualcomm implementations
- Re-land support for the ->domain_alloc_paging() callback
ARM-SMMUv3:
- Improve handling of MSI allocation failure
- Drop support for the "disable_bypass" cmdline option
- Major rework of the CD creation code, following on directly from
the STE rework merged last time around.
- Add unit tests for the new STE/CD manipulation logic
AMD-Vi:
- Final part of SVA changes with generic IO page fault handling
Renesas IPMMU:
- Add support for R8A779H0 hardware
... and a couple smaller fixes and updates across the sub-tree"
* tag 'iommu-updates-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (80 commits)
iommu/arm-smmu-v3: Make the kunit into a module
arm64: Properly clean up iommu-dma remnants
iommu/amd: Enable Guest Translation after reading IOMMU feature register
iommu/vt-d: Decouple igfx_off from graphic identity mapping
iommu/amd: Fix compilation error
iommu/arm-smmu-v3: Add unit tests for arm_smmu_write_entry
iommu/arm-smmu-v3: Build the whole CD in arm_smmu_make_s1_cd()
iommu/arm-smmu-v3: Move the CD generation for SVA into a function
iommu/arm-smmu-v3: Allocate the CD table entry in advance
iommu/arm-smmu-v3: Make arm_smmu_alloc_cd_ptr()
iommu/arm-smmu-v3: Consolidate clearing a CD table entry
iommu/arm-smmu-v3: Move the CD generation for S1 domains into a function
iommu/arm-smmu-v3: Make CD programming use arm_smmu_write_entry()
iommu/arm-smmu-v3: Add an ops indirection to the STE code
iommu/arm-smmu-qcom: Don't build debug features as a kernel module
iommu/amd: Add SVA domain support
iommu: Add ops->domain_alloc_sva()
iommu/amd: Initial SVA support for AMD IOMMU
iommu/amd: Add support for enable/disable IOPF
iommu/amd: Add IO page fault notifier handler
...
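The new ops->domain_alloc_sva() callback ("iommu: Add ops->domain_alloc_sva()" above) pairs a device with a process address space (mm). A minimal sketch of how a driver might wire it up; the callback shape matches amd_iommu_domain_alloc_sva() in the header changes below, but the mydrv_* driver and struct names are invented for illustration, not upstream code:

    /* Hypothetical driver-private domain embedding the generic one. */
    struct mydrv_domain {
            struct iommu_domain domain;
            struct mm_struct *mm;   /* process page tables this domain shadows */
    };

    static struct iommu_domain *mydrv_domain_alloc_sva(struct device *dev,
                                                       struct mm_struct *mm)
    {
            struct mydrv_domain *dom;

            dom = kzalloc(sizeof(*dom), GFP_KERNEL);
            if (!dom)
                    return ERR_PTR(-ENOMEM);

            dom->domain.type = IOMMU_DOMAIN_SVA;
            dom->mm = mm;   /* IO page faults get resolved against this mm */
            return &dom->domain;
    }

    static const struct iommu_ops mydrv_ops = {
            .domain_alloc_sva = mydrv_domain_alloc_sva,
            /* other callbacks elided */
    };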
@@ -1435,7 +1435,7 @@ PAGE_SIZE multiple when read back.
 	  sec_pagetables
 		Amount of memory allocated for secondary page tables,
 		this currently includes KVM mmu allocations on x86
-		and arm64.
+		and arm64 and IOMMU page tables.
 
 	  percpu (npn)
 		Amount of memory used for storing per-cpu kernel
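With IOMMU page-table memory folded into sec_pagetables, the counter can be read straight out of a cgroup's memory.stat. A minimal user-space sketch; the cgroup v2 mount point and the "mygroup" name are assumptions for illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* Assumes cgroup v2 mounted at /sys/fs/cgroup. */
            FILE *f = fopen("/sys/fs/cgroup/mygroup/memory.stat", "r");
            char key[64];
            unsigned long long val;

            if (!f) {
                    perror("memory.stat");
                    return 1;
            }
            while (fscanf(f, "%63s %llu", key, &val) == 2)
                    if (!strcmp(key, "sec_pagetables"))
                            printf("sec_pagetables: %llu bytes\n", val);
            fclose(f);
            return 0;
    }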
Documentation/devicetree/bindings/iommu/qcom,tbu.yaml (new file, 69 lines)
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/qcom,tbu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm TBU (Translation Buffer Unit)
+
+maintainers:
+  - Georgi Djakov <quic_c_gdjako@quicinc.com>
+
+description:
+  The Qualcomm SMMU500 implementation consists of TCU and TBU. The TBU contains
+  a Translation Lookaside Buffer (TLB) that caches page tables. TBUs provide
+  debug features to trace and trigger debug transactions. There are multiple TBU
+  instances with each client core.
+
+properties:
+  compatible:
+    enum:
+      - qcom,sc7280-tbu
+      - qcom,sdm845-tbu
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  interconnects:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  qcom,stream-id-range:
+    description: |
+      Phandle of a SMMU device and Stream ID range (address and size) that
+      is assigned by the TBU
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    items:
+      - items:
+          - description: phandle of a smmu node
+          - description: stream id base address
+          - description: stream id size
+
+required:
+  - compatible
+  - reg
+  - qcom,stream-id-range
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+    #include <dt-bindings/interconnect/qcom,icc.h>
+    #include <dt-bindings/interconnect/qcom,sdm845.h>
+
+    tbu@150e1000 {
+        compatible = "qcom,sdm845-tbu";
+        reg = <0x150e1000 0x1000>;
+        clocks = <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>;
+        interconnects = <&system_noc MASTER_GNOC_SNOC QCOM_ICC_TAG_ACTIVE_ONLY
+                         &config_noc SLAVE_IMEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
+        power-domains = <&gcc HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC>;
+        qcom,stream-id-range = <&apps_smmu 0x1c00 0x400>;
+    };
+...
@@ -50,6 +50,7 @@ properties:
           - renesas,ipmmu-r8a779a0 # R-Car V3U
           - renesas,ipmmu-r8a779f0 # R-Car S4-8
           - renesas,ipmmu-r8a779g0 # R-Car V4H
+          - renesas,ipmmu-r8a779h0 # R-Car V4M
       - const: renesas,rcar-gen4-ipmmu-vmsa # R-Car Gen4
 
   reg:
@@ -1110,8 +1110,8 @@ KernelStack
 PageTables
               Memory consumed by userspace page tables
 SecPageTables
-              Memory consumed by secondary page tables, this currently
-              currently includes KVM mmu allocations on x86 and arm64.
+              Memory consumed by secondary page tables, this currently includes
+              KVM mmu and IOMMU allocations on x86 and arm64.
 NFS_Unstable
               Always zero. Previous counted pages which had been written to
               the server, but has not been committed to stable storage.
@@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 /*
  * Plug in direct dma map ops.
  */
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	/*
 	 * IOC hardware snoops all DMA traffic keeping the caches consistent
@@ -33,8 +33,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 	}
 }
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	if (IS_ENABLED(CONFIG_CPU_V7M)) {
 		/*
@@ -1709,11 +1709,15 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
+	u64 dma_base = 0, size = 1ULL << 32;
 
+	if (dev->dma_range_map) {
+		dma_base = dma_range_map_min(dev->dma_range_map);
+		size = dma_range_map_max(dev->dma_range_map) - dma_base;
+	}
 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
 	if (IS_ERR(mapping)) {
 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
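The base/size pair is now derived from dev->dma_range_map via the dma_range_map_min()/dma_range_map_max() helpers rather than passed in by the caller; the same helper also replaces the open-coded scans visible in the acpi_arch_dma_setup() hunks further down. A stand-alone sketch of the semantics assumed here — max is the highest covered address, inclusive, the same "dma_start + size - 1" scan the removed loops performed:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct bus_dma_region; a zero-sized entry
     * terminates the map, as in the kernel's dma_range_map. */
    struct region {
            uint64_t dma_start;
            uint64_t size;
    };

    /* Lowest DMA address covered by the map. */
    static uint64_t range_map_min(const struct region *r)
    {
            uint64_t ret = UINT64_MAX;

            for (; r->size; r++)
                    if (r->dma_start < ret)
                            ret = r->dma_start;
            return ret;
    }

    /* Highest DMA address covered, inclusive. */
    static uint64_t range_map_max(const struct region *r)
    {
            uint64_t ret = 0;

            for (; r->size; r++)
                    if (r->dma_start + r->size - 1 > ret)
                            ret = r->dma_start + r->size - 1;
            return ret;
    }

    int main(void)
    {
            const struct region map[] = {
                    { .dma_start = 0x80000000, .size = 0x40000000 },
                    { .dma_start = 0x00000000, .size = 0x20000000 },
                    { 0 }   /* terminator */
            };
            uint64_t base = range_map_min(map);

            printf("base=%#llx max=%#llx\n", (unsigned long long)base,
                   (unsigned long long)range_map_max(map));
            return 0;
    }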
@@ -1744,8 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 
 #else
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 }
 
@@ -1753,8 +1756,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	/*
 	 * Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1774,7 +1776,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return;
 
 	if (device_iommu_mapped(dev))
-		arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
+		arm_setup_iommu_dma_ops(dev);
 
 	xen_setup_dma_ops(dev);
 	dev->archdata.dma_ops_setup = true;
@@ -46,7 +46,6 @@ config ARM64
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYSCALL_WRAPPER
-	select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_ZONE_DMA_SET if EXPERT
 	select ARCH_HAVE_ELF_PROT
@@ -7,7 +7,6 @@
 #include <linux/gfp.h>
 #include <linux/cache.h>
 #include <linux/dma-map-ops.h>
-#include <linux/iommu.h>
 #include <xen/xen.h>
 
 #include <asm/cacheflush.h>
@@ -39,15 +38,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	dcache_clean_poc(start, start + size);
 }
 
-#ifdef CONFIG_IOMMU_DMA
-void arch_teardown_dma_ops(struct device *dev)
-{
-	dev->dma_ops = NULL;
-}
-#endif
-
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	int cls = cache_line_size_of_cpu();
 
@@ -58,8 +49,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			   ARCH_DMA_MINALIGN, cls);
 
 	dev->dma_coherent = coherent;
-	if (device_iommu_mapped(dev))
-		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
 	xen_setup_dma_ops(dev);
 }
@@ -8,17 +8,12 @@
 void acpi_arch_dma_setup(struct device *dev)
 {
 	int ret;
-	u64 mask, end = 0;
+	u64 mask, end;
 	const struct bus_dma_region *map = NULL;
 
 	ret = acpi_dma_get_range(dev, &map);
 	if (!ret && map) {
-		const struct bus_dma_region *r = map;
-
-		for (end = 0; r->size; r++) {
-			if (r->dma_start + r->size - 1 > end)
-				end = r->dma_start + r->size - 1;
-		}
+		end = dma_range_map_max(map);
 
 		mask = DMA_BIT_MASK(ilog2(end) + 1);
 		dev->bus_dma_limit = end;
@@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 #endif
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	dev->dma_coherent = coherent;
 }
@@ -128,8 +128,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
 }
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
 		   TAINT_CPU_OUT_OF_SPEC,
@@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev)
 {
 	int ret;
 	u64 end, mask;
-	u64 size = 0;
 	const struct bus_dma_region *map = NULL;
 
 	/*
@@ -23,31 +22,23 @@ void acpi_arch_dma_setup(struct device *dev)
 	}
 
 	if (dev->coherent_dma_mask)
-		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+		end = dev->coherent_dma_mask;
 	else
-		size = 1ULL << 32;
+		end = (1ULL << 32) - 1;
 
 	ret = acpi_dma_get_range(dev, &map);
 	if (!ret && map) {
-		const struct bus_dma_region *r = map;
-
-		for (end = 0; r->size; r++) {
-			if (r->dma_start + r->size - 1 > end)
-				end = r->dma_start + r->size - 1;
-		}
-
-		size = end + 1;
+		end = dma_range_map_max(map);
 		dev->dma_range_map = map;
 	}
 
 	if (ret == -ENODEV)
-		ret = iort_dma_get_ranges(dev, &size);
+		ret = iort_dma_get_ranges(dev, &end);
 	if (!ret) {
 		/*
 		 * Limit coherent and dma mask based on size retrieved from
 		 * firmware.
 		 */
-		end = size - 1;
 		mask = DMA_BIT_MASK(ilog2(end) + 1);
 		dev->bus_dma_limit = end;
 		dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
@@ -1367,7 +1367,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
 { return -ENODEV; }
 #endif
 
-static int nc_dma_get_range(struct device *dev, u64 *size)
+static int nc_dma_get_range(struct device *dev, u64 *limit)
 {
 	struct acpi_iort_node *node;
 	struct acpi_iort_named_component *ncomp;
@@ -1384,13 +1384,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
 		return -EINVAL;
 	}
 
-	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
-			1ULL<<ncomp->memory_address_limit;
+	*limit = ncomp->memory_address_limit >= 64 ? U64_MAX :
+		 (1ULL << ncomp->memory_address_limit) - 1;
 
 	return 0;
 }
 
-static int rc_dma_get_range(struct device *dev, u64 *size)
+static int rc_dma_get_range(struct device *dev, u64 *limit)
 {
 	struct acpi_iort_node *node;
 	struct acpi_iort_root_complex *rc;
@@ -1408,8 +1408,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
 		return -EINVAL;
 	}
 
-	*size = rc->memory_address_limit >= 64 ? U64_MAX :
-			1ULL<<rc->memory_address_limit;
+	*limit = rc->memory_address_limit >= 64 ? U64_MAX :
+		 (1ULL << rc->memory_address_limit) - 1;
 
 	return 0;
 }
@@ -1417,16 +1417,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
 /**
  * iort_dma_get_ranges() - Look up DMA addressing limit for the device
  * @dev: device to lookup
- * @size: DMA range size result pointer
+ * @limit: DMA limit result pointer
  *
 * Return: 0 on success, an error otherwise.
 */
-int iort_dma_get_ranges(struct device *dev, u64 *size)
+int iort_dma_get_ranges(struct device *dev, u64 *limit)
 {
 	if (dev_is_pci(dev))
-		return rc_dma_get_range(dev, size);
+		return rc_dma_get_range(dev, limit);
 	else
-		return nc_dma_get_range(dev, size);
+		return nc_dma_get_range(dev, limit);
 }
 
 static void __init acpi_iort_register_irq(int hwirq, const char *name,
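The size-to-limit switch above sidesteps an overflow corner: a full 64-bit address range expressed as a size would need 1ULL << 64, which is undefined in C — hence the U64_MAX special case, and on the limit side "(1ULL << bits) - 1" for anything narrower. A small stand-alone sketch of the same conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the nc/rc_dma_get_range() expressions above: convert an
     * address width in bits to an inclusive DMA limit. Shifting a 64-bit
     * value by 64 is undefined, so 64+ bits maps to UINT64_MAX directly. */
    static uint64_t addr_bits_to_limit(unsigned int bits)
    {
            return bits >= 64 ? UINT64_MAX : (1ULL << bits) - 1;
    }

    int main(void)
    {
            printf("32 -> %#llx\n", (unsigned long long)addr_bits_to_limit(32));
            printf("48 -> %#llx\n", (unsigned long long)addr_bits_to_limit(48));
            printf("64 -> %#llx\n", (unsigned long long)addr_bits_to_limit(64));
            return 0;
    }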
@@ -1675,12 +1675,7 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
 	if (ret == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
-	/*
-	 * Historically this routine doesn't fail driver probing due to errors
-	 * in acpi_iommu_configure_id().
-	 */
-
-	arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
+	arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT);
 
 	return 0;
 }
@@ -561,11 +561,7 @@ EXPORT_SYMBOL_GPL(hv_query_ext_cap);
 
 void hv_setup_dma_ops(struct device *dev, bool coherent)
 {
-	/*
-	 * Hyper-V does not offer a vIOMMU in the guest
-	 * VM, so pass 0/NULL for the IOMMU settings
-	 */
-	arch_setup_dma_ops(dev, 0, 0, coherent);
+	arch_setup_dma_ops(dev, coherent);
 }
 EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
@@ -376,13 +376,17 @@ config ARM_SMMU_QCOM
 
 config ARM_SMMU_QCOM_DEBUG
 	bool "ARM SMMU QCOM implementation defined debug support"
-	depends on ARM_SMMU_QCOM
+	depends on ARM_SMMU_QCOM=y
 	help
 	  Support for implementation specific debug features in ARM SMMU
-	  hardware found in QTI platforms.
+	  hardware found in QTI platforms. This includes support for
+	  the Translation Buffer Units (TBU) that can be used to obtain
+	  additional information when debugging memory management issues
+	  like context faults.
 
-	  Say Y here to enable debug for issues such as TLB sync timeouts
-	  which requires implementation defined register dumps.
+	  Say Y here to enable debug for issues such as context faults
+	  or TLB sync timeouts which require implementation defined
+	  register dumps.
 
 config ARM_SMMU_V3
 	tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
@@ -397,9 +401,9 @@ config ARM_SMMU_V3
 	  Say Y here if your system includes an IOMMU device implementing
 	  the ARM SMMUv3 architecture.
 
+if ARM_SMMU_V3
 config ARM_SMMU_V3_SVA
 	bool "Shared Virtual Addressing support for the ARM SMMUv3"
-	depends on ARM_SMMU_V3
 	select IOMMU_SVA
 	select IOMMU_IOPF
 	select MMU_NOTIFIER
@@ -410,6 +414,17 @@ config ARM_SMMU_V3_SVA
 	  Say Y here if your system supports SVA extensions such as PCIe PASID
 	  and PRI.
 
+config ARM_SMMU_V3_KUNIT_TEST
+	tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	depends on ARM_SMMU_V3_SVA
+	default KUNIT_ALL_TESTS
+	help
+	  Enable this option to unit-test arm-smmu-v3 driver functions.
+
+	  If unsure, say N.
+endif
+
 config S390_IOMMU
 	def_bool y if S390 && PCI
 	depends on S390 && PCI
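ARM_SMMU_V3_KUNIT_TEST builds the arm_smmu_write_entry tests added later in this series. For readers new to KUnit, a suite has roughly this shape — a generic skeleton for illustration, not the actual arm-smmu-v3 tests:

    #include <kunit/test.h>
    #include <linux/module.h>

    /* Placeholder check; the real suite exercises STE/CD writer logic. */
    static void example_test(struct kunit *test)
    {
            KUNIT_EXPECT_EQ(test, 1 + 1, 2);
    }

    static struct kunit_case example_cases[] = {
            KUNIT_CASE(example_test),
            {}
    };

    static struct kunit_suite example_suite = {
            .name = "example",
            .test_cases = example_cases,
    };
    kunit_test_suite(example_suite);

    MODULE_LICENSE("GPL");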
@@ -7,9 +7,12 @@ config AMD_IOMMU
 	select PCI_ATS
 	select PCI_PRI
 	select PCI_PASID
+	select MMU_NOTIFIER
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IOMMU_IO_PGTABLE
+	select IOMMU_SVA
+	select IOMMU_IOPF
 	select IOMMUFD_DRIVER if IOMMUFD
 	depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
 	help
@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
@@ -17,10 +17,16 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
 irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
 irqreturn_t amd_iommu_int_handler(int irq, void *data);
 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
+void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+			   u8 cntrl_intr, u8 cntrl_log,
+			   u32 status_run_mask, u32 status_overflow_mask);
 void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
 void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
 void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
+void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+				  gfp_t gfp, size_t size);
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
 void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
@@ -33,22 +39,47 @@ int amd_iommu_prepare(void);
 int amd_iommu_enable(void);
 void amd_iommu_disable(void);
 int amd_iommu_reenable(int mode);
-int amd_iommu_enable_faulting(void);
+int amd_iommu_enable_faulting(unsigned int cpu);
 extern int amd_iommu_guest_ir;
 extern enum io_pgtable_fmt amd_iommu_pgtable;
 extern int amd_iommu_gpt_level;
 
-bool amd_iommu_v2_supported(void);
+/* Protection domain ops */
+struct protection_domain *protection_domain_alloc(unsigned int type);
+void protection_domain_free(struct protection_domain *domain);
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm);
+void amd_iommu_domain_free(struct iommu_domain *dom);
+int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
+			    struct device *dev, ioasid_t pasid);
+void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+				struct iommu_domain *domain);
+
+/* Device capabilities */
+int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
+void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
+
+/* SVA/PASID */
+bool amd_iommu_pasid_supported(void);
+
+/* IOPF */
+int amd_iommu_iopf_init(struct amd_iommu *iommu);
+void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
+void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
+			     struct iommu_page_response *resp);
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+			      struct iommu_dev_data *dev_data);
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+				  struct iommu_dev_data *dev_data);
 
 /* GCR3 setup */
 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
 		       ioasid_t pasid, unsigned long gcr3);
 int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
 
+/* PPR */
+int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu);
+void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_enable_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_poll_ppr_log(struct amd_iommu *iommu);
+int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
+
 /*
  * This function flushes all internal caches of
  * the IOMMU used by this driver.
@@ -56,6 +87,7 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
 void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
 void amd_iommu_domain_update(struct protection_domain *domain);
+void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set);
 void amd_iommu_domain_flush_complete(struct protection_domain *domain);
 void amd_iommu_domain_flush_pages(struct protection_domain *domain,
 				  u64 address, size_t size);
@@ -73,9 +105,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 }
 #endif
 
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
-			   int status, int tag);
-
 static inline bool is_rd890_iommu(struct pci_dev *pdev)
 {
 	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
@@ -134,14 +163,6 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
 	return PCI_SEG_DEVID_TO_SBDF(seg, devid);
 }
 
-static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
-{
-	struct page *page;
-
-	page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
-	return page ? page_address(page) : NULL;
-}
-
 /*
  * This must be called after device probe completes. During probe
  * use rlookup_amd_iommu() get the iommu.
@@ -157,6 +178,11 @@ static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_data
 	return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
 }
 
+static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct protection_domain, domain);
+}
+
 bool translation_pre_enabled(struct amd_iommu *iommu);
 bool amd_iommu_is_attach_deferred(struct device *dev);
 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
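The new to_pdomain() helper above is the standard container_of() downcast: given a pointer to the embedded iommu_domain, recover the enclosing protection_domain. The pattern in isolation, as a user-space sketch with container_of() spelled out via offsetof() and stand-in struct names:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct domain { int type; };    /* stands in for struct iommu_domain */

    struct pdomain {                /* stands in for struct protection_domain */
            unsigned long flags;
            struct domain domain;   /* embedded generic domain */
    };

    static struct pdomain *to_pdomain(struct domain *dom)
    {
            return container_of(dom, struct pdomain, domain);
    }

    int main(void)
    {
            struct pdomain pd = { .flags = 42 };
            struct domain *dom = &pd.domain;    /* what core code hands around */

            printf("flags=%lu\n", to_pdomain(dom)->flags);
            return 0;
    }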
Some files were not shown because too many files have changed in this diff.