Merge tag 'iommu-updates-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - Consolidate iommu_map/unmap functions.

   There had been separate blocking and atomic variants, but that
   approach does not scale when new variants are needed that differ
   only in the GFP flags used. So Jason consolidated them back into
   single functions that take a GFP parameter.
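
   As a rough sketch of the change (the variable names here are
   illustrative, not taken from a particular caller):

     /* before: two variants, the GFP flags implied by the name */
     ret = iommu_map(domain, iova, paddr, size, prot);
     ret = iommu_map_atomic(domain, iova, paddr, size, prot);

     /* after: one function, the caller passes the GFP flags */
     ret = iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
     ret = iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);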

 - Retire the detach_dev() call-back in iommu_ops
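
   For drivers this mostly means dropping the callback from their
   iommu_domain_ops; a minimal sketch, using a hypothetical "foo"
   driver, looks like:

     static const struct iommu_domain_ops foo_domain_ops = {
             .attach_dev  = foo_attach_device,
             /* no .detach_dev: detaching now happens by attaching the
              * device to another (e.g. blocking or default) domain */
             .map_pages   = foo_map_pages,
             .unmap_pages = foo_unmap_pages,
     };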

 - Arm SMMU updates from Will:
     - Device-tree binding updates:
         - Cater for three power domains on SM6375
         - Document existing compatible strings for Qualcomm SoCs
         - Tighten up clocks description for platform-specific
           compatible strings
     - Enable Qualcomm workarounds for some additional platforms that
       need them

 - Intel VT-d updates from Lu Baolu:
     - Add Intel IOMMU performance monitoring support
     - Set No Execute Enable bit in PASID table entry
     - Two performance optimizations
     - Fix PASID directory pointer coherency
     - Fix missed rollbacks in error path
     - Cleanups

 - Apple t8110 DART support

 - Exynos IOMMU:
     - Implement better fault handling
     - Error handling fixes

 - Renesas IPMMU:
     - Add device tree bindings for r8a779g0

 - AMD IOMMU:
      - Various fixes for SNP-enabled systems and for the handling of
        faults with unknown request-ids
     - Cleanups and other small fixes

 - Various other smaller fixes and cleanups

* tag 'iommu-updates-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (71 commits)
  iommu/amd: Skip attach device domain is same as new domain
  iommu: Attach device group to old domain in error path
  iommu/vt-d: Allow to use flush-queue when first level is default
  iommu/vt-d: Fix PASID directory pointer coherency
  iommu/vt-d: Avoid superfluous IOTLB tracking in lazy mode
  iommu/vt-d: Fix error handling in sva enable/disable paths
  iommu/amd: Improve page fault error reporting
  iommu/amd: Do not identity map v2 capable device when snp is enabled
  iommu: Fix error unwind in iommu_group_alloc()
  iommu/of: mark an unused function as __maybe_unused
  iommu: dart: DART_T8110_ERROR range should be 0 to 5
  iommu/vt-d: Enable IOMMU perfmon support
  iommu/vt-d: Add IOMMU perfmon overflow handler support
  iommu/vt-d: Support cpumask for IOMMU perfmon
  iommu/vt-d: Add IOMMU perfmon support
  iommu/vt-d: Support Enhanced Command Interface
  iommu/vt-d: Retrieve IOMMU perfmon capability information
  iommu/vt-d: Support size of the register set in DRHD
  iommu/vt-d: Set No Execute Enable bit in PASID table entry
  iommu/vt-d: Remove sva from intel_svm_dev
  ...
Author: Linus Torvalds
Date:   2023-02-24 13:40:13 -08:00

63 changed files with 2488 additions and 641 deletions


@@ -0,0 +1,37 @@
What: /sys/bus/event_source/devices/dmar*/format
Date: Jan 2023
KernelVersion: 6.3
Contact: Kan Liang <kan.liang@linux.intel.com>
Description: Read-only. Attribute group to describe the magic bits
that go into perf_event_attr.config,
perf_event_attr.config1 or perf_event_attr.config2 for
the IOMMU pmu. (See also
ABI/testing/sysfs-bus-event_source-devices-format).
Each attribute in this group defines a bit range in
perf_event_attr.config, perf_event_attr.config1,
or perf_event_attr.config2. All supported attributes
are listed below (See the VT-d Spec 4.0 for possible
attribute values)::
event = "config:0-27" - event ID
event_group = "config:28-31" - event group ID
filter_requester_en = "config1:0" - Enable Requester ID filter
filter_domain_en = "config1:1" - Enable Domain ID filter
filter_pasid_en = "config1:2" - Enable PASID filter
filter_ats_en = "config1:3" - Enable Address Type filter
filter_page_table_en = "config1:4" - Enable Page Table Level filter
filter_requester_id = "config1:16-31" - Requester ID filter
filter_domain = "config1:32-47" - Domain ID filter
filter_pasid = "config2:0-21" - PASID filter
filter_ats = "config2:24-28" - Address Type filter
filter_page_table = "config2:32-36" - Page Table Level filter

What: /sys/bus/event_source/devices/dmar*/cpumask
Date: Jan 2023
KernelVersion: 6.3
Contact: Kan Liang <kan.liang@linux.intel.com>
Description: Read-only. This file always returns the CPU to which the
IOMMU pmu is bound for access to all IOMMU pmu performance
monitoring events.
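
A rough user-space sketch of how the format fields above could be packed into
a perf_event_attr (dmar_pmu_type, event_id, event_group and domain_id are
placeholders supplied by the caller, not values from this document):

	#include <string.h>
	#include <linux/perf_event.h>

	/* dmar_pmu_type must be read from
	 * /sys/bus/event_source/devices/dmar0/type at run time. */
	static void fill_dmar_attr(struct perf_event_attr *attr, __u32 dmar_pmu_type,
				   __u64 event_id, __u64 event_group, __u64 domain_id)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size    = sizeof(*attr);
		attr->type    = dmar_pmu_type;
		/* event ID in config:0-27, event group ID in config:28-31 */
		attr->config  = (event_id & 0x0fffffff) | (event_group << 28);
		/* config1:1 enables the Domain ID filter, config1:32-47 holds the ID */
		attr->config1 = (1ULL << 1) | (domain_id << 32);
	}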


@@ -24,6 +24,7 @@ properties:
compatible:
enum:
- apple,t8103-dart
- apple,t8110-dart
- apple,t6000-dart
reg:


@@ -36,13 +36,17 @@ properties:
- enum:
- qcom,qcm2290-smmu-500
- qcom,qdu1000-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sc7180-smmu-500
- qcom,sc7280-smmu-500
- qcom,sc8180x-smmu-500
- qcom,sc8280xp-smmu-500
- qcom,sdm670-smmu-500
- qcom,sdm845-smmu-500
- qcom,sdx55-smmu-500
- qcom,sdx65-smmu-500
- qcom,sm6115-smmu-500
- qcom,sm6125-smmu-500
- qcom,sm6350-smmu-500
- qcom,sm6375-smmu-500
- qcom,sm8150-smmu-500
@@ -52,14 +56,6 @@ properties:
- const: qcom,smmu-500
- const: arm,mmu-500
- description: Qcom SoCs implementing "arm,mmu-500" (non-qcom implementation)
deprecated: true
items:
- enum:
- qcom,sdx55-smmu-500
- qcom,sdx65-smmu-500
- const: arm,mmu-500
- description: Qcom SoCs implementing "arm,mmu-500" (legacy binding)
deprecated: true
items:
@@ -84,6 +80,7 @@ properties:
items:
- enum:
- qcom,sc7280-smmu-500
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
- const: qcom,adreno-smmu
- const: arm,mmu-500
@@ -201,7 +198,8 @@ properties:
maxItems: 7
power-domains:
maxItems: 1
minItems: 1
maxItems: 3
nvidia,memory-controller:
description: |
@@ -366,6 +364,56 @@ allOf:
- description: interface clock required to access smmu's registers
through the TCU's programming interface.
# Disallow clocks for all other platforms with specific compatibles
- if:
properties:
compatible:
contains:
enum:
- cavium,smmu-v2
- marvell,ap806-smmu-500
- nvidia,smmu-500
- qcom,qcm2290-smmu-500
- qcom,qdu1000-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sc7180-smmu-500
- qcom,sc8180x-smmu-500
- qcom,sc8280xp-smmu-500
- qcom,sdm670-smmu-500
- qcom,sdm845-smmu-500
- qcom,sdx55-smmu-500
- qcom,sdx65-smmu-500
- qcom,sm6115-smmu-500
- qcom,sm6125-smmu-500
- qcom,sm6350-smmu-500
- qcom,sm6375-smmu-500
- qcom,sm8350-smmu-500
- qcom,sm8450-smmu-500
then:
properties:
clock-names: false
clocks: false
- if:
properties:
compatible:
contains:
const: qcom,sm6375-smmu-500
then:
properties:
power-domains:
items:
- description: SNoC MMU TBU RT GDSC
- description: SNoC MMU TBU NRT GDSC
- description: SNoC TURING MMU TBU0 GDSC
required:
- power-domains
else:
properties:
power-domains:
maxItems: 1
examples:
- |+
/* SMMU with stream matching or stream indexing */


@@ -10,6 +10,7 @@ to non-secure vs secure interrupt line.
- compatible : Should be one of:
"qcom,msm8916-iommu"
"qcom,msm8953-iommu"
Followed by "qcom,msm-iommu-v1".


@@ -49,6 +49,7 @@ properties:
- enum:
- renesas,ipmmu-r8a779a0 # R-Car V3U
- renesas,ipmmu-r8a779f0 # R-Car S4-8
- renesas,ipmmu-r8a779g0 # R-Car V4H
- const: renesas,rcar-gen4-ipmmu-vmsa # R-Car Gen4
reg:


@@ -52,6 +52,30 @@ properties:
Address and Length pairs. Specifies regions of memory that are
acceptable to allocate from.
iommu-addresses:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: >
A list of phandle and specifier pairs that describe static IO virtual
address space mappings and carveouts associated with a given reserved
memory region. The phandle in the first cell refers to the device for
which the mapping or carveout is to be created.

The specifier consists of an address/size pair and denotes the IO
virtual address range of the region for the given device. The exact
format depends on the values of the "#address-cells" and "#size-cells"
properties of the device referenced via the phandle.

When used in combination with a "reg" property, an IOVA mapping is to
be established for this memory region. One example where this can be
useful is to create an identity mapping for physical memory that the
firmware has configured some hardware to access (such as a bootsplash
framebuffer).

If no "reg" property is specified, the "iommu-addresses" property
defines carveout regions in the IOVA space for the given device. This
can be useful if a certain memory region should not be mapped through
the IOMMU.
no-map:
type: boolean
description: >
@@ -89,12 +113,69 @@ allOf:
- no-map
oneOf:
- required:
- reg
- oneOf:
- required:
- reg
- required:
- size
- required:
- size
- oneOf:
# IOMMU reservations
- required:
- iommu-addresses
# IOMMU mappings
- required:
- reg
- iommu-addresses
additionalProperties: true
examples:
- |
/ {
compatible = "foo";
model = "foo";
#address-cells = <2>;
#size-cells = <2>;
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
adsp_resv: reservation-adsp {
/*
* Restrict IOVA mappings for ADSP buffers to the 512 MiB region
* from 0x40000000 - 0x5fffffff. Anything outside is reserved by
* the ADSP for I/O memory and private memory allocations.
*/
iommu-addresses = <&adsp 0x0 0x00000000 0x00 0x40000000>,
<&adsp 0x0 0x60000000 0xff 0xa0000000>;
};
fb: framebuffer@90000000 {
reg = <0x0 0x90000000 0x0 0x00800000>;
iommu-addresses = <&dc0 0x0 0x90000000 0x0 0x00800000>;
};
};
bus@0 {
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0x40000000>;
adsp: adsp@2990000 {
reg = <0x2990000 0x2000>;
memory-region = <&adsp_resv>;
};
dc0: display@15200000 {
reg = <0x15200000 0x10000>;
memory-region = <&fb>;
};
};
};
...


@@ -10354,7 +10354,6 @@ L: iommu@lists.linux.dev
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
F: drivers/iommu/intel/
F: include/linux/intel-svm.h
INTEL IPU3 CSI-2 CIO2 DRIVER
M: Yong Zhi <yong.zhi@intel.com>


@@ -984,7 +984,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
len = (j - i) << PAGE_SHIFT;
ret = iommu_map(mapping->domain, iova, phys, len,
__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
GFP_KERNEL);
if (ret < 0)
goto fail;
iova += len;
@@ -1207,7 +1208,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, iova, phys, len, prot);
ret = iommu_map(mapping->domain, iova, phys, len, prot,
GFP_KERNEL);
if (ret < 0)
goto fail;
count += len >> PAGE_SHIFT;
@@ -1379,7 +1381,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
prot, GFP_KERNEL);
if (ret < 0)
goto fail;
@@ -1443,7 +1446,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
if (ret < 0)
goto fail;


@@ -186,9 +186,10 @@ static inline unsigned long *get_st_pto(unsigned long entry)
/* Prototypes */
void dma_free_seg_table(unsigned long);
unsigned long *dma_alloc_cpu_table(void);
unsigned long *dma_alloc_cpu_table(gfp_t gfp);
void dma_cleanup_tables(unsigned long *);
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
gfp_t gfp);
void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags);
extern const struct dma_map_ops s390_pci_dma_ops;


@@ -27,11 +27,11 @@ static int zpci_refresh_global(struct zpci_dev *zdev)
zdev->iommu_pages * PAGE_SIZE);
}
unsigned long *dma_alloc_cpu_table(void)
unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
unsigned long *table, *entry;
table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
table = kmem_cache_alloc(dma_region_table_cache, gfp);
if (!table)
return NULL;
@@ -45,11 +45,11 @@ static void dma_free_cpu_table(void *table)
kmem_cache_free(dma_region_table_cache, table);
}
static unsigned long *dma_alloc_page_table(void)
static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
unsigned long *table, *entry;
table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
table = kmem_cache_alloc(dma_page_table_cache, gfp);
if (!table)
return NULL;
@@ -63,7 +63,7 @@ static void dma_free_page_table(void *table)
kmem_cache_free(dma_page_table_cache, table);
}
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep)
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
unsigned long old_rte, rte;
unsigned long *sto;
@@ -72,7 +72,7 @@ static unsigned long *dma_get_seg_table_origin(unsigned long *rtep)
if (reg_entry_isvalid(rte)) {
sto = get_rt_sto(rte);
} else {
sto = dma_alloc_cpu_table();
sto = dma_alloc_cpu_table(gfp);
if (!sto)
return NULL;
@@ -90,7 +90,7 @@ static unsigned long *dma_get_seg_table_origin(unsigned long *rtep)
return sto;
}
static unsigned long *dma_get_page_table_origin(unsigned long *step)
static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
unsigned long old_ste, ste;
unsigned long *pto;
@@ -99,7 +99,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step)
if (reg_entry_isvalid(ste)) {
pto = get_st_pto(ste);
} else {
pto = dma_alloc_page_table();
pto = dma_alloc_page_table(gfp);
if (!pto)
return NULL;
set_st_pto(&ste, virt_to_phys(pto));
@@ -116,18 +116,19 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step)
return pto;
}
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
gfp_t gfp)
{
unsigned long *sto, *pto;
unsigned int rtx, sx, px;
rtx = calc_rtx(dma_addr);
sto = dma_get_seg_table_origin(&rto[rtx]);
sto = dma_get_seg_table_origin(&rto[rtx], gfp);
if (!sto)
return NULL;
sx = calc_sx(dma_addr);
pto = dma_get_page_table_origin(&sto[sx]);
pto = dma_get_page_table_origin(&sto[sx], gfp);
if (!pto)
return NULL;
@@ -170,7 +171,8 @@ static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
return -EINVAL;
for (i = 0; i < nr_pages; i++) {
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
GFP_ATOMIC);
if (!entry) {
rc = -ENOMEM;
goto undo_cpu_trans;
@@ -186,7 +188,8 @@ undo_cpu_trans:
while (i-- > 0) {
page_addr -= PAGE_SIZE;
dma_addr -= PAGE_SIZE;
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
GFP_ATOMIC);
if (!entry)
break;
dma_update_cpu_trans(entry, page_addr, flags);
@@ -576,7 +579,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
spin_lock_init(&zdev->iommu_bitmap_lock);
zdev->dma_table = dma_alloc_cpu_table();
zdev->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
if (!zdev->dma_table) {
rc = -ENOMEM;
goto out;


@@ -475,7 +475,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
u32 offset = (r->offset + i) << imem->iommu_pgshift;
ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
GFP_KERNEL);
if (ret < 0) {
nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);


@@ -1057,7 +1057,7 @@ void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
size, IOMMU_READ | IOMMU_WRITE);
size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (err < 0)
goto free_iova;


@@ -105,7 +105,7 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
pb->dma = iova_dma_addr(&host1x->iova, alloc);
err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
IOMMU_READ);
IOMMU_READ, GFP_KERNEL);
if (err)
goto iommu_free_iova;
} else {


@@ -276,8 +276,8 @@ iter_chunk:
size = pa_end - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
err = iommu_map_atomic(pd->domain, va_start,
pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
size, flags, GFP_KERNEL);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@@ -293,8 +293,8 @@ iter_chunk:
size = pa - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
err = iommu_map_atomic(pd->domain, va_start,
pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
size, flags, GFP_KERNEL);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);


@@ -32,7 +32,8 @@ config IOMMU_IO_PGTABLE
config IOMMU_IO_PGTABLE_LPAE
bool "ARMv7/v8 Long Descriptor Format"
select IOMMU_IO_PGTABLE
depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
depends on ARM || ARM64 || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for cmpxchg64()
help
Enable support for the ARM long descriptor pagetable format.
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -70,7 +71,8 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
config IOMMU_IO_PGTABLE_DART
bool "Apple DART Formats"
select IOMMU_IO_PGTABLE
depends on ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
depends on ARM64 || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for cmpxchg64()
help
Enable support for the Apple DART pagetable formats. These include
the t8020 and t6000/t8110 DART formats used in Apple M1/M2 family
@@ -284,7 +286,8 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
depends on ARCH_RENESAS || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
@@ -304,7 +307,8 @@ config SPAPR_TCE_IOMMU
config APPLE_DART
tristate "Apple DART IOMMU Support"
depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64)
depends on ARCH_APPLE || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_DART
select IOMMU_API
select IOMMU_IO_PGTABLE_DART
default ARCH_APPLE
@@ -319,7 +323,8 @@ config APPLE_DART
# ARM IOMMU support
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
depends on ARM64 || ARM || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -466,7 +471,8 @@ config MTK_IOMMU_V1
config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
depends on ARCH_QCOM || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
select QCOM_SCM
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE


@@ -3475,15 +3475,26 @@ found:
return 1;
}
#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
static int __init parse_ivrs_acpihid(char *str)
{
u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p, *addr;
char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
char acpiid[ACPIID_LEN] = {0};
int i;
addr = strchr(str, '@');
if (!addr) {
addr = strchr(str, '=');
if (!addr)
goto not_found;
++addr;
if (strlen(addr) > ACPIID_LEN)
goto not_found;
if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
@@ -3496,6 +3507,9 @@ static int __init parse_ivrs_acpihid(char *str)
/* We have the '@', make it the terminator to get just the acpiid */
*addr++ = 0;
if (strlen(str) > ACPIID_LEN + 1)
goto not_found;
if (sscanf(str, "=%s", acpiid) != 1)
goto not_found;


@@ -558,6 +558,15 @@ static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
* prevent logging it.
*/
if (IS_IOMMU_MEM_TRANSACTION(flags)) {
/* Device not attached to domain properly */
if (dev_data->domain == NULL) {
pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
PCI_FUNC(devid), domain_id);
goto out;
}
if (!report_iommu_fault(&dev_data->domain->domain,
&pdev->dev, address,
IS_WRITE_REQUEST(flags) ?
@@ -667,7 +676,14 @@ retry:
event[0], event[1], event[2], event[3]);
}
memset(__evt, 0, 4 * sizeof(u32));
/*
* To detect the hardware errata 732 we need to clear the
* entry back to zero. This issue does not exist on SNP
* enabled system. Also this buffer is not writeable on
* SNP enabled system.
*/
if (!amd_iommu_snp_en)
memset(__evt, 0, 4 * sizeof(u32));
}
static void iommu_poll_events(struct amd_iommu *iommu)
@@ -736,10 +752,13 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
entry[1] = raw[1];
/*
* To detect the hardware bug we need to clear the entry
* back to zero.
* To detect the hardware errata 733 we need to clear the
* entry back to zero. This issue does not exist on SNP
* enabled system. Also this buffer is not writeable on
* SNP enabled system.
*/
raw[0] = raw[1] = 0UL;
if (!amd_iommu_snp_en)
raw[0] = raw[1] = 0UL;
/* Update head pointer of hardware ring-buffer */
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
@@ -1702,27 +1721,29 @@ static int pdev_pri_ats_enable(struct pci_dev *pdev)
/* Only allow access to user-accessible pages */
ret = pci_enable_pasid(pdev, 0);
if (ret)
goto out_err;
return ret;
/* First reset the PRI state of the device */
ret = pci_reset_pri(pdev);
if (ret)
goto out_err;
goto out_err_pasid;
/* Enable PRI */
/* FIXME: Hardcode number of outstanding requests for now */
ret = pci_enable_pri(pdev, 32);
if (ret)
goto out_err;
goto out_err_pasid;
ret = pci_enable_ats(pdev, PAGE_SHIFT);
if (ret)
goto out_err;
goto out_err_pri;
return 0;
out_err:
out_err_pri:
pci_disable_pri(pdev);
out_err_pasid:
pci_disable_pasid(pdev);
return ret;
@@ -2072,6 +2093,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
if (ret)
goto out_err;
/* No need to allocate io pgtable ops in passthrough mode */
if (type == IOMMU_DOMAIN_IDENTITY)
return domain;
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
if (!pgtbl_ops) {
domain_id_free(domain->id);
@@ -2126,31 +2151,6 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
protection_domain_free(domain);
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct amd_iommu *iommu;
if (!check_device(dev))
return;
if (dev_data->domain != NULL)
detach_device(dev);
iommu = rlookup_amd_iommu(dev);
if (!iommu)
return;
#ifdef CONFIG_IRQ_REMAP
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
(dom->type == IOMMU_DOMAIN_UNMANAGED))
dev_data->use_vapic = 0;
#endif
iommu_completion_wait(iommu);
}
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
@@ -2159,6 +2159,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
struct amd_iommu *iommu = rlookup_amd_iommu(dev);
int ret;
/*
* Skip attach device to domain if new domain is same as
* devices current domain
*/
if (dev_data->domain == domain)
return 0;
dev_data->defer_attach = false;
if (dev_data->domain)
@@ -2387,12 +2394,17 @@ static int amd_iommu_def_domain_type(struct device *dev)
return 0;
/*
* Do not identity map IOMMUv2 capable devices when memory encryption is
* active, because some of those devices (AMD GPUs) don't have the
* encryption bit in their DMA-mask and require remapping.
* Do not identity map IOMMUv2 capable devices when:
* - memory encryption is active, because some of those devices
* (AMD GPUs) don't have the encryption bit in their DMA-mask
* and require remapping.
* - SNP is enabled, because it prohibits DTE[Mode]=0.
*/
if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
if (dev_data->iommu_v2 &&
!cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
!amd_iommu_snp_en) {
return IOMMU_DOMAIN_IDENTITY;
}
return 0;
}
@@ -2416,7 +2428,6 @@ const struct iommu_ops amd_iommu_ops = {
.def_domain_type = amd_iommu_def_domain_type,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map_pages = amd_iommu_map_pages,
.unmap_pages = amd_iommu_unmap_pages,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,

File diff suppressed because it is too large.


@@ -250,6 +250,8 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sc8280xp-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },


@@ -387,28 +387,6 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
return 0;
}
static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
unsigned i;
if (WARN_ON(!qcom_domain->iommu))
return;
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
ctx->domain = NULL;
}
pm_runtime_put_sync(qcom_iommu->dev);
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -583,7 +561,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = qcom_iommu_attach_dev,
.detach_dev = qcom_iommu_detach_dev,
.map_pages = qcom_iommu_map,
.unmap_pages = qcom_iommu_unmap,
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,

Some files were not shown because too many files have changed in this diff.