Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (53 commits)
  iommu/amd: Set IOTLB invalidation timeout
  iommu/amd: Init stats for iommu=pt
  iommu/amd: Remove unnecessary cache flushes in amd_iommu_resume
  iommu/amd: Add invalidate-context call-back
  iommu/amd: Add amd_iommu_device_info() function
  iommu/amd: Adapt IOMMU driver to PCI register name changes
  iommu/amd: Add invalid_ppr callback
  iommu/amd: Implement notifiers for IOMMUv2
  iommu/amd: Implement IO page-fault handler
  iommu/amd: Add routines to bind/unbind a pasid
  iommu/amd: Implement device aquisition code for IOMMUv2
  iommu/amd: Add driver stub for AMD IOMMUv2 support
  iommu/amd: Add stat counter for IOMMUv2 events
  iommu/amd: Add device errata handling
  iommu/amd: Add function to get IOMMUv2 domain for pdev
  iommu/amd: Implement function to send PPR completions
  iommu/amd: Implement functions to manage GCR3 table
  iommu/amd: Implement IOMMUv2 TLB flushing routines
  iommu/amd: Add support for IOMMUv2 domain mode
  iommu/amd: Add amd_iommu_domain_direct_map function
  ...
drivers/iommu/Kconfig: +12 -1

@@ -34,7 +34,9 @@ config AMD_IOMMU
 	bool "AMD IOMMU support"
 	select SWIOTLB
 	select PCI_MSI
 	select PCI_IOV
 	select PCI_ATS
+	select PCI_PRI
+	select PCI_PASID
 	select IOMMU_API
 	depends on X86_64 && PCI && ACPI
 	---help---
@@ -58,6 +60,15 @@ config AMD_IOMMU_STATS
 	  information to userspace via debugfs.
 	  If unsure, say N.

+config AMD_IOMMU_V2
+	tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)"
+	depends on AMD_IOMMU && PROFILING && EXPERIMENTAL
+	select MMU_NOTIFIER
+	---help---
+	  This option enables support for the AMD IOMMUv2 features of the IOMMU
+	  hardware. Select this option if you want to use devices that support
+	  the the PCI PRI and PASID interface.
+
 # Intel IOMMU support
 config DMAR_TABLE
 	bool
drivers/iommu/Makefile

@@ -1,6 +1,7 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
drivers/iommu/amd_iommu.c: +845 -38
(file diff suppressed because it is too large)
drivers/iommu/amd_iommu_init.c: +120 -13

@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/msi.h>
+#include <linux/amd-iommu.h>
 #include <linux/export.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -141,6 +142,12 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;

+u32 amd_iommu_max_pasids __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
 /*
  * The ACPI table parsing functions set this variable on an error
  */
@@ -299,6 +306,16 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 }

+static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+{
+	u32 ctrl;
+
+	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+	ctrl &= ~CTRL_INV_TO_MASK;
+	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
+	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
@@ -581,21 +598,69 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
 	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }

+/* allocates the memory where the IOMMU will log its events to */
+static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+						get_order(PPR_LOG_SIZE));
+
+	if (iommu->ppr_log == NULL)
+		return NULL;
+
+	return iommu->ppr_log;
+}
+
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+	u64 entry;
+
+	if (iommu->ppr_log == NULL)
+		return;
+
+	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+		    &entry, sizeof(entry));
+
+	/* set head and tail to zero manually */
+	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+	iommu_feature_enable(iommu, CONTROL_PPR_EN);
+}
+
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+	if (iommu->ppr_log == NULL)
+		return;
+
+	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+	if (!iommu_feature(iommu, FEATURE_GT))
+		return;
+
+	iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
 /* sets a specific bit in the device table entry. */
 static void set_dev_entry_bit(u16 devid, u8 bit)
 {
-	int i = (bit >> 5) & 0x07;
-	int _bit = bit & 0x1f;
+	int i = (bit >> 6) & 0x03;
+	int _bit = bit & 0x3f;

-	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
+	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
 }

 static int get_dev_entry_bit(u16 devid, u8 bit)
 {
-	int i = (bit >> 5) & 0x07;
-	int _bit = bit & 0x1f;
+	int i = (bit >> 6) & 0x03;
+	int _bit = bit & 0x3f;

-	return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
 }
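The set_dev_entry_bit()/get_dev_entry_bit() change above follows from struct dev_table_entry switching from u32 data[8] to u64 data[4] (see the amd_iommu_types.h hunk further down): a bit index into the 256-bit entry is now split as word = bit/64, offset = bit%64, and the shift needs a long constant so it cannot overflow once the offset can exceed 31. A minimal standalone illustration of the equivalence, not part of the patch:

	#include <stdint.h>
	#include <assert.h>

	/* old scheme: eight 32-bit words */
	static void set_bit_u32(uint32_t w[8], uint8_t bit)
	{
		w[(bit >> 5) & 0x07] |= (uint32_t)1 << (bit & 0x1f);
	}

	/* new scheme: four 64-bit words */
	static void set_bit_u64(uint64_t w[4], uint8_t bit)
	{
		w[(bit >> 6) & 0x03] |= (uint64_t)1 << (bit & 0x3f);
	}

	int main(void)
	{
		uint32_t a[8] = { 0 };
		uint64_t b[4] = { 0 };
		uint8_t bit = 98;	/* arbitrary bit of the 256-bit entry */

		set_bit_u32(a, bit);
		set_bit_u64(b, bit);
		/* same physical bit: compare the two u32 halves to the u64 word */
		assert((((uint64_t)a[3] << 32) | a[2]) == b[1]);
		return 0;
	}

Both schemes address the same 256-bit storage; 64-bit words simply let the new GCR3 device-table fields be manipulated without straddling word boundaries.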
@@ -699,6 +764,32 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)

 	iommu->features = ((u64)high << 32) | low;

+	if (iommu_feature(iommu, FEATURE_GT)) {
+		int glxval;
+		u32 pasids;
+		u64 shift;
+
+		shift = iommu->features & FEATURE_PASID_MASK;
+		shift >>= FEATURE_PASID_SHIFT;
+		pasids = (1 << shift);
+
+		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+		glxval = iommu->features & FEATURE_GLXVAL_MASK;
+		glxval >>= FEATURE_GLXVAL_SHIFT;
+
+		if (amd_iommu_max_glx_val == -1)
+			amd_iommu_max_glx_val = glxval;
+		else
+			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+	}
+
+	if (iommu_feature(iommu, FEATURE_GT) &&
+	    iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->is_iommu_v2   = true;
+		amd_iommu_v2_present = true;
+	}
+
 	if (!is_rd890_iommu(iommu->dev))
 		return;

@@ -901,6 +992,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
 {
 	free_command_buffer(iommu);
 	free_event_buffer(iommu);
+	free_ppr_log(iommu);
 	iommu_unmap_mmio_space(iommu);
 }

@@ -964,6 +1056,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);

+	if (iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->ppr_log = alloc_ppr_log(iommu);
+		if (!iommu->ppr_log)
+			return -ENOMEM;
+	}
+
 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
 		amd_iommu_np_cache = true;

@@ -1050,6 +1148,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 	iommu->int_enabled = true;
 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

+	if (iommu->ppr_log != NULL)
+		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
 	return 0;
 }

@@ -1209,6 +1310,9 @@ static void iommu_init_flags(struct amd_iommu *iommu)
 	 * make IOMMU memory accesses cache coherent
 	 */
 	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+	/* Set IOTLB invalidation timeout to 1s */
+	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
 }

 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -1274,6 +1378,8 @@ static void enable_iommus(void)
 		iommu_set_device_table(iommu);
 		iommu_enable_command_buffer(iommu);
 		iommu_enable_event_buffer(iommu);
+		iommu_enable_ppr_log(iommu);
+		iommu_enable_gt(iommu);
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable(iommu);
@@ -1303,13 +1409,6 @@ static void amd_iommu_resume(void)

 	/* re-load the hardware */
 	enable_iommus();
-
-	/*
-	 * we have to flush after the IOMMUs are enabled because a
-	 * disabled IOMMU will never execute the commands we send
-	 */
-	for_each_iommu(iommu)
-		iommu_flush_all_caches(iommu);
 }

 static int amd_iommu_suspend(void)
@@ -1560,6 +1659,8 @@ static int __init parse_amd_iommu_options(char *str)
 			amd_iommu_unmap_flush = true;
 		if (strncmp(str, "off", 3) == 0)
 			amd_iommu_disabled = true;
+		if (strncmp(str, "force_isolation", 15) == 0)
+			amd_iommu_force_isolation = true;
 	}

 	return 1;
@@ -1572,3 +1673,9 @@ IOMMU_INIT_FINISH(amd_iommu_detect,
 		  gart_iommu_hole_init,
 		  0,
 		  0);
+
+bool amd_iommu_v2_supported(void)
+{
+	return amd_iommu_v2_present;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
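The export at the end of the file is the hook that lets other code bail out early on hardware without GT and PPR support. A hedged sketch of a consumer, with a hypothetical module name (amd_iommu_v2_supported() itself is the real export added above):

	#include <linux/module.h>
	#include <linux/amd-iommu.h>

	static int __init example_v2_consumer_init(void)
	{
		/* refuse to load when no IOMMU in the system offers GT+PPR */
		if (!amd_iommu_v2_supported())
			return -ENODEV;

		/* ... register PPR notifiers, bind PASIDs, etc. ... */
		return 0;
	}
	module_init(example_v2_consumer_init);
	MODULE_LICENSE("GPL");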
drivers/iommu/amd_iommu_proto.h

@@ -31,6 +31,30 @@ extern int amd_iommu_init_devices(void);
 extern void amd_iommu_uninit_devices(void);
 extern void amd_iommu_init_notifier(void);
 extern void amd_iommu_init_api(void);
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+				u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+				     unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#define PPR_SUCCESS	0x0
+#define PPR_INVALID	0x1
+#define PPR_FAILURE	0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+				  int status, int tag);
+
 #ifndef CONFIG_AMD_IOMMU_STATS

 static inline void amd_iommu_stats_init(void) { }
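PPR_SUCCESS, PPR_INVALID and PPR_FAILURE are the completion statuses a fault handler reports back through amd_iommu_complete_ppr(), declared just above. A minimal hypothetical handler tail (the helper name is made up; the amd_iommu_complete_ppr() signature is the one declared in this hunk):

	static void example_finish_fault(struct pci_dev *pdev, int pasid, int tag,
					 bool handled)
	{
		int status = handled ? PPR_SUCCESS : PPR_INVALID;

		/* sends the matching PRI response back to the device */
		amd_iommu_complete_ppr(pdev, pasid, status, tag);
	}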
drivers/iommu/amd_iommu_types.h

@@ -69,11 +69,14 @@
 #define MMIO_EXCL_BASE_OFFSET	0x0020
 #define MMIO_EXCL_LIMIT_OFFSET	0x0028
 #define MMIO_EXT_FEATURES	0x0030
+#define MMIO_PPR_LOG_OFFSET	0x0038
 #define MMIO_CMD_HEAD_OFFSET	0x2000
 #define MMIO_CMD_TAIL_OFFSET	0x2008
 #define MMIO_EVT_HEAD_OFFSET	0x2010
 #define MMIO_EVT_TAIL_OFFSET	0x2018
 #define MMIO_STATUS_OFFSET	0x2020
+#define MMIO_PPR_HEAD_OFFSET	0x2030
+#define MMIO_PPR_TAIL_OFFSET	0x2038

 /* Extended Feature Bits */
@@ -87,8 +90,17 @@
 #define FEATURE_HE		(1ULL<<8)
 #define FEATURE_PC		(1ULL<<9)

+#define FEATURE_PASID_SHIFT	32
+#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)
+
+#define FEATURE_GLXVAL_SHIFT	14
+#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
+
+#define PASID_MASK		0x000fffff
+
 /* MMIO status bits */
-#define MMIO_STATUS_COM_WAIT_INT_MASK	0x04
+#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
+#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)

 /* event logging constants */
 #define EVENT_ENTRY_SIZE	0x10
@@ -115,6 +127,7 @@
 #define CONTROL_EVT_LOG_EN      0x02ULL
 #define CONTROL_EVT_INT_EN      0x03ULL
 #define CONTROL_COMWAIT_EN      0x04ULL
+#define CONTROL_INV_TIMEOUT	0x05ULL
 #define CONTROL_PASSPW_EN       0x08ULL
 #define CONTROL_RESPASSPW_EN    0x09ULL
 #define CONTROL_COHERENT_EN     0x0aULL
@@ -122,18 +135,34 @@
 #define CONTROL_CMDBUF_EN       0x0cULL
 #define CONTROL_PPFLOG_EN       0x0dULL
 #define CONTROL_PPFINT_EN       0x0eULL
+#define CONTROL_PPR_EN          0x0fULL
+#define CONTROL_GT_EN           0x10ULL
+
+#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_NONE	0
+#define CTRL_INV_TO_1MS		1
+#define CTRL_INV_TO_10MS	2
+#define CTRL_INV_TO_100MS	3
+#define CTRL_INV_TO_1S		4
+#define CTRL_INV_TO_10S		5
+#define CTRL_INV_TO_100S	6

 /* command specific defines */
 #define CMD_COMPL_WAIT          0x01
 #define CMD_INV_DEV_ENTRY       0x02
 #define CMD_INV_IOMMU_PAGES	0x03
 #define CMD_INV_IOTLB_PAGES	0x04
+#define CMD_COMPLETE_PPR	0x07
 #define CMD_INV_ALL		0x08

 #define CMD_COMPL_WAIT_STORE_MASK	0x01
 #define CMD_COMPL_WAIT_INT_MASK		0x02
 #define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
 #define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
+#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04
+
+#define PPR_STATUS_MASK			0xf
+#define PPR_STATUS_SHIFT		12

 #define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

@@ -165,6 +194,23 @@
 #define EVT_BUFFER_SIZE		8192 /* 512 entries */
 #define EVT_LEN_MASK		(0x9ULL << 56)

+/* Constants for PPR Log handling */
+#define PPR_LOG_ENTRIES		512
+#define PPR_LOG_SIZE_SHIFT	56
+#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
+#define PPR_ENTRY_SIZE		16
+#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+
+#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
+#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
+#define PPR_DEVID(x)		((x) & 0xffffULL)
+#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
+#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
+#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
+#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))
+
+#define PPR_REQ_FAULT		0x01
+
 #define PAGE_MODE_NONE    0x00
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
@@ -230,7 +276,24 @@
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)

-#define DTE_FLAG_IOTLB	0x01
+#define DTE_FLAG_IOTLB	(0x01UL << 32)
+#define DTE_FLAG_GV	(0x01ULL << 55)
+#define DTE_GLX_SHIFT	(56)
+#define DTE_GLX_MASK	(3)
+
+#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
+#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
+#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0xfffffULL)
+
+#define DTE_GCR3_INDEX_A	0
+#define DTE_GCR3_INDEX_B	1
+#define DTE_GCR3_INDEX_C	1
+
+#define DTE_GCR3_SHIFT_A	58
+#define DTE_GCR3_SHIFT_B	16
+#define DTE_GCR3_SHIFT_C	43
+
+#define GCR3_VALID		0x01ULL

 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
 #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
@@ -257,6 +320,7 @@
 					      domain for an IOMMU */
 #define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
 					      translation */
+#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)	\
@@ -285,6 +349,29 @@ extern bool amd_iommu_iotlb_sup;
 #define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
 #define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

+
+/*
+ * This struct is used to pass information about
+ * incoming PPR faults around.
+ */
+struct amd_iommu_fault {
+	u64 address;    /* IO virtual address of the fault*/
+	u32 pasid;      /* Address space identifier */
+	u16 device_id;  /* Originating PCI device id */
+	u16 tag;        /* PPR tag */
+	u16 flags;      /* Fault flags */
+
+};
+
+#define PPR_FAULT_EXEC	(1 << 1)
+#define PPR_FAULT_READ	(1 << 2)
+#define PPR_FAULT_WRITE	(1 << 5)
+#define PPR_FAULT_USER	(1 << 6)
+#define PPR_FAULT_RSVD	(1 << 7)
+#define PPR_FAULT_GN	(1 << 8)
+
+struct iommu_domain;
+
 /*
  * This structure contains generic data for IOMMU protection domains
  * independent of their use.
@@ -297,11 +384,15 @@ struct protection_domain {
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
+	int glx;		/* Number of levels for GCR3 table */
+	u64 *gcr3_tbl;		/* Guest CR3 table */
 	unsigned long flags;	/* flags to find out type of domain */
 	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 	void *priv;		/* private data */
+	struct iommu_domain *iommu_domain; /* Pointer to generic
+					      domain structure */

 };

@@ -315,10 +406,15 @@ struct iommu_dev_data {
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;			  /* Domain attach reverent count */
 	u16 devid;			  /* PCI Device ID */
+	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
+	bool passthrough;		  /* Default for device is pt_domain */
 	struct {
 		bool enabled;
 		int qdep;
 	} ats;				  /* ATS state */
+	bool pri_tlp;			  /* PASID TLB required for
+					     PPR completions */
+	u32 errata;			  /* Bitmap for errata to apply */
 };

 /*
@@ -399,6 +495,9 @@ struct amd_iommu {
 	/* Extended features */
 	u64 features;

+	/* IOMMUv2 */
+	bool is_iommu_v2;
+
 	/*
 	 * Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there are more than one AMD IOMMU capability
@@ -431,6 +530,9 @@ struct amd_iommu {
 	/* MSI number for event interrupt */
 	u16 evt_msi_num;

+	/* Base of the PPR log, if present */
+	u8 *ppr_log;
+
 	/* true if interrupts for this IOMMU are already enabled */
 	bool int_enabled;

@@ -484,7 +586,7 @@ extern struct list_head amd_iommu_pd_list;
  * Structure defining one entry in the device table
  */
 struct dev_table_entry {
-	u32 data[8];
+	u64 data[4];
 };

 /*
@@ -549,6 +651,16 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  */
 extern bool amd_iommu_unmap_flush;

+/* Smallest number of PASIDs supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasids;
+
+extern bool amd_iommu_v2_present;
+
+extern bool amd_iommu_force_isolation;
+
+/* Max levels of glxval supported */
+extern int amd_iommu_max_glx_val;
+
 /* takes bus and device/function and returns the device id
  * FIXME: should that be in generic PCI code? */
 static inline u16 calc_devid(u8 bus, u8 devfn)
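A PPR log entry is two 64-bit words (PPR_ENTRY_SIZE is 16 bytes), and PPR_PASID() stitches the PASID back together from the two fields it is split across. A sketch of decoding one entry into the struct amd_iommu_fault defined above — the helper is hypothetical, the real consumer lives in the suppressed amd_iommu.c diff, and placing the fault address in the second word is an assumption based on the field widths:

	static void example_decode_ppr_entry(const u64 raw[2],
					     struct amd_iommu_fault *fault)
	{
		fault->address   = raw[1];		/* assumed: IO virtual address */
		fault->pasid     = PPR_PASID(raw[0]);	/* PASID2:PASID1 recombined */
		fault->device_id = PPR_DEVID(raw[0]);
		fault->tag       = PPR_TAG(raw[0]);
		fault->flags     = PPR_FLAGS(raw[0]);
	}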
drivers/iommu/amd_iommu_v2.c (new file)
(file diff suppressed because it is too large)
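The suppressed file is the new IOMMUv2 driver stub named in the commit list above. A rough consumer-side sketch of the flow it enables — the entry points are named per the commit titles ("bind/unbind a pasid", "device aquisition code"), but the exact signatures shown here are assumptions:

	/* acquire the device for IOMMUv2 (PRI/PASID) use */
	ret = amd_iommu_init_device(pdev, pasids);

	/* let PASID 1 on this device share the current process's page tables */
	ret = amd_iommu_bind_pasid(pdev, 1, current);

	/* ... device issues PRI requests; faults arrive via the PPR log ... */

	amd_iommu_unbind_pasid(pdev, 1);
	amd_iommu_free_device(pdev);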
drivers/iommu/intel-iommu.c

@@ -79,6 +79,24 @@
 #define LEVEL_STRIDE		(9)
 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
 	return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,

 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;

 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;

-	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }

-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+			     unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;

 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;

-	return order;
+	return PAGE_SIZE << order;
 }

 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4060,6 +4075,54 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }

+/*
+ * Group numbers are arbitrary.  Device with the same group number
+ * indicate the iommu cannot differentiate between them.  To avoid
+ * tracking used groups we just use the seg|bus|devfn of the lowest
+ * level we're able to differentiate devices
+ */
+static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *bridge;
+	union {
+		struct {
+			u8 devfn;
+			u8 bus;
+			u16 segment;
+		} pci;
+		u32 group;
+	} id;
+
+	if (iommu_no_mapping(dev))
+		return -ENODEV;
+
+	id.pci.segment = pci_domain_nr(pdev->bus);
+	id.pci.bus = pdev->bus->number;
+	id.pci.devfn = pdev->devfn;
+
+	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+		return -ENODEV;
+
+	bridge = pci_find_upstream_pcie_bridge(pdev);
+	if (bridge) {
+		if (pci_is_pcie(bridge)) {
+			id.pci.bus = bridge->subordinate->number;
+			id.pci.devfn = 0;
+		} else {
+			id.pci.bus = bridge->bus->number;
+			id.pci.devfn = bridge->devfn;
+		}
+	}
+
+	if (!pdev->is_virtfn && iommu_group_mf)
+		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+
+	*groupid = id.group;
+
+	return 0;
+}
+
 static struct iommu_ops intel_iommu_ops = {
 	.domain_init	= intel_iommu_domain_init,
 	.domain_destroy = intel_iommu_domain_destroy,
@@ -4069,6 +4132,8 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap = intel_iommu_domain_has_cap,
+	.device_group	= intel_iommu_device_group,
+	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };

 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
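The union in intel_iommu_device_group() aliases {devfn, bus, segment} with a single u32, so two devices get the same group id exactly when all three fields match. An illustration (not from the patch) of the resulting packing on a little-endian machine like x86:

	#include <stdint.h>
	#include <stdio.h>

	union group_id {
		struct {
			uint8_t  devfn;
			uint8_t  bus;
			uint16_t segment;
		} pci;
		uint32_t group;
	};

	int main(void)
	{
		/* hypothetical device 0000:02:02.0 */
		union group_id id = {
			.pci = { .devfn = 0x10, .bus = 0x02, .segment = 0 }
		};

		/* little-endian: devfn in the low byte, bus next -> 0x00000210 */
		printf("groupid = 0x%08x\n", id.group);
		return 0;
	}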
drivers/iommu/iommu.c: +165 -10

@@ -16,6 +16,8 @@
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
@@ -25,8 +27,59 @@
 #include <linux/errno.h>
 #include <linux/iommu.h>

+static ssize_t show_iommu_group(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	unsigned int groupid;
+
+	if (iommu_device_group(dev, &groupid))
+		return 0;
+
+	return sprintf(buf, "%u", groupid);
+}
+static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+
+static int add_iommu_group(struct device *dev, void *data)
+{
+	unsigned int groupid;
+
+	if (iommu_device_group(dev, &groupid) == 0)
+		return device_create_file(dev, &dev_attr_iommu_group);
+
+	return 0;
+}
+
+static int remove_iommu_group(struct device *dev)
+{
+	unsigned int groupid;
+
+	if (iommu_device_group(dev, &groupid) == 0)
+		device_remove_file(dev, &dev_attr_iommu_group);
+
+	return 0;
+}
+
+static int iommu_device_notifier(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct device *dev = data;
+
+	if (action == BUS_NOTIFY_ADD_DEVICE)
+		return add_iommu_group(dev, NULL);
+	else if (action == BUS_NOTIFY_DEL_DEVICE)
+		return remove_iommu_group(dev);
+
+	return 0;
+}
+
+static struct notifier_block iommu_device_nb = {
+	.notifier_call = iommu_device_notifier,
+};
+
 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
 {
+	bus_register_notifier(bus, &iommu_device_nb);
+	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
 }

 /**
@@ -157,32 +210,134 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

 int iommu_map(struct iommu_domain *domain, unsigned long iova,
-	      phys_addr_t paddr, int gfp_order, int prot)
+	      phys_addr_t paddr, size_t size, int prot)
 {
-	size_t size;
+	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
+	size_t orig_size = size;
+	int ret = 0;

 	if (unlikely(domain->ops->map == NULL))
 		return -ENODEV;

-	size = PAGE_SIZE << gfp_order;
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

-	BUG_ON(!IS_ALIGNED(iova | paddr, size));
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+			"0x%x\n", iova, (unsigned long)paddr,
+			(unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}

-	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+				(unsigned long)paddr, (unsigned long)size);
+
+	while (size) {
+		unsigned long pgsize, addr_merge = iova | paddr;
+		unsigned int pgsize_idx;
+
+		/* Max page size that still fits into 'size' */
+		pgsize_idx = __fls(size);
+
+		/* need to consider alignment requirements ? */
+		if (likely(addr_merge)) {
+			/* Max page size allowed by both iova and paddr */
+			unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+		}
+
+		/* build a mask of acceptable page sizes */
+		pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+		/* throw away page sizes not supported by the hardware */
+		pgsize &= domain->ops->pgsize_bitmap;
+
+		/* make sure we're still sane */
+		BUG_ON(!pgsize);
+
+		/* pick the biggest page */
+		pgsize_idx = __fls(pgsize);
+		pgsize = 1UL << pgsize_idx;
+
+		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+					(unsigned long)paddr, pgsize);
+
+		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		iommu_unmap(domain, orig_iova, orig_size - size);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);

-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-	size_t size;
+	size_t unmapped_page, unmapped = 0;
+	unsigned int min_pagesz;

 	if (unlikely(domain->ops->unmap == NULL))
 		return -ENODEV;

-	size = PAGE_SIZE << gfp_order;
-
-	BUG_ON(!IS_ALIGNED(iova, size));
-
-	return domain->ops->unmap(domain, iova, gfp_order);
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * The virtual address, as well as the size of the mapping, must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+			iova, (unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+							(unsigned long)size);
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t left = size - unmapped;
+
+		unmapped_page = domain->ops->unmap(domain, iova, left);
+		if (!unmapped_page)
+			break;
+
+		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+					(unsigned long)unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
+
+	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
+
+int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+		return dev->bus->iommu_ops->device_group(dev, groupid);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(iommu_device_group);
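The new iommu_map() loop picks, on every pass, the largest hardware-supported page that both fits in the remaining size and matches the alignment of iova and paddr (the lowest set bit of iova|paddr bounds the usable page size). A standalone model of that selection, with assumed inputs, not part of the patch:

	#include <stdio.h>

	/* one iteration of the page-size choice in iommu_map(); assumes a
	 * supported size always remains (the kernel BUG()s otherwise) */
	static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
					 unsigned long size, unsigned long bitmap)
	{
		unsigned int idx = 63 - __builtin_clzl(size);	/* __fls(size) */
		unsigned long merge = iova | paddr;

		if (merge) {
			unsigned int align = __builtin_ctzl(merge); /* __ffs */
			if (align < idx)
				idx = align;
		}
		bitmap &= (1UL << (idx + 1)) - 1;	/* sizes that still fit */
		return 1UL << (63 - __builtin_clzl(bitmap));	/* biggest one */
	}

	int main(void)
	{
		unsigned long bitmap = 0x1000 | 0x10000 | 0x100000; /* 4K|64K|1M */
		unsigned long iova = 0x1000000, paddr = 0x1000000;
		unsigned long size = 0x102000;

		while (size) {
			unsigned long pg = pick_pgsize(iova, paddr, size, bitmap);
			printf("map 0x%lx at iova 0x%lx\n", pg, iova);
			iova += pg; paddr += pg; size -= pg;
		}
		return 0;	/* prints one 1 MiB mapping, then 4 KiB twice */
	}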
drivers/iommu/msm_iommu.c: +12 -13

@@ -42,6 +42,9 @@ __asm__ __volatile__ ( \
 #define RCP15_PRRR(reg)	MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)	MRC(reg, p15, 0, c10, c2, 1)

+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 static int msm_iommu_tex_class[4];

 DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
 }

 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t len, int prot)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
 	unsigned int pgprot;
-	size_t len = 0x1000UL << order;
 	int ret = 0, tex, sh;

 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
 	return ret;
 }

-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-			    int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    size_t len)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_table;
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
-	size_t len = 0x1000UL << order;
 	int i, ret = 0;

 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,

 	ret = __flush_iotlb(domain);

-	/*
-	 * the IOMMU API requires us to return the order of the unmapped
-	 * page (on success).
-	 */
-	if (!ret)
-		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-	return ret;
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
 }

 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.domain_has_cap = msm_iommu_domain_has_cap
+	.domain_has_cap = msm_iommu_domain_has_cap,
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };

 static int __init get_tex_class(int icp, int ocp, int mt, int nos)
drivers/iommu/omap-iommu.c: +37 -43

@@ -33,6 +33,9 @@
 		(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
 		__i++)

+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 /**
  * struct omap_iommu_domain - omap iommu domain
  * @pgtable:	the page table
@@ -86,20 +89,24 @@ EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);

 /**
  * omap_iommu_save_ctx - Save registers for pm off-mode support
- * @obj:	target iommu
+ * @dev:	client device
 **/
-void omap_iommu_save_ctx(struct omap_iommu *obj)
+void omap_iommu_save_ctx(struct device *dev)
 {
+	struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
 	arch_iommu->save_ctx(obj);
 }
 EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

 /**
  * omap_iommu_restore_ctx - Restore registers for pm off-mode support
- * @obj:	target iommu
+ * @dev:	client device
 **/
-void omap_iommu_restore_ctx(struct omap_iommu *obj)
+void omap_iommu_restore_ctx(struct device *dev)
 {
+	struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
 	arch_iommu->restore_ctx(obj);
 }
 EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
@@ -819,36 +826,24 @@ static int device_match_by_alias(struct device *dev, void *data)
 	return strcmp(obj->name, name) == 0;
 }

-/**
- * omap_find_iommu_device() - find an omap iommu device by name
- * @name:	name of the iommu device
- *
- * The generic iommu API requires the caller to provide the device
- * he wishes to attach to a certain iommu domain.
- *
- * Drivers generally should not bother with this as it should just
- * be taken care of by the DMA-API using dev_archdata.
- *
- * This function is provided as an interim solution until the latter
- * materializes, and omap3isp is fully migrated to the DMA-API.
- */
-struct device *omap_find_iommu_device(const char *name)
-{
-	return driver_find_device(&omap_iommu_driver.driver, NULL,
-				  (void *)name,
-				  device_match_by_alias);
-}
-EXPORT_SYMBOL_GPL(omap_find_iommu_device);
-
 /**
  * omap_iommu_attach() - attach iommu device to an iommu domain
- * @dev:	target omap iommu device
+ * @name:	name of target omap iommu device
  * @iopgd:	page table
 **/
-static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
+static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
 {
 	int err = -ENOMEM;
-	struct omap_iommu *obj = to_iommu(dev);
+	struct device *dev;
+	struct omap_iommu *obj;
+
+	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
+				(void *)name,
+				device_match_by_alias);
+	if (!dev)
+		return NULL;
+
+	obj = to_iommu(dev);

 	spin_lock(&obj->iommu_lock);
@@ -1019,12 +1014,11 @@ static void iopte_cachep_ctor(void *iopte)
 }

 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t bytes, int prot)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t bytes = PAGE_SIZE << order;
 	struct iotlb_entry e;
 	int omap_pgsz;
 	u32 ret, flags;
@@ -1049,19 +1043,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 	return ret;
 }

-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			    int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			       size_t size)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t unmap_size;

-	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
+	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);

-	unmap_size = iopgtable_clear_entry(oiommu, da);
-
-	return unmap_size ? get_order(unmap_size) : -EINVAL;
+	return iopgtable_clear_entry(oiommu, da);
 }

 static int
@@ -1069,6 +1060,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu;
+	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
 	int ret = 0;

 	spin_lock(&omap_domain->lock);
@@ -1081,14 +1073,14 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	}

 	/* get a handle to and enable the omap iommu */
-	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
 	if (IS_ERR(oiommu)) {
 		ret = PTR_ERR(oiommu);
 		dev_err(dev, "can't get omap iommu: %d\n", ret);
 		goto out;
 	}

-	omap_domain->iommu_dev = oiommu;
+	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
 	oiommu->domain = domain;

 out:
@@ -1100,7 +1092,8 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
 				  struct device *dev)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
-	struct omap_iommu *oiommu = to_iommu(dev);
+	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);

 	spin_lock(&omap_domain->lock);
@@ -1114,7 +1107,7 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,

 	omap_iommu_detach(oiommu);

-	omap_domain->iommu_dev = NULL;
+	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;

 out:
 	spin_unlock(&omap_domain->lock);
@@ -1183,14 +1176,14 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
 		else if (iopte_is_large(*pte))
 			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
 		else
-			dev_err(dev, "bogus pte 0x%x", *pte);
+			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
 	} else {
 		if (iopgd_is_section(*pgd))
 			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
 		else if (iopgd_is_super(*pgd))
 			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
 		else
-			dev_err(dev, "bogus pgd 0x%x", *pgd);
+			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
 	}

 	return ret;
@@ -1211,6 +1204,7 @@ static struct iommu_ops omap_iommu_ops = {
 	.unmap		= omap_iommu_unmap,
 	.iova_to_phys	= omap_iommu_iova_to_phys,
 	.domain_has_cap	= omap_iommu_domain_has_cap,
+	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
 };

 static int __init omap_iommu_init(void)
drivers/iommu/omap-iovmm.c: +26 -22

@@ -231,12 +231,14 @@ static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,

 /**
  * omap_find_iovm_area  -  find iovma which includes @da
+ * @dev:	client device
  * @da:	iommu device virtual address
  *
  * Find the existing iovma starting at @da
  */
-struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
+struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
 {
+	struct omap_iommu *obj = dev_to_omap_iommu(dev);
 	struct iovm_struct *area;

 	mutex_lock(&obj->mmap_lock);
@@ -343,14 +345,15 @@ static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)

 /**
  * omap_da_to_va - convert (d) to (v)
- * @obj:	objective iommu
+ * @dev:	client device
  * @da:	iommu device virtual address
  * @va:	mpu virtual address
  *
  * Returns mpu virtual addr which corresponds to a given device virtual addr
  */
-void *omap_da_to_va(struct omap_iommu *obj, u32 da)
+void *omap_da_to_va(struct device *dev, u32 da)
 {
+	struct omap_iommu *obj = dev_to_omap_iommu(dev);
 	void *va = NULL;
 	struct iovm_struct *area;

@@ -410,7 +413,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
-	int order;

 	if (!domain || !sgt)
 		return -EINVAL;
@@ -429,12 +431,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;

-		order = get_order(bytes);
-
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);

-		err = iommu_map(domain, da, pa, order, flags);
+		err = iommu_map(domain, da, pa, bytes, flags);
 		if (err)
 			goto err_out;

@@ -449,10 +449,9 @@ err_out:
 		size_t bytes;

 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);

 		/* ignore failures.. we're already handling one */
-		iommu_unmap(domain, da, order);
+		iommu_unmap(domain, da, bytes);

 		da += bytes;
 	}
@@ -467,7 +466,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	size_t total = area->da_end - area->da_start;
 	const struct sg_table *sgt = area->sgt;
 	struct scatterlist *sg;
-	int i, err;
+	int i;
+	size_t unmapped;

 	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +475,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	start = area->da_start;
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
-		int order;

 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);

-		err = iommu_unmap(domain, start, order);
-		if (err < 0)
+		unmapped = iommu_unmap(domain, start, bytes);
+		if (unmapped < bytes)
 			break;

 		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
@@ -582,16 +580,18 @@ __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,

 /**
  * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
- * @obj:	objective iommu
+ * @domain:	iommu domain
+ * @dev:	client device
  * @sgt:	address of scatter gather table
  * @flags:	iovma and page property
  *
  * Creates 1-n-1 mapping with given @sgt and returns @da.
  * All @sgt element must be io page size aligned.
  */
-u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
 		const struct sg_table *sgt, u32 flags)
 {
+	struct omap_iommu *obj = dev_to_omap_iommu(dev);
 	size_t bytes;
 	void *va = NULL;
@@ -622,15 +622,17 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmap);

 /**
  * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
- * @obj:	objective iommu
+ * @domain:	iommu domain
+ * @dev:	client device
  * @da:	iommu device virtual address
  *
  * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
 struct sg_table *
-omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
+omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
 {
+	struct omap_iommu *obj = dev_to_omap_iommu(dev);
 	struct sg_table *sgt;
 	/*
	 * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
@@ -647,7 +649,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

 /**
  * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
- * @obj:	objective iommu
+ * @dev:	client device
  * @da:	contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
@@ -656,9 +658,10 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
 u32
-omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
 		size_t bytes, u32 flags)
 {
+	struct omap_iommu *obj = dev_to_omap_iommu(dev);
 	void *va;
 	struct sg_table *sgt;
@@ -698,15 +701,16 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);

 /**
  * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
- * @obj:	objective iommu
+ * @dev:	client device
  * @da:		iommu device virtual address
 *
 * Frees the iommu virtually continuous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
-void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
 			const u32 da)
 {
+	struct omap_iommu *obj = dev_to_omap_iommu(dev);
 	struct sg_table *sgt;

 	sgt = unmap_vm_area(domain, obj, da, vfree,
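After this interface change, omap-iovmm clients pass their own struct device instead of an omap_iommu handle; the iommu object is recovered internally via dev_to_omap_iommu() from dev->archdata.iommu. A hedged usage sketch — dev, domain, bytes and flags are assumed to be set up by the client (e.g. omap3isp):

	u32 da;
	void *va;

	da = omap_iommu_vmalloc(domain, dev, 0, bytes, flags);
	if (IS_ERR_VALUE(da))
		return da;

	va = omap_da_to_va(dev, da);	/* CPU-side view of the region */
	/* ... use the buffer ... */
	omap_iommu_vfree(domain, dev, da);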