You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge branch 'iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (60 commits) dma-debug: make memory range checks more consistent dma-debug: warn of unmapping an invalid dma address dma-debug: fix dma_debug_add_bus() definition for !CONFIG_DMA_API_DEBUG dma-debug/x86: register pci bus for dma-debug leak detection dma-debug: add a check dma memory leaks dma-debug: add checks for kernel text and rodata dma-debug: print stacktrace of mapping path on unmap error dma-debug: Documentation update dma-debug: x86 architecture bindings dma-debug: add function to dump dma mappings dma-debug: add checks for sync_single_sg_* dma-debug: add checks for sync_single_range_* dma-debug: add checks for sync_single_* dma-debug: add checking for [alloc|free]_coherent dma-debug: add add checking for map/unmap_sg dma-debug: add checking for map/unmap_page/single dma-debug: add core checking functions dma-debug: add debugfs interface dma-debug: add kernel command line parameters dma-debug: add initialization code ... Fix trivial conflicts due to whitespace changes in arch/x86/kernel/pci-nommu.c
This commit is contained in:
@@ -609,3 +609,109 @@ size is the size (and should be a page-sized multiple).
|
||||
The return value will be either a pointer to the processor virtual
|
||||
address of the memory, or an error (via PTR_ERR()) if any part of the
|
||||
region is occupied.
|
||||
|
||||
Part III - Debug drivers use of the DMA-API
|
||||
-------------------------------------------
|
||||
|
||||
The DMA-API as described above has some constraints. DMA addresses must be
|
||||
released with the corresponding function with the same size for example. With
|
||||
the advent of hardware IOMMUs it becomes more and more important that drivers
|
||||
do not violate those constraints. In the worst case such a violation can
|
||||
result in data corruption up to destroyed filesystems.
|
||||
|
||||
To debug drivers and find bugs in the usage of the DMA-API checking code can
|
||||
be compiled into the kernel which will tell the developer about those
|
||||
violations. If your architecture supports it you can select the "Enable
|
||||
debugging of DMA-API usage" option in your kernel configuration. Enabling this
|
||||
option has a performance impact. Do not enable it in production kernels.
|
||||
|
||||
If you boot the resulting kernel will contain code which does some bookkeeping
|
||||
about what DMA memory was allocated for which device. If this code detects an
|
||||
error it prints a warning message with some details into your kernel log. An
|
||||
example warning message may look like this:
|
||||
|
||||
------------[ cut here ]------------
|
||||
WARNING: at /data2/repos/linux-2.6-iommu/lib/dma-debug.c:448
|
||||
check_unmap+0x203/0x490()
|
||||
Hardware name:
|
||||
forcedeth 0000:00:08.0: DMA-API: device driver frees DMA memory with wrong
|
||||
function [device address=0x00000000640444be] [size=66 bytes] [mapped as
|
||||
single] [unmapped as page]
|
||||
Modules linked in: nfsd exportfs bridge stp llc r8169
|
||||
Pid: 0, comm: swapper Tainted: G W 2.6.28-dmatest-09289-g8bb99c0 #1
|
||||
Call Trace:
|
||||
<IRQ> [<ffffffff80240b22>] warn_slowpath+0xf2/0x130
|
||||
[<ffffffff80647b70>] _spin_unlock+0x10/0x30
|
||||
[<ffffffff80537e75>] usb_hcd_link_urb_to_ep+0x75/0xc0
|
||||
[<ffffffff80647c22>] _spin_unlock_irqrestore+0x12/0x40
|
||||
[<ffffffff8055347f>] ohci_urb_enqueue+0x19f/0x7c0
|
||||
[<ffffffff80252f96>] queue_work+0x56/0x60
|
||||
[<ffffffff80237e10>] enqueue_task_fair+0x20/0x50
|
||||
[<ffffffff80539279>] usb_hcd_submit_urb+0x379/0xbc0
|
||||
[<ffffffff803b78c3>] cpumask_next_and+0x23/0x40
|
||||
[<ffffffff80235177>] find_busiest_group+0x207/0x8a0
|
||||
[<ffffffff8064784f>] _spin_lock_irqsave+0x1f/0x50
|
||||
[<ffffffff803c7ea3>] check_unmap+0x203/0x490
|
||||
[<ffffffff803c8259>] debug_dma_unmap_page+0x49/0x50
|
||||
[<ffffffff80485f26>] nv_tx_done_optimized+0xc6/0x2c0
|
||||
[<ffffffff80486c13>] nv_nic_irq_optimized+0x73/0x2b0
|
||||
[<ffffffff8026df84>] handle_IRQ_event+0x34/0x70
|
||||
[<ffffffff8026ffe9>] handle_edge_irq+0xc9/0x150
|
||||
[<ffffffff8020e3ab>] do_IRQ+0xcb/0x1c0
|
||||
[<ffffffff8020c093>] ret_from_intr+0x0/0xa
|
||||
<EOI> <4>---[ end trace f6435a98e2a38c0e ]---
|
||||
|
||||
The driver developer can find the driver and the device including a stacktrace
|
||||
of the DMA-API call which caused this warning.
|
||||
|
||||
Per default only the first error will result in a warning message. All other
|
||||
errors will only be silently counted. This limitation exists to prevent the code
|
||||
from flooding your kernel log. To support debugging a device driver this can
|
||||
be disabled via debugfs. See the debugfs interface documentation below for
|
||||
details.
|
||||
|
||||
The debugfs directory for the DMA-API debugging code is called dma-api/. In
|
||||
this directory the following files can currently be found:
|
||||
|
||||
dma-api/all_errors This file contains a numeric value. If this
|
||||
value is not equal to zero the debugging code
|
||||
will print a warning for every error it finds
|
||||
into the kernel log. Be careful with this
|
||||
option. It can easily flood your logs.
|
||||
|
||||
dma-api/disabled This read-only file contains the character 'Y'
|
||||
if the debugging code is disabled. This can
|
||||
happen when it runs out of memory or if it was
|
||||
disabled at boot time
|
||||
|
||||
dma-api/error_count This file is read-only and shows the total
|
||||
numbers of errors found.
|
||||
|
||||
dma-api/num_errors The number in this file shows how many
|
||||
warnings will be printed to the kernel log
|
||||
before it stops. This number is initialized to
|
||||
one at system boot and can be set by writing into
|
||||
this file
|
||||
|
||||
dma-api/min_free_entries
|
||||
This read-only file can be read to get the
|
||||
minimum number of free dma_debug_entries the
|
||||
allocator has ever seen. If this value goes
|
||||
down to zero the code will disable itself
|
||||
because it is no longer reliable.
|
||||
|
||||
dma-api/num_free_entries
|
||||
The current number of free dma_debug_entries
|
||||
in the allocator.
|
||||
|
||||
If you have this code compiled into your kernel it will be enabled by default.
|
||||
If you want to boot without the bookkeeping anyway you can provide
|
||||
'dma_debug=off' as a boot parameter. This will disable DMA-API debugging.
|
||||
Notice that you cannot enable it again at runtime. You have to reboot to do
|
||||
so.
|
||||
|
||||
When the code disables itself at runtime this is most likely because it ran
|
||||
out of dma_debug_entries. These entries are preallocated at boot. The number
|
||||
of preallocated entries is defined per architecture. If it is too low for you
|
||||
boot with 'dma_debug_entries=<your_desired_number>' to override the
|
||||
architectural default.
|
||||
|
||||
@@ -492,6 +492,16 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
Range: 0 - 8192
|
||||
Default: 64
|
||||
|
||||
dma_debug=off If the kernel is compiled with DMA_API_DEBUG support
|
||||
this option disables the debugging code at boot.
|
||||
|
||||
dma_debug_entries=<number>
|
||||
This option allows tuning the number of preallocated
|
||||
entries for DMA-API debugging code. One entry is
|
||||
required per DMA-API allocation. Use this if the
|
||||
DMA-API debugging code disables itself because the
|
||||
architectural default is too low.
|
||||
|
||||
hpet= [X86-32,HPET] option to control HPET usage
|
||||
Format: { enable (default) | disable | force |
|
||||
verbose }
|
||||
|
||||
@@ -106,3 +106,5 @@ config HAVE_CLK
|
||||
The <linux/clk.h> calls support software clock gating and
|
||||
thus are a key power management tool on many systems.
|
||||
|
||||
config HAVE_DMA_API_DEBUG
|
||||
bool
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
|
||||
obj-y := setup.o
|
||||
ifeq ($(CONFIG_DMAR), y)
|
||||
obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
|
||||
obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
|
||||
else
|
||||
obj-$(CONFIG_IA64_GENERIC) += machvec.o
|
||||
endif
|
||||
obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
|
||||
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/intel-iommu.h>
|
||||
|
||||
void *
|
||||
vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
gfp_t flags)
|
||||
{
|
||||
return intel_alloc_coherent(dev, size, dma_handle, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
|
||||
|
||||
void
|
||||
vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle)
|
||||
{
|
||||
intel_free_coherent(dev, size, vaddr, dma_handle);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vtd_free_coherent);
|
||||
|
||||
dma_addr_t
|
||||
vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
|
||||
int dir, struct dma_attrs *attrs)
|
||||
{
|
||||
return intel_map_single(dev, (phys_addr_t)addr, size, dir);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
|
||||
|
||||
void
|
||||
vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
|
||||
int dir, struct dma_attrs *attrs)
|
||||
{
|
||||
intel_unmap_single(dev, iova, size, dir);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
|
||||
|
||||
int
|
||||
vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
int dir, struct dma_attrs *attrs)
|
||||
{
|
||||
return intel_map_sg(dev, sglist, nents, dir);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
|
||||
|
||||
void
|
||||
vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
|
||||
int nents, int dir, struct dma_attrs *attrs)
|
||||
{
|
||||
intel_unmap_sg(dev, sglist, nents, dir);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
|
||||
|
||||
int
|
||||
vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
|
||||
@@ -13,49 +13,34 @@
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/swiotlb.h>
|
||||
|
||||
#include <asm/machvec.h>
|
||||
|
||||
extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
|
||||
|
||||
/* swiotlb declarations & definitions: */
|
||||
extern int swiotlb_late_init_with_default_size (size_t size);
|
||||
|
||||
/* hwiommu declarations & definitions: */
|
||||
|
||||
extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
|
||||
extern ia64_mv_dma_free_coherent sba_free_coherent;
|
||||
extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
|
||||
extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
|
||||
extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
|
||||
extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
|
||||
extern ia64_mv_dma_supported sba_dma_supported;
|
||||
extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
|
||||
|
||||
#define hwiommu_alloc_coherent sba_alloc_coherent
|
||||
#define hwiommu_free_coherent sba_free_coherent
|
||||
#define hwiommu_map_single_attrs sba_map_single_attrs
|
||||
#define hwiommu_unmap_single_attrs sba_unmap_single_attrs
|
||||
#define hwiommu_map_sg_attrs sba_map_sg_attrs
|
||||
#define hwiommu_unmap_sg_attrs sba_unmap_sg_attrs
|
||||
#define hwiommu_dma_supported sba_dma_supported
|
||||
#define hwiommu_dma_mapping_error sba_dma_mapping_error
|
||||
#define hwiommu_sync_single_for_cpu machvec_dma_sync_single
|
||||
#define hwiommu_sync_sg_for_cpu machvec_dma_sync_sg
|
||||
#define hwiommu_sync_single_for_device machvec_dma_sync_single
|
||||
#define hwiommu_sync_sg_for_device machvec_dma_sync_sg
|
||||
|
||||
|
||||
/*
|
||||
* Note: we need to make the determination of whether or not to use
|
||||
* the sw I/O TLB based purely on the device structure. Anything else
|
||||
* would be unreliable or would be too intrusive.
|
||||
*/
|
||||
static inline int
|
||||
use_swiotlb (struct device *dev)
|
||||
static inline int use_swiotlb(struct device *dev)
|
||||
{
|
||||
return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
|
||||
return dev && dev->dma_mask &&
|
||||
!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
|
||||
}
|
||||
|
||||
struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
return &swiotlb_dma_ops;
|
||||
return &sba_dma_ops;
|
||||
}
|
||||
EXPORT_SYMBOL(hwsw_dma_get_ops);
|
||||
|
||||
void __init
|
||||
hwsw_init (void)
|
||||
{
|
||||
@@ -71,125 +56,3 @@ hwsw_init (void)
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
void *
|
||||
hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
|
||||
else
|
||||
return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
|
||||
}
|
||||
|
||||
void
|
||||
hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
|
||||
else
|
||||
hwiommu_free_coherent(dev, size, vaddr, dma_handle);
|
||||
}
|
||||
|
||||
dma_addr_t
|
||||
hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
|
||||
else
|
||||
return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
|
||||
}
|
||||
EXPORT_SYMBOL(hwsw_map_single_attrs);
|
||||
|
||||
void
|
||||
hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
|
||||
int dir, struct dma_attrs *attrs)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
|
||||
else
|
||||
return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
|
||||
}
|
||||
EXPORT_SYMBOL(hwsw_unmap_single_attrs);
|
||||
|
||||
int
|
||||
hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
int dir, struct dma_attrs *attrs)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
|
||||
else
|
||||
return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
|
||||
}
|
||||
EXPORT_SYMBOL(hwsw_map_sg_attrs);
|
||||
|
||||
void
|
||||
hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
int dir, struct dma_attrs *attrs)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
|
||||
else
|
||||
return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
|
||||
}
|
||||
EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
|
||||
|
||||
void
|
||||
hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
swiotlb_sync_single_for_cpu(dev, addr, size, dir);
|
||||
else
|
||||
hwiommu_sync_single_for_cpu(dev, addr, size, dir);
|
||||
}
|
||||
|
||||
void
|
||||
hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
|
||||
else
|
||||
hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
|
||||
}
|
||||
|
||||
void
|
||||
hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
swiotlb_sync_single_for_device(dev, addr, size, dir);
|
||||
else
|
||||
hwiommu_sync_single_for_device(dev, addr, size, dir);
|
||||
}
|
||||
|
||||
void
|
||||
hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
|
||||
else
|
||||
hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
|
||||
}
|
||||
|
||||
int
|
||||
hwsw_dma_supported (struct device *dev, u64 mask)
|
||||
{
|
||||
if (hwiommu_dma_supported(dev, mask))
|
||||
return 1;
|
||||
return swiotlb_dma_supported(dev, mask);
|
||||
}
|
||||
|
||||
int
|
||||
hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return hwiommu_dma_mapping_error(dev, dma_addr) ||
|
||||
swiotlb_dma_mapping_error(dev, dma_addr);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(hwsw_dma_mapping_error);
|
||||
EXPORT_SYMBOL(hwsw_dma_supported);
|
||||
EXPORT_SYMBOL(hwsw_alloc_coherent);
|
||||
EXPORT_SYMBOL(hwsw_free_coherent);
|
||||
EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
|
||||
EXPORT_SYMBOL(hwsw_sync_single_for_device);
|
||||
EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
|
||||
EXPORT_SYMBOL(hwsw_sync_sg_for_device);
|
||||
|
||||
@@ -36,6 +36,7 @@
|
||||
#include <linux/bitops.h> /* hweight64() */
|
||||
#include <linux/crash_dump.h>
|
||||
#include <linux/iommu-helper.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include <asm/delay.h> /* ia64_get_itc() */
|
||||
#include <asm/io.h>
|
||||
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
|
||||
*
|
||||
* See Documentation/PCI/PCI-DMA-mapping.txt
|
||||
*/
|
||||
dma_addr_t
|
||||
sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
|
||||
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
|
||||
unsigned long poff, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct ioc *ioc;
|
||||
void *addr = page_address(page) + poff;
|
||||
dma_addr_t iovp;
|
||||
dma_addr_t offset;
|
||||
u64 *pdir_start;
|
||||
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
|
||||
#endif
|
||||
return SBA_IOVA(ioc, iovp, offset);
|
||||
}
|
||||
EXPORT_SYMBOL(sba_map_single_attrs);
|
||||
|
||||
static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
return sba_map_page(dev, virt_to_page(addr),
|
||||
(unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
|
||||
}
|
||||
|
||||
#ifdef ENABLE_MARK_CLEAN
|
||||
static SBA_INLINE void
|
||||
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
|
||||
*
|
||||
* See Documentation/PCI/PCI-DMA-mapping.txt
|
||||
*/
|
||||
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
|
||||
int dir, struct dma_attrs *attrs)
|
||||
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
|
||||
enum dma_data_direction dir, struct dma_attrs *attrs)
|
||||
{
|
||||
struct ioc *ioc;
|
||||
#if DELAYED_RESOURCE_CNT > 0
|
||||
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
|
||||
spin_unlock_irqrestore(&ioc->res_lock, flags);
|
||||
#endif /* DELAYED_RESOURCE_CNT == 0 */
|
||||
}
|
||||
EXPORT_SYMBOL(sba_unmap_single_attrs);
|
||||
|
||||
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
|
||||
enum dma_data_direction dir, struct dma_attrs *attrs)
|
||||
{
|
||||
sba_unmap_page(dev, iova, size, dir, attrs);
|
||||
}
|
||||
|
||||
/**
|
||||
* sba_alloc_coherent - allocate/map shared mem for DMA
|
||||
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs);
|
||||
*
|
||||
* See Documentation/PCI/PCI-DMA-mapping.txt
|
||||
*/
|
||||
void *
|
||||
static void *
|
||||
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
|
||||
{
|
||||
struct ioc *ioc;
|
||||
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
|
||||
*
|
||||
* See Documentation/PCI/PCI-DMA-mapping.txt
|
||||
*/
|
||||
void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
|
||||
static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle)
|
||||
{
|
||||
sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
|
||||
free_pages((unsigned long) vaddr, get_order(size));
|
||||
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
|
||||
*
|
||||
* See Documentation/PCI/PCI-DMA-mapping.txt
|
||||
*/
|
||||
int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
int dir, struct dma_attrs *attrs)
|
||||
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
|
||||
int nents, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct ioc *ioc;
|
||||
int coalesced, filled = 0;
|
||||
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
|
||||
|
||||
return filled;
|
||||
}
|
||||
EXPORT_SYMBOL(sba_map_sg_attrs);
|
||||
|
||||
/**
|
||||
* sba_unmap_sg_attrs - unmap Scatter/Gather list
|
||||
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs);
|
||||
*
|
||||
* See Documentation/PCI/PCI-DMA-mapping.txt
|
||||
*/
|
||||
void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
|
||||
int nents, int dir, struct dma_attrs *attrs)
|
||||
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
|
||||
int nents, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
#ifdef ASSERT_PDIR_SANITY
|
||||
struct ioc *ioc;
|
||||
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
|
||||
#endif
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(sba_unmap_sg_attrs);
|
||||
|
||||
/**************************************************************
|
||||
*
|
||||
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = {
|
||||
},
|
||||
};
|
||||
|
||||
extern struct dma_map_ops swiotlb_dma_ops;
|
||||
|
||||
static int __init
|
||||
sba_init(void)
|
||||
{
|
||||
@@ -2077,6 +2095,7 @@ sba_init(void)
|
||||
* a successful kdump kernel boot is to use the swiotlb.
|
||||
*/
|
||||
if (is_kdump_kernel()) {
|
||||
dma_ops = &swiotlb_dma_ops;
|
||||
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
|
||||
panic("Unable to initialize software I/O TLB:"
|
||||
" Try machvec=dig boot option");
|
||||
@@ -2092,6 +2111,7 @@ sba_init(void)
|
||||
* If we didn't find something sba_iommu can claim, we
|
||||
* need to setup the swiotlb and switch to the dig machvec.
|
||||
*/
|
||||
dma_ops = &swiotlb_dma_ops;
|
||||
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
|
||||
panic("Unable to find SBA IOMMU or initialize "
|
||||
"software I/O TLB: Try machvec=dig boot option");
|
||||
@@ -2138,15 +2158,13 @@ nosbagart(char *str)
|
||||
return 1;
|
||||
}
|
||||
|
||||
int
|
||||
sba_dma_supported (struct device *dev, u64 mask)
|
||||
static int sba_dma_supported (struct device *dev, u64 mask)
|
||||
{
|
||||
/* make sure it's at least 32bit capable */
|
||||
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
|
||||
}
|
||||
|
||||
int
|
||||
sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@@ -2176,7 +2194,22 @@ sba_page_override(char *str)
|
||||
|
||||
__setup("sbapagesize=",sba_page_override);
|
||||
|
||||
EXPORT_SYMBOL(sba_dma_mapping_error);
|
||||
EXPORT_SYMBOL(sba_dma_supported);
|
||||
EXPORT_SYMBOL(sba_alloc_coherent);
|
||||
EXPORT_SYMBOL(sba_free_coherent);
|
||||
struct dma_map_ops sba_dma_ops = {
|
||||
.alloc_coherent = sba_alloc_coherent,
|
||||
.free_coherent = sba_free_coherent,
|
||||
.map_page = sba_map_page,
|
||||
.unmap_page = sba_unmap_page,
|
||||
.map_sg = sba_map_sg_attrs,
|
||||
.unmap_sg = sba_unmap_sg_attrs,
|
||||
.sync_single_for_cpu = machvec_dma_sync_single,
|
||||
.sync_sg_for_cpu = machvec_dma_sync_sg,
|
||||
.sync_single_for_device = machvec_dma_sync_single,
|
||||
.sync_sg_for_device = machvec_dma_sync_sg,
|
||||
.dma_supported = sba_dma_supported,
|
||||
.mapping_error = sba_dma_mapping_error,
|
||||
};
|
||||
|
||||
void sba_dma_init(void)
|
||||
{
|
||||
dma_ops = &sba_dma_ops;
|
||||
}
|
||||
|
||||
@@ -11,99 +11,128 @@
|
||||
|
||||
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
|
||||
|
||||
struct dma_mapping_ops {
|
||||
int (*mapping_error)(struct device *dev,
|
||||
dma_addr_t dma_addr);
|
||||
void* (*alloc_coherent)(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp);
|
||||
void (*free_coherent)(struct device *dev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle);
|
||||
dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr,
|
||||
size_t size, int direction);
|
||||
void (*unmap_single)(struct device *dev, dma_addr_t addr,
|
||||
size_t size, int direction);
|
||||
void (*sync_single_for_cpu)(struct device *hwdev,
|
||||
dma_addr_t dma_handle, size_t size,
|
||||
int direction);
|
||||
void (*sync_single_for_device)(struct device *hwdev,
|
||||
dma_addr_t dma_handle, size_t size,
|
||||
int direction);
|
||||
void (*sync_single_range_for_cpu)(struct device *hwdev,
|
||||
dma_addr_t dma_handle, unsigned long offset,
|
||||
size_t size, int direction);
|
||||
void (*sync_single_range_for_device)(struct device *hwdev,
|
||||
dma_addr_t dma_handle, unsigned long offset,
|
||||
size_t size, int direction);
|
||||
void (*sync_sg_for_cpu)(struct device *hwdev,
|
||||
struct scatterlist *sg, int nelems,
|
||||
int direction);
|
||||
void (*sync_sg_for_device)(struct device *hwdev,
|
||||
struct scatterlist *sg, int nelems,
|
||||
int direction);
|
||||
int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
|
||||
int nents, int direction);
|
||||
void (*unmap_sg)(struct device *hwdev,
|
||||
struct scatterlist *sg, int nents,
|
||||
int direction);
|
||||
int (*dma_supported_op)(struct device *hwdev, u64 mask);
|
||||
int is_phys;
|
||||
};
|
||||
|
||||
extern struct dma_mapping_ops *dma_ops;
|
||||
extern struct dma_map_ops *dma_ops;
|
||||
extern struct ia64_machine_vector ia64_mv;
|
||||
extern void set_iommu_machvec(void);
|
||||
|
||||
#define dma_alloc_coherent(dev, size, handle, gfp) \
|
||||
platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
|
||||
extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
|
||||
enum dma_data_direction);
|
||||
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
|
||||
enum dma_data_direction);
|
||||
|
||||
/* coherent mem. is cheap */
|
||||
static inline void *
|
||||
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
gfp_t flag)
|
||||
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *daddr, gfp_t gfp)
|
||||
{
|
||||
return dma_alloc_coherent(dev, size, dma_handle, flag);
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
return ops->alloc_coherent(dev, size, daddr, gfp);
|
||||
}
|
||||
#define dma_free_coherent platform_dma_free_coherent
|
||||
static inline void
|
||||
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_handle)
|
||||
{
|
||||
dma_free_coherent(dev, size, cpu_addr, dma_handle);
|
||||
}
|
||||
#define dma_map_single_attrs platform_dma_map_single_attrs
|
||||
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
|
||||
size_t size, int dir)
|
||||
{
|
||||
return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
|
||||
}
|
||||
#define dma_map_sg_attrs platform_dma_map_sg_attrs
|
||||
static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nents, int dir)
|
||||
{
|
||||
return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
|
||||
}
|
||||
#define dma_unmap_single_attrs platform_dma_unmap_single_attrs
|
||||
static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
|
||||
size_t size, int dir)
|
||||
{
|
||||
return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
|
||||
}
|
||||
#define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs
|
||||
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nents, int dir)
|
||||
{
|
||||
return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
|
||||
}
|
||||
#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
|
||||
#define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
|
||||
#define dma_sync_single_for_device platform_dma_sync_single_for_device
|
||||
#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
|
||||
#define dma_mapping_error platform_dma_mapping_error
|
||||
|
||||
#define dma_map_page(dev, pg, off, size, dir) \
|
||||
dma_map_single(dev, page_address(pg) + (off), (size), (dir))
|
||||
#define dma_unmap_page(dev, dma_addr, size, dir) \
|
||||
dma_unmap_single(dev, dma_addr, size, dir)
|
||||
static inline void dma_free_coherent(struct device *dev, size_t size,
|
||||
void *caddr, dma_addr_t daddr)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
ops->free_coherent(dev, size, caddr, daddr);
|
||||
}
|
||||
|
||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
||||
|
||||
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
|
||||
void *caddr, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
return ops->map_page(dev, virt_to_page(caddr),
|
||||
(unsigned long)caddr & ~PAGE_MASK, size,
|
||||
dir, attrs);
|
||||
}
|
||||
|
||||
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
|
||||
size_t size,
|
||||
enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
ops->unmap_page(dev, daddr, size, dir, attrs);
|
||||
}
|
||||
|
||||
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
|
||||
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
|
||||
|
||||
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
return ops->map_sg(dev, sgl, nents, dir, attrs);
|
||||
}
|
||||
|
||||
static inline void dma_unmap_sg_attrs(struct device *dev,
|
||||
struct scatterlist *sgl, int nents,
|
||||
enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
ops->unmap_sg(dev, sgl, nents, dir, attrs);
|
||||
}
|
||||
|
||||
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
|
||||
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
|
||||
|
||||
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
|
||||
size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
ops->sync_single_for_cpu(dev, daddr, size, dir);
|
||||
}
|
||||
|
||||
static inline void dma_sync_sg_for_cpu(struct device *dev,
|
||||
struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
ops->sync_sg_for_cpu(dev, sgl, nents, dir);
|
||||
}
|
||||
|
||||
static inline void dma_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t daddr,
|
||||
size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
ops->sync_single_for_device(dev, daddr, size, dir);
|
||||
}
|
||||
|
||||
static inline void dma_sync_sg_for_device(struct device *dev,
|
||||
struct scatterlist *sgl,
|
||||
int nents,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
ops->sync_sg_for_device(dev, sgl, nents, dir);
|
||||
}
|
||||
|
||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
return ops->mapping_error(dev, daddr);
|
||||
}
|
||||
|
||||
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
||||
size_t offset, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
return ops->map_page(dev, page, offset, size, dir, NULL);
|
||||
}
|
||||
|
||||
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
dma_unmap_single(dev, addr, size, dir);
|
||||
}
|
||||
|
||||
/*
|
||||
* Rest of this file is part of the "Advanced DMA API". Use at your own risk.
|
||||
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
|
||||
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
|
||||
dma_sync_single_for_device(dev, dma_handle, size, dir)
|
||||
|
||||
#define dma_supported platform_dma_supported
|
||||
static inline int dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
return ops->dma_supported(dev, mask);
|
||||
}
|
||||
|
||||
static inline int
|
||||
dma_set_mask (struct device *dev, u64 mask)
|
||||
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
|
||||
|
||||
#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
|
||||
|
||||
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
|
||||
{
|
||||
return dma_ops;
|
||||
}
|
||||
|
||||
|
||||
|
||||
#endif /* _ASM_IA64_DMA_MAPPING_H */
|
||||
|
||||
@@ -11,7 +11,6 @@
|
||||
#define _ASM_IA64_MACHVEC_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/swiotlb.h>
|
||||
|
||||
/* forward declarations: */
|
||||
struct device;
|
||||
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
|
||||
|
||||
/* DMA-mapping interface: */
|
||||
typedef void ia64_mv_dma_init (void);
|
||||
typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
|
||||
typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
|
||||
typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
|
||||
typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
|
||||
typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
|
||||
typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
|
||||
typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
|
||||
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
|
||||
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
|
||||
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
|
||||
typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
|
||||
typedef int ia64_mv_dma_supported (struct device *, u64);
|
||||
|
||||
typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
|
||||
typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
|
||||
typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
|
||||
typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
|
||||
typedef u64 ia64_mv_dma_get_required_mask (struct device *);
|
||||
typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
|
||||
|
||||
/*
|
||||
* WARNING: The legacy I/O space is _architected_. Platforms are
|
||||
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
|
||||
|
||||
extern void machvec_setup (char **);
|
||||
extern void machvec_timer_interrupt (int, void *);
|
||||
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
|
||||
extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
|
||||
extern void machvec_tlb_migrate_finish (struct mm_struct *);
|
||||
|
||||
# if defined (CONFIG_IA64_HP_SIM)
|
||||
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
|
||||
# define platform_global_tlb_purge ia64_mv.global_tlb_purge
|
||||
# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
|
||||
# define platform_dma_init ia64_mv.dma_init
|
||||
# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent
|
||||
# define platform_dma_free_coherent ia64_mv.dma_free_coherent
|
||||
# define platform_dma_map_single_attrs ia64_mv.dma_map_single_attrs
|
||||
# define platform_dma_unmap_single_attrs ia64_mv.dma_unmap_single_attrs
|
||||
# define platform_dma_map_sg_attrs ia64_mv.dma_map_sg_attrs
|
||||
# define platform_dma_unmap_sg_attrs ia64_mv.dma_unmap_sg_attrs
|
||||
# define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
|
||||
# define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
|
||||
# define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
|
||||
# define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
|
||||
# define platform_dma_mapping_error ia64_mv.dma_mapping_error
|
||||
# define platform_dma_supported ia64_mv.dma_supported
|
||||
# define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
|
||||
# define platform_dma_get_ops ia64_mv.dma_get_ops
|
||||
# define platform_irq_to_vector ia64_mv.irq_to_vector
|
||||
# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
|
||||
# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
|
||||
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
|
||||
ia64_mv_global_tlb_purge_t *global_tlb_purge;
|
||||
ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
|
||||
ia64_mv_dma_init *dma_init;
|
||||
ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
|
||||
ia64_mv_dma_free_coherent *dma_free_coherent;
|
||||
ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
|
||||
ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
|
||||
ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
|
||||
ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
|
||||
ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
|
||||
ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
|
||||
ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
|
||||
ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
|
||||
ia64_mv_dma_mapping_error *dma_mapping_error;
|
||||
ia64_mv_dma_supported *dma_supported;
|
||||
ia64_mv_dma_get_required_mask *dma_get_required_mask;
|
||||
ia64_mv_dma_get_ops *dma_get_ops;
|
||||
ia64_mv_irq_to_vector *irq_to_vector;
|
||||
ia64_mv_local_vector_to_irq *local_vector_to_irq;
|
||||
ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
|
||||
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
|
||||
platform_global_tlb_purge, \
|
||||
platform_tlb_migrate_finish, \
|
||||
platform_dma_init, \
|
||||
platform_dma_alloc_coherent, \
|
||||
platform_dma_free_coherent, \
|
||||
platform_dma_map_single_attrs, \
|
||||
platform_dma_unmap_single_attrs, \
|
||||
platform_dma_map_sg_attrs, \
|
||||
platform_dma_unmap_sg_attrs, \
|
||||
platform_dma_sync_single_for_cpu, \
|
||||
platform_dma_sync_sg_for_cpu, \
|
||||
platform_dma_sync_single_for_device, \
|
||||
platform_dma_sync_sg_for_device, \
|
||||
platform_dma_mapping_error, \
|
||||
platform_dma_supported, \
|
||||
platform_dma_get_required_mask, \
|
||||
platform_dma_get_ops, \
|
||||
platform_irq_to_vector, \
|
||||
platform_local_vector_to_irq, \
|
||||
platform_pci_get_legacy_mem, \
|
||||
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
|
||||
# error Unknown configuration. Update arch/ia64/include/asm/machvec.h.
|
||||
# endif /* CONFIG_IA64_GENERIC */
|
||||
|
||||
extern void swiotlb_dma_init(void);
|
||||
extern struct dma_map_ops *dma_get_ops(struct device *);
|
||||
|
||||
/*
|
||||
* Define default versions so we can extend machvec for new platforms without having
|
||||
* to update the machvec files for all existing platforms.
|
||||
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
|
||||
# define platform_kernel_launch_event machvec_noop
|
||||
#endif
|
||||
#ifndef platform_dma_init
|
||||
# define platform_dma_init swiotlb_init
|
||||
# define platform_dma_init swiotlb_dma_init
|
||||
#endif
|
||||
#ifndef platform_dma_alloc_coherent
|
||||
# define platform_dma_alloc_coherent swiotlb_alloc_coherent
|
||||
#endif
|
||||
#ifndef platform_dma_free_coherent
|
||||
# define platform_dma_free_coherent swiotlb_free_coherent
|
||||
#endif
|
||||
#ifndef platform_dma_map_single_attrs
|
||||
# define platform_dma_map_single_attrs swiotlb_map_single_attrs
|
||||
#endif
|
||||
#ifndef platform_dma_unmap_single_attrs
|
||||
# define platform_dma_unmap_single_attrs swiotlb_unmap_single_attrs
|
||||
#endif
|
||||
#ifndef platform_dma_map_sg_attrs
|
||||
# define platform_dma_map_sg_attrs swiotlb_map_sg_attrs
|
||||
#endif
|
||||
#ifndef platform_dma_unmap_sg_attrs
|
||||
# define platform_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs
|
||||
#endif
|
||||
#ifndef platform_dma_sync_single_for_cpu
|
||||
# define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu
|
||||
#endif
|
||||
#ifndef platform_dma_sync_sg_for_cpu
|
||||
# define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu
|
||||
#endif
|
||||
#ifndef platform_dma_sync_single_for_device
|
||||
# define platform_dma_sync_single_for_device swiotlb_sync_single_for_device
|
||||
#endif
|
||||
#ifndef platform_dma_sync_sg_for_device
|
||||
# define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device
|
||||
#endif
|
||||
#ifndef platform_dma_mapping_error
|
||||
# define platform_dma_mapping_error swiotlb_dma_mapping_error
|
||||
#endif
|
||||
#ifndef platform_dma_supported
|
||||
# define platform_dma_supported swiotlb_dma_supported
|
||||
#ifndef platform_dma_get_ops
|
||||
# define platform_dma_get_ops dma_get_ops
|
||||
#endif
|
||||
#ifndef platform_dma_get_required_mask
|
||||
# define platform_dma_get_required_mask ia64_dma_get_required_mask
|
||||
|
||||
@@ -2,14 +2,6 @@
|
||||
#define _ASM_IA64_MACHVEC_DIG_VTD_h
|
||||
|
||||
extern ia64_mv_setup_t dig_setup;
|
||||
extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent;
|
||||
extern ia64_mv_dma_free_coherent vtd_free_coherent;
|
||||
extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs;
|
||||
extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs;
|
||||
extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs;
|
||||
extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs;
|
||||
extern ia64_mv_dma_supported iommu_dma_supported;
|
||||
extern ia64_mv_dma_mapping_error vtd_dma_mapping_error;
|
||||
extern ia64_mv_dma_init pci_iommu_alloc;
|
||||
|
||||
/*
|
||||
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init pci_iommu_alloc;
|
||||
#define platform_name "dig_vtd"
|
||||
#define platform_setup dig_setup
|
||||
#define platform_dma_init pci_iommu_alloc
|
||||
#define platform_dma_alloc_coherent vtd_alloc_coherent
|
||||
#define platform_dma_free_coherent vtd_free_coherent
|
||||
#define platform_dma_map_single_attrs vtd_map_single_attrs
|
||||
#define platform_dma_unmap_single_attrs vtd_unmap_single_attrs
|
||||
#define platform_dma_map_sg_attrs vtd_map_sg_attrs
|
||||
#define platform_dma_unmap_sg_attrs vtd_unmap_sg_attrs
|
||||
#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
|
||||
#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
|
||||
#define platform_dma_sync_single_for_device machvec_dma_sync_single
|
||||
#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
|
||||
#define platform_dma_supported iommu_dma_supported
|
||||
#define platform_dma_mapping_error vtd_dma_mapping_error
|
||||
|
||||
#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
|
||||
|
||||
@@ -2,14 +2,7 @@
|
||||
#define _ASM_IA64_MACHVEC_HPZX1_h
|
||||
|
||||
extern ia64_mv_setup_t dig_setup;
|
||||
extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
|
||||
extern ia64_mv_dma_free_coherent sba_free_coherent;
|
||||
extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
|
||||
extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
|
||||
extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
|
||||
extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
|
||||
extern ia64_mv_dma_supported sba_dma_supported;
|
||||
extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
|
||||
extern ia64_mv_dma_init sba_dma_init;
|
||||
|
||||
/*
|
||||
* This stuff has dual use!
|
||||
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
|
||||
*/
|
||||
#define platform_name "hpzx1"
|
||||
#define platform_setup dig_setup
|
||||
#define platform_dma_init machvec_noop
|
||||
#define platform_dma_alloc_coherent sba_alloc_coherent
|
||||
#define platform_dma_free_coherent sba_free_coherent
|
||||
#define platform_dma_map_single_attrs sba_map_single_attrs
|
||||
#define platform_dma_unmap_single_attrs sba_unmap_single_attrs
|
||||
#define platform_dma_map_sg_attrs sba_map_sg_attrs
|
||||
#define platform_dma_unmap_sg_attrs sba_unmap_sg_attrs
|
||||
#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
|
||||
#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
|
||||
#define platform_dma_sync_single_for_device machvec_dma_sync_single
|
||||
#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
|
||||
#define platform_dma_supported sba_dma_supported
|
||||
#define platform_dma_mapping_error sba_dma_mapping_error
|
||||
#define platform_dma_init sba_dma_init
|
||||
|
||||
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
|
||||
|
||||
@@ -2,18 +2,7 @@
|
||||
#define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
|
||||
|
||||
extern ia64_mv_setup_t dig_setup;
|
||||
extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent;
|
||||
extern ia64_mv_dma_free_coherent hwsw_free_coherent;
|
||||
extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs;
|
||||
extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs;
|
||||
extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs;
|
||||
extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs;
|
||||
extern ia64_mv_dma_supported hwsw_dma_supported;
|
||||
extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error;
|
||||
extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu;
|
||||
extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu;
|
||||
extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device;
|
||||
extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
|
||||
extern ia64_mv_dma_get_ops hwsw_dma_get_ops;
|
||||
|
||||
/*
|
||||
* This stuff has dual use!
|
||||
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
|
||||
* the macros are used directly.
|
||||
*/
|
||||
#define platform_name "hpzx1_swiotlb"
|
||||
|
||||
#define platform_setup dig_setup
|
||||
#define platform_dma_init machvec_noop
|
||||
#define platform_dma_alloc_coherent hwsw_alloc_coherent
|
||||
#define platform_dma_free_coherent hwsw_free_coherent
|
||||
#define platform_dma_map_single_attrs hwsw_map_single_attrs
|
||||
#define platform_dma_unmap_single_attrs hwsw_unmap_single_attrs
|
||||
#define platform_dma_map_sg_attrs hwsw_map_sg_attrs
|
||||
#define platform_dma_unmap_sg_attrs hwsw_unmap_sg_attrs
|
||||
#define platform_dma_supported hwsw_dma_supported
|
||||
#define platform_dma_mapping_error hwsw_dma_mapping_error
|
||||
#define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu
|
||||
#define platform_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu
|
||||
#define platform_dma_sync_single_for_device hwsw_sync_single_for_device
|
||||
#define platform_dma_sync_sg_for_device hwsw_sync_sg_for_device
|
||||
#define platform_dma_get_ops hwsw_dma_get_ops
|
||||
|
||||
#endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
|
||||
|
||||
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
|
||||
extern ia64_mv_readw_t __sn_readw_relaxed;
|
||||
extern ia64_mv_readl_t __sn_readl_relaxed;
|
||||
extern ia64_mv_readq_t __sn_readq_relaxed;
|
||||
extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent;
|
||||
extern ia64_mv_dma_free_coherent sn_dma_free_coherent;
|
||||
extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs;
|
||||
extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs;
|
||||
extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs;
|
||||
extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs;
|
||||
extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
|
||||
extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu;
|
||||
extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
|
||||
extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
|
||||
extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
|
||||
extern ia64_mv_dma_supported sn_dma_supported;
|
||||
extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
|
||||
extern ia64_mv_dma_init sn_dma_init;
|
||||
extern ia64_mv_migrate_t sn_migrate;
|
||||
extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
|
||||
extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
|
||||
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
|
||||
#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
|
||||
#define platform_pci_legacy_read sn_pci_legacy_read
|
||||
#define platform_pci_legacy_write sn_pci_legacy_write
|
||||
#define platform_dma_init machvec_noop
|
||||
#define platform_dma_alloc_coherent sn_dma_alloc_coherent
|
||||
#define platform_dma_free_coherent sn_dma_free_coherent
|
||||
#define platform_dma_map_single_attrs sn_dma_map_single_attrs
|
||||
#define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs
|
||||
#define platform_dma_map_sg_attrs sn_dma_map_sg_attrs
|
||||
#define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs
|
||||
#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
|
||||
#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu
|
||||
#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
|
||||
#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
|
||||
#define platform_dma_mapping_error sn_dma_mapping_error
|
||||
#define platform_dma_supported sn_dma_supported
|
||||
#define platform_dma_get_required_mask sn_dma_get_required_mask
|
||||
#define platform_dma_init sn_dma_init
|
||||
#define platform_migrate sn_migrate
|
||||
#define platform_kernel_launch_event sn_kernel_launch_event
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
|
||||
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
|
||||
obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
|
||||
irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
|
||||
salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
|
||||
unwind.o mca.o mca_asm.o topology.o
|
||||
unwind.o mca.o mca_asm.o topology.o dma-mapping.o
|
||||
|
||||
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
|
||||
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
|
||||
@@ -43,9 +43,7 @@ ifneq ($(CONFIG_IA64_ESI),)
|
||||
obj-y += esi_stub.o # must be in kernel proper
|
||||
endif
|
||||
obj-$(CONFIG_DMAR) += pci-dma.o
|
||||
ifeq ($(CONFIG_DMAR), y)
|
||||
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
|
||||
endif
|
||||
|
||||
# The gate DSO image is built using a special linker script.
|
||||
targets += gate.so gate-syms.o
|
||||
|
||||
@@ -0,0 +1,13 @@
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
/* Set this to 1 if there is a HW IOMMU in the system */
|
||||
int iommu_detected __read_mostly;
|
||||
|
||||
struct dma_map_ops *dma_ops;
|
||||
EXPORT_SYMBOL(dma_ops);
|
||||
|
||||
struct dma_map_ops *dma_get_ops(struct device *dev)
|
||||
{
|
||||
return dma_ops;
|
||||
}
|
||||
EXPORT_SYMBOL(dma_get_ops);
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <asm/machvec.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id)
|
||||
EXPORT_SYMBOL(machvec_timer_interrupt);
|
||||
|
||||
void
|
||||
machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
|
||||
machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
mb();
|
||||
}
|
||||
EXPORT_SYMBOL(machvec_dma_sync_single);
|
||||
|
||||
void
|
||||
machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
|
||||
machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
mb();
|
||||
}
|
||||
|
||||
+25
-21
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1;
|
||||
int force_iommu __read_mostly;
|
||||
#endif
|
||||
|
||||
/* Set this to 1 if there is a HW IOMMU in the system */
|
||||
int iommu_detected __read_mostly;
|
||||
|
||||
/* Dummy device used for NULL arguments (normally ISA). Better would
|
||||
be probably a smaller DMA mask, but this is bug-to-bug compatible
|
||||
to i386. */
|
||||
@@ -44,18 +41,7 @@ struct device fallback_dev = {
|
||||
.dma_mask = &fallback_dev.coherent_dma_mask,
|
||||
};
|
||||
|
||||
void __init pci_iommu_alloc(void)
|
||||
{
|
||||
/*
|
||||
* The order of these functions is important for
|
||||
* fall-back/fail-over reasons
|
||||
*/
|
||||
detect_intel_iommu();
|
||||
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
pci_swiotlb_init();
|
||||
#endif
|
||||
}
|
||||
extern struct dma_map_ops intel_dma_ops;
|
||||
|
||||
static int __init pci_iommu_init(void)
|
||||
{
|
||||
@@ -79,15 +65,12 @@ iommu_dma_init(void)
|
||||
return;
|
||||
}
|
||||
|
||||
struct dma_mapping_ops *dma_ops;
|
||||
EXPORT_SYMBOL(dma_ops);
|
||||
|
||||
int iommu_dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
struct dma_mapping_ops *ops = get_dma_ops(dev);
|
||||
struct dma_map_ops *ops = platform_dma_get_ops(dev);
|
||||
|
||||
if (ops->dma_supported_op)
|
||||
return ops->dma_supported_op(dev, mask);
|
||||
if (ops->dma_supported)
|
||||
return ops->dma_supported(dev, mask);
|
||||
|
||||
/* Copied from i386. Doesn't make much sense, because it will
|
||||
only work for pci_alloc_coherent.
|
||||
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask)
|
||||
}
|
||||
EXPORT_SYMBOL(iommu_dma_supported);
|
||||
|
||||
void __init pci_iommu_alloc(void)
|
||||
{
|
||||
dma_ops = &intel_dma_ops;
|
||||
|
||||
dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
|
||||
dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
|
||||
dma_ops->sync_single_for_device = machvec_dma_sync_single;
|
||||
dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
|
||||
dma_ops->dma_supported = iommu_dma_supported;
|
||||
|
||||
/*
|
||||
* The order of these functions is important for
|
||||
* fall-back/fail-over reasons
|
||||
*/
|
||||
detect_intel_iommu();
|
||||
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
pci_swiotlb_init();
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
@@ -13,23 +13,37 @@
|
||||
int swiotlb __read_mostly;
|
||||
EXPORT_SYMBOL(swiotlb);
|
||||
|
||||
struct dma_mapping_ops swiotlb_dma_ops = {
|
||||
.mapping_error = swiotlb_dma_mapping_error,
|
||||
.alloc_coherent = swiotlb_alloc_coherent,
|
||||
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp)
|
||||
{
|
||||
if (dev->coherent_dma_mask != DMA_64BIT_MASK)
|
||||
gfp |= GFP_DMA;
|
||||
return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
|
||||
}
|
||||
|
||||
struct dma_map_ops swiotlb_dma_ops = {
|
||||
.alloc_coherent = ia64_swiotlb_alloc_coherent,
|
||||
.free_coherent = swiotlb_free_coherent,
|
||||
.map_single = swiotlb_map_single,
|
||||
.unmap_single = swiotlb_unmap_single,
|
||||
.map_page = swiotlb_map_page,
|
||||
.unmap_page = swiotlb_unmap_page,
|
||||
.map_sg = swiotlb_map_sg_attrs,
|
||||
.unmap_sg = swiotlb_unmap_sg_attrs,
|
||||
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
|
||||
.sync_single_for_device = swiotlb_sync_single_for_device,
|
||||
.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
|
||||
.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
|
||||
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = swiotlb_sync_sg_for_device,
|
||||
.map_sg = swiotlb_map_sg,
|
||||
.unmap_sg = swiotlb_unmap_sg,
|
||||
.dma_supported_op = swiotlb_dma_supported,
|
||||
.dma_supported = swiotlb_dma_supported,
|
||||
.mapping_error = swiotlb_dma_mapping_error,
|
||||
};
|
||||
|
||||
void __init swiotlb_dma_init(void)
|
||||
{
|
||||
dma_ops = &swiotlb_dma_ops;
|
||||
swiotlb_init();
|
||||
}
|
||||
|
||||
void __init pci_swiotlb_init(void)
|
||||
{
|
||||
if (!iommu_detected) {
|
||||
|
||||
+50
-39
@@ -10,7 +10,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/dma-attrs.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/sn/intr.h>
|
||||
#include <asm/sn/pcibus_provider_defs.h>
|
||||
@@ -31,7 +31,7 @@
|
||||
* this function. Of course, SN only supports devices that have 32 or more
|
||||
* address bits when using the PMU.
|
||||
*/
|
||||
int sn_dma_supported(struct device *dev, u64 mask)
|
||||
static int sn_dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
|
||||
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_supported);
|
||||
|
||||
/**
|
||||
* sn_dma_set_mask - set the DMA mask
|
||||
@@ -75,7 +74,7 @@ EXPORT_SYMBOL(sn_dma_set_mask);
|
||||
* queue for a SCSI controller). See Documentation/DMA-API.txt for
|
||||
* more information.
|
||||
*/
|
||||
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
|
||||
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t * dma_handle, gfp_t flags)
|
||||
{
|
||||
void *cpuaddr;
|
||||
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
|
||||
|
||||
return cpuaddr;
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_alloc_coherent);
|
||||
|
||||
/**
|
||||
* sn_pci_free_coherent - free memory associated with coherent DMAable region
|
||||
@@ -136,7 +134,7 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
|
||||
* Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
|
||||
* any associated IOMMU mappings.
|
||||
*/
|
||||
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
|
||||
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_handle)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
|
||||
provider->dma_unmap(pdev, dma_handle, 0);
|
||||
free_pages((unsigned long)cpu_addr, get_order(size));
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_free_coherent);
|
||||
|
||||
/**
|
||||
* sn_dma_map_single_attrs - map a single page for DMA
|
||||
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
|
||||
* TODO: simplify our interface;
|
||||
* figure out how to save dmamap handle so can use two step.
|
||||
*/
|
||||
dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
|
||||
size_t size, int direction,
|
||||
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
void *cpu_addr = page_address(page) + offset;
|
||||
dma_addr_t dma_addr;
|
||||
unsigned long phys_addr;
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
|
||||
}
|
||||
return dma_addr;
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_map_single_attrs);
|
||||
|
||||
/**
|
||||
* sn_dma_unmap_single_attrs - unamp a DMA mapped page
|
||||
@@ -215,8 +213,8 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs);
|
||||
* by @dma_handle into the coherence domain. On SN, we're always cache
|
||||
* coherent, so we just need to free any ATEs associated with this mapping.
|
||||
*/
|
||||
void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
|
||||
size_t size, int direction,
|
||||
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
@@ -224,12 +222,11 @@ void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
|
||||
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
|
||||
provider->dma_unmap(pdev, dma_addr, direction);
|
||||
provider->dma_unmap(pdev, dma_addr, dir);
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
|
||||
|
||||
/**
|
||||
* sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
|
||||
* sn_dma_unmap_sg - unmap a DMA scatterlist
|
||||
* @dev: device to unmap
|
||||
* @sg: scatterlist to unmap
|
||||
* @nhwentries: number of scatterlist entries
|
||||
@@ -238,8 +235,8 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
|
||||
*
|
||||
* Unmap a set of streaming mode DMA translations.
|
||||
*/
|
||||
void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
|
||||
int nhwentries, int direction,
|
||||
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nhwentries, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
int i;
|
||||
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
|
||||
for_each_sg(sgl, sg, nhwentries, i) {
|
||||
provider->dma_unmap(pdev, sg->dma_address, direction);
|
||||
provider->dma_unmap(pdev, sg->dma_address, dir);
|
||||
sg->dma_address = (dma_addr_t) NULL;
|
||||
sg->dma_length = 0;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
|
||||
|
||||
/**
|
||||
* sn_dma_map_sg_attrs - map a scatterlist for DMA
|
||||
* sn_dma_map_sg - map a scatterlist for DMA
|
||||
* @dev: device to map for
|
||||
* @sg: scatterlist to map
|
||||
* @nhwentries: number of entries
|
||||
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
|
||||
*
|
||||
* Maps each entry of @sg for DMA.
|
||||
*/
|
||||
int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
|
||||
int nhwentries, int direction, struct dma_attrs *attrs)
|
||||
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nhwentries, enum dma_data_direction dir,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
unsigned long phys_addr;
|
||||
struct scatterlist *saved_sg = sgl, *sg;
|
||||
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
|
||||
* Free any successfully allocated entries.
|
||||
*/
|
||||
if (i > 0)
|
||||
sn_dma_unmap_sg_attrs(dev, saved_sg, i,
|
||||
direction, attrs);
|
||||
sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
|
||||
|
||||
return nhwentries;
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_map_sg_attrs);
|
||||
|
||||
void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
|
||||
size_t size, int direction)
|
||||
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
|
||||
|
||||
void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
|
||||
size_t size, int direction)
|
||||
static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
|
||||
size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_sync_single_for_device);
|
||||
|
||||
void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
|
||||
int nelems, int direction)
|
||||
static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
|
||||
int nelems, enum dma_data_direction dir)
|
||||
{
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
|
||||
|
||||
void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||
int nelems, int direction)
|
||||
static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||
int nelems, enum dma_data_direction dir)
|
||||
{
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
|
||||
|
||||
int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(sn_dma_mapping_error);
|
||||
|
||||
u64 sn_dma_get_required_mask(struct device *dev)
|
||||
{
|
||||
@@ -471,3 +462,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct dma_map_ops sn_dma_ops = {
|
||||
.alloc_coherent = sn_dma_alloc_coherent,
|
||||
.free_coherent = sn_dma_free_coherent,
|
||||
.map_page = sn_dma_map_page,
|
||||
.unmap_page = sn_dma_unmap_page,
|
||||
.map_sg = sn_dma_map_sg,
|
||||
.unmap_sg = sn_dma_unmap_sg,
|
||||
.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
|
||||
.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
|
||||
.sync_single_for_device = sn_dma_sync_single_for_device,
|
||||
.sync_sg_for_device = sn_dma_sync_sg_for_device,
|
||||
.mapping_error = sn_dma_mapping_error,
|
||||
.dma_supported = sn_dma_supported,
|
||||
};
|
||||
|
||||
void sn_dma_init(void)
|
||||
{
|
||||
dma_ops = &sn_dma_ops;
|
||||
}
|
||||
|
||||
@@ -40,6 +40,7 @@ config X86
|
||||
select HAVE_GENERIC_DMA_COHERENT if X86_32
|
||||
select HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
select USER_STACKTRACE_SUPPORT
|
||||
select HAVE_DMA_API_DEBUG
|
||||
select HAVE_KERNEL_GZIP
|
||||
select HAVE_KERNEL_BZIP2
|
||||
select HAVE_KERNEL_LZMA
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user