treewide: Move dma_ops from struct dev_archdata into struct device

Some but not all architectures provide set_dma_ops(). Move dma_ops
from struct dev_archdata into struct device such that it becomes
possible on all architectures to configure dma_ops per device.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: Russell King <linux@armlinux.org.uk>
Cc: x86@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
Authored by Bart Van Assche on 2017-01-20 13:04:02 -08:00
Committed by Doug Ledford
parent 5299709d0a
commit 5657933dbb
36 changed files with 48 additions and 70 deletions
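
For orientation, the sketch below is a minimal user-space model of the pattern the diff converges on; it is not code from this commit. struct device carries dma_ops directly, set_dma_ops() assigns it, and get_dma_ops() prefers the per-device ops over an architecture default. The struct definitions and the default_dma_ops/iommu_dma_ops tables are illustrative stand-ins for the real kernel types and ops tables.

/*
 * Minimal user-space model (illustrative only, not kernel code) of the
 * pattern this commit enables: dma_ops lives directly in struct device,
 * so a generic set_dma_ops() works the same way on every architecture,
 * and per-device ops take precedence over the architecture default.
 */
#include <stdio.h>

struct dma_map_ops {
        const char *name;                       /* stand-in for the real callback table */
};

struct device {
        const struct dma_map_ops *dma_ops;      /* was dev->archdata.dma_ops */
};

/* Hypothetical ops tables, standing in for e.g. swiotlb or IOMMU ops. */
static const struct dma_map_ops default_dma_ops = { .name = "arch default" };
static const struct dma_map_ops iommu_dma_ops   = { .name = "iommu" };

static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
        dev->dma_ops = ops;
}

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        /* Per-device ops win; otherwise fall back to the architecture default. */
        return (dev && dev->dma_ops) ? dev->dma_ops : &default_dma_ops;
}

int main(void)
{
        struct device dev = { 0 };

        printf("before set_dma_ops(): %s\n", get_dma_ops(&dev)->name);
        set_dma_ops(&dev, &iommu_dma_ops);
        printf("after set_dma_ops():  %s\n", get_dma_ops(&dev)->name);
        return 0;
}

As the hunks below show, the per-architecture fallback stays inside each architecture's get_dma_ops() (arm_dma_ops, mips_dma_map_ops, dma_noop_ops, ...); only the storage for the per-device override moves into struct device.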

View File

@@ -7,7 +7,6 @@
 #define ASMARM_DEVICE_H
 struct dev_archdata {
-        const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMABOUNCE
         struct dmabounce_device_info *dmabounce;
 #endif

View File

@@ -18,8 +18,8 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
+        if (dev && dev->dma_ops)
+                return dev->dma_ops;
         return &arm_dma_ops;
 }
@@ -34,7 +34,7 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
 {
         BUG_ON(!dev);
-        dev->archdata.dma_ops = ops;
+        dev->dma_ops = ops;
 }
 #define HAVE_ARCH_DMA_SUPPORTED 1

View File

@@ -17,7 +17,6 @@
 #define __ASM_DEVICE_H
 struct dev_archdata {
-        const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_IOMMU_API
         void *iommu;                    /* private IOMMU data */
 #endif

View File

@@ -29,8 +29,8 @@ extern const struct dma_map_ops dummy_dma_ops;
 static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
+        if (dev && dev->dma_ops)
+                return dev->dma_ops;
         /*
          * We expect no ISA devices, and all other DMA masters are expected to

View File

@@ -837,7 +837,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                 return false;
         }
-        dev->archdata.dma_ops = &iommu_dma_ops;
+        dev->dma_ops = &iommu_dma_ops;
         return true;
 }
@@ -941,7 +941,7 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_teardown_dma_ops(struct device *dev)
 {
-        dev->archdata.dma_ops = NULL;
+        dev->dma_ops = NULL;
 }
 #else
@@ -955,8 +955,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                         const struct iommu_ops *iommu, bool coherent)
 {
-        if (!dev->archdata.dma_ops)
-                dev->archdata.dma_ops = &swiotlb_dma_ops;
+        if (!dev->dma_ops)
+                dev->dma_ops = &swiotlb_dma_ops;
         dev->archdata.dma_coherent = coherent;
         __iommu_setup_dma_ops(dev, dma_base, size, iommu);

View File

@@ -4,7 +4,6 @@
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-        const struct dma_map_ops *dma_ops;
 };
 struct pdev_archdata {

View File

@@ -12,8 +12,8 @@
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
+        if (dev && dev->dma_ops)
+                return dev->dma_ops;
         return &dma_noop_ops;
 }

View File

@@ -6,12 +6,7 @@
 #ifndef _ASM_MIPS_DEVICE_H
 #define _ASM_MIPS_DEVICE_H
-struct dma_map_ops;
 struct dev_archdata {
-        /* DMA operations on that device */
-        const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMA_PERDEV_COHERENT
         /* Non-zero if DMA is coherent with CPU caches */
         bool dma_coherent;

View File

@@ -13,8 +13,8 @@ extern const struct dma_map_ops *mips_dma_map_ops;
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
+        if (dev && dev->dma_ops)
+                return dev->dma_ops;
         else
                 return mips_dma_map_ops;
 }

View File

@@ -167,7 +167,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
                 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
         }
-        dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
+        dev->dev.dma_ops = octeon_pci_dma_map_ops;
         return 0;
 }

View File

@@ -6,7 +6,6 @@
 #ifndef _ASM_POWERPC_DEVICE_H
 #define _ASM_POWERPC_DEVICE_H
-struct dma_map_ops;
 struct device_node;
 #ifdef CONFIG_PPC64
 struct pci_dn;
@@ -20,9 +19,6 @@ struct iommu_table;
  * drivers/macintosh/macio_asic.c
  */
 struct dev_archdata {
-        /* DMA operations on that device */
-        const struct dma_map_ops *dma_ops;
         /*
          * These two used to be a union. However, with the hybrid ops we need
          * both so here we store both a DMA offset for direct mappings and

View File

@@ -88,12 +88,12 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
         if (unlikely(dev == NULL))
                 return NULL;
-        return dev->archdata.dma_ops;
+        return dev->dma_ops;
 }
 static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
 {
-        dev->archdata.dma_ops = ops;
+        dev->dma_ops = ops;
 }
 /*

View File

@@ -33,7 +33,7 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
         struct dev_archdata __maybe_unused *sd = &dev->archdata;
 #ifdef CONFIG_SWIOTLB
-        if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
+        if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
                 pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
 #endif

View File

@@ -692,7 +692,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
                 return 0;
         /* We use the PCI DMA ops */
-        dev->archdata.dma_ops = get_pci_dma_ops();
+        dev->dma_ops = get_pci_dma_ops();
         cell_dma_dev_setup(dev);

View File

@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
          */
         if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
             !firmware_has_feature(FW_FEATURE_LPAR)) {
-                dev->dev.archdata.dma_ops = &dma_direct_ops;
+                dev->dev.dma_ops = &dma_direct_ops;
                 /*
                  * Set the coherent DMA mask to prevent the iommu
                  * being used unnecessarily

View File

@@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
                 return 0;
         /* We use the direct ops for localbus */
-        dev->archdata.dma_ops = &dma_direct_ops;
+        dev->dma_ops = &dma_direct_ops;
         return 0;
 }

View File

@@ -756,11 +756,11 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
         switch (dev->dev_type) {
         case PS3_DEVICE_TYPE_IOC0:
-                dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
+                dev->core.dma_ops = &ps3_ioc0_dma_ops;
                 dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
                 break;
         case PS3_DEVICE_TYPE_SB:
-                dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
+                dev->core.dma_ops = &ps3_sb_dma_ops;
                 dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
                 break;

View File

@@ -169,7 +169,7 @@ static int ibmebus_create_device(struct device_node *dn)
                 return -ENOMEM;
         dev->dev.bus = &ibmebus_bus_type;
-        dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
+        dev->dev.dma_ops = &ibmebus_dma_ops;
         ret = of_device_add(dev);
         if (ret)

View File

@@ -4,7 +4,6 @@
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-        const struct dma_map_ops *dma_ops;
 };
 struct pdev_archdata {

View File

@@ -14,8 +14,8 @@ extern const struct dma_map_ops s390_pci_dma_ops;
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
+        if (dev && dev->dma_ops)
+                return dev->dma_ops;
         return &dma_noop_ops;
 }

Some files were not shown because too many files have changed in this diff.