Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (24 commits)
I/OAT: I/OAT version 3.0 support
I/OAT: tcp_dma_copybreak default value dependent on I/OAT version
I/OAT: Add watchdog/reset functionality to ioatdma
iop_adma: cleanup iop_chan_xor_slot_count
iop_adma: document how to calculate the minimum descriptor pool size
iop_adma: directly reclaim descriptors on allocation failure
async_tx: make async_tx_test_ack a boolean routine
async_tx: remove depend_tx from async_tx_sync_epilog
async_tx: export async_tx_quiesce
async_tx: fix handling of the "out of descriptor" condition in async_xor
async_tx: ensure the xor destination buffer remains dma-mapped
async_tx: list_for_each_entry_rcu() cleanup
dmaengine: Driver for the Synopsys DesignWare DMA controller
dmaengine: Add slave DMA interface
dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap
dmaengine: Add dma_client parameter to device_alloc_chan_resources
dmatest: Simple DMA memcpy test client
dmaengine: DMA engine driver for Marvell XOR engine
iop-adma: fix platform driver hotplug/coldplug
dmaengine: track the number of clients using a channel
...
Fixed up conflict in drivers/dca/dca-sysfs.c manually
@@ -7,6 +7,7 @@
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dw_dmac.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -594,6 +595,17 @@ static void __init genclk_init_parent(struct clk *clk)
clk->parent = parent;
}

static struct dw_dma_platform_data dw_dmac0_data = {
.nr_channels = 3,
};

static struct resource dw_dmac0_resource[] = {
PBMEM(0xff200000),
IRQ(2),
};
DEFINE_DEV_DATA(dw_dmac, 0);
DEV_CLK(hclk, dw_dmac0, hsb, 10);

/* --------------------------------------------------------------------
 * System peripherals
 * -------------------------------------------------------------------- */
@@ -708,17 +720,6 @@ static struct clk pico_clk = {
.users = 1,
};

static struct resource dmaca0_resource[] = {
{
.start = 0xff200000,
.end = 0xff20ffff,
.flags = IORESOURCE_MEM,
},
IRQ(2),
};
DEFINE_DEV(dmaca, 0);
DEV_CLK(hclk, dmaca0, hsb, 10);

/* --------------------------------------------------------------------
 * HMATRIX
 * -------------------------------------------------------------------- */
@@ -831,7 +832,7 @@ void __init at32_add_system_devices(void)
platform_device_register(&at32_eic0_device);
platform_device_register(&smc0_device);
platform_device_register(&pdc_device);
platform_device_register(&dmaca0_device);
platform_device_register(&dw_dmac0_device);

platform_device_register(&at32_tcb0_device);
platform_device_register(&at32_tcb1_device);
@@ -2032,7 +2033,7 @@ struct clk *at32_clock_list[] = {
&smc0_mck,
&pdc_hclk,
&pdc_pclk,
&dmaca0_hclk,
&dw_dmac0_hclk,
&pico_clk,
&pio0_mck,
&pio1_mck,
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
pr_debug("%s: (sync) len: %zu\n", __func__, len);

/* wait for any prerequisite operations */
if (depend_tx) {
/* if ack is already set then we cannot be sure
 * we are referring to the correct operation
 */
BUG_ON(async_tx_test_ack(depend_tx));
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
__func__);
}
async_tx_quiesce(&depend_tx);

dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -91,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1);

async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
async_tx_sync_epilog(cb_fn, cb_param);
}

return tx;

@@ -72,19 +72,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
dest_buf = (void *) (((char *) page_address(dest)) + offset);

/* wait for any prerequisite operations */
if (depend_tx) {
/* if ack is already set then we cannot be sure
 * we are referring to the correct operation
 */
BUG_ON(depend_tx->ack);
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
__func__);
}
async_tx_quiesce(&depend_tx);

memset(dest_buf, val, len);

async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
async_tx_sync_epilog(cb_fn, cb_param);
}

return tx;
+22 -11
@@ -295,7 +295,7 @@ dma_channel_add_remove(struct dma_client *client,
case DMA_RESOURCE_REMOVED:
found = 0;
spin_lock_irqsave(&async_tx_lock, flags);
list_for_each_entry_rcu(ref, &async_tx_master_list, node)
list_for_each_entry(ref, &async_tx_master_list, node)
if (ref->chan == chan) {
/* permit backing devices to go away */
dma_chan_put(ref->chan);
@@ -608,23 +608,34 @@ async_trigger_callback(enum async_tx_flags flags,
pr_debug("%s: (sync)\n", __func__);

/* wait for any prerequisite operations */
if (depend_tx) {
/* if ack is already set then we cannot be sure
 * we are referring to the correct operation
 */
BUG_ON(async_tx_test_ack(depend_tx));
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
__func__);
}
async_tx_quiesce(&depend_tx);

async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
async_tx_sync_epilog(cb_fn, cb_param);
}

return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx - transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
if (*tx) {
/* if ack is already set then we cannot be sure
 * we are referring to the correct operation
 */
BUG_ON(async_tx_test_ack(*tx));
if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
panic("DMA_ERROR waiting for transaction\n");
async_tx_ack(*tx);
*tx = NULL;
}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);

module_init(async_tx_init);
module_exit(async_tx_exit);
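The new async_tx_quiesce() helper collapses the open-coded block that each sync path above used to carry (BUG_ON on a stale ack, dma_wait_for_async_tx(), panic on DMA_ERROR) into one call that also acks and clears the dependency. A minimal sketch of the post-merge shape of a synchronous fallback, using the 2.6.27-era names from the hunks above (the function name sync_fallback is illustrative, not from the patch):

/* Sketch only: kernel context (struct dma_async_tx_descriptor,
 * async_tx_quiesce(), async_tx_sync_epilog()) is assumed.
 */
static void sync_fallback(struct dma_async_tx_descriptor *depend_tx,
			  dma_async_tx_callback cb_fn, void *cb_param)
{
	/* wait for, ack and forget any prerequisite operation */
	async_tx_quiesce(&depend_tx);

	/* ... do the work on the CPU ... */

	/* the epilog no longer takes flags/depend_tx after this merge */
	async_tx_sync_epilog(cb_fn, cb_param);
}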
+113 -145
@@ -35,74 +35,121 @@
 * when CONFIG_DMA_ENGINE=n
 */
static __always_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_device *device,
struct dma_chan *chan, struct page *dest, struct page **src_list,
unsigned int offset, unsigned int src_cnt, size_t len,
enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
unsigned int offset, int src_cnt, size_t len,
enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
dma_addr_t dma_dest;
struct dma_device *dma = chan->device;
dma_addr_t *dma_src = (dma_addr_t *) src_list;
struct dma_async_tx_descriptor *tx;
struct dma_async_tx_descriptor *tx = NULL;
int src_off = 0;
int i;
unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

pr_debug("%s: len: %zu\n", __func__, len);

dma_dest = dma_map_page(device->dev, dest, offset, len,
DMA_FROM_DEVICE);
dma_async_tx_callback _cb_fn;
void *_cb_param;
enum async_tx_flags async_flags;
enum dma_ctrl_flags dma_flags;
int xor_src_cnt;
dma_addr_t dma_dest;

dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
for (i = 0; i < src_cnt; i++)
dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
len, DMA_TO_DEVICE);

/* Since we have clobbered the src_list we are committed
 * to doing this asynchronously. Drivers force forward progress
 * in case they can not provide a descriptor
 */
tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
dma_prep_flags);
if (!tx) {
if (depend_tx)
dma_wait_for_async_tx(depend_tx);
while (src_cnt) {
async_flags = flags;
dma_flags = 0;
xor_src_cnt = min(src_cnt, dma->max_xor);
/* if we are submitting additional xors, leave the chain open,
 * clear the callback parameters, and leave the destination
 * buffer mapped
 */
if (src_cnt > xor_src_cnt) {
async_flags &= ~ASYNC_TX_ACK;
dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
_cb_fn = NULL;
_cb_param = NULL;
} else {
_cb_fn = cb_fn;
_cb_param = cb_param;
}
if (_cb_fn)
dma_flags |= DMA_PREP_INTERRUPT;

while (!tx)
tx = device->device_prep_dma_xor(chan, dma_dest,
dma_src, src_cnt, len,
dma_prep_flags);
/* Since we have clobbered the src_list we are committed
 * to doing this asynchronously. Drivers force forward progress
 * in case they can not provide a descriptor
 */
tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
xor_src_cnt, len, dma_flags);

if (unlikely(!tx))
async_tx_quiesce(&depend_tx);

/* spin wait for the preceeding transactions to complete */
while (unlikely(!tx)) {
dma_async_issue_pending(chan);
tx = dma->device_prep_dma_xor(chan, dma_dest,
&dma_src[src_off],
xor_src_cnt, len,
dma_flags);
}

async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
_cb_param);

depend_tx = tx;
flags |= ASYNC_TX_DEP_ACK;

if (src_cnt > xor_src_cnt) {
/* drop completed sources */
src_cnt -= xor_src_cnt;
src_off += xor_src_cnt;

/* use the intermediate result a source */
dma_src[--src_off] = dma_dest;
src_cnt++;
} else
break;
}

async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);

return tx;
}

static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
unsigned int src_cnt, size_t len, enum async_tx_flags flags,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
int src_cnt, size_t len, enum async_tx_flags flags,
dma_async_tx_callback cb_fn, void *cb_param)
{
void *_dest;
int i;

pr_debug("%s: len: %zu\n", __func__, len);
int xor_src_cnt;
int src_off = 0;
void *dest_buf;
void **srcs = (void **) src_list;

/* reuse the 'src_list' array to convert to buffer pointers */
for (i = 0; i < src_cnt; i++)
src_list[i] = (struct page *)
(page_address(src_list[i]) + offset);
srcs[i] = page_address(src_list[i]) + offset;

/* set destination address */
_dest = page_address(dest) + offset;
dest_buf = page_address(dest) + offset;

if (flags & ASYNC_TX_XOR_ZERO_DST)
memset(_dest, 0, len);
memset(dest_buf, 0, len);

xor_blocks(src_cnt, len, _dest,
(void **) src_list);
while (src_cnt > 0) {
/* process up to 'MAX_XOR_BLOCKS' sources */
xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
/* drop completed sources */
src_cnt -= xor_src_cnt;
src_off += xor_src_cnt;
}

async_tx_sync_epilog(cb_fn, cb_param);
}

/**
@@ -132,106 +179,34 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
dma_async_tx_callback _cb_fn;
void *_cb_param;
unsigned long local_flags;
int xor_src_cnt;
int i = 0, src_off = 0;

BUG_ON(src_cnt <= 1);

while (src_cnt) {
local_flags = flags;
if (device) { /* run the xor asynchronously */
xor_src_cnt = min(src_cnt, device->max_xor);
/* if we are submitting additional xors
 * only set the callback on the last transaction
 */
if (src_cnt > xor_src_cnt) {
local_flags &= ~ASYNC_TX_ACK;
_cb_fn = NULL;
_cb_param = NULL;
} else {
_cb_fn = cb_fn;
_cb_param = cb_param;
}
if (chan) {
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);

tx = do_async_xor(device, chan, dest,
&src_list[src_off], offset,
xor_src_cnt, len, local_flags,
depend_tx, _cb_fn, _cb_param);
} else { /* run the xor synchronously */
/* in the sync case the dest is an implied source
 * (assumes the dest is at the src_off index)
 */
if (flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_off++;
}
return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
flags, depend_tx, cb_fn, cb_param);
} else {
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);

/* process up to 'MAX_XOR_BLOCKS' sources */
xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);

/* if we are submitting additional xors
 * only set the callback on the last transaction
 */
if (src_cnt > xor_src_cnt) {
local_flags &= ~ASYNC_TX_ACK;
_cb_fn = NULL;
_cb_param = NULL;
} else {
_cb_fn = cb_fn;
_cb_param = cb_param;
}

/* wait for any prerequisite operations */
if (depend_tx) {
/* if ack is already set then we cannot be sure
 * we are referring to the correct operation
 */
BUG_ON(async_tx_test_ack(depend_tx));
if (dma_wait_for_async_tx(depend_tx) ==
DMA_ERROR)
panic("%s: DMA_ERROR waiting for "
"depend_tx\n",
__func__);
}

do_sync_xor(dest, &src_list[src_off], offset,
xor_src_cnt, len, local_flags, depend_tx,
_cb_fn, _cb_param);
/* in the sync case the dest is an implied source
 * (assumes the dest is the first source)
 */
if (flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_list++;
}

/* the previous tx is hidden from the client,
 * so ack it
 */
if (i && depend_tx)
async_tx_ack(depend_tx);
/* wait for any prerequisite operations */
async_tx_quiesce(&depend_tx);

depend_tx = tx;
do_sync_xor(dest, src_list, offset, src_cnt, len,
flags, cb_fn, cb_param);

if (src_cnt > xor_src_cnt) {
/* drop completed sources */
src_cnt -= xor_src_cnt;
src_off += xor_src_cnt;

/* unconditionally preserve the destination */
flags &= ~ASYNC_TX_XOR_ZERO_DST;

/* use the intermediate result a source, but remember
 * it's dropped, because it's implied, in the sync case
 */
src_list[--src_off] = dest;
src_cnt++;
flags |= ASYNC_TX_XOR_DROP_DST;
} else
src_cnt = 0;
i++;
return NULL;
}

return tx;
}
EXPORT_SYMBOL_GPL(async_xor);

@@ -285,11 +260,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
len, result,
dma_prep_flags);
if (!tx) {
if (depend_tx)
dma_wait_for_async_tx(depend_tx);
if (unlikely(!tx)) {
async_tx_quiesce(&depend_tx);

while (!tx)
dma_async_issue_pending(chan);
tx = device->device_prep_dma_zero_sum(chan,
dma_src, src_cnt, len, result,
dma_prep_flags);
@@ -307,18 +282,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
depend_tx, NULL, NULL);

if (tx) {
if (dma_wait_for_async_tx(tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for tx\n",
__func__);
async_tx_ack(tx);
}
async_tx_quiesce(&tx);

*result = page_is_zero(dest, offset, len) ? 0 : 1;

tx = NULL;

async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
async_tx_sync_epilog(cb_fn, cb_param);
}

return tx;
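The reworked do_async_xor() keeps the whole chunking loop in one place: each descriptor XORs at most dma->max_xor sources, intermediate chains stay open (ASYNC_TX_ACK cleared, destination left mapped via DMA_COMPL_SKIP_DEST_UNMAP), and the destination is fed back as a source for the next chunk. The index arithmetic is easy to misread, so here is a self-contained sketch of just that bookkeeping (max_xor = 4 is a stand-in value; the real limit comes from struct dma_device):

#include <stdio.h>

int main(void)
{
	int src_cnt = 10;	/* total sources to XOR */
	int max_xor = 4;	/* stand-in for dma->max_xor */
	int src_off = 0;

	while (src_cnt) {
		int xor_src_cnt = src_cnt < max_xor ? src_cnt : max_xor;

		printf("xor %d sources at offset %d\n", xor_src_cnt, src_off);

		if (src_cnt <= xor_src_cnt)
			break;
		/* drop completed sources... */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
		/* ...then reuse the destination as one more source,
		 * mirroring dma_src[--src_off] = dma_dest above */
		src_off--;
		src_cnt++;
	}
	return 0;
}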
+104 -29
@@ -28,13 +28,29 @@
#include <linux/device.h>
#include <linux/dca.h>

MODULE_LICENSE("GPL");
#define DCA_VERSION "1.4"

/* For now we're assuming a single, global, DCA provider for the system. */
MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_SPINLOCK(dca_lock);

static struct dca_provider *global_dca = NULL;
static LIST_HEAD(dca_providers);

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
struct dca_provider *dca, *ret = NULL;

list_for_each_entry(dca, &dca_providers, node) {
if ((!dev) || (dca->ops->dev_managed(dca, dev))) {
ret = dca;
break;
}
}

return ret;
}

/**
 * dca_add_requester - add a dca client to the list
@@ -42,25 +58,39 @@ static struct dca_provider *global_dca = NULL;
 */
int dca_add_requester(struct device *dev)
{
int err, slot;
struct dca_provider *dca;
int err, slot = -ENODEV;

if (!global_dca)
return -ENODEV;
if (!dev)
return -EFAULT;

spin_lock(&dca_lock);
slot = global_dca->ops->add_requester(global_dca, dev);
spin_unlock(&dca_lock);
if (slot < 0)
return slot;

err = dca_sysfs_add_req(global_dca, dev, slot);
/* check if the requester has not been added already */
dca = dca_find_provider_by_dev(dev);
if (dca) {
spin_unlock(&dca_lock);
return -EEXIST;
}

list_for_each_entry(dca, &dca_providers, node) {
slot = dca->ops->add_requester(dca, dev);
if (slot >= 0)
break;
}
if (slot < 0) {
spin_unlock(&dca_lock);
return slot;
}

err = dca_sysfs_add_req(dca, dev, slot);
if (err) {
spin_lock(&dca_lock);
global_dca->ops->remove_requester(global_dca, dev);
dca->ops->remove_requester(dca, dev);
spin_unlock(&dca_lock);
return err;
}

spin_unlock(&dca_lock);
return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
@@ -71,30 +101,78 @@ EXPORT_SYMBOL_GPL(dca_add_requester);
 */
int dca_remove_requester(struct device *dev)
{
struct dca_provider *dca;
int slot;
if (!global_dca)
return -ENODEV;

if (!dev)
return -EFAULT;

spin_lock(&dca_lock);
slot = global_dca->ops->remove_requester(global_dca, dev);
spin_unlock(&dca_lock);
if (slot < 0)
dca = dca_find_provider_by_dev(dev);
if (!dca) {
spin_unlock(&dca_lock);
return -ENODEV;
}
slot = dca->ops->remove_requester(dca, dev);
if (slot < 0) {
spin_unlock(&dca_lock);
return slot;
}

dca_sysfs_remove_req(global_dca, slot);
dca_sysfs_remove_req(dca, slot);

spin_unlock(&dca_lock);
return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_get_tag - return the dca tag for the given cpu
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
struct dca_provider *dca;
u8 tag;

spin_lock(&dca_lock);

dca = dca_find_provider_by_dev(dev);
if (!dca) {
spin_unlock(&dca_lock);
return -ENODEV;
}
tag = dca->ops->get_tag(dca, dev, cpu);

spin_unlock(&dca_lock);
return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 * for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
if (!dev)
return -EFAULT;

return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
if (!global_dca)
return -ENODEV;
return global_dca->ops->get_tag(global_dca, cpu);
struct device *dev = NULL;

return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

@@ -140,12 +218,10 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
int err;

if (global_dca)
return -EEXIST;
err = dca_sysfs_add_provider(dca, dev);
if (err)
return err;
global_dca = dca;
list_add(&dca->node, &dca_providers);
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_ADD, NULL);
return 0;
@@ -158,11 +234,9 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
 */
void unregister_dca_provider(struct dca_provider *dca)
{
if (!global_dca)
return;
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_REMOVE, NULL);
global_dca = NULL;
list_del(&dca->node);
dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
@@ -187,6 +261,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);

static int __init dca_init(void)
{
printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION);
return dca_sysfs_init();
}
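With global_dca gone, dca-core resolves the provider per device through the dca_providers list, always under dca_lock. A sketch of the lookup pattern every entry point above now follows (kernel context assumed; dca_do_for_dev is an illustrative name, not a function from the patch):

/* Sketch only: struct dca_provider, dca_lock and
 * dca_find_provider_by_dev() are the kernel symbols shown above.
 */
static int dca_do_for_dev(struct device *dev)
{
	struct dca_provider *dca;
	int ret = -ENODEV;

	spin_lock(&dca_lock);
	dca = dca_find_provider_by_dev(dev);	/* NULL dev matches any */
	if (dca)
		ret = 0;	/* ...call into dca->ops here... */
	spin_unlock(&dca_lock);

	return ret;
}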
@@ -13,10 +13,11 @@ static spinlock_t dca_idr_lock;
int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
{
struct device *cd;
static int req_count;

cd = device_create_drvdata(dca_class, dca->cd,
MKDEV(0, slot + 1), NULL,
"requester%d", slot);
"requester%d", req_count++);
if (IS_ERR(cd))
return PTR_ERR(cd);
return 0;
+32 -5
@@ -4,13 +4,14 @@

menuconfig DMADEVICES
bool "DMA Engine support"
depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC
depends on !HIGHMEM64G
depends on !HIGHMEM64G && HAS_DMA
help
DMA engines can do asynchronous data transfers without
involving the host CPU. Currently, this framework can be
used to offload memory copies in the network stack and
RAID operations in the MD driver.
RAID operations in the MD driver. This menu only presents
DMA Device drivers supported by the configured arch, it may
be empty in some cases.

if DMADEVICES

@@ -37,6 +38,15 @@ config INTEL_IOP_ADMA
help
Enable support for the Intel(R) IOP Series RAID engines.

config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
depends on AVR32
select DMA_ENGINE
default y if CPU_AT32AP7000
help
Support the Synopsys DesignWare AHB DMA controller. This
can be integrated in chips such as the Atmel AT32ap7000.

config FSL_DMA
bool "Freescale MPC85xx/MPC83xx DMA support"
depends on PPC
@@ -46,6 +56,14 @@ config FSL_DMA
MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
The MPC8349, MPC8360 is also supported.

config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
select ASYNC_CORE
select DMA_ENGINE
---help---
Enable support for the Marvell XOR engine.

config DMA_ENGINE
bool

@@ -55,10 +73,19 @@ comment "DMA Clients"
config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
Since this is the main user of the DMA engine, it should be enabled;
say Y here.

Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
say N.

config DMATEST
tristate "DMA Test client"
depends on DMA_ENGINE
help
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.

endif

@@ -1,6 +1,9 @@
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+28 -7
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client)
enum dma_state_client ack;

/* Find a channel */
list_for_each_entry(device, &dma_device_list, global_node)
list_for_each_entry(device, &dma_device_list, global_node) {
/* Does the client require a specific DMA controller? */
if (client->slave && client->slave->dma_dev
&& client->slave->dma_dev != device->dev)
continue;

list_for_each_entry(chan, &device->channels, device_node) {
if (!dma_chan_satisfies_mask(chan, client->cap_mask))
continue;

desc = chan->device->device_alloc_chan_resources(chan);
desc = chan->device->device_alloc_chan_resources(
chan, client);
if (desc >= 0) {
ack = client->event_callback(client,
chan,
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client)
/* we are done once this client rejects
 * an available resource
 */
if (ack == DMA_ACK)
if (ack == DMA_ACK) {
dma_chan_get(chan);
else if (ack == DMA_NAK)
chan->client_count++;
} else if (ack == DMA_NAK)
return;
}
}
}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
/* client was holding resources for this channel so
 * free it
 */
if (ack == DMA_ACK)
if (ack == DMA_ACK) {
dma_chan_put(chan);
chan->client_count--;
}
}

mutex_unlock(&dma_list_mutex);
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
 */
void dma_async_client_register(struct dma_client *client)
{
/* validate client data */
BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
!client->slave);

mutex_lock(&dma_list_mutex);
list_add_tail(&client->global_node, &dma_client_list);
mutex_unlock(&dma_list_mutex);
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client)
ack = client->event_callback(client, chan,
DMA_RESOURCE_REMOVED);

if (ack == DMA_ACK)
if (ack == DMA_ACK) {
dma_chan_put(chan);
chan->client_count--;
}
}

list_del(&client->global_node);
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_memset);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_terminate_all);

BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device)

chan->chan_id = chancnt++;
chan->dev.class = &dma_devclass;
chan->dev.parent = NULL;
chan->dev.parent = device->dev;
snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
device->dev_id, chan->chan_id);

@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device)
kref_get(&device->refcount);
kref_get(&device->refcount);
kref_init(&chan->refcount);
chan->client_count = 0;
chan->slow_ref = 0;
INIT_RCU_HEAD(&chan->rcu);
}
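The dmaengine.c hunks pair every dma_chan_get()/dma_chan_put() with a client_count increment/decrement, so the core can now tell how many clients actually hold a channel. A toy, self-contained illustration of the invariant (toy_chan stands in for struct dma_chan; the real code adjusts a kref alongside the counter):

#include <assert.h>

struct toy_chan {
	int refcount;		/* stands in for the channel kref */
	int client_count;	/* the field this merge starts tracking */
};

static void toy_chan_get(struct toy_chan *c)
{
	c->refcount++;
	c->client_count++;	/* always adjusted together */
}

static void toy_chan_put(struct toy_chan *c)
{
	c->refcount--;
	c->client_count--;
}

int main(void)
{
	struct toy_chan ch = { 0, 0 };

	toy_chan_get(&ch);
	toy_chan_get(&ch);
	toy_chan_put(&ch);
	assert(ch.client_count == 1);	/* one client still holds it */
	return 0;
}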
@@ -0,0 +1,444 @@
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[BUS_ID_SIZE];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[BUS_ID_SIZE];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(nr_channels,
"Maximum number of channels to use (default: all)");

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC 0x80
#define PATTERN_DST 0x00
#define PATTERN_COPY 0x40
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f

struct dmatest_thread {
struct list_head node;
struct task_struct *task;
struct dma_chan *chan;
u8 *srcbuf;
u8 *dstbuf;
};

struct dmatest_chan {
struct list_head node;
struct dma_chan *chan;
struct list_head threads;
};

/*
 * These are protected by dma_list_mutex since they're only used by
 * the DMA client event callback
 */
static LIST_HEAD(dmatest_channels);
static unsigned int nr_channels;

static bool dmatest_match_channel(struct dma_chan *chan)
{
if (test_channel[0] == '\0')
return true;
return strcmp(chan->dev.bus_id, test_channel) == 0;
}

static bool dmatest_match_device(struct dma_device *device)
{
if (test_device[0] == '\0')
return true;
return strcmp(device->dev->bus_id, test_device) == 0;
}

static unsigned long dmatest_random(void)
{
unsigned long buf;

get_random_bytes(&buf, sizeof(buf));
return buf;
}

static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
{
unsigned int i;

for (i = 0; i < start; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
for ( ; i < start + len; i++)
buf[i] = PATTERN_SRC | PATTERN_COPY
| (~i & PATTERN_COUNT_MASK);;
for ( ; i < test_buf_size; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
}

static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
{
unsigned int i;

for (i = 0; i < start; i++)
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
for ( ; i < start + len; i++)
buf[i] = PATTERN_DST | PATTERN_OVERWRITE
| (~i & PATTERN_COUNT_MASK);
for ( ; i < test_buf_size; i++)
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
unsigned int counter, bool is_srcbuf)
{
u8 diff = actual ^ pattern;
u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
const char *thread_name = current->comm;

if (is_srcbuf)
pr_warning("%s: srcbuf[0x%x] overwritten!"
" Expected %02x, got %02x\n",
thread_name, index, expected, actual);
else if ((pattern & PATTERN_COPY)
&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
pr_warning("%s: dstbuf[0x%x] not copied!"
" Expected %02x, got %02x\n",
thread_name, index, expected, actual);
else if (diff & PATTERN_SRC)
pr_warning("%s: dstbuf[0x%x] was copied!"
" Expected %02x, got %02x\n",
thread_name, index, expected, actual);
else
pr_warning("%s: dstbuf[0x%x] mismatch!"
" Expected %02x, got %02x\n",
thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 *buf, unsigned int start,
unsigned int end, unsigned int counter, u8 pattern,
bool is_srcbuf)
{
unsigned int i;
unsigned int error_count = 0;
u8 actual;

for (i = start; i < end; i++) {
actual = buf[i];
if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
if (error_count < 32)
dmatest_mismatch(actual, pattern, i, counter,
is_srcbuf);
error_count++;
}
counter++;
}

if (error_count > 32)
pr_warning("%s: %u errors suppressed\n",
current->comm, error_count - 32);

return error_count;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets until it is told to exit by kthread_stop(). There may be
 * multiple threads running this function in parallel for a single
 * channel, and there may be multiple channels being tested in
 * parallel.
 *
 * Before each test, the source and destination buffer is initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
struct dmatest_thread *thread = data;
struct dma_chan *chan;
const char *thread_name;
unsigned int src_off, dst_off, len;
unsigned int error_count;
unsigned int failed_tests = 0;
unsigned int total_tests = 0;
dma_cookie_t cookie;
enum dma_status status;
int ret;

thread_name = current->comm;

ret = -ENOMEM;
thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
if (!thread->srcbuf)
goto err_srcbuf;
thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
if (!thread->dstbuf)
goto err_dstbuf;

smp_rmb();
chan = thread->chan;
dma_chan_get(chan);

while (!kthread_should_stop()) {
total_tests++;

len = dmatest_random() % test_buf_size + 1;
src_off = dmatest_random() % (test_buf_size - len + 1);
dst_off = dmatest_random() % (test_buf_size - len + 1);

dmatest_init_srcbuf(thread->srcbuf, src_off, len);
dmatest_init_dstbuf(thread->dstbuf, dst_off, len);

cookie = dma_async_memcpy_buf_to_buf(chan,
thread->dstbuf + dst_off,
thread->srcbuf + src_off,
len);
if (dma_submit_error(cookie)) {
pr_warning("%s: #%u: submit error %d with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1, cookie,
src_off, dst_off, len);
msleep(100);
failed_tests++;
continue;
}
dma_async_memcpy_issue_pending(chan);

do {
msleep(1);
status = dma_async_memcpy_complete(
chan, cookie, NULL, NULL);
} while (status == DMA_IN_PROGRESS);

if (status == DMA_ERROR) {
pr_warning("%s: #%u: error during copy\n",
thread_name, total_tests - 1);
failed_tests++;
continue;
}

error_count = 0;

pr_debug("%s: verifying source buffer...\n", thread_name);
error_count += dmatest_verify(thread->srcbuf, 0, src_off,
0, PATTERN_SRC, true);
error_count += dmatest_verify(thread->srcbuf, src_off,
src_off + len, src_off,
PATTERN_SRC | PATTERN_COPY, true);
error_count += dmatest_verify(thread->srcbuf, src_off + len,
test_buf_size, src_off + len,
PATTERN_SRC, true);

pr_debug("%s: verifying dest buffer...\n",
thread->task->comm);
error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
0, PATTERN_DST, false);
error_count += dmatest_verify(thread->dstbuf, dst_off,
dst_off + len, src_off,
PATTERN_SRC | PATTERN_COPY, false);
error_count += dmatest_verify(thread->dstbuf, dst_off + len,
test_buf_size, dst_off + len,
PATTERN_DST, false);

if (error_count) {
pr_warning("%s: #%u: %u errors with "
"src_off=0x%x dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1, error_count,
src_off, dst_off, len);
failed_tests++;
} else {
pr_debug("%s: #%u: No errors with "
"src_off=0x%x dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
src_off, dst_off, len);
}
}

ret = 0;
dma_chan_put(chan);
kfree(thread->dstbuf);
err_dstbuf:
kfree(thread->srcbuf);
err_srcbuf:
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret);
return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
struct dmatest_thread *thread;
struct dmatest_thread *_thread;
int ret;

list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
ret = kthread_stop(thread->task);
pr_debug("dmatest: thread %s exited with status %d\n",
thread->task->comm, ret);
list_del(&thread->node);
kfree(thread);
}
kfree(dtc);
}

static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
{
struct dmatest_chan *dtc;
struct dmatest_thread *thread;
unsigned int i;

dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
if (!dtc) {
pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
return DMA_NAK;
}

dtc->chan = chan;
INIT_LIST_HEAD(&dtc->threads);

for (i = 0; i < threads_per_chan; i++) {
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
pr_warning("dmatest: No memory for %s-test%u\n",
chan->dev.bus_id, i);
break;
}
thread->chan = dtc->chan;
smp_wmb();
thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
chan->dev.bus_id, i);
if (IS_ERR(thread->task)) {
pr_warning("dmatest: Failed to run thread %s-test%u\n",
chan->dev.bus_id, i);
kfree(thread);
break;
}

/* srcbuf and dstbuf are allocated by the thread itself */

list_add_tail(&thread->node, &dtc->threads);
}

pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);

list_add_tail(&dtc->node, &dmatest_channels);
nr_channels++;

return DMA_ACK;
}

static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
{
struct dmatest_chan *dtc, *_dtc;

list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
if (dtc->chan == chan) {
list_del(&dtc->node);
dmatest_cleanup_channel(dtc);
pr_debug("dmatest: lost channel %s\n",
chan->dev.bus_id);
return DMA_ACK;
}
}

return DMA_DUP;
}

/*
 * Start testing threads as new channels are assigned to us, and kill
 * them when the channels go away.
 *
 * When we unregister the client, all channels are removed so this
 * will also take care of cleaning things up when the module is
 * unloaded.
 */
static enum dma_state_client
dmatest_event(struct dma_client *client, struct dma_chan *chan,
enum dma_state state)
{
enum dma_state_client ack = DMA_NAK;

switch (state) {
case DMA_RESOURCE_AVAILABLE:
if (!dmatest_match_channel(chan)
|| !dmatest_match_device(chan->device))
ack = DMA_DUP;
else if (max_channels && nr_channels >= max_channels)
ack = DMA_NAK;
else
ack = dmatest_add_channel(chan);
break;

case DMA_RESOURCE_REMOVED:
ack = dmatest_remove_channel(chan);
break;

default:
pr_info("dmatest: Unhandled event %u (%s)\n",
state, chan->dev.bus_id);
break;
}

return ack;
}

static struct dma_client dmatest_client = {
.event_callback = dmatest_event,
};

static int __init dmatest_init(void)
{
dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
dma_async_client_register(&dmatest_client);
dma_async_client_chan_request(&dmatest_client);

return 0;
}
module_init(dmatest_init);

static void __exit dmatest_exit(void)
{
dma_async_client_unregister(&dmatest_client);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
MODULE_LICENSE("GPL v2");
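dmatest encodes its expectation into every byte: bit 7 says source vs. destination, bit 6 marks bytes the engine must copy, bit 5 marks bytes it will overwrite, and the low five bits carry the inverted byte counter. A self-contained decode of one byte (the defines are copied from the listing above; i = 7 is an arbitrary example address):

#include <stdio.h>

#define PATTERN_SRC		0x80
#define PATTERN_COPY		0x40
#define PATTERN_COUNT_MASK	0x1f

int main(void)
{
	unsigned int i = 7;	/* byte address within the buffer */
	unsigned char b = PATTERN_SRC | PATTERN_COPY
			| (~i & PATTERN_COUNT_MASK);

	printf("byte %u = 0x%02x: %s, %s, counter bits 0x%02x\n", i, b,
	       (b & PATTERN_SRC) ? "srcbuf" : "dstbuf",
	       (b & PATTERN_COPY) ? "to be copied" : "left alone",
	       b & PATTERN_COUNT_MASK);
	return 0;
}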
File diff suppressed because it is too large
@@ -0,0 +1,225 @@
/*
 * Driver for the Synopsys DesignWare AHB DMA Controller
 *
 * Copyright (C) 2005-2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dw_dmac.h>

#define DW_DMA_MAX_NR_CHANNELS 8

/*
 * Redefine this macro to handle differences between 32- and 64-bit
 * addressing, big vs. little endian, etc.
 */
#define DW_REG(name) u32 name; u32 __pad_##name

/* Hardware register definitions. */
struct dw_dma_chan_regs {
DW_REG(SAR); /* Source Address Register */
DW_REG(DAR); /* Destination Address Register */
DW_REG(LLP); /* Linked List Pointer */
u32 CTL_LO; /* Control Register Low */
u32 CTL_HI; /* Control Register High */
DW_REG(SSTAT);
DW_REG(DSTAT);
DW_REG(SSTATAR);
DW_REG(DSTATAR);
u32 CFG_LO; /* Configuration Register Low */
u32 CFG_HI; /* Configuration Register High */
DW_REG(SGR);
DW_REG(DSR);
};

struct dw_dma_irq_regs {
DW_REG(XFER);
DW_REG(BLOCK);
DW_REG(SRC_TRAN);
DW_REG(DST_TRAN);
DW_REG(ERROR);
};

struct dw_dma_regs {
/* per-channel registers */
struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];

/* irq handling */
struct dw_dma_irq_regs RAW; /* r */
struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */

DW_REG(STATUS_INT); /* r */

/* software handshaking */
DW_REG(REQ_SRC);
DW_REG(REQ_DST);
DW_REG(SGL_REQ_SRC);
DW_REG(SGL_REQ_DST);
DW_REG(LAST_SRC);
DW_REG(LAST_DST);

/* miscellaneous */
DW_REG(CFG);
DW_REG(CH_EN);
DW_REG(ID);
DW_REG(TEST);

/* optional encoded params, 0x3c8..0x3 */
};

/* Bitfields in CTL_LO */
#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
#define DWC_CTLL_DST_DEC (1<<7)
#define DWC_CTLL_DST_FIX (2<<7)
#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */
#define DWC_CTLL_SRC_DEC (1<<9)
#define DWC_CTLL_SRC_FIX (2<<9)
#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
/* plus 4 transfer types for peripheral-as-flow-controller */
#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */

/* Bitfields in CTL_HI */
#define DWC_CTLH_DONE 0x00001000
#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff

/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
#define DWC_CFGL_RELOAD_SAR (1 << 30)
#define DWC_CFGL_RELOAD_DAR (1 << 31)

/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
#define DWC_CFGH_DS_UPD_EN (1 << 5)
#define DWC_CFGH_SS_UPD_EN (1 << 6)

/* Bitfields in SGR */
#define DWC_SGR_SGI(x) ((x) << 0)
#define DWC_SGR_SGC(x) ((x) << 20)

/* Bitfields in DSR */
#define DWC_DSR_DSI(x) ((x) << 0)
#define DWC_DSR_DSC(x) ((x) << 20)

/* Bitfields in CFG */
#define DW_CFG_DMA_EN (1 << 0)

#define DW_REGLEN 0x400

struct dw_dma_chan {
struct dma_chan chan;
void __iomem *ch_regs;
u8 mask;

spinlock_t lock;

/* these other elements are all protected by lock */
dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;

struct dw_dma_slave *dws;

unsigned int descs_allocated;
};

static inline struct dw_dma_chan_regs __iomem *
__dwc_regs(struct dw_dma_chan *dwc)
{
return dwc->ch_regs;
}

#define channel_readl(dwc, name) \
__raw_readl(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
__raw_writel((val), &(__dwc_regs(dwc)->name))

static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
{
return container_of(chan, struct dw_dma_chan, chan);
}

struct dw_dma {
struct dma_device dma;
void __iomem *regs;
struct tasklet_struct tasklet;
struct clk *clk;

u8 all_chan_mask;

struct dw_dma_chan chan[0];
};

static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
{
return dw->regs;
}

#define dma_readl(dw, name) \
__raw_readl(&(__dw_regs(dw)->name))
#define dma_writel(dw, name, val) \
__raw_writel((val), &(__dw_regs(dw)->name))

#define channel_set_bit(dw, reg, mask) \
dma_writel(dw, reg, ((mask) << 8) | (mask))
#define channel_clear_bit(dw, reg, mask) \
dma_writel(dw, reg, ((mask) << 8) | 0)

static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
{
return container_of(ddev, struct dw_dma, dma);
}

/* LLI == Linked List Item; a.k.a. DMA block descriptor */
struct dw_lli {
/* values that are not changed by hardware */
dma_addr_t sar;
dma_addr_t dar;
dma_addr_t llp; /* chain to next lli */
u32 ctllo;
/* values that may get written back: */
u32 ctlhi;
/* sstat and dstat can snapshot peripheral register state.
 * silicon config may discard either or both...
 */
u32 sstat;
u32 dstat;
};

struct dw_desc {
/* FIRST values the hardware uses */
struct dw_lli lli;

/* THEN values for driver housekeeping */
struct list_head desc_node;
struct dma_async_tx_descriptor txd;
size_t len;
};

static inline struct dw_desc *
txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
{
return container_of(txd, struct dw_desc, txd);
}
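Each DW_REG entry pads a 32-bit register out to the controller's 64-bit register stride, so the structs above mirror the hardware map directly and channel_readl()/channel_writel() can address a register by field name. A short sketch of what the macro expands to, plus assumed usage (kernel context; u32 and __raw_readl are taken as given):

/* Sketch only: DW_REG(LLP) expands to the pair below. */
struct dw_dma_chan_regs_excerpt {
	u32 LLP;		/* the register itself */
	u32 __pad_LLP;		/* padding up to the 8-byte stride */
	u32 CTL_LO;		/* CTL_LO/CTL_HI are declared bare: */
	u32 CTL_HI;		/* together they already fill 8 bytes */
};

/* assumed usage, given a struct dw_dma_chan *dwc as defined above:
 *	u32 ctl = channel_readl(dwc, CTL_LO);
 *	channel_writel(dwc, CTL_LO, ctl | DWC_CTLL_INT_EN);
 */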
+20
-18
@@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
|
||||
*
|
||||
* Return - The number of descriptors allocated.
|
||||
*/
|
||||
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
|
||||
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
|
||||
struct dma_client *client)
|
||||
{
|
||||
struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
|
||||
LIST_HEAD(tmp_list);
|
||||
@@ -809,8 +810,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
|
||||
if (!src) {
|
||||
dev_err(fsl_chan->dev,
|
||||
"selftest: Cannot alloc memory for test!\n");
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dest = src + test_size;
|
||||
@@ -820,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
	chan = &fsl_chan->common;

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
@@ -842,13 +842,13 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto out;
		goto free_resources;
	}

	/* Test free and re-alloc channel resources */
	fsl_dma_free_chan_resources(chan);

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
@@ -927,8 +927,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
	if (!new_fsl_chan) {
		dev_err(&dev->dev, "No free memory for allocating "
				"dma channels!\n");
		err = -ENOMEM;
		goto err;
		return -ENOMEM;
	}

	/* get dma channel register base */
@@ -936,7 +935,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err;
		goto err_no_reg;
	}

	new_fsl_chan->feature = *(u32 *)match->data;
@@ -958,7 +957,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
		dev_err(&dev->dev, "There is no %d channel!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
@@ -997,23 +996,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
		if (err) {
			dev_err(&dev->dev, "DMA channel %s request_irq error "
				"with return %d\n", dev->node->full_name, err);
			goto err;
			goto err_no_irq;
		}
	}

	err = fsl_dma_self_test(new_fsl_chan);
	if (err)
		goto err;
		goto err_self_test;

	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
				match->compatible, new_fsl_chan->irq);

	return 0;
err:
	dma_halt(new_fsl_chan);
	iounmap(new_fsl_chan->reg_base);

err_self_test:
	free_irq(new_fsl_chan->irq, new_fsl_chan);
err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}
@@ -1054,8 +1056,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "No enough memory for 'priv'\n");
		err = -ENOMEM;
		goto err;
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);
@@ -1065,7 +1066,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err;
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
@@ -1103,6 +1104,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}
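The hunks above replace of_fsl_dma_chan_probe()'s single catch-all err: label, which unwound everything no matter how far the probe had gotten, with one label per acquired resource, so a failure releases only what was actually taken. A minimal stand-alone sketch of the same staged-unwind idiom, with hypothetical resource names (not taken from the driver):

	#include <stdlib.h>

	static int self_test(void) { return 0; }	/* stub for a step that can fail */

	static int probe_sketch(void)
	{
		int err = -1;
		char *reg, *irq;

		reg = malloc(64);		/* step 1: e.g. map registers */
		if (!reg)
			return -1;		/* nothing acquired yet, plain return */

		irq = malloc(64);		/* step 2: e.g. claim the IRQ */
		if (!irq)
			goto err_no_irq;

		if (self_test() != 0)		/* step 3: may fail after 1 and 2 */
			goto err_self_test;

		free(irq);			/* success path (a real probe keeps these) */
		free(reg);
		return 0;

	err_self_test:
		free(irq);			/* undo step 2, then fall through */
	err_no_irq:
		free(reg);			/* undo step 1 */
		return err;
	}

Each label releases exactly one step and falls through to the next, so the unwind order is always the reverse of the acquisition order.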
@@ -47,6 +47,16 @@ static struct pci_device_id ioat_pci_tbl[] = {

	/* I/OAT v2 platforms */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },

	/* I/OAT v3 platforms */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
	{ 0, }
};

@@ -83,6 +93,11 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
		if (device->dma && ioat_dca_enabled)
			device->dca = ioat2_dca_init(pdev, iobase);
		break;
	case IOAT_VER_3_0:
		device->dma = ioat_dma_probe(pdev, iobase);
		if (device->dma && ioat_dca_enabled)
			device->dca = ioat3_dca_init(pdev, iobase);
		break;
	default:
		err = -ENODEV;
		break;
+237 -7
@@ -37,12 +37,18 @@
#include "ioatdma_registers.h"

/*
 * Bit 16 of a tag map entry is the "valid" bit, if it is set then bits 0:15
 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag. If the valid
 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF

/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device. Software needs direct support for their tag mappings.
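The rewritten comment and the DCA3_* masks above define a per-byte encoding: bit 7 (BIT_TO_INV) selects an APIC ID bit inverted, bit 6 (BIT_TO_SEL) selects it as-is, and when neither is set bit 0 is a literal tag-bit value. A stand-alone sketch of decoding one entry, mirroring the per-bit logic that ioat3_dca_get_tag() applies further down:

	/* Return the single tag bit (0 or 1) that one DCA3 tag-map
	 * entry contributes, given the target CPU's APIC ID. */
	static int dca3_decode_entry(unsigned char entry, unsigned int apic_id)
	{
		int bit;

		if (entry & 0x40) {			/* DCA3_TAG_MAP_BIT_TO_SEL */
			bit = entry & ~(0x40 | 0x80);
			return (apic_id >> bit) & 1;	/* APIC ID bit, as-is */
		}
		if (entry & 0x80) {			/* DCA3_TAG_MAP_BIT_TO_INV */
			bit = entry & ~0x80;
			return !((apic_id >> bit) & 1);	/* APIC ID bit, inverted */
		}
		return entry & 0x1;			/* DCA3_TAG_MAP_LITERAL_VAL */
	}

For example, entry 0x43 (SEL set, bit number 3) contributes APIC ID bit 3, so for apic_id 0x08 the tag bit is 1; entry 0x01 contributes a literal 1 regardless of CPU.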
@@ -95,6 +101,7 @@ struct ioat_dca_slot {
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

struct ioat_dca_priv {
	void __iomem *iobase;
@@ -171,7 +178,9 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
	return -ENODEV;
}

static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
static u8 ioat_dca_get_tag(struct dca_provider *dca,
			   struct device *dev,
			   int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
@@ -193,10 +202,26 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
	return tag;
}

static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev)
			return 1;
	}
	return 0;
}

static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};


@@ -207,6 +232,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;
@@ -237,15 +264,20 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
	if (tag_map == NULL)
		return NULL;

	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	dca = alloc_dca_provider(&ioat_dca_ops,
			sizeof(*ioatdca) +
			(sizeof(struct ioat_dca_slot) * IOAT_DCA_MAX_REQ));
			(sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = IOAT_DCA_MAX_REQ;

	ioatdca->max_requesters = max_requesters;
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
@@ -323,11 +355,13 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
	return -ENODEV;
}

static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu)
static u8 ioat2_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	tag = ioat_dca_get_tag(dca, cpu);
	tag = ioat_dca_get_tag(dca, dev, cpu);
	tag = (~tag) & 0x1F;
	return tag;
}
@@ -336,6 +370,7 @@ static struct dca_ops ioat2_dca_ops = {
	.add_requester		= ioat2_dca_add_requester,
	.remove_requester	= ioat2_dca_remove_requester,
	.get_tag		= ioat2_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
@@ -425,3 +460,198 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)

	return dca;
}

static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}

static int ioat3_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			      readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

static u8 ioat3_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}

static struct dca_ops ioat3_dca_ops = {
	.add_requester		= ioat3_dca_add_requester,
	.remove_requester	= ioat3_dca_remove_requester,
	.get_tag		= ioat3_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat3_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}


	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
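ioat3_dca_init() above pulls the 64-bit APIC-ID-to-tag map out of two 32-bit MMIO reads and then slices it one byte per tag-map entry through an anonymous-struct union. A user-space sketch of that slicing with invented register values (relies on C11 anonymous structs, as the kernel code does):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		union {
			uint64_t full;
			struct { uint32_t low; uint32_t high; };
		} tag_map;

		tag_map.low  = 0x03020100;	/* stand-ins for the two readl()s */
		tag_map.high = 0x07060504;

		for (int i = 0; i < 8; i++)	/* one map entry per byte, masked as above */
			printf("tag_map[%d] = 0x%02x\n", i,
			       (unsigned)((tag_map.full >> (8 * i)) & 0xDF));
		return 0;
	}

Note the layout assumes the low word occupies the least-significant half of full, which holds on the little-endian x86 platforms where I/OAT lives.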
+368 -36	File diff suppressed because it is too large
+27 -1
@@ -27,8 +27,9 @@
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <net/tcp.h>

#define IOAT_DMA_VERSION "2.04"
#define IOAT_DMA_VERSION "3.30"

enum ioat_interrupt {
	none = 0,
@@ -40,6 +41,7 @@ enum ioat_interrupt {

#define IOAT_LOW_COMPLETION_MASK	0xffffffc0
#define IOAT_DMA_DCA_ANY_CPU		~0
#define IOAT_WATCHDOG_PERIOD		(2 * HZ)


/**
@@ -62,6 +64,7 @@ struct ioatdma_device {
	struct dma_device common;
	u8 version;
	enum ioat_interrupt irq_mode;
	struct delayed_work work;
	struct msix_entry msix_entries[4];
	struct ioat_dma_chan *idx[4];
};
@@ -75,6 +78,7 @@ struct ioat_dma_chan {

	dma_cookie_t completed_cookie;
	unsigned long last_completion;
	unsigned long last_completion_time;

	size_t xfercap;	/* XFERCAP register value expanded out */

@@ -82,6 +86,10 @@ struct ioat_dma_chan {
	spinlock_t desc_lock;
	struct list_head free_desc;
	struct list_head used_desc;
	unsigned long watchdog_completion;
	int watchdog_tcp_cookie;
	u32 watchdog_last_tcp_cookie;
	struct delayed_work work;

	int pending;
	int dmacount;
@@ -98,6 +106,7 @@ struct ioat_dma_chan {
			u32 high;
		};
	} *completion_virt;
	unsigned long last_compl_desc_addr_hw;
	struct tasklet_struct cleanup_task;
};

@@ -121,17 +130,34 @@ struct ioat_desc_sw {
	struct dma_async_tx_descriptor async_tx;
};

static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
{
#ifdef CONFIG_NET_DMA
	switch (dev->version) {
	case IOAT_VER_1_2:
	case IOAT_VER_3_0:
		sysctl_tcp_dma_copybreak = 4096;
		break;
	case IOAT_VER_2_0:
		sysctl_tcp_dma_copybreak = 2048;
		break;
	}
#endif
}

#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase);
void ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
#else
#define ioat_dma_probe(pdev, iobase)	NULL
#define ioat_dma_remove(device)		do { } while (0)
#define ioat_dca_init(pdev, iobase)	NULL
#define ioat2_dca_init(pdev, iobase)	NULL
#define ioat3_dca_init(pdev, iobase)	NULL
#endif

#endif /* IOATDMA_H */
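The new ioat_set_tcp_copy_break() helper above sizes the NET_DMA copybreak to the engine generation: transfers below the threshold are cheaper to memcpy inline than to offload, and v2.0 hardware pays off earlier (2048 bytes) than v1.2/v3.0 (4096). A hypothetical call site, consistent with how this diff's DCA code reads the version register (not quoted from the commit):

	/* probe path, once the hardware version is known */
	device->version = readb(iobase + IOAT_VER_OFFSET);
	...
	ioat_set_tcp_copy_break(device);	/* 4096 for v1.2/v3.0, 2048 for v2.0 */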
@@ -35,6 +35,7 @@
#define IOAT_PCI_SID            0x8086
#define IOAT_VER_1_2            0x12    /* Version 1.2 */
#define IOAT_VER_2_0            0x20    /* Version 2.0 */
#define IOAT_VER_3_0            0x30    /* Version 3.0 */

struct ioat_dma_descriptor {
	uint32_t	size;
@@ -25,6 +25,10 @@
#define IOAT_PCI_DMACTRL_DMA_EN			0x00000001
#define IOAT_PCI_DMACTRL_MSI_EN			0x00000002

#define IOAT_PCI_DEVICE_ID_OFFSET		0x02
#define IOAT_PCI_DMAUNCERRSTS_OFFSET		0x148
#define IOAT_PCI_CHANERRMASK_INT_OFFSET		0x184

/* MMIO Device Registers */
#define IOAT_CHANCNT_OFFSET			0x00	/*  8-bit */

@@ -149,7 +153,23 @@
#define IOAT_DCA_GREQID_VALID		0x20000000
#define IOAT_DCA_GREQID_LASTID		0x80000000

#define IOAT3_CSI_CAPABILITY_OFFSET	0x08
#define IOAT3_CSI_CAPABILITY_PREFETCH	0x1

#define IOAT3_PCI_CAPABILITY_OFFSET	0x0A
#define IOAT3_PCI_CAPABILITY_MEMWR	0x1

#define IOAT3_CSI_CONTROL_OFFSET	0x0C
#define IOAT3_CSI_CONTROL_PREFETCH	0x1

#define IOAT3_PCI_CONTROL_OFFSET	0x0E
#define IOAT3_PCI_CONTROL_MEMWR	0x1

#define IOAT3_APICID_TAG_MAP_OFFSET	0x10
#define IOAT3_APICID_TAG_MAP_OFFSET_LOW	0x10
#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH	0x14

#define IOAT3_DCA_GREQID_OFFSET	0x02

#define IOAT1_CHAINADDR_OFFSET		0x0C	/* 64-bit Descriptor Chain Address Register */
#define IOAT2_CHAINADDR_OFFSET		0x10	/* 64-bit Descriptor Chain Address Register */
Some files were not shown because too many files have changed in this diff.