Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (63 commits)
  dmaengine: mid_dma: mask_peripheral_interrupt only when dmac is idle
  dmaengine/ep93xx_dma: add module.h include
  pch_dma: Reduce wasting memory
  pch_dma: Fix suspend issue
  dma/timberdale: free_irq() on an error path
  dma: shdma: transfer based runtime PM
  dmaengine: shdma: protect against the IRQ handler
  dmaengine i.MX DMA/SDMA: add missing include of linux/module.h
  dmaengine: delete redundant chan_id and chancnt initialization in dma drivers
  dmaengine/amba-pl08x: Check txd->llis_va before freeing dma_pool
  dmaengine/amba-pl08x: Add support for sg len greater than one for slave transfers
  serial: sh-sci: don't filter on DMA device, use only channel ID
  ARM: SAMSUNG: Remove Samsung specific enum type for dma direction
  ASoC: Samsung: Update DMA interface
  spi/s3c64xx: Merge dma control code
  spi/s3c64xx: Add support DMA engine API
  ARM: SAMSUNG: Remove S3C-PL330-DMA driver
  ARM: S5P64X0: Use generic DMA PL330 driver
  ARM: S5PC100: Use generic DMA PL330 driver
  ARM: S5PV210: Use generic DMA PL330 driver
  ...

Fix up fairly trivial conflicts in
 - arch/arm/mach-exynos4/{Kconfig,clock.c}
 - arch/arm/mach-s5p64x0/dma.c
Author: Linus Torvalds
Date:   2011-11-04 18:02:25 -07:00

60 changed files with 2323 additions and 2466 deletions
drivers/dma/Kconfig +2 -1
@@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 config PL330_DMA
 	tristate "DMA API Driver for PL330"
 	select DMA_ENGINE
-	depends on PL330
+	depends on ARM_AMBA
+	select PL330
 	help
 	  Select if your platform has one or more PL330 DMACs.
 	  You need to provide platform specific settings via
+334 -342
(file diff suppressed because it is too large)
drivers/dma/at_hdmac.c +127 -37
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 {
 	struct at_desc *desc, *_desc;
 	struct at_desc *ret = NULL;
+	unsigned long flags;
 	unsigned int i = 0;
 	LIST_HEAD(tmp_list);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 			dev_dbg(chan2dev(&atchan->chan_common),
 					"desc %p not ACKed\n", desc);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"scanned %u descriptors on freelist\n", i);
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
 	if (!ret) {
 		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
 		if (ret) {
-			spin_lock_bh(&atchan->lock);
+			spin_lock_irqsave(&atchan->lock, flags);
 			atchan->descs_allocated++;
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, flags);
 		} else {
 			dev_err(chan2dev(&atchan->chan_common),
 					"not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	if (desc) {
 		struct at_desc *child;
+		unsigned long flags;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&atchan->chan_common),
 					"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 		dev_vdbg(chan2dev(&atchan->chan_common),
 			 "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &atchan->free_list);
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	}
 }
@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
-	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+	if (!atc_chan_is_cyclic(atchan)) {
 		dma_async_tx_callback	callback = txd->callback;
 		void			*param = txd->callback_param;
@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+	unsigned long flags;
 
-	spin_lock(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 		atc_handle_error(atchan);
-	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	else if (atc_chan_is_cyclic(atchan))
 		atc_handle_cyclic(atchan);
 	else
 		atc_advance_work(atchan);
 
-	spin_unlock(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct at_desc		*desc = txd_to_at_desc(tx);
 	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = atc_assign_cookie(atchan, desc);
 
 	if (list_empty(&atchan->active_list)) {
@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&desc->desc_node, &atchan->queue);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return cookie;
 }
@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	int			chan_id = atchan->chan_common.chan_id;
+	unsigned long		flags;
 	LIST_HEAD(list);
 
 	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
 
 	if (cmd == DMA_PAUSE) {
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
 		set_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_RESUME) {
-		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+		if (!atc_chan_is_paused(atchan))
 			return 0;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
 		clear_bit(ATC_IS_PAUSED, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else if (cmd == DMA_TERMINATE_ALL) {
 		struct at_desc	*desc, *_desc;
 		/*
@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		 * channel. We still have to poll the channel enable bit due
 		 * to AHB/HSB limitations.
 		 */
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, flags);
 
 		/* disabling channel: must also remove suspend state */
 		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		/* if channel dedicated to cyclic operations, free it */
 		clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 	} else {
 		return -ENXIO;
 	}
@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	dma_cookie_t		last_used;
 	dma_cookie_t		last_complete;
+	unsigned long		flags;
 	enum dma_status		ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	last_complete = atchan->completed_cookie;
 	last_used = chan->cookie;
@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	if (ret != DMA_SUCCESS)
 		dma_set_tx_state(txstate, last_complete, last_used,
@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-	if (test_bit(ATC_IS_PAUSED, &atchan->status))
+	if (atc_chan_is_paused(atchan))
 		ret = DMA_PAUSED;
 
 	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	unsigned long		flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
 	/* Not needed for cyclic transfers */
-	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+	if (atc_chan_is_cyclic(atchan))
 		return;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!atc_chan_is_enabled(atchan)) {
 		atc_advance_work(atchan);
 	}
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	struct at_desc		*desc;
 	struct at_dma_slave	*atslave;
+	unsigned long		flags;
 	int			i;
 	u32			cfg;
 	LIST_HEAD(tmp_list);
@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 		list_add_tail(&desc->desc_node, &tmp_list);
 	}
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
 	list_splice(&tmp_list, &atchan->free_list);
 	atchan->completed_cookie = chan->cookie = 1;
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* channel parameters */
 	channel_writel(atchan, CFG, cfg);
@@ -1260,12 +1268,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	/* initialize channels related values */
 	INIT_LIST_HEAD(&atdma->dma_common.channels);
-	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
+	for (i = 0; i < pdata->nr_channels; i++) {
 		struct at_dma_chan	*atchan = &atdma->chan[i];
 
 		atchan->chan_common.device = &atdma->dma_common;
 		atchan->chan_common.cookie = atchan->completed_cookie = 1;
-		atchan->chan_common.chan_id = i;
 		list_add_tail(&atchan->chan_common.device_node,
 				&atdma->dma_common.channels);
@@ -1293,22 +1300,20 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+		/* controller can do slave DMA: can trigger cyclic transfers */
+		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
-	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_control = atc_control;
+	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
-	  atdma->dma_common.chancnt);
+	  pdata->nr_channels);
 
 	dma_async_device_register(&atdma->dma_common);
@@ -1377,27 +1382,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
 	clk_disable(atdma->clk);
 }
 
+static int at_dma_prepare(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
+
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+		/* wait for transaction completion (except in cyclic case) */
+		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
+			return -EAGAIN;
+	}
+	return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+	struct dma_chan	*chan = &atchan->chan_common;
+
+	/* Channel should be paused by user
+	 * do it anyway even if it is not done already */
+	if (!atc_chan_is_paused(atchan)) {
+		dev_warn(chan2dev(chan),
+		"cyclic channel not paused, should be done by channel user\n");
+		atc_control(chan, DMA_PAUSE, 0);
+	}
+
+	/* now preserve additional data for cyclic operations */
+	/* next descriptor address in the cyclic list */
+	atchan->save_dscr = channel_readl(atchan, DSCR);
+
+	vdbg_dump_regs(atchan);
+}
+
 static int at_dma_suspend_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
 
-	at_dma_off(platform_get_drvdata(pdev));
+	/* preserve data */
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+		if (atc_chan_is_cyclic(atchan))
+			atc_suspend_cyclic(atchan);
+		atchan->save_cfg = channel_readl(atchan, CFG);
+	}
+	atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+	/* disable DMA controller */
+	at_dma_off(atdma);
 	clk_disable(atdma->clk);
 	return 0;
 }
 
+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
+
+	/* restore channel status for cyclic descriptors list:
+	 * next descriptor in the cyclic list at the time of suspend */
+	channel_writel(atchan, SADDR, 0);
+	channel_writel(atchan, DADDR, 0);
+	channel_writel(atchan, CTRLA, 0);
+	channel_writel(atchan, CTRLB, 0);
+	channel_writel(atchan, DSCR, atchan->save_dscr);
+	dma_writel(atdma, CHER, atchan->mask);
+
+	/* channel pause status should be removed by channel user
+	 * We cannot take the initiative to do it here */
+
+	vdbg_dump_regs(atchan);
+}
+
 static int at_dma_resume_noirq(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct at_dma *atdma = platform_get_drvdata(pdev);
+	struct dma_chan *chan, *_chan;
 
+	/* bring back DMA controller */
 	clk_enable(atdma->clk);
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
-	return 0;
+
+	/* clear any pending interrupt */
+	while (dma_readl(atdma, EBCISR))
+		cpu_relax();
+
+	/* restore saved data */
+	dma_writel(atdma, EBCIER, atdma->save_imr);
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+			device_node) {
+		struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+		channel_writel(atchan, CFG, atchan->save_cfg);
+		if (atc_chan_is_cyclic(atchan))
+			atc_resume_cyclic(atchan);
+	}
+	return 0;
 }
 
 static const struct dev_pm_ops at_dma_dev_pm_ops = {
+	.prepare = at_dma_prepare,
 	.suspend_noirq = at_dma_suspend_noirq,
 	.resume_noirq = at_dma_resume_noirq,
 };
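A note on the locking conversion above: spin_lock_bh() only disables softirqs, so it is not enough once the channel lock can be taken while hard interrupts are disabled or from a hard-IRQ path; the irqsave variants save and restore the caller's IRQ state. A minimal, self-contained sketch of the idiom (all names here are hypothetical, not from the driver):

```c
#include <linux/spinlock.h>

struct chan_state {
	spinlock_t lock;	/* taken from process, tasklet and IRQ context */
	int pending;
};

/* Safe in any context: the current IRQ state is saved in 'flags'
 * and restored on unlock, so nesting under local_irq_disable() or
 * inside an IRQ handler is fine. */
static void chan_mark_pending(struct chan_state *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&cs->lock, flags);
	cs->pending++;
	spin_unlock_irqrestore(&cs->lock, flags);
}
```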
drivers/dma/at_hdmac_regs.h +24
@@ -204,6 +204,9 @@ enum atc_status {
  * @status: transmit status information from irq/prep* functions
  *	to tasklet (use atomic operations)
  * @tasklet: bottom half to finish transaction work
+ * @save_cfg: configuration register that is saved on suspend/resume cycle
+ * @save_dscr: for cyclic operations, preserve next descriptor address in
+ *	the cyclic list on suspend/resume cycle
  * @lock: serializes enqueue/dequeue operations to descriptors lists
  * @completed_cookie: identifier for the most recently completed operation
  * @active_list: list of descriptors dmaengine is being running on
@@ -218,6 +221,8 @@ struct at_dma_chan {
 	u8			mask;
 	unsigned long		status;
 	struct tasklet_struct	tasklet;
+	u32			save_cfg;
+	u32			save_dscr;
 
 	spinlock_t		lock;
@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
 * @chan_common: common dmaengine dma_device object members
 * @ch_regs: memory mapped register base
 * @clk: dma controller clock
+ * @save_imr: interrupt mask register that is saved on suspend/resume cycle
 * @all_chan_mask: all channels availlable in a mask
 * @dma_desc_pool: base of DMA descriptor region (DMA address)
 * @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
 	struct dma_device	dma_common;
 	void __iomem		*regs;
 	struct clk		*clk;
+	u32			save_imr;
 
 	u8			all_chan_mask;
@@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
 	return !!(dma_readl(atdma, CHSR) & atchan->mask);
 }
 
+/**
+ * atc_chan_is_paused - test channel pause/resume status
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
+{
+	return test_bit(ATC_IS_PAUSED, &atchan->status);
+}
+
+/**
+ * atc_chan_is_cyclic - test if given channel has cyclic property set
+ * @atchan: channel we want to test status
+ */
+static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
+{
+	return test_bit(ATC_IS_CYCLIC, &atchan->status);
+}
+
 /**
  * set_desc_eol - set end-of-link to descriptor so it will end transfer
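The new save_cfg/save_dscr/save_imr fields back the noirq suspend/resume callbacks added in at_hdmac.c above. For reference, a minimal sketch of how such register-save callbacks are typically wired up through dev_pm_ops (driver names and fields here are hypothetical):

```c
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/platform_device.h>

/* Hypothetical driver state with a register image saved across suspend */
struct foo_dma {
	u32 save_imr;
};

static int foo_suspend_noirq(struct device *dev)
{
	struct foo_dma *p = dev_get_drvdata(dev);

	p->save_imr = 0;	/* a readl() of the mask register would go here */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	/* restore p->save_imr to the hardware here */
	return 0;
}

/* noirq callbacks run with interrupts disabled, after (resp. before)
 * all device IRQ handlers have been quiesced (resp. re-enabled). */
static const struct dev_pm_ops foo_pm_ops = {
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};
```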
drivers/dma/dmatest.c +21 -2
@@ -10,6 +10,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
+#include <linux/freezer.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -251,6 +252,7 @@ static int dmatest_func(void *data)
 	int			i;
 
 	thread_name = current->comm;
+	set_freezable_with_signal();
 
 	ret = -ENOMEM;
@@ -305,7 +307,8 @@ static int dmatest_func(void *data)
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
 		struct completion cmp;
-		unsigned long tmo = msecs_to_jiffies(timeout);
+		unsigned long start, tmo, end = 0 /* compiler... */;
+		bool reload = true;
 		u8 align = 0;
 
 		total_tests++;
@@ -404,7 +407,17 @@ static int dmatest_func(void *data)
 		}
 		dma_async_issue_pending(chan);
 
-		tmo = wait_for_completion_timeout(&cmp, tmo);
+		do {
+			start = jiffies;
+			if (reload)
+				end = start + msecs_to_jiffies(timeout);
+			else if (end <= start)
+				end = start + 1;
+			tmo = wait_for_completion_interruptible_timeout(&cmp,
+								end - start);
+			reload = try_to_freeze();
+		} while (tmo == -ERESTARTSYS);
+
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
 		if (tmo == 0) {
@@ -477,6 +490,8 @@ err_srcs:
 	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
 			thread_name, total_tests, failed_tests, ret);
 
+	/* terminate all transfers on specified channels */
+	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 	if (iterations > 0)
 		while (!kthread_should_stop()) {
 			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
 		list_del(&thread->node);
 		kfree(thread);
 	}
+
+	/* terminate all transfers on specified channels */
+	dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+
 	kfree(dtc);
 }
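The dmatest change above makes the test thread freezable: a plain wait_for_completion_timeout() would block suspend, so the wait is restarted around each freeze cycle. Pulled out as a standalone sketch (the helper name is hypothetical; the loop body mirrors the diff):

```c
#include <linux/completion.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>

/* Wait on 'cmp' for up to 'timeout_ms', re-arming the deadline after a
 * freeze instead of reporting a spurious timeout. Returns the usual
 * wait_for_completion_*_timeout() convention: 0 on timeout, >0 on
 * completion (remaining jiffies). */
static long wait_freezable(struct completion *cmp, unsigned int timeout_ms)
{
	unsigned long start, end = 0;
	bool reload = true;
	long tmo;

	do {
		start = jiffies;
		if (reload)
			end = start + msecs_to_jiffies(timeout_ms);
		else if (end <= start)
			end = start + 1;	/* clamp so the wait stays positive */
		tmo = wait_for_completion_interruptible_timeout(cmp,
								end - start);
		reload = try_to_freeze();	/* true if we were frozen */
	} while (tmo == -ERESTARTSYS);

	return tmo;
}
```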
drivers/dma/dw_dmac.c +2 -3
@@ -1407,12 +1407,11 @@ static int __init dw_probe(struct platform_device *pdev)
 	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
 
 	INIT_LIST_HEAD(&dw->dma.channels);
-	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
+	for (i = 0; i < pdata->nr_channels; i++) {
 		struct dw_dma_chan	*dwc = &dw->chan[i];
 
 		dwc->chan.device = &dw->dma;
 		dwc->chan.cookie = dwc->completed = 1;
-		dwc->chan.chan_id = i;
 		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
 			list_add_tail(&dwc->chan.device_node,
 					&dw->dma.channels);
@@ -1468,7 +1467,7 @@ static int __init dw_probe(struct platform_device *pdev)
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 
 	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
-			dev_name(&pdev->dev), dw->dma.chancnt);
+			dev_name(&pdev->dev), pdata->nr_channels);
 
 	dma_async_device_register(&dw->dma);
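Dropping the manual chancnt/chan_id bookkeeping (here and in the other drivers touched by this merge) works because dma_async_device_register() walks the dma_device.channels list and fills in both values itself. A reduced sketch of the registration flow under that assumption (function and parameter names are hypothetical):

```c
#include <linux/dmaengine.h>
#include <linux/list.h>

/* Hypothetical probe fragment: the driver only links its channels into
 * dma_device.channels; the dmaengine core assigns chan->chan_id and
 * device->chancnt when the device is registered. */
static int foo_register_channels(struct dma_device *dma,
				 struct dma_chan *chans, int nr)
{
	int i;

	INIT_LIST_HEAD(&dma->channels);
	for (i = 0; i < nr; i++) {
		chans[i].device = dma;
		list_add_tail(&chans[i].device_node, &dma->channels);
	}
	return dma_async_device_register(dma);
}
```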
+1
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
drivers/dma/imx-dma.c +1
@@ -14,6 +14,7 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
drivers/dma/imx-sdma.c +39 -9
@@ -18,6 +18,7 @@
  */
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
@@ -318,6 +319,7 @@ struct sdma_engine {
 	dma_addr_t			context_phys;
 	struct dma_device		dma_device;
 	struct clk			*clk;
+	struct mutex			channel_0_lock;
 	struct sdma_script_start_addrs	*script_addrs;
 };
@@ -415,11 +417,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	dma_addr_t buf_phys;
 	int ret;
 
+	mutex_lock(&sdma->channel_0_lock);
+
 	buf_virt = dma_alloc_coherent(NULL,
 			size,
 			&buf_phys, GFP_KERNEL);
-	if (!buf_virt)
-		return -ENOMEM;
+	if (!buf_virt) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
 
 	bd0->mode.command = C0_SETPM;
 	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@@ -433,6 +439,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
+err_out:
+	mutex_unlock(&sdma->channel_0_lock);
+
 	return ret;
 }
@@ -656,6 +665,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
 	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
 
+	mutex_lock(&sdma->channel_0_lock);
+
 	memset(context, 0, sizeof(*context));
 	context->channel_state.pc = load_address;
@@ -676,6 +687,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	ret = sdma_run_channel(&sdma->channel[0]);
 
+	mutex_unlock(&sdma->channel_0_lock);
+
 	return ret;
 }
@@ -1131,18 +1144,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
 			saddr_arr[i] = addr_arr[i];
 }
 
-static int __init sdma_get_firmware(struct sdma_engine *sdma,
-		const char *fw_name)
+static void sdma_load_firmware(const struct firmware *fw, void *context)
 {
-	const struct firmware *fw;
+	struct sdma_engine *sdma = context;
 	const struct sdma_firmware_header *header;
-	int ret;
 	const struct sdma_script_start_addrs *addr;
 	unsigned short *ram_code;
 
-	ret = request_firmware(&fw, fw_name, sdma->dev);
-	if (ret)
-		return ret;
+	if (!fw) {
+		dev_err(sdma->dev, "firmware not found\n");
+		return;
+	}
 
 	if (fw->size < sizeof(*header))
 		goto err_firmware;
@@ -1172,6 +1184,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
 err_firmware:
 	release_firmware(fw);
 }
+
+static int __init sdma_get_firmware(struct sdma_engine *sdma,
+		const char *fw_name)
+{
+	int ret;
+
+	ret = request_firmware_nowait(THIS_MODULE,
+			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
+			GFP_KERNEL, sdma, sdma_load_firmware);
+
+	return ret;
+}
@@ -1269,11 +1291,14 @@ static int __init sdma_probe(struct platform_device *pdev)
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
 	struct sdma_engine *sdma;
+	s32 *saddr_arr;
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
 	if (!sdma)
 		return -ENOMEM;
 
+	mutex_init(&sdma->channel_0_lock);
+
 	sdma->dev = &pdev->dev;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1310,6 +1335,11 @@ static int __init sdma_probe(struct platform_device *pdev)
 		goto err_alloc;
 	}
 
+	/* initially no scripts available */
+	saddr_arr = (s32 *)sdma->script_addrs;
+	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+		saddr_arr[i] = -EINVAL;
+
 	if (of_id)
 		pdev->id_entry = of_id->data;
 	sdma->devtype = pdev->id_entry->driver_data;
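With this change sdma_get_firmware() returns immediately and the firmware core calls sdma_load_firmware() whenever the image becomes available (fw is NULL on failure), so probe no longer blocks when the rootfs holding the firmware is not mounted yet. A minimal sketch of the request_firmware_nowait() pattern with hypothetical names:

```c
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/module.h>

struct foo_engine {
	struct device *dev;
};

/* Completion callback: runs asynchronously once the firmware loader
 * has (or has failed to get) the image. */
static void foo_fw_loaded(const struct firmware *fw, void *context)
{
	struct foo_engine *eng = context;

	if (!fw) {
		dev_err(eng->dev, "firmware not found\n");
		return;
	}
	/* ... validate and upload fw->data / fw->size here ... */
	release_firmware(fw);
}

static int foo_get_firmware(struct foo_engine *eng, const char *name)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       name, eng->dev, GFP_KERNEL,
				       eng, foo_fw_loaded);
}
```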
drivers/dma/intel_mid_dma.c +3 -6
@@ -115,16 +115,15 @@ DMAC1 interrupt Functions*/
 
 /**
  * dmac1_mask_periphral_intr - mask the periphral interrupt
- * @midc: dma channel for which masking is required
+ * @mid: dma device for which masking is required
  *
  * Masks the DMA periphral interrupt
  * this is valid for DMAC1 family controllers only
  * This controller should have periphral mask registers already mapped
  */
-static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
+static void dmac1_mask_periphral_intr(struct middma_device *mid)
 {
 	u32 pimr;
-	struct middma_device *mid = to_middma_device(midc->chan.device);
 
 	if (mid->pimr_mask) {
 		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
@@ -184,7 +183,6 @@ static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
 static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
 {
 	/*Check LPE PISR, make sure fwd is disabled*/
-	dmac1_mask_periphral_intr(midc);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
@@ -1114,7 +1112,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
 		midch->chan.device = &dma->common;
 		midch->chan.cookie = 1;
-		midch->chan.chan_id = i;
 		midch->ch_id = dma->chan_base + i;
 		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
@@ -1150,7 +1147,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
 	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
 	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
 	dma->common.dev = &pdev->dev;
-	dma->common.chancnt = dma->max_chan;
 
 	dma->common.device_alloc_chan_resources =
 					intel_mid_dma_alloc_chan_resources;
@@ -1350,6 +1346,7 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
 		if (device->ch[i].in_use)
 			return -EAGAIN;
 	}
+	dmac1_mask_periphral_intr(device);
 	device->state = SUSPENDED;
 	pci_save_state(pci);
 	pci_disable_device(pci);
drivers/dma/mpc512x_dma.c -1
@@ -741,7 +741,6 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
 		mchan = &mdma->channels[i];
 
 		mchan->chan.device = dma;
-		mchan->chan.chan_id = i;
 		mchan->chan.cookie = 1;
 		mchan->completed_cookie = mchan->chan.cookie;
drivers/dma/mxs-dma.c +24 -21
@@ -130,6 +130,23 @@ struct mxs_dma_engine {
 	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
 };
 
+static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+	int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
+
+	/* enable apbh channel clock */
+	if (dma_is_apbh()) {
+		if (apbh_is_old())
+			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+		else
+			writel(1 << chan_id,
+				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+	}
+}
+
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;
 
+	/* clkgate needs to be enabled before writing other registers */
+	mxs_dma_clkgate(mxs_chan, 1);
+
 	/* set cmd_addr up */
 	writel(mxs_chan->ccw_phys,
 		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
 
-	/* enable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
-	}
-
 	/* write 1 to SEMA to kick off the channel */
 	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
 }
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-	int chan_id = mxs_chan->chan.chan_id;
-
 	/* disable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
-	}
+	mxs_dma_clkgate(mxs_chan, 0);
 
 	mxs_chan->status = DMA_SUCCESS;
 }
@@ -338,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto err_clk;
 
+	/* clkgate needs to be enabled for reset to finish */
+	mxs_dma_clkgate(mxs_chan, 1);
 	mxs_dma_reset_chan(mxs_chan);
+	mxs_dma_clkgate(mxs_chan, 0);
 
 	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
 	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
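The new mxs_dma_clkgate() folds the duplicated enable/disable blocks into one helper built on the MXS register convention: each control register has companion "set" and "clear" addresses (base + 0x4 writes set bits, base + 0x8 clears them), so gating reduces to choosing an offset. A reduced sketch of that idiom with hypothetical register names:

```c
#include <linux/io.h>

#define HW_CTRL0	0x000	/* hypothetical control register */
#define MXS_SET_ADDR	0x4	/* write here to set bits in HW_CTRL0 */
#define MXS_CLR_ADDR	0x8	/* write here to clear bits in HW_CTRL0 */

/* Gate or ungate one channel clock. The CLKGATE bit gates the clock
 * when set, so "enable" clears the bit and "disable" sets it. */
static void foo_clkgate(void __iomem *base, int chan_id, int enable)
{
	int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;

	writel(1 << chan_id, base + HW_CTRL0 + set_clr);
}
```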
drivers/dma/pch_dma.c +2 -5
@@ -60,7 +60,7 @@
 #define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
 #define DMA_DESC_FOLLOW_WITH_IRQ	0x3
 
-#define MAX_CHAN_NR			8
+#define MAX_CHAN_NR			12
 
 #define DMA_MASK_CTL0_MODE	0x33333333
 #define DMA_MASK_CTL2_MODE	0x00003333
@@ -872,8 +872,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
 	int i;
 
 	nr_channels = id->driver_data;
-	pd = kzalloc(sizeof(struct pch_dma)+
-		sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
 		return -ENOMEM;
@@ -926,7 +925,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
 	}
 
 	pd->dma.dev = &pdev->dev;
-	pd->dma.chancnt = nr_channels;
 
 	INIT_LIST_HEAD(&pd->dma.channels);
@@ -935,7 +933,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
 		pd_chan->chan.device = &pd->dma;
 		pd_chan->chan.cookie = 1;
-		pd_chan->chan.chan_id = i;
 		pd_chan->membase = &regs->desc[i];
drivers/dma/pl330.c +209 -26
@@ -17,6 +17,8 @@
 #include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl330.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
 
 #define NR_DEFAULT_DESC	16
@@ -68,6 +70,14 @@ struct dma_pl330_chan {
 	 * NULL if the channel is available to be acquired.
 	 */
 	void *pl330_chid;
+
+	/* For D-to-M and M-to-D channels */
+	int burst_sz; /* the peripheral fifo width */
+	int burst_len; /* the number of burst */
+	dma_addr_t fifo_addr;
+
+	/* for cyclic capability */
+	bool cyclic;
 };
 
 struct dma_pl330_dmac {
@@ -83,6 +93,8 @@ struct dma_pl330_dmac {
 	/* Peripheral channels connected to this DMAC */
 	struct dma_pl330_chan *peripherals; /* keep at end */
+
+	struct clk *clk;
 };
 
 struct dma_pl330_desc {
@@ -152,6 +164,31 @@ static inline void free_desc_list(struct list_head *list)
 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
 }
 
+static inline void handle_cyclic_desc_list(struct list_head *list)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch;
+	unsigned long flags;
+
+	if (list_empty(list))
+		return;
+
+	list_for_each_entry(desc, list, node) {
+		dma_async_tx_callback callback;
+
+		/* Change status to reload it */
+		desc->status = PREP;
+		pch = desc->pchan;
+		callback = desc->txd.callback;
+		if (callback)
+			callback(desc->txd.callback_param);
+	}
+
+	spin_lock_irqsave(&pch->lock, flags);
+	list_splice_tail_init(list, &pch->work_list);
+	spin_unlock_irqrestore(&pch->lock, flags);
+}
+
 static inline void fill_queue(struct dma_pl330_chan *pch)
 {
 	struct dma_pl330_desc *desc;
@@ -205,7 +242,10 @@ static void pl330_tasklet(unsigned long data)
 	spin_unlock_irqrestore(&pch->lock, flags);
 
-	free_desc_list(&list);
+	if (pch->cyclic)
+		handle_cyclic_desc_list(&list);
+	else
+		free_desc_list(&list);
 }
 
 static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -236,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&pch->lock, flags);
 
 	pch->completed = chan->cookie = 1;
+	pch->cyclic = false;
 
 	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
 	if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc, *_dt;
 	unsigned long flags;
 	struct dma_pl330_dmac *pdmac = pch->dmac;
+	struct dma_slave_config *slave_config;
+	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	spin_lock_irqsave(&pch->lock, flags);
-
-	/* FLUSH the PL330 Channel thread */
-	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
-
-	/* Mark all desc done */
-	list_for_each_entry(desc, &pch->work_list, node)
-		desc->status = DONE;
-
-	spin_unlock_irqrestore(&pch->lock, flags);
-
-	pl330_tasklet((unsigned long) pch);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&pch->lock, flags);
+
+		/* FLUSH the PL330 Channel thread */
+		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+
+		/* Mark all desc done */
+		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
+			desc->status = DONE;
+			pch->completed = desc->txd.cookie;
+			list_move_tail(&desc->node, &list);
+		}
+
+		list_splice_tail_init(&list, &pdmac->desc_pool);
+		spin_unlock_irqrestore(&pch->lock, flags);
+		break;
+	case DMA_SLAVE_CONFIG:
+		slave_config = (struct dma_slave_config *)arg;
+
+		if (slave_config->direction == DMA_TO_DEVICE) {
+			if (slave_config->dst_addr)
+				pch->fifo_addr = slave_config->dst_addr;
+			if (slave_config->dst_addr_width)
+				pch->burst_sz = __ffs(slave_config->dst_addr_width);
+			if (slave_config->dst_maxburst)
+				pch->burst_len = slave_config->dst_maxburst;
+		} else if (slave_config->direction == DMA_FROM_DEVICE) {
+			if (slave_config->src_addr)
+				pch->fifo_addr = slave_config->src_addr;
+			if (slave_config->src_addr_width)
+				pch->burst_sz = __ffs(slave_config->src_addr_width);
+			if (slave_config->src_maxburst)
+				pch->burst_len = slave_config->src_maxburst;
+		}
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
+		return -ENXIO;
+	}
 
 	return 0;
 }
@@ -288,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	pl330_release_channel(pch->pl330_chid);
 	pch->pl330_chid = NULL;
 
+	if (pch->cyclic)
+		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+
 	spin_unlock_irqrestore(&pch->lock, flags);
 }
@@ -453,7 +524,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 	if (peri) {
 		desc->req.rqtype = peri->rqtype;
-		desc->req.peri = peri->peri_id;
+		desc->req.peri = pch->chan.chan_id;
 	} else {
 		desc->req.rqtype = MEMTOMEM;
 		desc->req.peri = 0;
@@ -524,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 	return burst_len;
 }
 
+static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	dma_addr_t dst;
+	dma_addr_t src;
+
+	desc = pl330_get_desc(pch);
+	if (!desc) {
+		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		desc->rqcfg.src_inc = 1;
+		desc->rqcfg.dst_inc = 0;
+		src = dma_addr;
+		dst = pch->fifo_addr;
+		break;
+	case DMA_FROM_DEVICE:
+		desc->rqcfg.src_inc = 0;
+		desc->rqcfg.dst_inc = 1;
+		src = pch->fifo_addr;
+		dst = dma_addr;
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	desc->rqcfg.brst_size = pch->burst_sz;
+	desc->rqcfg.brst_len = 1;
+
+	pch->cyclic = true;
+
+	fill_px(&desc->px, dst, src, period_len);
+
+	return &desc->txd;
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 		dma_addr_t src, size_t len, unsigned long flags)
@@ -579,7 +695,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct dma_pl330_peri *peri = chan->private;
 	struct scatterlist *sg;
 	unsigned long flags;
-	int i, burst_size;
+	int i;
 	dma_addr_t addr;
 
 	if (unlikely(!pch || !sgl || !sg_len || !peri))
@@ -595,8 +711,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		return NULL;
 	}
 
-	addr = peri->fifo_addr;
-	burst_size = peri->burst_sz;
+	addr = pch->fifo_addr;
 
 	first = NULL;
@@ -644,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				sg_dma_address(sg), addr, sg_dma_len(sg));
 		}
 
-		desc->rqcfg.brst_size = burst_size;
+		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
 	}
@@ -696,6 +811,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		goto probe_err1;
 	}
 
+	pdmac->clk = clk_get(&adev->dev, "dma");
+	if (IS_ERR(pdmac->clk)) {
+		dev_err(&adev->dev, "Cannot get operation clock.\n");
+		ret = -EINVAL;
+		goto probe_err1;
+	}
+
+	amba_set_drvdata(adev, pdmac);
+
+#ifdef CONFIG_PM_RUNTIME
+	/* to use the runtime PM helper functions */
+	pm_runtime_enable(&adev->dev);
+
+	/* enable the power domain */
+	if (pm_runtime_get_sync(&adev->dev)) {
+		dev_err(&adev->dev, "failed to get runtime pm\n");
+		ret = -ENODEV;
+		goto probe_err1;
+	}
+#else
+	/* enable dma clk */
+	clk_enable(pdmac->clk);
+#endif
+
 	irq = adev->irq[0];
 	ret = request_irq(irq, pl330_irq_handler, 0,
 			dev_name(&adev->dev), pi);
@@ -732,6 +871,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		case MEMTODEV:
 		case DEVTOMEM:
 			dma_cap_set(DMA_SLAVE, pd->cap_mask);
+			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
 			break;
 		default:
 			dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -747,11 +887,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		spin_lock_init(&pch->lock);
 		pch->pl330_chid = NULL;
 		pch->chan.device = pd;
-		pch->chan.chan_id = i;
 		pch->dmac = pdmac;
 
 		/* Add the channel to the DMAC list */
-		pd->chancnt++;
 		list_add_tail(&pch->chan.device_node, &pd->channels);
 	}
@@ -760,6 +898,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
 	pd->device_free_chan_resources = pl330_free_chan_resources;
 	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
 	pd->device_tx_status = pl330_tx_status;
 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
 	pd->device_control = pl330_control;
@@ -771,8 +910,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		goto probe_err4;
 	}
 
-	amba_set_drvdata(adev, pdmac);
-
 	dev_info(&adev->dev,
 		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
 	dev_info(&adev->dev,
@@ -833,6 +970,13 @@ static int __devexit pl330_remove(struct amba_device *adev)
 	res = &adev->res;
 	release_mem_region(res->start, resource_size(res));
 
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_put(&adev->dev);
+	pm_runtime_disable(&adev->dev);
+#else
+	clk_disable(pdmac->clk);
+#endif
+
 	kfree(pdmac);
 
 	return 0;
@@ -846,10 +990,49 @@ static struct amba_id pl330_ids[] = {
 	{ 0, 0 },
 };
 
+#ifdef CONFIG_PM_RUNTIME
+static int pl330_runtime_suspend(struct device *dev)
+{
+	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+	if (!pdmac) {
+		dev_err(dev, "failed to get dmac\n");
+		return -ENODEV;
+	}
+
+	clk_disable(pdmac->clk);
+
+	return 0;
+}
+
+static int pl330_runtime_resume(struct device *dev)
+{
+	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+	if (!pdmac) {
+		dev_err(dev, "failed to get dmac\n");
+		return -ENODEV;
+	}
+
+	clk_enable(pdmac->clk);
+
+	return 0;
+}
+#else
+#define pl330_runtime_suspend	NULL
+#define pl330_runtime_resume	NULL
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops pl330_pm_ops = {
+	.runtime_suspend = pl330_runtime_suspend,
+	.runtime_resume = pl330_runtime_resume,
+};
+
 static struct amba_driver pl330_driver = {
 	.drv = {
 		.owner = THIS_MODULE,
 		.name = "dma-pl330",
+		.pm = &pl330_pm_ops,
 	},
 	.id_table = pl330_ids,
 	.probe = pl330_probe,
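With DMA_SLAVE_CONFIG handled in pl330_control(), the FIFO address and burst parameters now come from the client through the generic slave-config call instead of platform data. A hedged sketch of the client side (channel acquisition omitted; the dmaengine_slave_config() wrapper simply forwards to device_control(chan, DMA_SLAVE_CONFIG, ...)):

```c
#include <linux/dmaengine.h>

/* Hypothetical client: program a memory-to-device channel for a
 * peripheral whose TX FIFO sits at 'fifo'. */
static int foo_setup_tx(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,	/* API of this era */
		.dst_addr	= fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	return dmaengine_slave_config(chan, &cfg);
}
```

After this, the client would use device_prep_slave_sg() or the new device_prep_dma_cyclic() on the same channel; the driver derives brst_size from the configured address width via __ffs().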
drivers/dma/shdma.c +86 -43
@@ -259,14 +259,23 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 	return 0;
 }
 
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
 	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+	struct sh_dmae_slave *param = tx->chan->private;
 	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
+	bool power_up;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
+
+	if (list_empty(&sh_chan->ld_queue))
+		power_up = true;
+	else
+		power_up = false;
 
 	cookie = sh_chan->common.cookie;
 	cookie++;
@@ -302,7 +311,38 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 		tx->cookie, &last->async_tx, sh_chan->id,
 		desc->hw.sar, desc->hw.tcr, desc->hw.dar);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	if (power_up) {
+		sh_chan->pm_state = DMAE_PM_BUSY;
+
+		pm_runtime_get(sh_chan->dev);
+
+		spin_unlock_irq(&sh_chan->desc_lock);
+
+		pm_runtime_barrier(sh_chan->dev);
+
+		spin_lock_irq(&sh_chan->desc_lock);
+
+		/* Have we been reset, while waiting? */
+		if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
+			dev_dbg(sh_chan->dev, "Bring up channel %d\n",
+				sh_chan->id);
+			if (param) {
+				const struct sh_dmae_slave_config *cfg =
+					param->config;
+
+				dmae_set_dmars(sh_chan, cfg->mid_rid);
+				dmae_set_chcr(sh_chan, cfg->chcr);
+			} else {
+				dmae_init(sh_chan);
+			}
+
+			if (sh_chan->pm_state == DMAE_PM_PENDING)
+				sh_chan_xfer_ld_queue(sh_chan);
+			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+		}
+	}
+
+	spin_unlock_irq(&sh_chan->desc_lock);
 
 	return cookie;
 }
@@ -346,8 +386,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 	struct sh_dmae_slave *param = chan->private;
 	int ret;
 
-	pm_runtime_get_sync(sh_chan->dev);
-
 	/*
 	 * This relies on the guarantee from dmaengine that alloc_chan_resources
 	 * never runs concurrently with itself or free_chan_resources.
@@ -367,31 +405,20 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 		}
 		param->config = cfg;
-
-		dmae_set_dmars(sh_chan, cfg->mid_rid);
-		dmae_set_chcr(sh_chan, cfg->chcr);
-	} else {
-		dmae_init(sh_chan);
 	}
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
-		spin_unlock_bh(&sh_chan->desc_lock);
 		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
-		if (!desc) {
-			spin_lock_bh(&sh_chan->desc_lock);
+		if (!desc)
 			break;
-		}
 		dma_async_tx_descriptor_init(&desc->async_tx,
 					&sh_chan->common);
 		desc->async_tx.tx_submit = sh_dmae_tx_submit;
 		desc->mark = DESC_IDLE;
 
-		spin_lock_bh(&sh_chan->desc_lock);
+		spin_lock_irq(&sh_chan->desc_lock);
 		list_add(&desc->node, &sh_chan->ld_free);
 		sh_chan->descs_allocated++;
+		spin_unlock_irq(&sh_chan->desc_lock);
 	}
-	spin_unlock_bh(&sh_chan->desc_lock);
 
 	if (!sh_chan->descs_allocated) {
 		ret = -ENOMEM;
@@ -405,7 +432,7 @@ edescalloc:
 		clear_bit(param->slave_id, sh_dmae_slave_used);
 etestused:
 efindslave:
-	pm_runtime_put(sh_chan->dev);
+	chan->private = NULL;
 	return ret;
 }
@@ -417,7 +444,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
-	int descs = sh_chan->descs_allocated;
 
 	/* Protect against ISR */
 	spin_lock_irq(&sh_chan->desc_lock);
@@ -437,15 +463,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 		chan->private = NULL;
 	}
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
 	sh_chan->descs_allocated = 0;
 
-	spin_unlock_bh(&sh_chan->desc_lock);
-
-	if (descs > 0)
-		pm_runtime_put(sh_chan->dev);
+	spin_unlock_irq(&sh_chan->desc_lock);
 
 	list_for_each_entry_safe(desc, _desc, &list, node)
 		kfree(desc);
@@ -534,6 +557,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
 	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
 	LIST_HEAD(tx_list);
 	int chunks = 0;
+	unsigned long irq_flags;
 	int i;
 
 	if (!sg_len)
@@ -544,7 +568,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
 			(SH_DMA_TCR_MAX + 1);
 
 	/* Have to lock the whole loop to protect against concurrent release */
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
 
 	/*
 	 * Chaining:
@@ -590,7 +614,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
 	/* Put them back on the free list, so, they don't get lost */
 	list_splice_tail(&tx_list, &sh_chan->ld_free);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
 
 	return &first->async_tx;
@@ -599,7 +623,7 @@ err_get_desc:
 	new->mark = DESC_IDLE;
 	list_splice(&tx_list, &sh_chan->ld_free);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
 
 	return NULL;
 }
@@ -661,6 +685,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			   unsigned long arg)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	unsigned long flags;
 
 	/* Only supports DMA_TERMINATE_ALL */
 	if (cmd != DMA_TERMINATE_ALL)
@@ -669,7 +694,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	if (!chan)
 		return -EINVAL;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 	dmae_halt(sh_chan);
 
 	if (!list_empty(&sh_chan->ld_queue)) {
@@ -678,9 +703,8 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 				struct sh_desc, node);
 		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
 			sh_chan->xmit_shift;
-
 	}
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	sh_dmae_chan_ld_cleanup(sh_chan, true);
@@ -695,8 +719,9 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 	dma_cookie_t cookie = 0;
 	dma_async_tx_callback callback = NULL;
 	void *param = NULL;
+	unsigned long flags;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
 		struct dma_async_tx_descriptor *tx = &desc->async_tx;
@@ -762,7 +787,13 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 		    async_tx_test_ack(&desc->async_tx)) || all) {
 			/* Remove from ld_queue list */
 			desc->mark = DESC_IDLE;
+
 			list_move(&desc->node, &sh_chan->ld_free);
+
+			if (list_empty(&sh_chan->ld_queue)) {
+				dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+				pm_runtime_put(sh_chan->dev);
+			}
 		}
 	}
@@ -773,7 +804,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 	 */
 	sh_chan->completed_cookie = sh_chan->common.cookie;
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	if (callback)
 		callback(param);
@@ -792,14 +823,14 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 		;
 }
 
+/* Called under spin_lock_irq(&sh_chan->desc_lock) */
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
 	struct sh_desc *desc;
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	/* DMA work check */
 	if (dmae_is_busy(sh_chan))
-		goto sh_chan_xfer_ld_queue_end;
+		return;
 
 	/* Find the first not transferred descriptor */
 	list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -812,15 +843,18 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 			dmae_start(sh_chan);
 			break;
 		}
-
-sh_chan_xfer_ld_queue_end:
-	spin_unlock_bh(&sh_chan->desc_lock);
 }
 
 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-	sh_chan_xfer_ld_queue(sh_chan);
+
+	spin_lock_irq(&sh_chan->desc_lock);
+	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
+		sh_chan_xfer_ld_queue(sh_chan);
+	else
+		sh_chan->pm_state = DMAE_PM_PENDING;
+	spin_unlock_irq(&sh_chan->desc_lock);
 }
 
 static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
@@ -831,6 +865,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 	enum dma_status status;
+	unsigned long flags;
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
@@ -841,7 +876,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 	BUG_ON(last_complete < 0);
 	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 
 	status = dma_async_is_complete(cookie, last_complete, last_used);
@@ -859,7 +894,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 		}
 	}
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	return status;
 }
@@ -912,6 +947,12 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 		list_splice_init(&sh_chan->ld_queue, &dl);
 
+		if (!list_empty(&dl)) {
+			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+			pm_runtime_put(sh_chan->dev);
+		}
+		sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
 		spin_unlock(&sh_chan->desc_lock);
 
 		/* Complete all  */
@@ -952,7 +993,7 @@ static void dmae_do_tasklet(unsigned long data)
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
 	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
-	spin_lock(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
 		if (desc->mark == DESC_SUBMITTED &&
 		    ((desc->direction == DMA_FROM_DEVICE &&
@@ -965,10 +1006,10 @@ static void dmae_do_tasklet(unsigned long data)
 			break;
 		}
 	}
-	spin_unlock(&sh_chan->desc_lock);
-
 	/* Next desc */
 	sh_chan_xfer_ld_queue(sh_chan);
+	spin_unlock_irq(&sh_chan->desc_lock);
+
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
@@ -1036,7 +1077,9 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 		return -ENOMEM;
 	}
 
-	/* copy struct dma_device */
+	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
+	/* reference struct dma_device */
 	new_sh_chan->common.device = &shdev->common;
 	new_sh_chan->dev = shdev->common.dev;
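The shdma rework above keeps the controller powered only while the descriptor queue is non-empty: pm_runtime_get() on the submit that makes the queue non-empty, pm_runtime_put() from the cleanup path that empties it. A condensed sketch of that pairing, with hypothetical names and the per-transfer state machine reduced to the get/put transitions:

```c
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct foo_chan {
	spinlock_t lock;
	struct list_head queue;		/* submitted descriptors */
	struct device *dev;
};

/* Submit path: power up only on the empty -> non-empty transition. */
static void foo_queue_desc(struct foo_chan *c, struct list_head *node)
{
	spin_lock_irq(&c->lock);
	if (list_empty(&c->queue))
		pm_runtime_get(c->dev);	/* async get; safe in atomic context */
	list_add_tail(node, &c->queue);
	spin_unlock_irq(&c->lock);
}

/* Completion path: power down once the queue drains. */
static void foo_complete_desc(struct foo_chan *c, struct list_head *node)
{
	spin_lock_irq(&c->lock);
	list_del(node);
	if (list_empty(&c->queue))
		pm_runtime_put(c->dev);
	spin_unlock_irq(&c->lock);
}
```

The extra DMAE_PM_BUSY/PENDING states in the real driver exist because pm_runtime_get() is asynchronous: a transfer issued before the resume completes must be parked and started once the channel is actually powered.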
drivers/dma/shdma.h +7
@@ -23,6 +23,12 @@
 
 struct device;
 
+enum dmae_pm_state {
+	DMAE_PM_ESTABLISHED,
+	DMAE_PM_BUSY,
+	DMAE_PM_PENDING,
+};
+
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
@@ -38,6 +44,7 @@ struct sh_dmae_chan {
 	u32 __iomem *base;
 	char dev_id[16];		/* unique name per DMAC of channel */
 	int pm_error;
+	enum dmae_pm_state pm_state;
 };
 
 struct sh_dmae_device {
drivers/dma/timb_dma.c +2 -3
@@ -753,7 +753,7 @@ static int __devinit td_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&td->dma.channels);
 
-	for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+	for (i = 0; i < pdata->nr_channels; i++) {
 		struct timb_dma_chan *td_chan = &td->channels[i];
 		struct timb_dma_platform_data_channel *pchan =
 				pdata->channels + i;
@@ -762,12 +762,11 @@ static int __devinit td_probe(struct platform_device *pdev)
 		if ((i % 2) == pchan->rx) {
 			dev_err(&pdev->dev, "Wrong channel configuration\n");
 			err = -EINVAL;
-			goto err_tasklet_kill;
+			goto err_free_irq;
 		}
 
 		td_chan->chan.device = &td->dma;
 		td_chan->chan.cookie = 1;
-		td_chan->chan.chan_id = i;
 		spin_lock_init(&td_chan->lock);
 		INIT_LIST_HEAD(&td_chan->active_list);
 		INIT_LIST_HEAD(&td_chan->queue);