dmaengine: pl330: Optimize scatterlist transfer

Use a hardware linked list instead of a software one to ensure there
are no gaps between the segments of a scatter-list transfer.

Merge multiple descs [first ... last] into the last desc, and drop
the descs that have been merged into the last one.

The result is a single merged descriptor rather than a software SGL.
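
As an illustration, here is a rough sketch of the new flow (not the
driver code verbatim; queue_sg_as_one_program is a made-up name that
mirrors the driver's fill_queue): every pl330_submit_req() call appends
its program at a running offset in the same per-thread MCODE buffer,
and only the desc marked ->last terminates the program with
DMASEV/DMAEND.

  /*
   * Conceptual layout of the merged program (not literal disassembly):
   *
   *   DMAMOV CCR ; <xfer for SG entry 0>
   *   DMAMOV CCR ; <xfer for SG entry 1>
   *   ...
   *   DMAMOV CCR ; <xfer for SG entry n-1>
   *   DMASEV ev  ; DMAEND    <- only for the desc with ->last set
   */
  static void queue_sg_as_one_program(struct dma_pl330_chan *pch)
  {
  	struct dma_pl330_desc *desc, *_dt;
  	int off = 0;	/* running offset into the thread's MCODE buffer */

  	list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
  		/* Non-last descs are merged and returned to the pool;
  		 * the last one hooks the whole program onto the thread.
  		 * Error handling is simplified for the sketch.
  		 */
  		if (pl330_submit_req(pch->thread, desc, &off))
  			break;
  	}
  }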

Obviously, this requires a larger MCODE buffer, because multiple
descs are merged into a single one. Increase the MCODE buffer size
if needed.

e.g.

  -#define MCODE_BUFF_PER_REQ	256
  +#define MCODE_BUFF_PER_REQ	512

or have it parsed from the DT property

  arm,pl330-mcbufsz-bytes = <512>;

Otherwise, you may see a warning like the following when submitting a long SGL:

  dma-pl330 2ab90000.dmac: pl330_submit_req: Try increasing mcbufsz (403/256)
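
For reference, the two numbers in that log come from the per-request
microcode budget. Below is a minimal sketch of the arithmetic, assuming
the stock driver defaults (MCODE_BUFF_PER_REQ = 256, a per-channel
buffer of mcbufsz = 2 * MCODE_BUFF_PER_REQ split across the two request
slots, and the dry-run check ret > mcbufsz / 2); the exact values may
differ per tree.

  #include <stdio.h>

  #define MCODE_BUFF_PER_REQ	256	/* stock per-request budget */

  int main(void)
  {
  	int mcbufsz = MCODE_BUFF_PER_REQ * 2;	/* default per-channel buffer: 512 */
  	int needed  = 403;			/* dry-run size of the merged program */

  	if (needed > mcbufsz / 2)		/* 403 > 256 -> reject with -ENOMEM */
  		printf("Try increasing mcbufsz (%d/%d)\n", needed, mcbufsz / 2);

  	/* With MCODE_BUFF_PER_REQ bumped to 512 (or the DT override above),
  	 * the per-request budget becomes 512 and this SG list fits.
  	 */
  	return 0;
  }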

Signed-off-by: Sugar Zhang <sugar.zhang@rock-chips.com>
Change-Id: Iea31fc9dba08d36570d4bdbe2c5fcb61f5fed0d4

--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1592,13 +1592,12 @@ static inline int _setup_xfer_cyclic(struct pl330_dmac *pl330,
  */
 static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
 		      struct pl330_thread *thrd, unsigned index,
-		      struct _xfer_spec *pxs)
+		      struct _xfer_spec *pxs, int off)
 {
 	struct _pl330_req *req = &thrd->req[index];
 	u8 *buf = req->mc_cpu;
-	int off = 0;
 
-	PL330_DBGMC_START(req->mc_bus);
+	PL330_DBGMC_START(req->mc_bus + off);
 
 	/* DMAMOV CCR, ccr */
 	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
@@ -1606,10 +1605,13 @@ static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
 	if (!pxs->desc->cyclic) {
 		off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
 
-		/* DMASEV peripheral/event */
-		off += _emit_SEV(dry_run, &buf[off], thrd->ev);
-		/* DMAEND */
-		off += _emit_END(dry_run, &buf[off]);
+		if (pxs->desc->last) {
+			/* DMASEV peripheral/event */
+			off += _emit_SEV(dry_run, &buf[off], thrd->ev);
+			/* DMAEND */
+			off += _emit_END(dry_run, &buf[off]);
+		}
 	} else {
 		off += _setup_xfer_cyclic(pl330, dry_run, &buf[off],
 					  pxs, thrd->ev);
@@ -1656,7 +1658,7 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
  * xfer units are done or some error occurs.
  */
 static int pl330_submit_req(struct pl330_thread *thrd,
-			    struct dma_pl330_desc *desc)
+			    struct dma_pl330_desc *desc, int *off)
 {
 	struct pl330_dmac *pl330 = thrd->dmac;
 	struct _xfer_spec xs;
@@ -1712,12 +1714,14 @@ static int pl330_submit_req(struct pl330_thread *thrd,
 	idx = thrd->req[0].desc == NULL ? 0 : 1;
 
+	dev_dbg(pl330->ddma.dev, "desc-%px id %d-%d off %03d last %d cyclic %d\n",
+		desc, thrd->id, idx, *off, desc->last, desc->cyclic);
+
 	xs.ccr = ccr;
 	xs.desc = desc;
 
 	/* First dry run to check if req is acceptable */
-	ret = _setup_req(pl330, 1, thrd, idx, &xs);
+	ret = _setup_req(pl330, 1, thrd, idx, &xs, *off);
 	if (ret > pl330->mcbufsz / 2) {
 		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
 			 __func__, __LINE__, ret, pl330->mcbufsz / 2);
@@ -1726,13 +1730,26 @@ static int pl330_submit_req(struct pl330_thread *thrd,
 	}
 
 	/* Hook the request */
-	thrd->lstenq = idx;
-	thrd->req[idx].desc = desc;
-	_setup_req(pl330, 0, thrd, idx, &xs);
+	if (desc->last) {
+		thrd->lstenq = idx;
+		thrd->req[idx].desc = desc;
+	}
+
+	*off = _setup_req(pl330, 0, thrd, idx, &xs, *off);
+
+	if (!desc->last) {
+		desc->status = FREE;
+		list_move_tail(&desc->node, &pl330->desc_pool);
+
+		dev_dbg(pl330->ddma.dev, "desc-%px has been merged, drop it\n", desc);
+	}
 
 	ret = 0;
 
 xfer_exit:
+	if (desc->last)
+		*off = 0;
+
 	spin_unlock_irqrestore(&pl330->lock, flags);
 
 	return ret;
@@ -2227,16 +2244,16 @@ to_desc(struct dma_async_tx_descriptor *tx)
 static inline void fill_queue(struct dma_pl330_chan *pch)
 {
-	struct dma_pl330_desc *desc;
-	int ret;
+	struct dma_pl330_desc *desc, *_dt;
+	int ret, off = 0;
 
-	list_for_each_entry(desc, &pch->work_list, node) {
+	list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
 
 		/* If already submitted */
 		if (desc->status == BUSY || desc->status == PAUSED)
 			continue;
 
-		ret = pl330_submit_req(pch->thread, desc);
+		ret = pl330_submit_req(pch->thread, desc, &off);
 		if (!ret) {
 			desc->status = BUSY;
 		} else if (ret == -EAGAIN) {