dmaengine: xilinx_vdma: Improve SG engine handling

The current driver allows the user to queue up multiple segments on a
single transaction descriptor. The user submits this single descriptor,
and in issue_pending() we decode the segments and submit them to the SG
HW engine. We free allocated_desc as soon as it is submitted to the HW.

This prevents the user from preparing multiple transactions at the same
time, since each new descriptor would overwrite allocated_desc.

The HW SG engine is best utilized if we collate the whole pending list
when we start the DMA; this patch updates the driver to do so.
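
To illustrate the idea, here is a minimal standalone C model of the
collation (not part of the patch; the types are simplified stand-ins
for the driver's descriptor and segment structures):

#include <stdio.h>
#include <stdint.h>

struct seg  { uint32_t phys; uint32_t next_desc; };
struct desc { struct seg first, last; };

/* What append_desc_queue() amounts to: hardware-chain the incoming
 * descriptor onto the tail of the pending list. */
static void hw_chain(struct desc *tail, struct desc *incoming)
{
	tail->last.next_desc = incoming->first.phys;
}

int main(void)
{
	struct desc a = { { 0x100, 0 }, { 0x110, 0 } };
	struct desc b = { { 0x200, 0 }, { 0x210, 0 } };

	hw_chain(&a, &b);

	/* One CURDESC/TAILDESC programming now covers both descriptors. */
	printf("CURDESC  <- 0x%x\n", a.first.phys);
	printf("TAILDESC <- 0x%x\n", b.last.phys);
	return 0;
}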

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Kedareswara rao Appana 2016-02-26 19:33:51 +05:30 committed by Vinod Koul
parent 92e963f50f
commit 7096f36e53

@@ -190,8 +190,7 @@ struct xilinx_vdma_tx_descriptor {
* @desc_offset: TX descriptor registers offset
* @lock: Descriptor operation lock
* @pending_list: Descriptors waiting
* @active_desc: Active descriptor
* @allocated_desc: Allocated descriptor
* @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
@@ -206,6 +205,7 @@ struct xilinx_vdma_tx_descriptor {
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
* @desc_pendingcount: Descriptor pending count
*/
struct xilinx_vdma_chan {
struct xilinx_vdma_device *xdev;
@@ -213,8 +213,7 @@ struct xilinx_vdma_chan {
u32 desc_offset;
spinlock_t lock;
struct list_head pending_list;
struct xilinx_vdma_tx_descriptor *active_desc;
struct xilinx_vdma_tx_descriptor *allocated_desc;
struct list_head active_list;
struct list_head done_list;
struct dma_chan common;
struct dma_pool *desc_pool;
@@ -229,6 +228,7 @@ struct xilinx_vdma_chan {
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
u32 desc_pendingcount;
};
/**
@@ -342,19 +342,11 @@ static struct xilinx_vdma_tx_descriptor *
xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_tx_descriptor *desc;
unsigned long flags;
if (chan->allocated_desc)
return chan->allocated_desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
spin_lock_irqsave(&chan->lock, flags);
chan->allocated_desc = desc;
spin_unlock_irqrestore(&chan->lock, flags);
INIT_LIST_HEAD(&desc->segments);
return desc;
@@ -412,9 +404,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
xilinx_vdma_free_desc_list(chan, &chan->pending_list);
xilinx_vdma_free_desc_list(chan, &chan->done_list);
xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
chan->active_desc = NULL;
xilinx_vdma_free_desc_list(chan, &chan->active_list);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -614,25 +604,26 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
struct xilinx_vdma_tx_descriptor *desc;
struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
unsigned long flags;
u32 reg;
struct xilinx_vdma_tx_segment *head, *tail = NULL;
struct xilinx_vdma_tx_segment *tail_segment;
if (chan->err)
return;
spin_lock_irqsave(&chan->lock, flags);
/* There's already an active descriptor, bail out. */
if (chan->active_desc)
goto out_unlock;
if (list_empty(&chan->pending_list))
goto out_unlock;
desc = list_first_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
tail_desc = list_last_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment, node);
/* If it is SG mode and hardware is busy, cannot submit */
if (chan->has_sg && xilinx_vdma_is_running(chan) &&
@@ -645,14 +636,9 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
*/
if (chan->has_sg) {
head = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
tail = list_entry(desc->segments.prev,
struct xilinx_vdma_tx_segment, node);
vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
}
if (chan->has_sg)
vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
desc->async_tx.phys);
/* Configure the hardware using info in the config structure */
reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -694,12 +680,15 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
/* Start the transfer */
if (chan->has_sg) {
vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
tail_segment->phys);
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
list_for_each_entry(segment, &desc->segments, node) {
list_for_each_entry(desc, &chan->pending_list, node) {
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
vdma_desc_write(chan,
XILINX_VDMA_REG_START_ADDRESS(i++),
segment->hw.buf_addr);
@@ -716,8 +705,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
}
list_del(&desc->node);
chan->active_desc = desc;
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
out_unlock:
spin_unlock_irqrestore(&chan->lock, flags);
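
For reference, a simplified userspace model of the SG start sequence
that the rewritten xilinx_vdma_start_transfer() performs (not kernel
code; the register offsets below are illustrative placeholders):

#include <stdio.h>
#include <stdint.h>

#define REG_CURDESC  0x08	/* illustrative, not the real register map */
#define REG_TAILDESC 0x10

static void ctrl_write(uint32_t reg, uint32_t val)
{
	printf("reg[0x%02x] <- 0x%08x\n", reg, val);	/* stands in for MMIO */
}

/* CURDESC is loaded with the first pending descriptor before the channel
 * starts; the final TAILDESC write is what triggers the hardware to fetch
 * the whole collated chain up to the last segment. */
static void start_sg(uint32_t head_desc_phys, uint32_t tail_seg_phys)
{
	ctrl_write(REG_CURDESC, head_desc_phys);
	/* ... configure DMACR, frame delay/counts, start the channel ... */
	ctrl_write(REG_TAILDESC, tail_seg_phys);
}

int main(void)
{
	start_sg(0x1000, 0x1ff0);
	return 0;
}
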
@@ -742,22 +731,20 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
*/
static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_tx_descriptor *desc;
struct xilinx_vdma_tx_descriptor *desc, *next;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
desc = chan->active_desc;
if (!desc) {
dev_dbg(chan->dev, "no running descriptors\n");
if (list_empty(&chan->active_list))
goto out_unlock;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
list_del(&desc->node);
dma_cookie_complete(&desc->async_tx);
list_add_tail(&desc->node, &chan->done_list);
}
dma_cookie_complete(&desc->async_tx);
list_add_tail(&desc->node, &chan->done_list);
chan->active_desc = NULL;
out_unlock:
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -878,6 +865,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
/**
* append_desc_queue - Queuing descriptor
* @chan: Driver specific dma channel
* @desc: dma transaction descriptor
*/
static void append_desc_queue(struct xilinx_vdma_chan *chan,
struct xilinx_vdma_tx_descriptor *desc)
{
struct xilinx_vdma_tx_segment *tail_segment;
struct xilinx_vdma_tx_descriptor *tail_desc;
if (list_empty(&chan->pending_list))
goto append;
/*
* Add the hardware descriptor to the chain of hardware descriptors
* that already exists in memory.
*/
tail_desc = list_last_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment, node);
tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
/*
* Add the software descriptor and all children to the list
* of pending transactions
*/
append:
list_add_tail(&desc->node, &chan->pending_list);
chan->desc_pendingcount++;
if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
dev_dbg(chan->dev, "desc pendingcount is too high\n");
chan->desc_pendingcount = chan->num_frms;
}
}
/**
* xilinx_vdma_tx_submit - Submit DMA transaction
* @tx: Async transaction descriptor
@@ -906,11 +931,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
/* Append the transaction to the pending transactions queue. */
list_add_tail(&desc->node, &chan->pending_list);
/* Free the allocated desc */
chan->allocated_desc = NULL;
/* Put this transaction onto the tail of the pending queue */
append_desc_queue(chan, desc);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -973,13 +995,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
else
hw->buf_addr = xt->src_start;
/* Link the previous next descriptor to current */
if (!list_empty(&desc->segments)) {
prev = list_last_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
prev->hw.next_desc = segment->phys;
}
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
@@ -988,7 +1003,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
prev->hw.next_desc = segment->phys;
desc->async_tx.phys = segment->phys;
return &desc->async_tx;
@@ -1127,10 +1142,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
chan->dev = xdev->dev;
chan->xdev = xdev;
chan->has_sg = xdev->has_sg;
chan->desc_pendingcount = 0x0;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
INIT_LIST_HEAD(&chan->active_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
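
With allocated_desc gone, a dmaengine client can now prepare and submit
several transactions before kicking the channel. A hedged usage sketch
(standard dmaengine client API; the helper name, buffer parameters and
error handling are illustrative, not from this patch):

#include <linux/dmaengine.h>
#include <linux/slab.h>

static int queue_two_frames(struct dma_chan *chan, dma_addr_t buf0,
			    dma_addr_t buf1, size_t hsize, size_t frames)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;
	dma_addr_t bufs[2] = { buf0, buf1 };
	int i;

	/* sgl[] is a flexible array; allocate room for one data_chunk */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_DEV;
	xt->numf = frames;		/* frames in this transaction */
	xt->frame_size = 1;
	xt->sgl[0].size = hsize;	/* bytes per line */
	xt->sgl[0].icg = 0;

	for (i = 0; i < 2; i++) {
		xt->src_start = bufs[i];
		tx = dmaengine_prep_interleaved_dma(chan, xt,
						    DMA_PREP_INTERRUPT);
		if (!tx) {
			kfree(xt);
			return -EBUSY;
		}
		/* each submit lands the descriptor on pending_list */
		dmaengine_submit(tx);
	}

	/* one kick; the driver collates both descriptors for the HW */
	dma_async_issue_pending(chan);
	kfree(xt);
	return 0;
}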