dmaengine: PL08x: move DMA signal muxing into slave prepare code

Move the DMA request muxing into the slave prepare code and txd
release/completion code.  This means we only hold the DMA request mux
while there are descriptors waiting to be started or are in progress.

This leaves txd->direction as a write-only variable; remove it.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

commit c48d496329
parent a936e79313
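
For orientation before the hunks below: after this change the DMA request signal is claimed when a slave transfer is prepared and dropped again when its descriptor completes, is freed from the pending list, or is terminated. The stand-alone sketch that follows only models that ownership rule; every type and helper in it is an invented stand-in, not the driver's real code.

#include <assert.h>
#include <stdio.h>

/* Toy channel state; a plain counter stands in for however the driver
 * tracks the request-signal reservation. */
struct toy_chan {
	const char *name;
	int mux_holds;		/* descriptors currently holding the signal */
};

/* Claimed in the slave prepare path. */
static int toy_request_mux(struct toy_chan *ch)
{
	ch->mux_holds++;
	return 0;
}

/* Dropped on completion, termination or descriptor free. */
static void toy_release_mux(struct toy_chan *ch)
{
	assert(ch->mux_holds > 0);
	ch->mux_holds--;
}

int main(void)
{
	struct toy_chan ch = { .name = "dma0chan0", .mux_holds = 0 };

	toy_request_mux(&ch);	/* slave sg transfer prepared */
	toy_request_mux(&ch);	/* second transfer queued behind it */
	printf("%s: %d descriptor(s) hold the mux\n", ch.name, ch.mux_holds);

	toy_release_mux(&ch);	/* first descriptor completes in the IRQ handler */
	toy_release_mux(&ch);	/* the other is torn down by terminate_all */
	printf("%s: %d descriptor(s) hold the mux\n", ch.name, ch.mux_holds);
	return 0;
}

The property being modelled is the one the commit message states: the mux is never held across idle periods, so once the last descriptor is gone the request line is free for another user.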
@@ -168,7 +168,6 @@ struct pl08x_sg {
  * @tx: async tx descriptor
  * @node: node for txd list for channels
  * @dsg_list: list of children sg's
- * @direction: direction of transfer
  * @llis_bus: DMA memory address (physical) start for the LLIs
  * @llis_va: virtual memory address start for the LLIs
  * @cctl: control reg values for current txd
@@ -178,7 +177,6 @@ struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
 	struct list_head dsg_list;
-	enum dma_transfer_direction direction;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
 	/* Default cctl value for LLIs */
@@ -997,6 +995,7 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
 					 next, &plchan->pend_list, node) {
+			pl08x_release_mux(plchan);
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
@@ -1018,12 +1017,10 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
 /*
  * This should be called with the channel plchan->lock held
  */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan,
-			    struct pl08x_txd *txd)
+static int prep_phy_channel(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *ch;
-	int ret;
 
 	/* Check if we already have a channel */
 	if (plchan->phychan) {
@@ -1038,36 +1035,11 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 		return -EBUSY;
 	}
 
-	/*
-	 * OK we have a physical channel: for memcpy() this is all we
-	 * need, but for slaves the physical signals may be muxed!
-	 * Can the platform allow us to use this channel?
-	 */
-	if (plchan->slave) {
-		ret = pl08x_request_mux(plchan);
-		if (ret < 0) {
-			dev_dbg(&pl08x->adev->dev,
-				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
-				ch->id, plchan->name);
-			/* Release physical channel & return */
-			pl08x_put_phy_channel(pl08x, ch);
-			return -EBUSY;
-		}
-	}
-
 	plchan->phychan = ch;
-	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
-		 ch->id,
-		 plchan->signal,
-		 plchan->name);
+	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+		 ch->id, plchan->name);
 
 got_channel:
-	/* Assign the flow control signal to this channel */
-	if (txd->direction == DMA_MEM_TO_DEV)
-		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
-	else if (txd->direction == DMA_DEV_TO_MEM)
-		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
 	plchan->phychan_hold++;
 
 	return 0;
@@ -1077,7 +1049,6 @@ static void release_phy_channel(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 
-	pl08x_release_mux(plchan);
 	pl08x_put_phy_channel(pl08x, plchan->phychan);
 	plchan->phychan = NULL;
 }
@@ -1340,19 +1311,12 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	 * See if we already have a physical channel allocated,
 	 * else this is the time to try to get one.
 	 */
-	ret = prep_phy_channel(plchan, txd);
+	ret = prep_phy_channel(plchan);
 	if (ret) {
 		/*
 		 * No physical channel was available.
 		 *
 		 * memcpy transfers can be sorted out at submission time.
-		 *
-		 * Slave transfers may have been denied due to platform
-		 * channel muxing restrictions. Since there is no guarantee
-		 * that this will ever be resolved, and the signal must be
-		 * acquired AFTER acquiring the physical channel, we will let
-		 * them be NACK:ed with -EBUSY here. The drivers can retry
-		 * the prep() call if they are eager on doing this using DMA.
 		 */
 		if (plchan->slave) {
 			pl08x_free_txd_list(pl08x, plchan);
@@ -1423,7 +1387,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	}
 	list_add_tail(&dsg->node, &txd->dsg_list);
 
-	txd->direction = DMA_MEM_TO_MEM;
 	dsg->src_addr = src;
 	dsg->dst_addr = dest;
 	dsg->len = len;
@@ -1477,8 +1440,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 * will take precedence since this may configure the
 	 * channel target address dynamically at runtime.
 	 */
-	txd->direction = direction;
-
 	if (direction == DMA_MEM_TO_DEV) {
 		cctl = PL080_CONTROL_SRC_INCR;
 		slave_addr = plchan->cfg.dst_addr;
@@ -1519,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
 	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 
+	ret = pl08x_request_mux(plchan);
+	if (ret < 0) {
+		pl08x_free_txd(pl08x, txd);
+		dev_dbg(&pl08x->adev->dev,
+			"unable to mux for transfer on %s due to platform restrictions\n",
+			plchan->name);
+		return NULL;
+	}
+
+	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
+		plchan->signal, plchan->name);
+
+	/* Assign the flow control signal to this channel */
+	if (direction == DMA_MEM_TO_DEV)
+		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
+	else
+		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
 	for_each_sg(sgl, sg, sg_len, tmp) {
 		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
 		if (!dsg) {
+			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, txd);
 			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
 				__func__);
@@ -1586,6 +1566,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
+			/* Killing this one off, release its mux */
+			pl08x_release_mux(plchan);
 			pl08x_free_txd(pl08x, plchan->at);
 			plchan->at = NULL;
 		}
@@ -1702,7 +1684,6 @@ static void pl08x_tasklet(unsigned long data)
 
 		/*
 		 * No more jobs, so free up the physical channel
-		 * Free any allocated signal on slave transfers too
 		 */
 		release_phy_channel(plchan);
 		plchan->state = PL08X_CHAN_IDLE;
@@ -1720,8 +1701,7 @@ static void pl08x_tasklet(unsigned long data)
 				int ret;
 
 				/* This should REALLY not fail now */
-				ret = prep_phy_channel(waiting,
-						       waiting->waiting);
+				ret = prep_phy_channel(waiting);
 				BUG_ON(ret);
 				waiting->phychan_hold--;
 				waiting->state = PL08X_CHAN_RUNNING;
@@ -1794,6 +1774,11 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			tx = plchan->at;
 			if (tx) {
 				plchan->at = NULL;
+				/*
+				 * This descriptor is done, release its mux
+				 * reservation.
+				 */
+				pl08x_release_mux(plchan);
 				dma_cookie_complete(&tx->tx);
 				list_add_tail(&tx->node, &plchan->done_list);
 			}
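
A usage-side note: from a dmaengine client's point of view the request signal is now taken inside the slave prep call and given back when the descriptor finishes or is terminated. The sketch below is ordinary dmaengine consumer code, not something this patch adds; the channel, scatterlist and FIFO address are placeholders.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative only: 'chan' is assumed to be a PL08x slave channel and
 * 'sg'/'nents' a buffer already mapped for DMA; 0x10009000 is a made-up
 * peripheral FIFO address. */
static int example_start_tx(struct dma_chan *chan, struct scatterlist *sg,
			    unsigned int nents)
{
	struct dma_slave_config cfg = {
		.dst_addr = 0x10009000,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst = 1,
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);

	/* The request mux is claimed in here, as part of prep... */
	desc = dmaengine_prep_slave_sg(chan, sg, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;	/* e.g. the signal could not be muxed */

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	/* ...and released again once the descriptor completes. */
	return 0;
}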