dmaengine: s3c24xx: Split device_control

Split the device_control callback of the Samsung S3C24xx DMA driver to make
use of the newly introduced callbacks that will eventually be used to retrieve
slave capabilities.

Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
This commit is contained in:
Maxime Ripard 2014-11-17 14:42:31 +01:00 committed by Vinod Koul
parent 62ec8eb52d
commit 39ad460096

View File

@ -384,20 +384,30 @@ static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
return tc * txd->width; return tc * txd->width;
} }
static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan, static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config) struct dma_slave_config *config)
{ {
if (!s3cchan->slave) struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
return -EINVAL; unsigned long flags;
int ret = 0;
/* Reject definitely invalid configurations */ /* Reject definitely invalid configurations */
if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL; return -EINVAL;
spin_lock_irqsave(&s3cchan->vc.lock, flags);
if (!s3cchan->slave) {
ret = -EINVAL;
goto out;
}
s3cchan->cfg = *config; s3cchan->cfg = *config;
return 0; out:
spin_lock_irqrestore(&s3cchan->vc.lock, flags);
return ret;
} }
/* /*
@ -703,53 +713,38 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
* The DMA ENGINE API * The DMA ENGINE API
*/ */
static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
unsigned long arg)
{ {
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
struct s3c24xx_dma_engine *s3cdma = s3cchan->host; struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
unsigned long flags; unsigned long flags;
int ret = 0;
spin_lock_irqsave(&s3cchan->vc.lock, flags); spin_lock_irqsave(&s3cchan->vc.lock, flags);
switch (cmd) { if (!s3cchan->phy && !s3cchan->at) {
case DMA_SLAVE_CONFIG: dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
ret = s3c24xx_dma_set_runtime_config(s3cchan, s3cchan->id);
(struct dma_slave_config *)arg); return -EINVAL;
break;
case DMA_TERMINATE_ALL:
if (!s3cchan->phy && !s3cchan->at) {
dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
s3cchan->id);
ret = -EINVAL;
break;
}
s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
/* Mark physical channel as free */
if (s3cchan->phy)
s3c24xx_dma_phy_free(s3cchan);
/* Dequeue current job */
if (s3cchan->at) {
s3c24xx_dma_desc_free(&s3cchan->at->vd);
s3cchan->at = NULL;
}
/* Dequeue jobs not yet fired as well */
s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
break;
default:
/* Unknown command */
ret = -ENXIO;
break;
} }
s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
/* Mark physical channel as free */
if (s3cchan->phy)
s3c24xx_dma_phy_free(s3cchan);
/* Dequeue current job */
if (s3cchan->at) {
s3c24xx_dma_desc_free(&s3cchan->at->vd);
s3cchan->at = NULL;
}
/* Dequeue jobs not yet fired as well */
s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
spin_unlock_irqrestore(&s3cchan->vc.lock, flags); spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
return ret; return 0;
} }
static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan) static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
@ -1300,7 +1295,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
s3cdma->memcpy.device_control = s3c24xx_dma_control; s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
/* Initialize slave engine for SoC internal dedicated peripherals */ /* Initialize slave engine for SoC internal dedicated peripherals */
dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@ -1315,7 +1311,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_control = s3c24xx_dma_control; s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
/* Register as many memcpy channels as there are physical channels */ /* Register as many memcpy channels as there are physical channels */
ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,