dmaengine: remove DMA unmap flags

Remove no longer needed DMA unmap flags:
- DMA_COMPL_SKIP_SRC_UNMAP
- DMA_COMPL_SKIP_DEST_UNMAP
- DMA_COMPL_SRC_UNMAP_SINGLE
- DMA_COMPL_DEST_UNMAP_SINGLE

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Acked-by: Mark Brown <broonie@linaro.org>
[djbw: clean up straggling skip unmap flags in ntb]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Bartlomiej Zolnierkiewicz 2013-10-18 19:35:33 +02:00 committed by Dan Williams
parent 54f8d501e8
commit 0776ae7b89
18 changed files with 27 additions and 67 deletions
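
For context — not part of this commit — the pattern that replaces these flags: a client that needs its buffers unmapped on completion now attaches a struct dmaengine_unmap_data to the descriptor, and the core drops the mappings when the last reference goes away. A minimal sketch modeled on the ntb_transport and async_memcpy hunks below, using a hypothetical helper name (xfer_copy) and with error unwinding trimmed:

        #include <linux/dmaengine.h>

        /* Hypothetical client helper (illustration only): copy one page
         * src -> dst on a memcpy-capable channel, post-flag-removal. */
        static dma_cookie_t xfer_copy(struct dma_chan *chan, struct page *dst,
                                      struct page *src, size_t len)
        {
                struct dma_device *dev = chan->device;
                struct dmaengine_unmap_data *unmap;
                struct dma_async_tx_descriptor *txd;

                /* Room for one source and one destination mapping. */
                unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
                if (!unmap)
                        return -ENOMEM;

                unmap->len = len;
                unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
                unmap->to_cnt = 1;
                unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);
                unmap->from_cnt = 1;

                /* No DMA_COMPL_SKIP_* flags: unmap state rides on the descriptor. */
                txd = dev->device_prep_dma_memcpy(chan, unmap->addr[1],
                                                  unmap->addr[0], len,
                                                  DMA_PREP_INTERRUPT);
                if (txd)
                        dma_set_unmap(txd, unmap); /* descriptor takes its own ref */

                dmaengine_unmap_put(unmap); /* drop the caller's ref */
                return txd ? dmaengine_submit(txd) : -EIO;
        }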

crypto/async_tx/async_memcpy.c

@@ -56,8 +56,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
         unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
 
         if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
-                unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                               DMA_COMPL_SKIP_DEST_UNMAP;
+                unsigned long dma_prep_flags = 0;
 
                 if (submit->cb_fn)
                         dma_prep_flags |= DMA_PREP_INTERRUPT;

crypto/async_tx/async_pq.c

@@ -62,7 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
         dma_addr_t dma_dest[2];
         int src_off = 0;
 
-        dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
         if (submit->flags & ASYNC_TX_FENCE)
                 dma_flags |= DMA_PREP_FENCE;

crypto/async_tx/async_raid6_recov.c

@@ -47,9 +47,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
                 struct device *dev = dma->dev;
                 dma_addr_t pq[2];
                 struct dma_async_tx_descriptor *tx;
-                enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                                DMA_COMPL_SKIP_DEST_UNMAP |
-                                                DMA_PREP_PQ_DISABLE_P;
+                enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
                 if (submit->flags & ASYNC_TX_FENCE)
                         dma_flags |= DMA_PREP_FENCE;
@@ -113,9 +111,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
                 dma_addr_t dma_dest[2];
                 struct device *dev = dma->dev;
                 struct dma_async_tx_descriptor *tx;
-                enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                                DMA_COMPL_SKIP_DEST_UNMAP |
-                                                DMA_PREP_PQ_DISABLE_P;
+                enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
 
                 if (submit->flags & ASYNC_TX_FENCE)
                         dma_flags |= DMA_PREP_FENCE;

crypto/async_tx/async_xor.c

@@ -41,7 +41,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
         dma_async_tx_callback cb_fn_orig = submit->cb_fn;
         void *cb_param_orig = submit->cb_param;
         enum async_tx_flags flags_orig = submit->flags;
-        enum dma_ctrl_flags dma_flags;
+        enum dma_ctrl_flags dma_flags = 0;
         int src_cnt = unmap->to_cnt;
         int xor_src_cnt;
         dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
@@ -55,7 +55,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
                 /* if we are submitting additional xors, leave the chain open
                  * and clear the callback parameters
                  */
-                dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
                 if (src_cnt > xor_src_cnt) {
                         submit->flags &= ~ASYNC_TX_ACK;
                         submit->flags |= ASYNC_TX_FENCE;
@@ -284,8 +283,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
         if (unmap && src_cnt <= device->max_xor &&
             is_dma_xor_aligned(device, offset, 0, len)) {
-                unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP |
-                                               DMA_COMPL_SKIP_DEST_UNMAP;
+                unsigned long dma_prep_flags = 0;
                 int i;
 
                 pr_debug("%s: (async) len: %zu\n", __func__, len);

drivers/ata/pata_arasan_cf.c

@@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
         struct dma_async_tx_descriptor *tx;
         struct dma_chan *chan = acdev->dma_chan;
         dma_cookie_t cookie;
-        unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
-                DMA_COMPL_SKIP_DEST_UNMAP;
+        unsigned long flags = DMA_PREP_INTERRUPT;
         int ret = 0;
 
         tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);

drivers/dma/dmaengine.c

@@ -1065,8 +1065,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
         unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                       DMA_FROM_DEVICE);
         unmap->len = len;
-        flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
-                DMA_COMPL_SKIP_DEST_UNMAP;
+        flags = DMA_CTRL_ACK;
         tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                          len, flags);

drivers/dma/dmatest.c

@@ -599,8 +599,7 @@ static int dmatest_func(void *data)
         /*
          * src and dst buffers are freed by ourselves below
          */
-        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
-                DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
         while (!kthread_should_stop()
                && !(params->iterations && total_tests >= params->iterations)) {

drivers/dma/ioat/dma.c

@@ -818,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
         dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
         dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-        flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-                DMA_PREP_INTERRUPT;
+        flags = DMA_PREP_INTERRUPT;
         tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
                                                    IOAT_TEST_SIZE, flags);
         if (!tx) {

drivers/dma/ioat/dma_v3.c

@@ -1279,9 +1279,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                            DMA_TO_DEVICE);
         tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                       IOAT_NUM_SRC_TEST, PAGE_SIZE,
-                                      DMA_PREP_INTERRUPT |
-                                      DMA_COMPL_SKIP_SRC_UNMAP |
-                                      DMA_COMPL_SKIP_DEST_UNMAP);
+                                      DMA_PREP_INTERRUPT);
 
         if (!tx) {
                 dev_err(dev, "Self-test xor prep failed\n");
@@ -1342,9 +1340,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                            DMA_TO_DEVICE);
         tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                           IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-                                          &xor_val_result, DMA_PREP_INTERRUPT |
-                                          DMA_COMPL_SKIP_SRC_UNMAP |
-                                          DMA_COMPL_SKIP_DEST_UNMAP);
+                                          &xor_val_result, DMA_PREP_INTERRUPT);
         if (!tx) {
                 dev_err(dev, "Self-test zero prep failed\n");
                 err = -ENODEV;
@@ -1389,9 +1385,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                                            DMA_TO_DEVICE);
         tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                           IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-                                          &xor_val_result, DMA_PREP_INTERRUPT |
-                                          DMA_COMPL_SKIP_SRC_UNMAP |
-                                          DMA_COMPL_SKIP_DEST_UNMAP);
+                                          &xor_val_result, DMA_PREP_INTERRUPT);
         if (!tx) {
                 dev_err(dev, "Self-test 2nd zero prep failed\n");
                 err = -ENODEV;

drivers/media/platform/m2m-deinterlace.c

@@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
         ctx->xt->dir = DMA_MEM_TO_MEM;
         ctx->xt->src_sgl = false;
         ctx->xt->dst_sgl = true;
-        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
-                DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
+        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
         tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
         if (tx == NULL) {

drivers/media/platform/timblogiw.c

@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
         desc = dmaengine_prep_slave_sg(fh->chan,
                 buf->sg, sg_elems, DMA_DEV_TO_MEM,
-                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+                DMA_PREP_INTERRUPT);
         if (!desc) {
                 spin_lock_irq(&fh->queue_lock);
                 list_del_init(&vb->queue);

drivers/misc/carma/carma-fpga.c

@@ -631,8 +631,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
         struct dma_async_tx_descriptor *tx;
         dma_cookie_t cookie;
         dma_addr_t dst, src;
-        unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
-                                  DMA_COMPL_SKIP_SRC_UNMAP;
+        unsigned long dma_flags = 0;
 
         dst_sg = buf->vb.sglist;
         dst_nents = buf->vb.sglen;

drivers/mtd/nand/atmel_nand.c

@@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
         dma_dev = host->dma_chan->device;
 
-        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
-                DMA_COMPL_SKIP_DEST_UNMAP;
+        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
         phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
         if (dma_mapping_error(dma_dev->dev, phys_addr)) {

drivers/mtd/nand/fsmc_nand.c

@@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
         dma_dev = chan->device;
         dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
 
-        flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
-
         if (direction == DMA_TO_DEVICE) {
                 dma_src = dma_addr;
                 dma_dst = host->data_pa;

drivers/net/ethernet/micrel/ks8842.c

@@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
                 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
 
         ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
-                &ctl->sg, 1, DMA_MEM_TO_DEV,
-                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+                &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
         if (!ctl->adesc)
                 return NETDEV_TX_BUSY;
@@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
         sg_dma_len(sg) = DMA_BUFFER_SIZE;
 
         ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
-                sg, 1, DMA_DEV_TO_MEM,
-                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+                sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
         if (!ctl->adesc)
                 goto out;

drivers/ntb/ntb_transport.c

@@ -1037,7 +1037,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
         struct dmaengine_unmap_data *unmap;
         dma_cookie_t cookie;
         void *buf = entry->buf;
-        unsigned long flags;
 
         entry->len = len;
@@ -1073,10 +1072,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
         unmap->from_cnt = 1;
 
-        flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-                DMA_PREP_INTERRUPT;
         txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
-                                             unmap->addr[0], len, flags);
+                                             unmap->addr[0], len,
+                                             DMA_PREP_INTERRUPT);
         if (!txd)
                 goto err_get_unmap;
@@ -1266,7 +1264,6 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
         void __iomem *offset;
         size_t len = entry->len;
         void *buf = entry->buf;
-        unsigned long flags;
 
         offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
         hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
@@ -1301,10 +1298,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
         unmap->to_cnt = 1;
 
-        flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
-                DMA_PREP_INTERRUPT;
         txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
-                                             flags);
+                                             DMA_PREP_INTERRUPT);
         if (!txd)
                 goto err_get_unmap;

drivers/spi/spi-dw-mid.c

@@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
                                 &dws->tx_sgl,
                                 1,
                                 DMA_MEM_TO_DEV,
-                                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+                                DMA_PREP_INTERRUPT);
         txdesc->callback = dw_spi_dma_done;
         txdesc->callback_param = dws;
@@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
                                 &dws->rx_sgl,
                                 1,
                                 DMA_DEV_TO_MEM,
-                                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+                                DMA_PREP_INTERRUPT);
         rxdesc->callback = dw_spi_dma_done;
         rxdesc->callback_param = dws;

include/linux/dmaengine.h

@@ -171,12 +171,6 @@ struct dma_interleaved_template {
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has has a chance to establish any dependency
  *  chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- *  (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- *  (if not set, do the destination dma-unmapping as page)
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
 enum dma_ctrl_flags {
         DMA_PREP_INTERRUPT = (1 << 0),
         DMA_CTRL_ACK = (1 << 1),
-        DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
-        DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
-        DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
-        DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
-        DMA_PREP_PQ_DISABLE_P = (1 << 6),
-        DMA_PREP_PQ_DISABLE_Q = (1 << 7),
-        DMA_PREP_CONTINUE = (1 << 8),
-        DMA_PREP_FENCE = (1 << 9),
+        DMA_PREP_PQ_DISABLE_P = (1 << 2),
+        DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+        DMA_PREP_CONTINUE = (1 << 4),
+        DMA_PREP_FENCE = (1 << 5),
 };
 
 /**
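
Two notes on this last hunk, for readers following the series: the surviving DMA_PREP_* values shift down to bits 2-5, which is safe because enum dma_ctrl_flags is purely in-kernel, and the SKIP/SINGLE distinctions become moot because unmapping no longer happens in each driver's completion path at all. Roughly, the core-side counterpart added earlier in this series looks like the sketch below (simplified from memory, not part of this diff; the kref release handler behind dmaengine_unmap_put() performs the actual dma_unmap_page() calls once the last reference drops):

        /* Simplified sketch: drivers call this when a descriptor completes.
         * If unmap data is attached, dropping the reference (eventually)
         * unmaps every recorded source and destination address. */
        static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
        {
                if (tx->unmap) {
                        dmaengine_unmap_put(tx->unmap);
                        tx->unmap = NULL;
                }
        }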