dmaengine updates for v5.10-rc1
Core:

 - Mark dma_request_slave_channel() deprecated in favour of
   dma_request_chan()

 - subsystem conversion for tasklet_setup() API

 - subsystem removal of local dma_parms for arm drivers

Also updates to a bunch of drivers, notably TI, DW and AXI-DMAC.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAl+H8+MACgkQfBQHDyUj
g0eQXBAA0SUdSgldqi8hInfPMrYDcxhKeJUbDFelLqueOMCTe/WV4ZexjQ3vyDZc
49CeMXmlukBaM6ULlmXrWBYFARcO0kFrH4Kz9QPZrcwcAx219NQutADB/Cpkgu+R
INEHSeYI5d+Q/I4FPmzEofmbUY0916BvNzx1UBQuIwR9iPJQpaK3hOhbmS7p/9X5
ZK6AkyADWmkoBvzCylkLecZPBhSYCFdgRK/UlaFKMT9l2BcTwQRIQJ5JOof9ks/q
Z0e6ULXgk3pbVpPKv10bO/1RP2DJT1zl2gIIg/zAiivTf4tXgNKEUkxO7sqJrJje
LifyCzPeSMieiUWDuwGTzfFnLxpQ9Ao4JX+iaDNMnZuEXPYJuQzqQ2Yrt1N3f0xA
+EF5ZMpvsMTEUML7GIMk8e8aadE1FujQaEmi5ONg4RinbJRqVLOp15y/LhQl+GT/
tlu/y/7D/0rk6auxmb5tui5x/7Y89uvx16KYfQxqPyrWzIMQ5py6/2lrKSertFzD
OUSwhGqfq/gTkpLLG/HUyEe0xfVeCfvyS7bHn5FzjYpWtYtQzNgLvpy5xvYLSb+H
ONVpCK5MJtXmeM8tRN2oOMnwuM4vlU90ev0DDb7r75JvU20oLMltvrGA+4BRQff3
cCY84fv5y6mr1/Cdm05aZNsb8Iy5uQ3UlssVmqDc8qvasZqeyRw=
=mu9A
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "Core:
   - Mark dma_request_slave_channel() deprecated in favour of
     dma_request_chan()
   - subsystem conversion for tasklet_setup() API
   - subsystem removal of local dma_parms for arm drivers

  Also updates to a bunch of drivers, notably TI, DW and AXI-DMAC"

* tag 'dmaengine-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (104 commits)
  dmaengine: owl-dma: fix kernel-doc style for enum
  dmaengine: zynqmp_dma: fix kernel-doc style for tasklet
  dmaengine: xilinx_dma: fix kernel-doc style for tasklet
  dmaengine: qcom: bam_dma: fix kernel-doc style for tasklet
  dmaengine: altera-msgdma: fix kernel-doc style for tasklet
  dmaengine: xilinx: dpdma: convert tasklets to use new tasklet_setup() API
  dmaengine: sf-pdma: convert tasklets to use new tasklet_setup() API
  dt-bindings: Fix 'reg' size issues in zynqmp examples
  dmaengine: rcar-dmac: drop double zeroing
  dmaengine: sh: drop double zeroing
  dmaengine: ioat: Allocate correct size for descriptor chunk
  dmaengine: ti: k3-udma: use devm_platform_ioremap_resource_byname
  dmaengine: fsl: remove bad channel update
  dmaengine: dma-jz4780: Fix race in jz4780_dma_tx_status
  dmaengine: pl330: fix argument for tasklet
  dmaengine: dmatest: Return boolean result directly in filter()
  dmaengine: dmatest: Check list for emptiness before access its last entry
  dmaengine: ti: k3-udma-glue: fix channel enable functions
  dmaengine: iop-adma: Fix pointer cast warnings
  dmaengine: dw-edma: Fix Using plain integer as NULL pointer in dw-edma-v0-debugfs.c
  ...
commit f065199d4d
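A large part of this pull is the mechanical conversion of drivers to the new
tasklet_setup() API, visible in hunk after hunk below. As a rough sketch of
the pattern (foo_dev and its callbacks are hypothetical, not one of the
drivers touched here):

    #include <linux/interrupt.h>

    struct foo_dev {
        struct tasklet_struct tasklet;
        /* ... driver state ... */
    };

    /* Old style: driver context is smuggled through an unsigned long. */
    static void foo_tasklet_old(unsigned long data)
    {
        struct foo_dev *fd = (struct foo_dev *)data;
        /* ... process completions for fd ... */
    }
    /* paired with: tasklet_init(&fd->tasklet, foo_tasklet_old, (unsigned long)fd); */

    /* New style: the callback receives the tasklet_struct itself and
     * recovers the containing structure with from_tasklet(), a thin
     * wrapper around container_of().
     */
    static void foo_tasklet(struct tasklet_struct *t)
    {
        struct foo_dev *fd = from_tasklet(fd, t, tasklet);
        /* ... process completions for fd ... */
    }
    /* paired with: tasklet_setup(&fd->tasklet, foo_tasklet); */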
@@ -116,6 +116,12 @@ Description:    The maximum number of bandwidth tokens that may be in use at
                one time by operations that access low bandwidth memory in the
                device.
 
+What:           /sys/bus/dsa/devices/dsa<m>/cmd_status
+Date:           Aug 28, 2020
+KernelVersion:  5.10.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The last executed device administrative command's status/error.
+
 What:           /sys/bus/dsa/devices/wq<m>.<n>/group_id
 Date:           Oct 25, 2019
 KernelVersion:  5.6.0
@@ -170,6 +176,20 @@ Contact:        dmaengine@vger.kernel.org
 Description:    The number of entries in this work queue that may be filled
                 via a limited portal.
 
+What:           /sys/bus/dsa/devices/wq<m>.<n>/max_transfer_size
+Date:           Aug 28, 2020
+KernelVersion:  5.10.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The max transfer sized for this workqueue. Cannot exceed device
+                max transfer size. Configurable parameter.
+
+What:           /sys/bus/dsa/devices/wq<m>.<n>/max_batch_size
+Date:           Aug 28, 2020
+KernelVersion:  5.10.0
+Contact:        dmaengine@vger.kernel.org
+Description:    The max batch size for this workqueue. Cannot exceed device
+                max batch size. Configurable parameter.
+
 What:           /sys/bus/dsa/devices/engine<m>.<n>/group_id
 Date:           Oct 25, 2019
 KernelVersion:  5.6.0
@@ -16,6 +16,7 @@ properties:
   compatible:
     items:
       - enum:
+          - renesas,dmac-r8a7742  # RZ/G1H
           - renesas,dmac-r8a7743  # RZ/G1M
           - renesas,dmac-r8a7744  # RZ/G1N
           - renesas,dmac-r8a7745  # RZ/G1E
@@ -18,12 +18,15 @@ properties:
     const: snps,dma-spear1340
 
   "#dma-cells":
-    const: 3
+    minimum: 3
+    maximum: 4
    description: |
      First cell is a phandle pointing to the DMA controller. Second one is
      the DMA request line number. Third cell is the memory master identifier
      for transfers on dynamically allocated channel. Fourth cell is the
-      peripheral master identifier for transfers on an allocated channel.
+      peripheral master identifier for transfers on an allocated channel. Fifth
+      cell is an optional mask of the DMA channels permitted to be allocated
+      for the corresponding client device.
 
   reg:
     maxItems: 1
@@ -678,11 +678,11 @@ static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
 
 /**
  * msgdma_tasklet - Schedule completion tasklet
- * @data: Pointer to the Altera sSGDMA channel structure
+ * @t: Pointer to the Altera sSGDMA channel structure
  */
-static void msgdma_tasklet(unsigned long data)
+static void msgdma_tasklet(struct tasklet_struct *t)
 {
-    struct msgdma_device *mdev = (struct msgdma_device *)data;
+    struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
     u32 count;
     u32 __maybe_unused size;
    u32 __maybe_unused status;
@@ -830,7 +830,7 @@ static int msgdma_probe(struct platform_device *pdev)
    if (ret)
        return ret;
 
-    tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);
+    tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);
 
    dma_cookie_init(&mdev->dmachan);
 
@@ -598,9 +598,9 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 
 /*--  IRQ & Tasklet  ---------------------------------------------------*/
 
-static void atc_tasklet(unsigned long data)
+static void atc_tasklet(struct tasklet_struct *t)
 {
-    struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+    struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
 
    if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
        return atc_handle_error(atchan);
@@ -1892,8 +1892,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&atchan->queue);
        INIT_LIST_HEAD(&atchan->free_list);
 
-        tasklet_init(&atchan->tasklet, atc_tasklet,
-                     (unsigned long)atchan);
+        tasklet_setup(&atchan->tasklet, atc_tasklet);
        atc_enable_chan_irq(atdma, i);
    }
 
@@ -1613,9 +1613,9 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
    /* Then continue with usual descriptor management */
 }
 
-static void at_xdmac_tasklet(unsigned long data)
+static void at_xdmac_tasklet(struct tasklet_struct *t)
 {
-    struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
+    struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
    struct at_xdmac_desc *desc;
    u32 error_mask;
 
@@ -2063,8 +2063,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
        spin_lock_init(&atchan->lock);
        INIT_LIST_HEAD(&atchan->xfers_list);
        INIT_LIST_HEAD(&atchan->free_descs_list);
-        tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
-                     (unsigned long)atchan);
+        tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
 
        /* Clear pending interrupts. */
        while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
@@ -41,14 +41,12 @@
  * struct bcm2835_dmadev - BCM2835 DMA controller
  * @ddev: DMA device
  * @base: base address of register map
- * @dma_parms: DMA parameters (to convey 1 GByte max segment size to clients)
  * @zero_page: bus address of zero page (to detect transactions copying from
  *             zero page and avoid accessing memory if so)
  */
 struct bcm2835_dmadev {
    struct dma_device ddev;
    void __iomem *base;
-    struct device_dma_parameters dma_parms;
    dma_addr_t zero_page;
 };
 
@@ -902,7 +900,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
    if (!od)
        return -ENOMEM;
 
-    pdev->dev.dma_parms = &od->dma_parms;
    dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
 
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1868,9 +1868,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
  * This tasklet is called from the interrupt handler to
  * handle each descriptor (DMA job) that is sent to a channel.
  */
-static void dma_tasklet(unsigned long data)
+static void dma_tasklet(struct tasklet_struct *t)
 {
-    struct coh901318_chan *cohc = (struct coh901318_chan *) data;
+    struct coh901318_chan *cohc = from_tasklet(cohc, t, tasklet);
    struct coh901318_desc *cohd_fin;
    unsigned long flags;
    struct dmaengine_desc_callback cb;
@@ -2615,8 +2615,7 @@ static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
        INIT_LIST_HEAD(&cohc->active);
        INIT_LIST_HEAD(&cohc->queue);
 
-        tasklet_init(&cohc->tasklet, dma_tasklet,
-                     (unsigned long) cohc);
+        tasklet_setup(&cohc->tasklet, dma_tasklet);
 
        list_add_tail(&cohc->chan.device_node,
                      &dma->channels);
@@ -6,6 +6,7 @@
  * Author: Lars-Peter Clausen <lars@metafoo.de>
  */
 
+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
@@ -45,6 +46,16 @@
  * there is no address than can or needs to be configured for the device side.
  */
 
+#define AXI_DMAC_REG_INTERFACE_DESC     0x10
+#define   AXI_DMAC_DMA_SRC_TYPE_MSK     GENMASK(13, 12)
+#define   AXI_DMAC_DMA_SRC_TYPE_GET(x)  FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
+#define   AXI_DMAC_DMA_SRC_WIDTH_MSK    GENMASK(11, 8)
+#define   AXI_DMAC_DMA_SRC_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
+#define   AXI_DMAC_DMA_DST_TYPE_MSK     GENMASK(5, 4)
+#define   AXI_DMAC_DMA_DST_TYPE_GET(x)  FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
+#define   AXI_DMAC_DMA_DST_WIDTH_MSK    GENMASK(3, 0)
+#define   AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
+
 #define AXI_DMAC_REG_IRQ_MASK           0x80
 #define AXI_DMAC_REG_IRQ_PENDING        0x84
 #define AXI_DMAC_REG_IRQ_SOURCE         0x88
@@ -134,8 +145,6 @@ struct axi_dmac {
 
    struct dma_device dma_dev;
    struct axi_dmac_chan chan;
-
-    struct device_dma_parameters dma_parms;
 };
 
 static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
@@ -717,6 +726,20 @@ static const struct regmap_config axi_dmac_regmap_config = {
    .writeable_reg = axi_dmac_regmap_rdwr,
 };
 
+static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
+{
+    chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
+
+    if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+        chan->direction = DMA_MEM_TO_MEM;
+    else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+        chan->direction = DMA_MEM_TO_DEV;
+    else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
+        chan->direction = DMA_DEV_TO_MEM;
+    else
+        chan->direction = DMA_DEV_TO_DEV;
+}
+
 /*
  * The configuration stored in the devicetree matches the configuration
  * parameters of the peripheral instance and allows the driver to know which
@@ -760,26 +783,81 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
        return ret;
    chan->dest_width = val / 8;
 
-    chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
-
-    if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
-        chan->direction = DMA_MEM_TO_MEM;
-    else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
-        chan->direction = DMA_MEM_TO_DEV;
-    else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
-        chan->direction = DMA_DEV_TO_MEM;
-    else
-        chan->direction = DMA_DEV_TO_DEV;
+    axi_dmac_adjust_chan_params(chan);
 
    return 0;
 }
 
-static int axi_dmac_detect_caps(struct axi_dmac *dmac)
+static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
+{
+    struct device_node *of_channels, *of_chan;
+    int ret;
+
+    of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
+    if (of_channels == NULL)
+        return -ENODEV;
+
+    for_each_child_of_node(of_channels, of_chan) {
+        ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
+        if (ret) {
+            of_node_put(of_chan);
+            of_node_put(of_channels);
+            return -EINVAL;
+        }
+    }
+    of_node_put(of_channels);
+
+    return 0;
+}
+
+static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
+{
+    struct axi_dmac_chan *chan = &dmac->chan;
+    unsigned int version;
+    unsigned int val, desc;
+
+    version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
+    desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
+    if (desc == 0) {
+        dev_err(dev, "DMA interface register reads zero\n");
+        return -EFAULT;
+    }
+
+    val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
+    if (val > AXI_DMAC_BUS_TYPE_FIFO) {
+        dev_err(dev, "Invalid source bus type read: %d\n", val);
+        return -EINVAL;
+    }
+    chan->src_type = val;
+
+    val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
+    if (val > AXI_DMAC_BUS_TYPE_FIFO) {
+        dev_err(dev, "Invalid destination bus type read: %d\n", val);
+        return -EINVAL;
+    }
+    chan->dest_type = val;
+
+    val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
+    if (val == 0) {
+        dev_err(dev, "Source bus width is zero\n");
+        return -EINVAL;
+    }
+    /* widths are stored in log2 */
+    chan->src_width = 1 << val;
+
+    val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
+    if (val == 0) {
+        dev_err(dev, "Destination bus width is zero\n");
+        return -EINVAL;
+    }
+    chan->dest_width = 1 << val;
+
+    axi_dmac_adjust_chan_params(chan);
+
+    return 0;
+}
+
+static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
 {
    struct axi_dmac_chan *chan = &dmac->chan;
 
    axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
    if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
@@ -826,11 +904,11 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac)
 
 static int axi_dmac_probe(struct platform_device *pdev)
 {
-    struct device_node *of_channels, *of_chan;
    struct dma_device *dma_dev;
    struct axi_dmac *dmac;
    struct resource *res;
    struct regmap *regmap;
+    unsigned int version;
    int ret;
 
    dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -852,23 +930,22 @@ static int axi_dmac_probe(struct platform_device *pdev)
    if (IS_ERR(dmac->clk))
        return PTR_ERR(dmac->clk);
 
+    ret = clk_prepare_enable(dmac->clk);
+    if (ret < 0)
+        return ret;
+
+    version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
+
+    if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
+        ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
+    else
+        ret = axi_dmac_parse_dt(&pdev->dev, dmac);
+
+    if (ret < 0)
+        goto err_clk_disable;
+
    INIT_LIST_HEAD(&dmac->chan.active_descs);
 
-    of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
-    if (of_channels == NULL)
-        return -ENODEV;
-
-    for_each_child_of_node(of_channels, of_chan) {
-        ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
-        if (ret) {
-            of_node_put(of_chan);
-            of_node_put(of_channels);
-            return -EINVAL;
-        }
-    }
-    of_node_put(of_channels);
-
-    pdev->dev.dma_parms = &dmac->dma_parms;
    dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
    dma_dev = &dmac->dma_dev;
@@ -894,11 +971,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
    dmac->chan.vchan.desc_free = axi_dmac_desc_free;
    vchan_init(&dmac->chan.vchan, dma_dev);
 
-    ret = clk_prepare_enable(dmac->clk);
-    if (ret < 0)
-        return ret;
-
-    ret = axi_dmac_detect_caps(dmac);
+    ret = axi_dmac_detect_caps(dmac, version);
    if (ret)
        goto err_clk_disable;
 
@@ -639,11 +639,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
    unsigned long flags;
    unsigned long residue = 0;
 
+    spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
    status = dma_cookie_status(chan, cookie, txstate);
    if ((status == DMA_COMPLETE) || (txstate == NULL))
-        return status;
-
-    spin_lock_irqsave(&jzchan->vchan.lock, flags);
+        goto out_unlock_irqrestore;
 
    vdesc = vchan_find_desc(&jzchan->vchan, cookie);
    if (vdesc) {
@@ -660,6 +660,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
          && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
        status = DMA_ERROR;
 
+out_unlock_irqrestore:
    spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
    return status;
 }
@@ -847,8 +847,10 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
    }
    mutex_unlock(&dma_list_mutex);
 
-    if (IS_ERR_OR_NULL(chan))
-        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+    if (IS_ERR(chan))
+        return chan;
+    if (!chan)
+        return ERR_PTR(-EPROBE_DEFER);
 
 found:
 #ifdef CONFIG_DEBUG_FS
@@ -871,24 +873,6 @@ found:
 }
 EXPORT_SYMBOL_GPL(dma_request_chan);
 
-/**
- * dma_request_slave_channel - try to allocate an exclusive slave channel
- * @dev: pointer to client device structure
- * @name: slave channel name
- *
- * Returns pointer to appropriate DMA channel on success or NULL.
- */
-struct dma_chan *dma_request_slave_channel(struct device *dev,
-                                           const char *name)
-{
-    struct dma_chan *ch = dma_request_chan(dev, name);
-    if (IS_ERR(ch))
-        return NULL;
-
-    return ch;
-}
-EXPORT_SYMBOL_GPL(dma_request_slave_channel);
-
 /**
  * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
  * @mask: capabilities that the channel must satisfy
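The dmaengine.c hunks above are the reason new consumers should call
dma_request_chan() directly: it returns an ERR_PTR() that preserves the real
failure reason (including -EPROBE_DEFER), whereas the deprecated
dma_request_slave_channel() wrapper being dropped here flattened every error
to NULL. A minimal consumer sketch (foo_probe() and the "tx" channel name are
illustrative only, not from this series):

    #include <linux/dmaengine.h>

    static int foo_probe(struct device *dev)
    {
        struct dma_chan *chan;

        chan = dma_request_chan(dev, "tx");
        if (IS_ERR(chan))
            return PTR_ERR(chan);   /* propagates -EPROBE_DEFER etc. */

        /* ... configure and use the channel ... */

        dma_release_channel(chan);
        return 0;
    }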
@@ -7,6 +7,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
@@ -454,8 +455,13 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
 static void result(const char *err, unsigned int n, unsigned int src_off,
                   unsigned int dst_off, unsigned int len, unsigned long data)
 {
-    pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
-            current->comm, n, err, src_off, dst_off, len, data);
+    if (IS_ERR_VALUE(data)) {
+        pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%ld)\n",
+                current->comm, n, err, src_off, dst_off, len, data);
+    } else {
+        pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
+                current->comm, n, err, src_off, dst_off, len, data);
+    }
 }
 
 static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
@@ -1052,13 +1058,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
 
 static bool filter(struct dma_chan *chan, void *param)
 {
-    struct dmatest_params *params = param;
-
-    if (!dmatest_match_channel(params, chan) ||
-        !dmatest_match_device(params, chan->device))
-        return false;
-    else
-        return true;
+    return dmatest_match_channel(param, chan) && dmatest_match_device(param, chan->device);
 }
 
 static void request_channels(struct dmatest_info *info,
@@ -1249,15 +1249,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
    add_threaded_test(info);
 
    /* Check if channel was added successfully */
-    dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
-
-    if (dtc->chan) {
+    if (!list_empty(&info->channels)) {
        /*
         * if new channel was not successfully added, revert the
         * "test_channel" string to the name of the last successfully
         * added channel. exception for when users issues empty string
         * to channel parameter.
         */
+        dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
        if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
            && (strcmp("", strim(test_channel)) != 0)) {
            ret = -EINVAL;
@@ -293,7 +293,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
    if (!regs)
        return;
 
-    base_dir = debugfs_create_dir(dw->name, 0);
+    base_dir = debugfs_create_dir(dw->name, NULL);
    if (!base_dir)
        return;
 
@@ -40,7 +40,7 @@ struct dw_edma_v0_ch {
    struct dw_edma_v0_ch_regs wr;   /* 0x200 */
    u32 padding_1[55];              /* [0x224..0x2fc] */
    struct dw_edma_v0_ch_regs rd;   /* 0x300 */
-    u32 padding_2[55];              /* [0x224..0x2fc] */
+    u32 padding_2[55];              /* [0x324..0x3fc] */
 };
 
 struct dw_edma_v0_unroll {
@@ -463,9 +463,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
    dwc_descriptor_complete(dwc, bad_desc, true);
 }
 
-static void dw_dma_tasklet(unsigned long data)
+static void dw_dma_tasklet(struct tasklet_struct *t)
 {
-    struct dw_dma *dw = (struct dw_dma *)data;
+    struct dw_dma *dw = from_tasklet(dw, t, tasklet);
    struct dw_dma_chan *dwc;
    u32 status_xfer;
    u32 status_err;
@@ -723,7 +723,7 @@ slave_sg_fromdev_fill_desc:
            lli_write(desc, sar, reg);
            lli_write(desc, dar, mem);
            lli_write(desc, ctlhi, ctlhi);
-            mem_width = __ffs(data_width | mem | dlen);
+            mem_width = __ffs(data_width | mem);
            lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
            desc->len = dlen;
 
@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
    if (dws->dma_dev != chan->device->dev)
        return false;
 
+    /* permit channels in accordance with the channels mask */
+    if (dws->channels && !(dws->channels & dwc->mask))
+        return false;
+
    /* We have to copy data since dws can be temporary storage */
    memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
 
@@ -1138,7 +1142,7 @@ int do_dma_probe(struct dw_dma_chip *chip)
        goto err_pdata;
    }
 
-    tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+    tasklet_setup(&dw->tasklet, dw_dma_tasklet);
 
    err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
                      dw->name, dw);
@@ -14,7 +14,7 @@
 static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
 {
    struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-    u32 cfghi = DWC_CFGH_FIFO_MODE;
+    u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
    u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
    bool hs_polarity = dwc->dws.hs_polarity;
 
@@ -67,9 +67,8 @@ static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
 static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
 {
    struct dma_slave_config *sconfig = &dwc->dma_sconfig;
-    bool is_slave = is_slave_direction(dwc->direction);
-    u8 smsize = is_slave ? sconfig->src_maxburst : DW_DMA_MSIZE_16;
-    u8 dmsize = is_slave ? sconfig->dst_maxburst : DW_DMA_MSIZE_16;
+    u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
+    u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
    u8 p_master = dwc->dws.p_master;
    u8 m_master = dwc->dws.m_master;
    u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master;
@@ -73,9 +73,8 @@ static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
 static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
 {
    struct dma_slave_config *sconfig = &dwc->dma_sconfig;
-    bool is_slave = is_slave_direction(dwc->direction);
-    u8 smsize = is_slave ? sconfig->src_maxburst : IDMA32_MSIZE_8;
-    u8 dmsize = is_slave ? sconfig->dst_maxburst : IDMA32_MSIZE_8;
+    u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
+    u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
 
    return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
           DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
    };
    dma_cap_mask_t cap;
 
-    if (dma_spec->args_count != 3)
+    if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
        return NULL;
 
    slave.src_id = dma_spec->args[0];
    slave.dst_id = dma_spec->args[0];
    slave.m_master = dma_spec->args[1];
    slave.p_master = dma_spec->args[2];
+    if (dma_spec->args_count >= 4)
+        slave.channels = dma_spec->args[3];
 
    if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
                slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
                slave.m_master >= dw->pdata->nr_masters ||
-                slave.p_master >= dw->pdata->nr_masters))
+                slave.p_master >= dw->pdata->nr_masters ||
+                slave.channels >= BIT(dw->pdata->nr_channels)))
        return NULL;
 
    dma_cap_zero(cap);
@@ -745,9 +745,9 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
    spin_unlock_irqrestore(&edmac->lock, flags);
 }
 
-static void ep93xx_dma_tasklet(unsigned long data)
+static void ep93xx_dma_tasklet(struct tasklet_struct *t)
 {
-    struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
+    struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
    struct ep93xx_dma_desc *desc, *d;
    struct dmaengine_desc_callback cb;
    LIST_HEAD(list);
@@ -1353,8 +1353,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&edmac->active);
        INIT_LIST_HEAD(&edmac->queue);
        INIT_LIST_HEAD(&edmac->free_list);
-        tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
-                     (unsigned long)edmac);
+        tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);
 
        list_add_tail(&edmac->chan.device_node,
                      &dma_dev->channels);
@@ -154,17 +154,15 @@ static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
        fsl_re_issue_pending(&re_chan->chan);
 }
 
-static void fsl_re_dequeue(unsigned long data)
+static void fsl_re_dequeue(struct tasklet_struct *t)
 {
-    struct fsl_re_chan *re_chan;
+    struct fsl_re_chan *re_chan = from_tasklet(re_chan, t, irqtask);
    struct fsl_re_desc *desc, *_desc;
    struct fsl_re_hw_desc *hwdesc;
    unsigned long flags;
    unsigned int count, oub_count;
    int found;
 
-    re_chan = dev_get_drvdata((struct device *)data);
-
    fsl_re_cleanup_descs(re_chan);
 
    spin_lock_irqsave(&re_chan->desc_lock, flags);
@@ -671,7 +669,7 @@ static int fsl_re_chan_probe(struct platform_device *ofdev,
    snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);
 
    chandev = &chan_ofdev->dev;
-    tasklet_init(&chan->irqtask, fsl_re_dequeue, (unsigned long)chandev);
+    tasklet_setup(&chan->irqtask, fsl_re_dequeue);
 
    ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
    if (ret) {
@@ -976,9 +976,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
    return IRQ_HANDLED;
 }
 
-static void dma_do_tasklet(unsigned long data)
+static void dma_do_tasklet(struct tasklet_struct *t)
 {
-    struct fsldma_chan *chan = (struct fsldma_chan *)data;
+    struct fsldma_chan *chan = from_tasklet(chan, t, tasklet);
 
    chan_dbg(chan, "tasklet entry\n");
 
@@ -1151,7 +1151,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
    }
 
    fdev->chan[chan->id] = chan;
-    tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+    tasklet_setup(&chan->tasklet, dma_do_tasklet);
    snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
 
    /* Initialize the channel */
@@ -368,6 +368,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
    dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
            __func__, cmd_code, operand);
 
+    idxd->cmd_status = 0;
    __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
    idxd->cmd_done = &done;
    iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
@@ -379,8 +380,11 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
    spin_unlock_irqrestore(&idxd->dev_lock, flags);
    wait_for_completion(&done);
    spin_lock_irqsave(&idxd->dev_lock, flags);
-    if (status)
+    if (status) {
        *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+        idxd->cmd_status = *status & GENMASK(7, 0);
+    }
 
    __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
    /* Wake up other pending commands */
    wake_up(&idxd->cmd_waitq);
@@ -555,8 +559,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
    wq->wqcfg.priority = wq->priority;
 
    /* bytes 12-15 */
-    wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
-    wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+    wq->wqcfg.max_xfer_shift = ilog2(wq->max_xfer_bytes);
+    wq->wqcfg.max_batch_shift = ilog2(wq->max_batch_size);
 
    dev_dbg(dev, "WQ %d CFGs\n", wq->id);
    for (i = 0; i < 8; i++) {
@@ -114,6 +114,8 @@ struct idxd_wq {
    struct sbitmap_queue sbq;
    struct dma_chan dma_chan;
    char name[WQ_NAME_SIZE + 1];
+    u64 max_xfer_bytes;
+    u32 max_batch_size;
 };
 
 struct idxd_engine {
@@ -154,6 +156,7 @@ struct idxd_device {
    unsigned long flags;
    int id;
    int major;
+    u8 cmd_status;
 
    struct pci_dev *pdev;
    void __iomem *reg_base;
@@ -176,6 +176,8 @@ static int idxd_setup_internals(struct idxd_device *idxd)
        wq->idxd = idxd;
        mutex_init(&wq->wq_lock);
        wq->idxd_cdev.minor = -1;
+        wq->max_xfer_bytes = idxd->max_xfer_bytes;
+        wq->max_batch_size = idxd->max_batch_size;
    }
 
    for (i = 0; i < idxd->max_engines; i++) {
@@ -64,6 +64,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
    bool err = false;
 
    cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+    iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
 
    if (cause & IDXD_INTC_ERR) {
        spin_lock_bh(&idxd->dev_lock);
@@ -121,7 +122,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
        dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
                      val);
 
-    iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
    if (!err)
        goto out;
 
@@ -1064,6 +1064,89 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
 static struct device_attribute dev_attr_wq_cdev_minor =
        __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
 
+static int __get_sysfs_u64(const char *buf, u64 *val)
+{
+    int rc;
+
+    rc = kstrtou64(buf, 0, val);
+    if (rc < 0)
+        return -EINVAL;
+
+    if (*val == 0)
+        return -EINVAL;
+
+    *val = roundup_pow_of_two(*val);
+    return 0;
+}
+
+static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
+                                         char *buf)
+{
+    struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+    return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
+}
+
+static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
+                                          const char *buf, size_t count)
+{
+    struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+    struct idxd_device *idxd = wq->idxd;
+    u64 xfer_size;
+    int rc;
+
+    if (wq->state != IDXD_WQ_DISABLED)
+        return -EPERM;
+
+    rc = __get_sysfs_u64(buf, &xfer_size);
+    if (rc < 0)
+        return rc;
+
+    if (xfer_size > idxd->max_xfer_bytes)
+        return -EINVAL;
+
+    wq->max_xfer_bytes = xfer_size;
+
+    return count;
+}
+
+static struct device_attribute dev_attr_wq_max_transfer_size =
+        __ATTR(max_transfer_size, 0644,
+               wq_max_transfer_size_show, wq_max_transfer_size_store);
+
+static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+    struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+    return sprintf(buf, "%u\n", wq->max_batch_size);
+}
+
+static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+    struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+    struct idxd_device *idxd = wq->idxd;
+    u64 batch_size;
+    int rc;
+
+    if (wq->state != IDXD_WQ_DISABLED)
+        return -EPERM;
+
+    rc = __get_sysfs_u64(buf, &batch_size);
+    if (rc < 0)
+        return rc;
+
+    if (batch_size > idxd->max_batch_size)
+        return -EINVAL;
+
+    wq->max_batch_size = (u32)batch_size;
+
+    return count;
+}
+
+static struct device_attribute dev_attr_wq_max_batch_size =
+        __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
+
 static struct attribute *idxd_wq_attributes[] = {
    &dev_attr_wq_clients.attr,
    &dev_attr_wq_state.attr,
@@ -1074,6 +1157,8 @@ static struct attribute *idxd_wq_attributes[] = {
    &dev_attr_wq_type.attr,
    &dev_attr_wq_name.attr,
    &dev_attr_wq_cdev_minor.attr,
+    &dev_attr_wq_max_transfer_size.attr,
+    &dev_attr_wq_max_batch_size.attr,
    NULL,
 };
 
@@ -1317,6 +1402,15 @@ static ssize_t cdev_major_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(cdev_major);
 
+static ssize_t cmd_status_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+    struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+
+    return sprintf(buf, "%#x\n", idxd->cmd_status);
+}
+static DEVICE_ATTR_RO(cmd_status);
+
 static struct attribute *idxd_device_attributes[] = {
    &dev_attr_version.attr,
    &dev_attr_max_groups.attr,
@@ -1335,6 +1429,7 @@ static struct attribute *idxd_device_attributes[] = {
    &dev_attr_max_tokens.attr,
    &dev_attr_token_limit.attr,
    &dev_attr_cdev_major.attr,
+    &dev_attr_cmd_status.attr,
    NULL,
 };
 
@@ -173,7 +173,6 @@ enum imx_dma_type {
 
 struct imxdma_engine {
    struct device *dev;
-    struct device_dma_parameters dma_parms;
    struct dma_device dma_device;
    void __iomem *base;
    struct clk *dma_ahb;
@@ -613,9 +612,9 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
    return 0;
 }
 
-static void imxdma_tasklet(unsigned long data)
+static void imxdma_tasklet(struct tasklet_struct *t)
 {
-    struct imxdma_channel *imxdmac = (void *)data;
+    struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    struct imxdma_desc *desc, *next_desc;
    unsigned long flags;
@@ -1169,8 +1168,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&imxdmac->ld_free);
        INIT_LIST_HEAD(&imxdmac->ld_active);
 
-        tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
-                     (unsigned long)imxdmac);
+        tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
        imxdmac->chan.device = &imxdma->dma_device;
        dma_cookie_init(&imxdmac->chan);
        imxdmac->channel = i;
@@ -1196,7 +1194,6 @@ static int __init imxdma_probe(struct platform_device *pdev)
    platform_set_drvdata(pdev, imxdma);
 
    imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
-    imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
    dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
 
    ret = dma_async_device_register(&imxdma->dma_device);
@@ -426,7 +426,6 @@ struct sdma_driver_data {
 
 struct sdma_engine {
    struct device *dev;
-    struct device_dma_parameters dma_parms;
    struct sdma_channel channel[MAX_DMA_CHANNELS];
    struct sdma_channel_control *channel_control;
    void __iomem *regs;
@@ -2118,7 +2117,6 @@ static int sdma_probe(struct platform_device *pdev)
    sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
    sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
    sdma->dma_device.device_issue_pending = sdma_issue_pending;
-    sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
    sdma->dma_device.copy_align = 2;
    dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
 
@@ -26,11 +26,11 @@
 
 #include "../dmaengine.h"
 
-int completion_timeout = 200;
+static int completion_timeout = 200;
 module_param(completion_timeout, int, 0644);
 MODULE_PARM_DESC(completion_timeout,
        "set ioat completion timeout [msec] (default 200 [msec])");
-int idle_timeout = 2000;
+static int idle_timeout = 2000;
 module_param(idle_timeout, int, 0644);
 MODULE_PARM_DESC(idle_timeout,
        "set ioat idel timeout [msec] (default 2000 [msec])");
@@ -165,7 +165,7 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
    tasklet_kill(&ioat_chan->cleanup_task);
 
    /* final cleanup now that everything is quiesced and can't re-arm */
-    ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
+    ioat_cleanup_event(&ioat_chan->cleanup_task);
 }
 
 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
@@ -389,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
        struct ioat_descs *descs = &ioat_chan->descs[i];
 
        descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
-                                         SZ_2M, &descs->hw, flags);
+                                         IOAT_CHUNK_SIZE, &descs->hw, flags);
        if (!descs->virt) {
            int idx;
 
@@ -690,9 +690,9 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
    spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
-void ioat_cleanup_event(unsigned long data)
+void ioat_cleanup_event(struct tasklet_struct *t)
 {
-    struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+    struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);
 
    ioat_cleanup(ioat_chan);
    if (!test_bit(IOAT_RUN, &ioat_chan->state))
@@ -393,7 +393,7 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
 enum dma_status
 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
               struct dma_tx_state *txstate);
-void ioat_cleanup_event(unsigned long data);
+void ioat_cleanup_event(struct tasklet_struct *t);
 void ioat_timer_event(struct timer_list *t);
 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
 void ioat_issue_pending(struct dma_chan *chan);
@@ -767,8 +767,6 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
                  struct ioatdma_chan *ioat_chan, int idx)
 {
    struct dma_device *dma = &ioat_dma->dma_dev;
-    struct dma_chan *c = &ioat_chan->dma_chan;
-    unsigned long data = (unsigned long) c;
 
    ioat_chan->ioat_dma = ioat_dma;
    ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
@@ -778,7 +776,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
    list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
    ioat_dma->idx[idx] = ioat_chan;
    timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
-    tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
+    tasklet_setup(&ioat_chan->cleanup_task, ioat_cleanup_event);
 }
 
 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
@@ -238,9 +238,10 @@ iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
    spin_unlock_bh(&iop_chan->lock);
 }
 
-static void iop_adma_tasklet(unsigned long data)
+static void iop_adma_tasklet(struct tasklet_struct *t)
 {
-    struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
+    struct iop_adma_chan *iop_chan = from_tasklet(iop_chan, t,
+                                                  irq_tasklet);
 
    /* lockdep will flag depedency submissions as potentially
     * recursive locking, this is not the case as a dependency
@@ -416,6 +417,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 {
    char *hw_desc;
+    dma_addr_t dma_desc;
    int idx;
    struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
    struct iop_adma_desc_slot *slot = NULL;
@@ -444,9 +446,8 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
        INIT_LIST_HEAD(&slot->tx_list);
        INIT_LIST_HEAD(&slot->chain_node);
        INIT_LIST_HEAD(&slot->slot_node);
-        hw_desc = (char *) iop_chan->device->dma_desc_pool;
-        slot->async_tx.phys =
-            (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
+        dma_desc = iop_chan->device->dma_desc_pool;
+        slot->async_tx.phys = dma_desc + idx * IOP_ADMA_SLOT_SIZE;
        slot->idx = idx;
 
        spin_lock_bh(&iop_chan->lock);
@@ -1296,9 +1297,8 @@ static int iop_adma_probe(struct platform_device *pdev)
        goto err_free_adev;
    }
 
-    dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
-            __func__, adev->dma_desc_pool_virt,
-            (void *) adev->dma_desc_pool);
+    dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %pad\n",
+            __func__, adev->dma_desc_pool_virt, &adev->dma_desc_pool);
 
    adev->id = plat_data->hw_id;
 
@@ -1351,8 +1351,7 @@ static int iop_adma_probe(struct platform_device *pdev)
        ret = -ENOMEM;
        goto err_free_iop_chan;
    }
-    tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
-        iop_chan);
+    tasklet_setup(&iop_chan->irq_tasklet, iop_adma_tasklet);
 
    /* clear errors before enabling interrupts */
    iop_adma_device_clear_err_status(iop_chan);
@@ -1299,9 +1299,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
    return IRQ_HANDLED;
 }
 
-static void ipu_gc_tasklet(unsigned long arg)
+static void ipu_gc_tasklet(struct tasklet_struct *t)
 {
-    struct ipu *ipu = (struct ipu *)arg;
+    struct ipu *ipu = from_tasklet(ipu, t, tasklet);
    int i;
 
    for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1740,7 +1740,7 @@ static int __init ipu_probe(struct platform_device *pdev)
    if (ret < 0)
        goto err_idmac_init;
 
-    tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data);
+    tasklet_setup(&ipu_data.tasklet, ipu_gc_tasklet);
 
    ipu_data.dev = &pdev->dev;
 
@@ -297,9 +297,9 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
    return -EAGAIN;
 }
 
-static void k3_dma_tasklet(unsigned long arg)
+static void k3_dma_tasklet(struct tasklet_struct *t)
 {
-    struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
+    struct k3_dma_dev *d = from_tasklet(d, t, task);
    struct k3_dma_phy *p;
    struct k3_dma_chan *c, *cn;
    unsigned pch, pch_alloc = 0;
@@ -962,7 +962,7 @@ static int k3_dma_probe(struct platform_device *op)
 
    spin_lock_init(&d->lock);
    INIT_LIST_HEAD(&d->chan_pending);
-    tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
+    tasklet_setup(&d->task, k3_dma_tasklet);
    platform_set_drvdata(op, d);
    dev_info(&op->dev, "initialized\n");
 
@@ -356,9 +356,9 @@ static struct mtk_cqdma_vdesc
    return ret;
 }
 
-static void mtk_cqdma_tasklet_cb(unsigned long data)
+static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
 {
-    struct mtk_cqdma_pchan *pc = (struct mtk_cqdma_pchan *)data;
+    struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet);
    struct mtk_cqdma_vdesc *cvd = NULL;
    unsigned long flags;
 
@@ -878,8 +878,7 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
 
    /* initialize tasklet for each PC */
    for (i = 0; i < cqdma->dma_channels; ++i)
-        tasklet_init(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb,
-                     (unsigned long)cqdma->pc[i]);
+        tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);
 
    dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n");
 
@@ -624,14 +624,9 @@ static int mtk_uart_apdma_runtime_suspend(struct device *dev)
 
 static int mtk_uart_apdma_runtime_resume(struct device *dev)
 {
-    int ret;
    struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
 
-    ret = clk_prepare_enable(mtkd->clk);
-    if (ret)
-        return ret;
-
-    return 0;
+    return clk_prepare_enable(mtkd->clk);
 }
 #endif /* CONFIG_PM */
 
@@ -873,9 +873,9 @@ static void mmp_pdma_issue_pending(struct dma_chan *dchan)
  * Do call back
  * Start pending list
  */
-static void dma_do_tasklet(unsigned long data)
+static void dma_do_tasklet(struct tasklet_struct *t)
 {
-    struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
+    struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
    struct mmp_pdma_desc_sw *desc, *_desc;
    LIST_HEAD(chain_cleanup);
    unsigned long flags;
@@ -993,7 +993,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
    spin_lock_init(&chan->desc_lock);
    chan->dev = pdev->dev;
    chan->chan.device = &pdev->device;
-    tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+    tasklet_setup(&chan->tasklet, dma_do_tasklet);
    INIT_LIST_HEAD(&chan->chain_pending);
    INIT_LIST_HEAD(&chan->chain_running);
 
@@ -346,9 +346,9 @@ static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
    return IRQ_NONE;
 }
 
-static void dma_do_tasklet(unsigned long data)
+static void dma_do_tasklet(struct tasklet_struct *t)
 {
-    struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
+    struct mmp_tdma_chan *tdmac = from_tasklet(tdmac, t, tasklet);
 
    dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
 }
@@ -586,7 +586,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
    tdmac->pool = pool;
    tdmac->status = DMA_COMPLETE;
    tdev->tdmac[tdmac->idx] = tdmac;
-    tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
+    tasklet_setup(&tdmac->tasklet, dma_do_tasklet);
 
    /* add the channel to tdma_chan list */
    list_add_tail(&tdmac->chan.device_node,
@@ -414,9 +414,9 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
 }
 
 /* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+static void mpc_dma_tasklet(struct tasklet_struct *t)
 {
-    struct mpc_dma *mdma = (void *)data;
+    struct mpc_dma *mdma = from_tasklet(mdma, t, tasklet);
    unsigned long flags;
    uint es;
 
@@ -1009,7 +1009,7 @@ static int mpc_dma_probe(struct platform_device *op)
        list_add_tail(&mchan->chan.device_node, &dma->channels);
    }
 
-    tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
+    tasklet_setup(&mdma->tasklet, mpc_dma_tasklet);
 
    /*
     * Configure DMA Engine:
@@ -336,9 +336,9 @@ static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
    mv_chan->dmachan.completed_cookie = cookie;
 }
 
-static void mv_xor_tasklet(unsigned long data)
+static void mv_xor_tasklet(struct tasklet_struct *t)
 {
-    struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+    struct mv_xor_chan *chan = from_tasklet(chan, t, irq_tasklet);
 
    spin_lock(&chan->lock);
    mv_chan_slot_cleanup(chan);
@@ -1097,8 +1097,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 
    mv_chan->mmr_base = xordev->xor_base;
    mv_chan->mmr_high_base = xordev->xor_high_base;
-    tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
-        mv_chan);
+    tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet);
 
    /* clear errors before enabling interrupts */
    mv_chan_clear_err_status(mv_chan);
@@ -553,9 +553,10 @@ int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
 /*
  * handle the descriptors after HW process
  */
-static void mv_xor_v2_tasklet(unsigned long data)
+static void mv_xor_v2_tasklet(struct tasklet_struct *t)
 {
-    struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
+    struct mv_xor_v2_device *xor_dev = from_tasklet(xor_dev, t,
+                                                    irq_tasklet);
    int pending_ptr, num_of_pending, i;
    struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 
@@ -780,8 +781,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
    if (ret)
        goto free_msi_irqs;
 
-    tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
-                 (unsigned long) xor_dev);
+    tasklet_setup(&xor_dev->irq_tasklet, mv_xor_v2_tasklet);
 
    xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
 
@@ -141,7 +141,6 @@ struct mxs_dma_engine {
    void __iomem *base;
    struct clk *clk;
    struct dma_device dma_device;
-    struct device_dma_parameters dma_parms;
    struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
    struct platform_device *pdev;
    unsigned int nr_channels;
@@ -320,9 +319,9 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
    return dma_cookie_assign(tx);
 }
 
-static void mxs_dma_tasklet(unsigned long data)
+static void mxs_dma_tasklet(struct tasklet_struct *t)
 {
-    struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
+    struct mxs_dma_chan *mxs_chan = from_tasklet(mxs_chan, t, tasklet);
 
    dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
 }
@@ -812,8 +811,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
        mxs_chan->chan.device = &mxs_dma->dma_device;
        dma_cookie_init(&mxs_chan->chan);
 
-        tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
-                     (unsigned long) mxs_chan);
+        tasklet_setup(&mxs_chan->tasklet, mxs_dma_tasklet);
 
        /* Add the channel to mxs_chan list */
@@ -829,7 +827,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
    mxs_dma->dma_device.dev = &pdev->dev;
 
    /* mxs_dma gets 65535 bytes maximum sg size */
-    mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
    dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
 
    mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
@@ -1113,9 +1113,9 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
    return dchan;
 }
 
-static void nbpf_chan_tasklet(unsigned long data)
+static void nbpf_chan_tasklet(struct tasklet_struct *t)
 {
-    struct nbpf_channel *chan = (struct nbpf_channel *)data;
+    struct nbpf_channel *chan = from_tasklet(chan, t, tasklet);
    struct nbpf_desc *desc, *tmp;
    struct dmaengine_desc_callback cb;
 
@@ -1260,7 +1260,7 @@ static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
 
    snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
 
-    tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
+    tasklet_setup(&chan->tasklet, nbpf_chan_tasklet);
    ret = devm_request_irq(dma_dev->dev, chan->irq,
                           nbpf_chan_irq, IRQF_SHARED,
                           chan->name, chan);
@@ -124,7 +124,7 @@
 #define FCNT_VAL                0x1
 
 /**
- * owl_dmadesc_offsets - Describe DMA descriptor, hardware link
+ * enum owl_dmadesc_offsets - Describe DMA descriptor, hardware link
  * list for dma transfer
  * @OWL_DMADESC_NEXT_LLI: physical address of the next link list
  * @OWL_DMADESC_SADDR: source physical address
@@ -135,6 +135,7 @@
  * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config
  * @OWL_DMADESC_CTRLB: interrupt config
  * @OWL_DMADESC_CONST_NUM: data for constant fill
+ * @OWL_DMADESC_SIZE: max size of this enum
  */
 enum owl_dmadesc_offsets {
    OWL_DMADESC_NEXT_LLI = 0,
@@ -670,9 +670,9 @@ static int pd_device_terminate_all(struct dma_chan *chan)
 	return 0;
 }

-static void pdc_tasklet(unsigned long data)
+static void pdc_tasklet(struct tasklet_struct *t)
 {
-	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+	struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
 	unsigned long flags;

 	if (!pdc_is_idle(pd_chan)) {
@@ -735,8 +735,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
 	return ret0 | ret2;
 }

-#ifdef CONFIG_PM
-static void pch_dma_save_regs(struct pch_dma *pd)
+static void __maybe_unused pch_dma_save_regs(struct pch_dma *pd)
 {
 	struct pch_dma_chan *pd_chan;
 	struct dma_chan *chan, *_c;
@@ -759,7 +758,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
 	}
 }

-static void pch_dma_restore_regs(struct pch_dma *pd)
+static void __maybe_unused pch_dma_restore_regs(struct pch_dma *pd)
 {
 	struct pch_dma_chan *pd_chan;
 	struct dma_chan *chan, *_c;
@@ -782,40 +781,25 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
 	}
 }

-static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_dma_suspend(struct device *dev)
 {
-	struct pch_dma *pd = pci_get_drvdata(pdev);
+	struct pch_dma *pd = dev_get_drvdata(dev);

 	if (pd)
 		pch_dma_save_regs(pd);

-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
 	return 0;
 }

-static int pch_dma_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_dma_resume(struct device *dev)
 {
-	struct pch_dma *pd = pci_get_drvdata(pdev);
-	int err;
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-
-	err = pci_enable_device(pdev);
-	if (err) {
-		dev_dbg(&pdev->dev, "failed to enable device\n");
-		return err;
-	}
+	struct pch_dma *pd = dev_get_drvdata(dev);

 	if (pd)
 		pch_dma_restore_regs(pd);

 	return 0;
 }
-#endif

 static int pch_dma_probe(struct pci_dev *pdev,
 			 const struct pci_device_id *id)
@@ -898,8 +882,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
 		INIT_LIST_HEAD(&pd_chan->queue);
 		INIT_LIST_HEAD(&pd_chan->free_list);

-		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
-			     (unsigned long)pd_chan);
+		tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
 		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
 	}

@@ -993,15 +976,14 @@ static const struct pci_device_id pch_dma_id_table[] = {
 	{ 0, },
 };

+static SIMPLE_DEV_PM_OPS(pch_dma_pm_ops, pch_dma_suspend, pch_dma_resume);
+
 static struct pci_driver pch_dma_driver = {
 	.name		= DRV_NAME,
 	.id_table	= pch_dma_id_table,
 	.probe		= pch_dma_probe,
 	.remove		= pch_dma_remove,
-#ifdef CONFIG_PM
-	.suspend	= pch_dma_suspend,
-	.resume		= pch_dma_resume,
-#endif
+	.driver.pm	= &pch_dma_pm_ops,
 };

 module_pci_driver(pch_dma_driver);
@@ -255,7 +255,7 @@ enum pl330_byteswap {
 static unsigned cmd_line;
 #define PL330_DBGCMD_DUMP(off, x...)	do { \
 						printk("%x:", cmd_line); \
-						printk(x); \
+						printk(KERN_CONT x); \
 						cmd_line += off; \
 					} while (0)
 #define PL330_DBGMC_START(addr)		(cmd_line = addr)
@@ -460,9 +460,6 @@ struct pl330_dmac {
 	/* DMA-Engine Device */
 	struct dma_device ddma;

-	/* Holds info about sg limitations */
-	struct device_dma_parameters dma_parms;
-
 	/* Pool of descriptors available for the DMAC's channels */
 	struct list_head desc_pool;
 	/* To protect desc_pool manipulation */
@@ -1576,9 +1573,9 @@ static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
 	tasklet_schedule(&pch->task);
 }

-static void pl330_dotask(unsigned long data)
+static void pl330_dotask(struct tasklet_struct *t)
 {
-	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
+	struct pl330_dmac *pl330 = from_tasklet(pl330, t, tasks);
 	unsigned long flags;
 	int i;

@@ -1982,7 +1979,7 @@ static int pl330_add(struct pl330_dmac *pl330)
 		return ret;
 	}

-	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
+	tasklet_setup(&pl330->tasks, pl330_dotask);

 	pl330->state = INIT;

@@ -2065,9 +2062,9 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
 	}
 }

-static void pl330_tasklet(unsigned long data)
+static void pl330_tasklet(struct tasklet_struct *t)
 {
-	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
+	struct dma_pl330_chan *pch = from_tasklet(pch, t, task);
 	struct dma_pl330_desc *desc, *_dt;
 	unsigned long flags;
 	bool power_down = false;
@@ -2175,7 +2172,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 		return -ENOMEM;
 	}

-	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+	tasklet_setup(&pch->task, pl330_tasklet);

 	spin_unlock_irqrestore(&pl330->lock, flags);

@@ -2487,7 +2484,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
 	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
 	spin_unlock_irqrestore(&pch->lock, flags);

-	pl330_tasklet((unsigned long)pch);
+	pl330_tasklet(&pch->task);
 }

 /*
@@ -3034,9 +3031,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)

 	pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
 	if (IS_ERR(pl330->rstc)) {
-		if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER)
-			dev_err(&adev->dev, "Failed to get reset!\n");
-		return PTR_ERR(pl330->rstc);
+		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc), "Failed to get reset!\n");
 	} else {
 		ret = reset_control_deassert(pl330->rstc);
 		if (ret) {
@@ -3047,9 +3042,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)

 	pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
 	if (IS_ERR(pl330->rstc_ocp)) {
-		if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER)
-			dev_err(&adev->dev, "Failed to get OCP reset!\n");
-		return PTR_ERR(pl330->rstc_ocp);
+		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc_ocp),
+				     "Failed to get OCP reset!\n");
 	} else {
 		ret = reset_control_deassert(pl330->rstc_ocp);
 		if (ret) {
@@ -3154,8 +3148,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		}
 	}

-	adev->dev.dma_parms = &pl330->dma_parms;
-
 	/*
 	 * This is the limit for transfers with a buswidth of 1, larger
 	 * buswidths will have larger limits.
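The pl330 reset hunks show the other recurring probe-path cleanup in this pull: the open-coded "stay quiet on -EPROBE_DEFER, log everything else" idiom collapses into dev_err_probe(), which also records the deferral reason for debugging. A sketch of the pattern; the reset name "dma" matches the hunk above, the surrounding function is hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_get_reset(struct device *dev)
{
	struct reset_control *rstc;

	rstc = devm_reset_control_get_optional(dev, "dma");
	if (IS_ERR(rstc))
		/*
		 * dev_err_probe() logs at error level unless the error is
		 * -EPROBE_DEFER, and always returns the error passed in,
		 * so it can sit directly in the return statement.
		 */
		return dev_err_probe(dev, PTR_ERR(rstc), "Failed to get reset!\n");

	return reset_control_deassert(rstc);
}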
@@ -241,9 +241,9 @@ static void plx_dma_stop(struct plx_dma_dev *plxdev)
 	rcu_read_unlock();
 }

-static void plx_dma_desc_task(unsigned long data)
+static void plx_dma_desc_task(struct tasklet_struct *t)
 {
-	struct plx_dma_dev *plxdev = (void *)data;
+	struct plx_dma_dev *plxdev = from_tasklet(plxdev, t, desc_task);

 	plx_dma_process_desc(plxdev);
 }
@@ -513,8 +513,7 @@ static int plx_dma_create(struct pci_dev *pdev)
 	}

 	spin_lock_init(&plxdev->ring_lock);
-	tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
-		     (unsigned long)plxdev);
+	tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);

 	RCU_INIT_POINTER(plxdev->pdev, pdev);
 	plxdev->bar = pcim_iomap_table(pdev)[0];
@@ -1660,9 +1660,9 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
 /**
  * ppc440spe_adma_tasklet - clean up watch-dog initiator
  */
-static void ppc440spe_adma_tasklet(unsigned long data)
+static void ppc440spe_adma_tasklet(struct tasklet_struct *t)
 {
-	struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;
+	struct ppc440spe_adma_chan *chan = from_tasklet(chan, t, irq_tasklet);

 	spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
 	__ppc440spe_adma_slot_cleanup(chan);
@@ -4141,8 +4141,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
 		chan->common.device = &adev->common;
 		dma_cookie_init(&chan->common);
 		list_add_tail(&chan->common.device_node, &adev->common.channels);
-		tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
-			     (unsigned long)chan);
+		tasklet_setup(&chan->irq_tasklet, ppc440spe_adma_tasklet);

 		/* allocate and map helper pages for async validation or
 		 * async_mult/async_sum_product operations on DMA0/1.
@@ -381,7 +381,6 @@ struct bam_device {
 	void __iomem *regs;
 	struct device *dev;
 	struct dma_device common;
-	struct device_dma_parameters dma_parms;
 	struct bam_chan *channels;
 	u32 num_channels;
 	u32 num_ees;
@@ -1071,13 +1070,13 @@ static void bam_start_dma(struct bam_chan *bchan)

 /**
  * dma_tasklet - DMA IRQ tasklet
- * @data: tasklet argument (bam controller structure)
+ * @t: tasklet argument (bam controller structure)
  *
  * Sets up next DMA operation and then processes all completed transactions
  */
-static void dma_tasklet(unsigned long data)
+static void dma_tasklet(struct tasklet_struct *t)
 {
-	struct bam_device *bdev = (struct bam_device *)data;
+	struct bam_device *bdev = from_tasklet(bdev, t, task);
 	struct bam_chan *bchan;
 	unsigned long flags;
 	unsigned int i;
@@ -1293,7 +1292,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_disable_clk;

-	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
+	tasklet_setup(&bdev->task, dma_tasklet);

 	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
 				      sizeof(*bdev->channels), GFP_KERNEL);
@@ -1316,7 +1315,6 @@ static int bam_dma_probe(struct platform_device *pdev)

 	/* set max dma segment size */
 	bdev->common.dev = bdev->dev;
-	bdev->common.dev->dma_parms = &bdev->dma_parms;
 	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
 	if (ret) {
 		dev_err(bdev->dev, "cannot set maximum segment size\n");
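The dma_parms deletions here, and in the pl330, rcar-dmac and ste_dma40 hunks, are the "subsystem removal of local dma_parms" item from the pull summary: the driver core now allocates dev->dma_parms for these bus types, so drivers no longer need to carry their own device_dma_parameters just to set a segment limit. A sketch of what remains, assuming the core-provided dma_parms; the limit value is a stand-in for the hardware's real FIFO size:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_limit_segments(struct device *dev, unsigned int max)
{
	/*
	 * dma_set_max_seg_size() only fails when dev->dma_parms is NULL,
	 * which the bus core is now expected to guarantee against.
	 */
	return dma_set_max_seg_size(dev, max);
}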
@@ -224,9 +224,9 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
 	return 0;
 }

-static void hidma_issue_task(unsigned long arg)
+static void hidma_issue_task(struct tasklet_struct *t)
 {
-	struct hidma_dev *dmadev = (struct hidma_dev *)arg;
+	struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

 	pm_runtime_get_sync(dmadev->ddev.dev);
 	hidma_ll_start(dmadev->lldev);
@@ -885,7 +885,7 @@ static int hidma_probe(struct platform_device *pdev)
 		goto uninit;

 	dmadev->irq = chirq;
-	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+	tasklet_setup(&dmadev->task, hidma_issue_task);
 	hidma_debug_init(dmadev);
 	hidma_sysfs_init(dmadev);
 	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
@@ -173,9 +173,9 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
 /*
  * Multiple TREs may be queued and waiting in the pending queue.
  */
-static void hidma_ll_tre_complete(unsigned long arg)
+static void hidma_ll_tre_complete(struct tasklet_struct *t)
 {
-	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
+	struct hidma_lldev *lldev = from_tasklet(lldev, t, task);
 	struct hidma_tre *tre;

 	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
@@ -792,7 +792,7 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
 		return NULL;

 	spin_lock_init(&lldev->lock);
-	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
+	tasklet_setup(&lldev->task, hidma_ll_tre_complete);
 	lldev->initialized = 1;
 	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
 	return lldev;
@@ -323,9 +323,9 @@ static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
 	}
 }

-static void sa11x0_dma_tasklet(unsigned long arg)
+static void sa11x0_dma_tasklet(struct tasklet_struct *t)
 {
-	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
+	struct sa11x0_dma_dev *d = from_tasklet(d, t, task);
 	struct sa11x0_dma_phy *p;
 	struct sa11x0_dma_chan *c;
 	unsigned pch, pch_alloc = 0;
@@ -928,7 +928,7 @@ static int sa11x0_dma_probe(struct platform_device *pdev)
 		goto err_ioremap;
 	}

-	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
+	tasklet_setup(&d->task, sa11x0_dma_tasklet);

 	for (i = 0; i < NR_PHY_CHAN; i++) {
 		struct sa11x0_dma_phy *p = &d->phy[i];
@@ -281,10 +281,9 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
 	desc->in_use = false;
 }

-static void sf_pdma_donebh_tasklet(unsigned long arg)
+static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
 {
-	struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
-	struct sf_pdma_desc *desc = chan->desc;
+	struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
 	unsigned long flags;

 	spin_lock_irqsave(&chan->lock, flags);
@@ -295,12 +294,15 @@ static void sf_pdma_donebh_tasklet(unsigned long arg)
 	}
 	spin_unlock_irqrestore(&chan->lock, flags);

-	dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	list_del(&chan->desc->vdesc.node);
+	vchan_cookie_complete(&chan->desc->vdesc);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }

-static void sf_pdma_errbh_tasklet(unsigned long arg)
+static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
 {
-	struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+	struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
 	struct sf_pdma_desc *desc = chan->desc;
 	unsigned long flags;

@@ -332,8 +334,7 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
 	residue = readq(regs->residue);

 	if (!residue) {
-		list_del(&chan->desc->vdesc.node);
-		vchan_cookie_complete(&chan->desc->vdesc);
+		tasklet_hi_schedule(&chan->done_tasklet);
 	} else {
 		/* submit next trascatioin if possible */
 		struct sf_pdma_desc *desc = chan->desc;
@@ -347,8 +348,6 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)

 	spin_unlock_irqrestore(&chan->vchan.lock, flags);

-	tasklet_hi_schedule(&chan->done_tasklet);
-
 	return IRQ_HANDLED;
 }

@@ -476,10 +475,8 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)

 		writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

-		tasklet_init(&chan->done_tasklet,
-			     sf_pdma_donebh_tasklet, (unsigned long)chan);
-		tasklet_init(&chan->err_tasklet,
-			     sf_pdma_errbh_tasklet, (unsigned long)chan);
+		tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
+		tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
 	}
 }
@@ -32,12 +32,12 @@ config SH_DMAE
	  Enable support for the Renesas SuperH DMA controllers.

 config RCAR_DMAC
-	tristate "Renesas R-Car Gen2 DMA Controller"
+	tristate "Renesas R-Car Gen{2,3} and RZ/G{1,2} DMA Controller"
	depends on ARCH_RENESAS || COMPILE_TEST
	select RENESAS_DMA
	help
	  This driver supports the general purpose DMA controller found in the
-	  Renesas R-Car second generation SoCs.
+	  Renesas R-Car Gen{2,3} and RZ/G{1,2} SoCs.

 config RENESAS_USB_DMAC
	tristate "Renesas USB-DMA Controller"
@@ -199,7 +199,6 @@ struct rcar_dmac {
 	struct dma_device engine;
 	struct device *dev;
 	void __iomem *iomem;
-	struct device_dma_parameters parms;

 	unsigned int n_channels;
 	struct rcar_dmac_chan *channels;
@@ -1228,7 +1227,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	 * Allocate the sg list dynamically as it would consume too much stack
 	 * space.
 	 */
-	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
+	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
 	if (!sgl)
 		return NULL;

@@ -1845,7 +1844,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)

 	dmac->dev = &pdev->dev;
 	platform_set_drvdata(pdev, dmac);
-	dmac->dev->dma_parms = &dmac->parms;
 	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
 	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
@@ -728,7 +728,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
 	 * Allocate the sg list dynamically as it would consumer too much stack
 	 * space.
 	 */
-	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
+	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
 	if (!sgl)
 		return NULL;
@@ -393,9 +393,9 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
 }

 /* DMA Tasklet */
-static void sirfsoc_dma_tasklet(unsigned long data)
+static void sirfsoc_dma_tasklet(struct tasklet_struct *t)
 {
-	struct sirfsoc_dma *sdma = (void *)data;
+	struct sirfsoc_dma *sdma = from_tasklet(sdma, t, tasklet);

 	sirfsoc_dma_process_completed(sdma);
 }
@@ -938,7 +938,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 		list_add_tail(&schan->chan.device_node, &dma->channels);
 	}

-	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+	tasklet_setup(&sdma->tasklet, sirfsoc_dma_tasklet);

 	/* Register DMA engine */
 	dev_set_drvdata(dev, sdma);
@@ -535,7 +535,6 @@ struct d40_gen_dmac {
  *		 mode" allocated physical channels.
  * @num_log_chans: The number of logical channels. Calculated from
  *		 num_phy_chans.
- * @dma_parms: DMA parameters for the channel
  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
  * @dma_slave: dma_device channels that can do only do slave transfers.
  * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
@@ -577,7 +576,6 @@ struct d40_base {
 	int				  num_memcpy_chans;
 	int				  num_phy_chans;
 	int				  num_log_chans;
-	struct device_dma_parameters	  dma_parms;
 	struct dma_device		  dma_both;
 	struct dma_device		  dma_slave;
 	struct dma_device		  dma_memcpy;
@@ -1573,9 +1571,9 @@ static void dma_tc_handle(struct d40_chan *d40c)

 }

-static void dma_tasklet(unsigned long data)
+static void dma_tasklet(struct tasklet_struct *t)
 {
-	struct d40_chan *d40c = (struct d40_chan *) data;
+	struct d40_chan *d40c = from_tasklet(d40c, t, tasklet);
 	struct d40_desc *d40d;
 	unsigned long flags;
 	bool callback_active;
@@ -2806,8 +2804,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
 		INIT_LIST_HEAD(&d40c->client);
 		INIT_LIST_HEAD(&d40c->prepare_queue);

-		tasklet_init(&d40c->tasklet, dma_tasklet,
-			     (unsigned long) d40c);
+		tasklet_setup(&d40c->tasklet, dma_tasklet);

 		list_add_tail(&d40c->chan.device_node,
 			      &dma->channels);
@@ -3641,7 +3638,6 @@ static int __init d40_probe(struct platform_device *pdev)
 	if (ret)
 		goto destroy_cache;

-	base->dev->dma_parms = &base->dma_parms;
 	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
 	if (ret) {
 		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
@@ -1311,12 +1311,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(dmadev->base);

 	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(dmadev->clk)) {
-		ret = PTR_ERR(dmadev->clk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "Can't get clock\n");
-		return ret;
-	}
+	if (IS_ERR(dmadev->clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");

 	ret = clk_prepare_enable(dmadev->clk);
 	if (ret < 0) {
@@ -252,12 +252,9 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 	spin_lock_init(&stm32_dmamux->lock);

 	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(stm32_dmamux->clk)) {
-		ret = PTR_ERR(stm32_dmamux->clk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "Missing clock controller\n");
-		return ret;
-	}
+	if (IS_ERR(stm32_dmamux->clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
+				     "Missing clock controller\n");

 	ret = clk_prepare_enable(stm32_dmamux->clk);
 	if (ret < 0) {
@@ -1580,12 +1580,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 		return PTR_ERR(dmadev->base);

 	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(dmadev->clk)) {
-		ret = PTR_ERR(dmadev->clk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "Missing clock controller\n");
-		return ret;
-	}
+	if (IS_ERR(dmadev->clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk),
+				     "Missing clock controller\n");

 	ret = clk_prepare_enable(dmadev->clk);
 	if (ret < 0) {
@@ -467,9 +467,9 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
 	return 0;
 }

-static void sun6i_dma_tasklet(unsigned long data)
+static void sun6i_dma_tasklet(struct tasklet_struct *t)
 {
-	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
+	struct sun6i_dma_dev *sdev = from_tasklet(sdev, t, task);
 	struct sun6i_vchan *vchan;
 	struct sun6i_pchan *pchan;
 	unsigned int pchan_alloc = 0;
@@ -1343,7 +1343,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	if (!sdc->vchans)
 		return -ENOMEM;

-	tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
+	tasklet_setup(&sdc->task, sun6i_dma_tasklet);

 	for (i = 0; i < sdc->num_pchans; i++) {
 		struct sun6i_pchan *pchan = &sdc->pchans[i];
@@ -644,9 +644,9 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
 	}
 }

-static void tegra_dma_tasklet(unsigned long data)
+static void tegra_dma_tasklet(struct tasklet_struct *t)
 {
-	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
+	struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet);
 	struct dmaengine_desc_callback cb;
 	struct tegra_dma_desc *dma_desc;
 	unsigned int cb_count;
@@ -1523,8 +1523,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
 		tdc->id = i;
 		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

-		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
-			     (unsigned long)tdc);
+		tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
 		spin_lock_init(&tdc->lock);
 		init_waitqueue_head(&tdc->wq);
@@ -4,5 +4,8 @@ obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
 obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
-obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
+			    k3-psil-am654.o \
+			    k3-psil-j721e.o \
+			    k3-psil-j7200.o
 obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
drivers/dma/ti/k3-psil-j7200.c (new file, 175 lines)
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x)				\
+	{						\
+		.thread_id = x,				\
+		.ep_config = {				\
+			.ep_type = PSIL_EP_PDMA_XY,	\
+		},					\
+	}
+
+#define PSIL_PDMA_XY_PKT(x)				\
+	{						\
+		.thread_id = x,				\
+		.ep_config = {				\
+			.ep_type = PSIL_EP_PDMA_XY,	\
+			.pkt_mode = 1,			\
+		},					\
+	}
+
+#define PSIL_PDMA_MCASP(x)				\
+	{						\
+		.thread_id = x,				\
+		.ep_config = {				\
+			.ep_type = PSIL_EP_PDMA_XY,	\
+			.pdma_acc32 = 1,		\
+			.pdma_burst = 1,		\
+		},					\
+	}
+
+#define PSIL_ETHERNET(x)				\
+	{						\
+		.thread_id = x,				\
+		.ep_config = {				\
+			.ep_type = PSIL_EP_NATIVE,	\
+			.pkt_mode = 1,			\
+			.needs_epib = 1,		\
+			.psd_size = 16,			\
+		},					\
+	}
+
+#define PSIL_SA2UL(x, tx)				\
+	{						\
+		.thread_id = x,				\
+		.ep_config = {				\
+			.ep_type = PSIL_EP_NATIVE,	\
+			.pkt_mode = 1,			\
+			.needs_epib = 1,		\
+			.psd_size = 64,			\
+			.notdpkt = tx,			\
+		},					\
+	}
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j7200_src_ep_map[] = {
+	/* PDMA_MCASP - McASP0-2 */
+	PSIL_PDMA_MCASP(0x4400),
+	PSIL_PDMA_MCASP(0x4401),
+	PSIL_PDMA_MCASP(0x4402),
+	/* PDMA_SPI_G0 - SPI0-3 */
+	PSIL_PDMA_XY_PKT(0x4600),
+	PSIL_PDMA_XY_PKT(0x4601),
+	PSIL_PDMA_XY_PKT(0x4602),
+	PSIL_PDMA_XY_PKT(0x4603),
+	PSIL_PDMA_XY_PKT(0x4604),
+	PSIL_PDMA_XY_PKT(0x4605),
+	PSIL_PDMA_XY_PKT(0x4606),
+	PSIL_PDMA_XY_PKT(0x4607),
+	PSIL_PDMA_XY_PKT(0x4608),
+	PSIL_PDMA_XY_PKT(0x4609),
+	PSIL_PDMA_XY_PKT(0x460a),
+	PSIL_PDMA_XY_PKT(0x460b),
+	PSIL_PDMA_XY_PKT(0x460c),
+	PSIL_PDMA_XY_PKT(0x460d),
+	PSIL_PDMA_XY_PKT(0x460e),
+	PSIL_PDMA_XY_PKT(0x460f),
+	/* PDMA_SPI_G1 - SPI4-7 */
+	PSIL_PDMA_XY_PKT(0x4610),
+	PSIL_PDMA_XY_PKT(0x4611),
+	PSIL_PDMA_XY_PKT(0x4612),
+	PSIL_PDMA_XY_PKT(0x4613),
+	PSIL_PDMA_XY_PKT(0x4614),
+	PSIL_PDMA_XY_PKT(0x4615),
+	PSIL_PDMA_XY_PKT(0x4616),
+	PSIL_PDMA_XY_PKT(0x4617),
+	PSIL_PDMA_XY_PKT(0x4618),
+	PSIL_PDMA_XY_PKT(0x4619),
+	PSIL_PDMA_XY_PKT(0x461a),
+	PSIL_PDMA_XY_PKT(0x461b),
+	PSIL_PDMA_XY_PKT(0x461c),
+	PSIL_PDMA_XY_PKT(0x461d),
+	PSIL_PDMA_XY_PKT(0x461e),
+	PSIL_PDMA_XY_PKT(0x461f),
+	/* PDMA_USART_G0 - UART0-1 */
+	PSIL_PDMA_XY_PKT(0x4700),
+	PSIL_PDMA_XY_PKT(0x4701),
+	/* PDMA_USART_G1 - UART2-3 */
+	PSIL_PDMA_XY_PKT(0x4702),
+	PSIL_PDMA_XY_PKT(0x4703),
+	/* PDMA_USART_G2 - UART4-9 */
+	PSIL_PDMA_XY_PKT(0x4704),
+	PSIL_PDMA_XY_PKT(0x4705),
+	PSIL_PDMA_XY_PKT(0x4706),
+	PSIL_PDMA_XY_PKT(0x4707),
+	PSIL_PDMA_XY_PKT(0x4708),
+	PSIL_PDMA_XY_PKT(0x4709),
+	/* CPSW5 */
+	PSIL_ETHERNET(0x4a00),
+	/* CPSW0 */
+	PSIL_ETHERNET(0x7000),
+	/* MCU_PDMA_MISC_G0 - SPI0 */
+	PSIL_PDMA_XY_PKT(0x7100),
+	PSIL_PDMA_XY_PKT(0x7101),
+	PSIL_PDMA_XY_PKT(0x7102),
+	PSIL_PDMA_XY_PKT(0x7103),
+	/* MCU_PDMA_MISC_G1 - SPI1-2 */
+	PSIL_PDMA_XY_PKT(0x7200),
+	PSIL_PDMA_XY_PKT(0x7201),
+	PSIL_PDMA_XY_PKT(0x7202),
+	PSIL_PDMA_XY_PKT(0x7203),
+	PSIL_PDMA_XY_PKT(0x7204),
+	PSIL_PDMA_XY_PKT(0x7205),
+	PSIL_PDMA_XY_PKT(0x7206),
+	PSIL_PDMA_XY_PKT(0x7207),
+	/* MCU_PDMA_MISC_G2 - UART0 */
+	PSIL_PDMA_XY_PKT(0x7300),
+	/* MCU_PDMA_ADC - ADC0-1 */
+	PSIL_PDMA_XY_TR(0x7400),
+	PSIL_PDMA_XY_TR(0x7401),
+	/* SA2UL */
+	PSIL_SA2UL(0x7500, 0),
+	PSIL_SA2UL(0x7501, 0),
+	PSIL_SA2UL(0x7502, 0),
+	PSIL_SA2UL(0x7503, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j7200_dst_ep_map[] = {
+	/* CPSW5 */
+	PSIL_ETHERNET(0xca00),
+	PSIL_ETHERNET(0xca01),
+	PSIL_ETHERNET(0xca02),
+	PSIL_ETHERNET(0xca03),
+	PSIL_ETHERNET(0xca04),
+	PSIL_ETHERNET(0xca05),
+	PSIL_ETHERNET(0xca06),
+	PSIL_ETHERNET(0xca07),
+	/* CPSW0 */
+	PSIL_ETHERNET(0xf000),
+	PSIL_ETHERNET(0xf001),
+	PSIL_ETHERNET(0xf002),
+	PSIL_ETHERNET(0xf003),
+	PSIL_ETHERNET(0xf004),
+	PSIL_ETHERNET(0xf005),
+	PSIL_ETHERNET(0xf006),
+	PSIL_ETHERNET(0xf007),
+	/* SA2UL */
+	PSIL_SA2UL(0xf500, 1),
+	PSIL_SA2UL(0xf501, 1),
+};
+
+struct psil_ep_map j7200_ep_map = {
+	.name = "j7200",
+	.src = j7200_src_ep_map,
+	.src_count = ARRAY_SIZE(j7200_src_ep_map),
+	.dst = j7200_dst_ep_map,
+	.dst_count = ARRAY_SIZE(j7200_dst_ep_map),
+};
@@ -166,6 +166,8 @@ static struct psil_ep j721e_src_ep_map[] = {
 	/* SA2UL */
 	PSIL_SA2UL(0x7500, 0),
 	PSIL_SA2UL(0x7501, 0),
+	PSIL_SA2UL(0x7502, 0),
+	PSIL_SA2UL(0x7503, 0),
 };

 /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
@@ -211,6 +213,7 @@ static struct psil_ep j721e_dst_ep_map[] = {
 	PSIL_ETHERNET(0xf007),
 	/* SA2UL */
 	PSIL_SA2UL(0xf500, 1),
+	PSIL_SA2UL(0xf501, 1),
 };

 struct psil_ep_map j721e_ep_map = {
@@ -39,5 +39,6 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
 /* SoC PSI-L endpoint maps */
 extern struct psil_ep_map am654_ep_map;
 extern struct psil_ep_map j721e_ep_map;
+extern struct psil_ep_map j7200_ep_map;

 #endif /* K3_PSIL_PRIV_H_ */
@@ -9,11 +9,19 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
+#include <linux/sys_soc.h>

 #include "k3-psil-priv.h"

 static DEFINE_MUTEX(ep_map_mutex);
-static struct psil_ep_map *soc_ep_map;
+static const struct psil_ep_map *soc_ep_map;
+
+static const struct soc_device_attribute k3_soc_devices[] = {
+	{ .family = "AM65X", .data = &am654_ep_map },
+	{ .family = "J721E", .data = &j721e_ep_map },
+	{ .family = "J7200", .data = &j7200_ep_map },
+	{ /* sentinel */ }
+};

 struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
 {
@@ -21,10 +29,11 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)

 	mutex_lock(&ep_map_mutex);
 	if (!soc_ep_map) {
-		if (of_machine_is_compatible("ti,am654")) {
-			soc_ep_map = &am654_ep_map;
-		} else if (of_machine_is_compatible("ti,j721e")) {
-			soc_ep_map = &j721e_ep_map;
+		const struct soc_device_attribute *soc;
+
+		soc = soc_device_match(k3_soc_devices);
+		if (soc) {
+			soc_ep_map = soc->data;
 		} else {
 			pr_err("PSIL: No compatible machine found for map\n");
 			mutex_unlock(&ep_map_mutex);
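The k3-psil hunk replaces chained of_machine_is_compatible() checks with a soc_device_attribute table, so supporting a new SoC becomes a one-row table addition instead of another else-if branch. A sketch of the lookup pattern; the map types here are stand-ins, while the table layout mirrors the hunk above:

#include <linux/sys_soc.h>

struct ep_map { const char *name; };

static const struct ep_map am654_map = { .name = "am654" };
static const struct ep_map j7200_map = { .name = "j7200" };

static const struct soc_device_attribute soc_table[] = {
	{ .family = "AM65X", .data = &am654_map },
	{ .family = "J7200", .data = &j7200_map },
	{ /* sentinel */ }
};

static const struct ep_map *lookup_map(void)
{
	const struct soc_device_attribute *soc = soc_device_match(soc_table);

	return soc ? soc->data : NULL;	/* NULL: unknown SoC */
}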
@@ -378,17 +378,11 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

 int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 {
-	u32 txrt_ctl;
-
-	txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
 	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
-			    txrt_ctl);
+			    UDMA_PEER_RT_EN_ENABLE);

-	txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
-				      UDMA_CHAN_RT_CTL_REG);
-	txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
 	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
-			    txrt_ctl);
+			    UDMA_CHAN_RT_CTL_EN);

 	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
 	return 0;
@@ -1058,19 +1052,14 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

 int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 {
-	u32 rxrt_ctl;
-
 	if (rx_chn->remote)
 		return -EINVAL;

 	if (rx_chn->flows_ready < rx_chn->flow_num)
 		return -EINVAL;

-	rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
-				      UDMA_CHAN_RT_CTL_REG);
-	rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
 	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
-			    rxrt_ctl);
+			    UDMA_CHAN_RT_CTL_EN);

 	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
 			    UDMA_PEER_RT_EN_ENABLE);
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/sys_soc.h>
 #include <linux/of.h>
 #include <linux/of_dma.h>
 #include <linux/of_device.h>
@@ -91,6 +92,9 @@ struct udma_match_data {
 	bool enable_memcpy_support;
 	u32 flags;
 	u32 statictr_z_mask;
-	u32 rchan_oes_offset;
+};
+
+struct udma_soc_data {
+	u32 rchan_oes_offset;
 };

@@ -117,6 +121,7 @@ struct udma_dev {
 	struct device *dev;
 	void __iomem *mmrs[MMR_LAST];
 	const struct udma_match_data *match_data;
+	const struct udma_soc_data *soc_data;

 	u8 tpl_levels;
 	u32 tpl_start_idx[3];
@@ -1679,7 +1684,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct udma_chan *uc = to_udma_chan(chan);
 	struct udma_dev *ud = to_udma_dev(chan->device);
-	const struct udma_match_data *match_data = ud->match_data;
+	const struct udma_soc_data *soc_data = ud->soc_data;
 	struct k3_ring *irq_ring;
 	u32 irq_udma_idx;
 	int ret;
@@ -1779,7 +1784,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
 			K3_PSIL_DST_THREAD_ID_OFFSET;

 		irq_ring = uc->rflow->r_ring;
-		irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+		irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;

 		ret = udma_tisci_rx_channel_config(uc);
 		break;
@@ -2024,11 +2029,6 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
 	int num_tr = 0;
 	int tr_idx = 0;

-	if (!is_slave_direction(dir)) {
-		dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
-		return NULL;
-	}
-
 	/* estimate the number of TRs we will need */
 	for_each_sg(sgl, sgent, sglen, i) {
 		if (sg_dma_len(sgent) < SZ_64K)
@@ -2400,11 +2400,6 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 	unsigned int i;
 	int num_tr;

-	if (!is_slave_direction(dir)) {
-		dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
-		return NULL;
-	}
-
 	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
 				      &tr0_cnt1, &tr1_cnt0);
 	if (num_tr < 0) {
@@ -2914,9 +2909,9 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
  * This tasklet handles the completion of a DMA descriptor by
  * calling its callback and freeing it.
  */
-static void udma_vchan_complete(unsigned long arg)
+static void udma_vchan_complete(struct tasklet_struct *t)
 {
-	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
 	struct virt_dma_desc *vd, *_vd;
 	struct dmaengine_desc_callback cb;
 	LIST_HEAD(head);
@@ -3101,14 +3096,12 @@ static struct udma_match_data am654_main_data = {
 	.psil_base = 0x1000,
 	.enable_memcpy_support = true,
 	.statictr_z_mask = GENMASK(11, 0),
-	.rchan_oes_offset = 0x200,
 };

 static struct udma_match_data am654_mcu_data = {
 	.psil_base = 0x6000,
 	.enable_memcpy_support = false,
 	.statictr_z_mask = GENMASK(11, 0),
-	.rchan_oes_offset = 0x200,
 };

 static struct udma_match_data j721e_main_data = {
@@ -3116,7 +3109,6 @@ static struct udma_match_data j721e_main_data = {
 	.enable_memcpy_support = true,
 	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
 	.statictr_z_mask = GENMASK(23, 0),
-	.rchan_oes_offset = 0x400,
 };

 static struct udma_match_data j721e_mcu_data = {
@@ -3124,7 +3116,6 @@ static struct udma_match_data j721e_mcu_data = {
 	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
 	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
 	.statictr_z_mask = GENMASK(23, 0),
-	.rchan_oes_offset = 0x400,
 };

 static const struct of_device_id udma_of_match[] = {
@@ -3145,15 +3136,31 @@ static const struct of_device_id udma_of_match[] = {
 	{ /* Sentinel */ },
 };

+static struct udma_soc_data am654_soc_data = {
+	.rchan_oes_offset = 0x200,
+};
+
+static struct udma_soc_data j721e_soc_data = {
+	.rchan_oes_offset = 0x400,
+};
+
+static struct udma_soc_data j7200_soc_data = {
+	.rchan_oes_offset = 0x80,
+};
+
+static const struct soc_device_attribute k3_soc_devices[] = {
+	{ .family = "AM65X", .data = &am654_soc_data },
+	{ .family = "J721E", .data = &j721e_soc_data },
+	{ .family = "J7200", .data = &j7200_soc_data },
+	{ /* sentinel */ }
+};
+
 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
 {
-	struct resource *res;
 	int i;

 	for (i = 0; i < MMR_LAST; i++) {
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						   mmr_names[i]);
-		ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+		ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
 		if (IS_ERR(ud->mmrs[i]))
 			return PTR_ERR(ud->mmrs[i]);
 	}
@@ -3287,7 +3294,7 @@ static int udma_setup_resources(struct udma_dev *ud)
 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
 	for (j = 0; j < rm_res->sets; j++, i++) {
 		irq_res.desc[i].start = rm_res->desc[j].start +
-					ud->match_data->rchan_oes_offset;
+					ud->soc_data->rchan_oes_offset;
 		irq_res.desc[i].num = rm_res->desc[j].num;
 	}
 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -3497,6 +3504,7 @@ static void udma_dbg_summary_show(struct seq_file *s,
 static int udma_probe(struct platform_device *pdev)
 {
 	struct device_node *navss_node = pdev->dev.parent->of_node;
+	const struct soc_device_attribute *soc;
 	struct device *dev = &pdev->dev;
 	struct udma_dev *ud;
 	const struct of_device_id *match;
@@ -3561,6 +3569,13 @@ static int udma_probe(struct platform_device *pdev)
 	}
 	ud->match_data = match->data;

+	soc = soc_device_match(k3_soc_devices);
+	if (!soc) {
+		dev_err(dev, "No compatible SoC found\n");
+		return -ENODEV;
+	}
+	ud->soc_data = soc->data;
+
 	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);

@@ -3649,8 +3664,7 @@ static int udma_probe(struct platform_device *pdev)

 		vchan_init(&uc->vc, &ud->ddev);
 		/* Use custom vchan completion handling */
-		tasklet_init(&uc->vc.task, udma_vchan_complete,
-			     (unsigned long)&uc->vc);
+		tasklet_setup(&uc->vc.task, udma_vchan_complete);
 		init_completion(&uc->teardown_completed);
 		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
 	}
@@ -1904,7 +1904,7 @@ static struct platform_driver omap_dma_driver = {
 	.remove	= omap_dma_remove,
 	.driver = {
 		.name	= "omap-dma-engine",
-		.of_match_table = of_match_ptr(omap_dma_match),
+		.of_match_table = omap_dma_match,
 	},
 };
@@ -563,9 +563,9 @@ static int td_terminate_all(struct dma_chan *chan)
 	return 0;
 }

-static void td_tasklet(unsigned long data)
+static void td_tasklet(struct tasklet_struct *t)
 {
-	struct timb_dma *td = (struct timb_dma *)data;
+	struct timb_dma *td = from_tasklet(td, t, tasklet);
 	u32 isr;
 	u32 ipr;
 	u32 ier;
@@ -658,7 +658,7 @@ static int td_probe(struct platform_device *pdev)
 	iowrite32(0x0, td->membase + TIMBDMA_IER);
 	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

-	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
+	tasklet_setup(&td->tasklet, td_tasklet);

 	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
 	if (err) {
@@ -601,13 +601,13 @@ scan_done:
 	}
 }

-static void txx9dmac_chan_tasklet(unsigned long data)
+static void txx9dmac_chan_tasklet(struct tasklet_struct *t)
 {
 	int irq;
 	u32 csr;
 	struct txx9dmac_chan *dc;

-	dc = (struct txx9dmac_chan *)data;
+	dc = from_tasklet(dc, t, tasklet);
 	csr = channel_readl(dc, CSR);
 	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

@@ -638,13 +638,13 @@ static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

-static void txx9dmac_tasklet(unsigned long data)
+static void txx9dmac_tasklet(struct tasklet_struct *t)
 {
 	int irq;
 	u32 csr;
 	struct txx9dmac_chan *dc;

-	struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
+	struct txx9dmac_dev *ddev = from_tasklet(ddev, t, tasklet);
 	u32 mcr;
 	int i;

@@ -1113,8 +1113,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return irq;
-	tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
-		     (unsigned long)dc);
+	tasklet_setup(&dc->tasklet, txx9dmac_chan_tasklet);
 	dc->irq = irq;
 	err = devm_request_irq(&pdev->dev, dc->irq,
 		txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
@@ -1200,8 +1199,7 @@ static int __init txx9dmac_probe(struct platform_device *pdev)

 	ddev->irq = platform_get_irq(pdev, 0);
 	if (ddev->irq >= 0) {
-		tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
-			     (unsigned long)ddev);
+		tasklet_setup(&ddev->tasklet, txx9dmac_tasklet);
 		err = devm_request_irq(&pdev->dev, ddev->irq,
 			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
 		if (err)
@@ -80,9 +80,9 @@ EXPORT_SYMBOL_GPL(vchan_find_desc);
  * This tasklet handles the completion of a DMA descriptor by
  * calling its callback and freeing it.
  */
-static void vchan_complete(unsigned long arg)
+static void vchan_complete(struct tasklet_struct *t)
 {
-	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
 	struct virt_dma_desc *vd, *_vd;
 	struct dmaengine_desc_callback cb;
 	LIST_HEAD(head);
@@ -131,7 +131,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	INIT_LIST_HEAD(&vc->desc_completed);
 	INIT_LIST_HEAD(&vc->desc_terminated);

-	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
+	tasklet_setup(&vc->task, vchan_complete);

 	vc->chan.device = dmadev;
 	list_add_tail(&vc->chan.device_node, &dmadev->channels);
@@ -975,9 +975,9 @@ static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
 	return dma_cookie_status(dchan, cookie, txstate);
 }

-static void xgene_dma_tasklet_cb(unsigned long data)
+static void xgene_dma_tasklet_cb(struct tasklet_struct *t)
 {
-	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
+	struct xgene_dma_chan *chan = from_tasklet(chan, t, tasklet);

 	/* Run all cleanup for descriptors which have been completed */
 	xgene_dma_cleanup_descriptors(chan);
@@ -1539,8 +1539,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
 	INIT_LIST_HEAD(&chan->ld_pending);
 	INIT_LIST_HEAD(&chan->ld_running);
 	INIT_LIST_HEAD(&chan->ld_completed);
-	tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
-		     (unsigned long)chan);
+	tasklet_setup(&chan->tasklet, xgene_dma_tasklet_cb);

 	chan->pending = 0;
 	chan->desc_pool = NULL;
@@ -1044,11 +1044,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)

 /**
  * xilinx_dma_do_tasklet - Schedule completion tasklet
- * @data: Pointer to the Xilinx DMA channel structure
+ * @t: Pointer to the Xilinx DMA channel structure
  */
-static void xilinx_dma_do_tasklet(unsigned long data)
+static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
 {
-	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
+	struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);

 	xilinx_dma_chan_desc_cleanup(chan);
 }
@@ -2536,13 +2536,8 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
 	*tmp_clk = NULL;

 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
-	if (IS_ERR(*axi_clk)) {
-		err = PTR_ERR(*axi_clk);
-		if (err != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
-				err);
-		return err;
-	}
+	if (IS_ERR(*axi_clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");

 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
 	if (IS_ERR(*tx_clk))
@@ -2603,22 +2598,12 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
 	*tmp2_clk = NULL;

 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
-	if (IS_ERR(*axi_clk)) {
-		err = PTR_ERR(*axi_clk);
-		if (err != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
-				err);
-		return err;
-	}
+	if (IS_ERR(*axi_clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");

 	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
-	if (IS_ERR(*dev_clk)) {
-		err = PTR_ERR(*dev_clk);
-		if (err != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
-				err);
-		return err;
-	}
+	if (IS_ERR(*dev_clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");

 	err = clk_prepare_enable(*axi_clk);
 	if (err) {
@@ -2647,13 +2632,8 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
 	int err;

 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
-	if (IS_ERR(*axi_clk)) {
-		err = PTR_ERR(*axi_clk);
-		if (err != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
-				err);
-		return err;
-	}
+	if (IS_ERR(*axi_clk))
+		return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");

 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
 	if (IS_ERR(*tx_clk))
@@ -2866,8 +2846,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	}

 	/* Initialize the tasklet */
-	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
-		     (unsigned long)chan);
+	tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);

 	/*
 	 * Initialize the DMA channel and add it to the DMA engine channels
@@ -10,6 +10,7 @@
 #include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/clk.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dmapool.h>
@@ -266,6 +267,210 @@ struct xilinx_dpdma_device {
 	bool ext_addr;
 };

+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
+#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+enum xilinx_dpdma_testcases {
+	DPDMA_TC_INTR_DONE,
+	DPDMA_TC_NONE
+};
+
+struct xilinx_dpdma_debugfs {
+	enum xilinx_dpdma_testcases testcase;
+	u16 xilinx_dpdma_irq_done_count;
+	unsigned int chan_id;
+};
+
+static struct xilinx_dpdma_debugfs dpdma_debugfs;
+struct xilinx_dpdma_debugfs_request {
+	const char *name;
+	enum xilinx_dpdma_testcases tc;
+	ssize_t (*read)(char *buf);
+	int (*write)(char *args);
+};
+
+static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
+{
+	if (chan->id == dpdma_debugfs.chan_id)
+		dpdma_debugfs.xilinx_dpdma_irq_done_count++;
+}
+
+static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
+{
+	size_t out_str_len;
+
+	dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
+	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+			    out_str_len);
+	snprintf(buf, out_str_len, "%d",
+		 dpdma_debugfs.xilinx_dpdma_irq_done_count);
+
+	return 0;
+}
+
+static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
+{
+	char *arg;
+	int ret;
+	u32 id;
+
+	arg = strsep(&args, " ");
+	if (!arg || strncasecmp(arg, "start", 5))
+		return -EINVAL;
+
+	arg = strsep(&args, " ");
+	if (!arg)
+		return -EINVAL;
+
+	ret = kstrtou32(arg, 0, &id);
+	if (ret < 0)
+		return ret;
+
+	if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
+		return -EINVAL;
+
+	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
+	dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
+	dpdma_debugfs.chan_id = id;
+
+	return 0;
+}
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
+	{
+		.name = "DESCRIPTOR_DONE_INTR",
+		.tc = DPDMA_TC_INTR_DONE,
+		.read = xilinx_dpdma_debugfs_desc_done_irq_read,
+		.write = xilinx_dpdma_debugfs_desc_done_irq_write,
+	},
+};
+
+static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	enum xilinx_dpdma_testcases testcase;
+	char *kern_buff;
+	int ret = 0;
+
+	if (*pos != 0 || size <= 0)
+		return -EINVAL;
+
+	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+	if (!kern_buff) {
+		dpdma_debugfs.testcase = DPDMA_TC_NONE;
+		return -ENOMEM;
+	}
+
+	testcase = READ_ONCE(dpdma_debugfs.testcase);
+	if (testcase != DPDMA_TC_NONE) {
+		ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
+		if (ret < 0)
+			goto done;
+	} else {
+		strlcpy(kern_buff, "No testcase executed",
+			XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
+	}
+
+	size = min(size, strlen(kern_buff));
+	if (copy_to_user(buf, kern_buff, size))
+		ret = -EFAULT;
+
+done:
+	kfree(kern_buff);
+	if (ret)
+		return ret;
+
+	*pos = size + 1;
+	return size;
+}
+
+static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
+					  const char __user *buf, size_t size,
+					  loff_t *pos)
+{
+	char *kern_buff, *kern_buff_start;
+	char *testcase;
+	unsigned int i;
+	int ret;
+
+	if (*pos != 0 || size <= 0)
+		return -EINVAL;
+
+	/* Supporting single instance of test as of now. */
+	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
+		return -EBUSY;
+
+	kern_buff = kzalloc(size, GFP_KERNEL);
+	if (!kern_buff)
+		return -ENOMEM;
+	kern_buff_start = kern_buff;
+
+	ret = strncpy_from_user(kern_buff, buf, size);
+	if (ret < 0)
+		goto done;
+
+	/* Read the testcase name from a user request. */
+	testcase = strsep(&kern_buff, " ");
+
+	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
+		if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = dpdma_debugfs_reqs[i].write(kern_buff);
+	if (ret < 0)
+		goto done;
+
+	ret = size;
+
+done:
+	kfree(kern_buff_start);
+	return ret;
+}
+
+static const struct file_operations fops_xilinx_dpdma_dbgfs = {
+	.owner = THIS_MODULE,
+	.read = xilinx_dpdma_debugfs_read,
+	.write = xilinx_dpdma_debugfs_write,
+};
+
+static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
+{
+	struct dentry *dent;
+
+	dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+	dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
+				   NULL, &fops_xilinx_dpdma_dbgfs);
+	if (IS_ERR(dent))
+		dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
+}
+
+#else
+static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
+{
+}
+
+static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
 /* -----------------------------------------------------------------------------
  * I/O Accessors
  */
@@ -842,6 +1047,8 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)

 	spin_lock_irqsave(&chan->lock, flags);

+	xilinx_dpdma_debugfs_desc_done_irq(chan);
+
 	if (active)
 		vchan_cyclic_callback(&active->vdesc);
 	else
@@ -1251,15 +1458,15 @@ static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)

 /**
  * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
- * @data: tasklet data to be casted to DPDMA channel structure
+ * @t: pointer to the tasklet associated with this handler
  *
  * Per channel error handling tasklet. This function waits for the outstanding
  * transaction to complete and triggers error handling. After error handling,
  * re-enable channel error interrupts, and restart the channel if needed.
  */
-static void xilinx_dpdma_chan_err_task(unsigned long data)
+static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
 {
-	struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+	struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
 	struct xilinx_dpdma_device *xdev = chan->xdev;
 	unsigned long flags;

@@ -1348,8 +1555,7 @@ static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
 	spin_lock_init(&chan->lock);
 	init_waitqueue_head(&chan->wait_to_stop);

-	tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
-		     (unsigned long)chan);
+	tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);

 	chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
 	vchan_init(&chan->vchan, &xdev->common);
@@ -1477,6 +1683,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)

 	xilinx_dpdma_enable_irq(xdev);

+	xilinx_dpdma_debugfs_init(xdev);
+
 	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");

 	return 0;
@@ -742,11 +742,11 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)

 /**
  * zynqmp_dma_do_tasklet - Schedule completion tasklet
- * @data: Pointer to the ZynqMP DMA channel structure
+ * @t: Pointer to the ZynqMP DMA channel structure
  */
-static void zynqmp_dma_do_tasklet(unsigned long data)
+static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
 {
-	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+	struct zynqmp_dma_chan *chan = from_tasklet(chan, t, tasklet);
 	u32 count;
 	unsigned long irqflags;

@@ -908,7 +908,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,

 	chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
 	zdev->chan = chan;
-	tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan);
+	tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
 	spin_lock_init(&chan->lock);
 	INIT_LIST_HEAD(&chan->active_list);
 	INIT_LIST_HEAD(&chan->pending_list);
@@ -285,9 +285,7 @@ static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
 		p = &d->phy[i];
 		c = p->vchan;
 		if (c) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&c->vc.lock, flags);
+			spin_lock(&c->vc.lock);
 			if (c->cyclic) {
 				vchan_cyclic_callback(&p->ds_run->vd);
 			} else {
@@ -295,7 +293,7 @@ static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
 				p->ds_done = p->ds_run;
 				task = 1;
 			}
-			spin_unlock_irqrestore(&c->vc.lock, flags);
+			spin_unlock(&c->vc.lock);
 			irq_chan |= BIT(i);
 		}
 	}
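The zx_dma hunk drops the _irqsave spinlock variants because this code runs inside a hard-IRQ handler, where interrupts are already disabled, so saving and restoring the flags was redundant work. A minimal sketch of the reasoning under that assumption (it would not hold for code that can also run in process or softirq context):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(chan_lock);

static irqreturn_t example_irq(int irq, void *dev_id)
{
	/* hard-IRQ context: no irqsave needed, we cannot be re-entered */
	spin_lock(&chan_lock);
	/* ... walk channels and mark completed descriptors ... */
	spin_unlock(&chan_lock);
	return IRQ_HANDLED;
}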
@@ -1472,7 +1472,6 @@ void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 				       dma_filter_fn fn, void *fn_param,
 				       struct device_node *np);
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);

 struct dma_chan *dma_request_chan(struct device *dev, const char *name);
 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
@@ -1502,11 +1501,6 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
 	return NULL;
 }
-static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
-							  const char *name)
-{
-	return NULL;
-}
 static inline struct dma_chan *dma_request_chan(struct device *dev,
 						const char *name)
 {
@@ -1527,8 +1521,6 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
 }
 #endif

-#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
-
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
 	struct dma_slave_caps caps;
@@ -1577,6 +1569,15 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 #define dma_request_channel(mask, x, y) \
 	__dma_request_channel(&(mask), x, y, NULL)

+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+	struct dma_chan *ch = dma_request_chan(dev, name);
+
+	return IS_ERR(ch) ? NULL : ch;
+}
+
 static inline struct dma_chan
 *dma_request_slave_channel_compat(const dma_cap_mask_t mask,
 				  dma_filter_fn fn, void *fn_param,
@@ -26,6 +26,7 @@ struct device;
 * @dst_id:	dst request line
 * @m_master:	memory master for transfers on allocated channel
 * @p_master:	peripheral master for transfers on allocated channel
+ * @channels:	mask of the channels permitted for allocation (zero value means any)
 * @hs_polarity:set active low polarity of handshake interface
 */
 struct dw_dma_slave {
@@ -34,6 +35,7 @@ struct dw_dma_slave {
	u8			dst_id;
	u8			m_master;
	u8			p_master;
+	u8			channels;
	bool			hs_polarity;
 };