Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  dma: Add SoF and EoF debugging to ipu_idmac.c, minor cleanup
  dw_dmac: add cyclic API to DW DMA driver
  dmaengine: Add privatecnt to revert DMA_PRIVATE property
  dmatest: add dma interrupts and callbacks
  dmatest: add xor test
  dmaengine: allow dma support for async_tx to be toggled
  async_tx: provide __async_inline for HAS_DMA=n archs
  dmaengine: kill some unused headers
  dmaengine: initialize tx_list in dma_async_tx_descriptor_init
  dma: i.MX31 IPU DMA robustness improvements
  dma: improve section assignment in i.MX31 IPU DMA driver
  dma: ipu_idmac driver cosmetic clean-up
  dmaengine: fail device registration if channel registration fails
commit 133e2a3164
@ -30,7 +30,7 @@
|
|||||||
#ifdef CONFIG_DMA_ENGINE
|
#ifdef CONFIG_DMA_ENGINE
|
||||||
static int __init async_tx_init(void)
|
static int __init async_tx_init(void)
|
||||||
{
|
{
|
||||||
dmaengine_get();
|
async_dmaengine_get();
|
||||||
|
|
||||||
printk(KERN_INFO "async_tx: api initialized (async)\n");
|
printk(KERN_INFO "async_tx: api initialized (async)\n");
|
||||||
|
|
||||||
@ -39,7 +39,7 @@ static int __init async_tx_init(void)
|
|||||||
|
|
||||||
static void __exit async_tx_exit(void)
|
static void __exit async_tx_exit(void)
|
||||||
{
|
{
|
||||||
dmaengine_put();
|
async_dmaengine_put();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -56,7 +56,7 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
|
|||||||
if (depend_tx &&
|
if (depend_tx &&
|
||||||
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
|
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
|
||||||
return depend_tx->chan;
|
return depend_tx->chan;
|
||||||
return dma_find_channel(tx_type);
|
return async_dma_find_channel(tx_type);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
|
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
|
||||||
#else
|
#else
|
||||||
|
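The header-side definitions of the new async_* wrappers are not part of this hunk. Below is a plausible sketch of what they expand to; the wrapper names are taken from the diff, but the exact #ifdef structure is an assumption.

/* Hypothetical sketch (not shown in this diff): gate dmaengine usage for
 * async_tx on CONFIG_ASYNC_TX_DMA so offload can be toggled independently. */
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get()		dmaengine_get()
#define async_dmaengine_put()		dmaengine_put()
#define async_dma_find_channel(type)	dma_find_channel(type)
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
	return NULL;	/* fall back to the synchronous software path */
}
#endif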
@ -30,11 +30,8 @@
|
|||||||
#include <linux/raid/xor.h>
|
#include <linux/raid/xor.h>
|
||||||
#include <linux/async_tx.h>
|
#include <linux/async_tx.h>
|
||||||
|
|
||||||
/* do_async_xor - dma map the pages and perform the xor with an engine.
|
/* do_async_xor - dma map the pages and perform the xor with an engine */
|
||||||
* This routine is marked __always_inline so it can be compiled away
|
static __async_inline struct dma_async_tx_descriptor *
|
||||||
* when CONFIG_DMA_ENGINE=n
|
|
||||||
*/
|
|
||||||
static __always_inline struct dma_async_tx_descriptor *
|
|
||||||
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
|
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
|
||||||
unsigned int offset, int src_cnt, size_t len,
|
unsigned int offset, int src_cnt, size_t len,
|
||||||
enum async_tx_flags flags,
|
enum async_tx_flags flags,
|
||||||
|
@ -98,6 +98,17 @@ config NET_DMA
|
|||||||
Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
|
Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
|
||||||
say N.
|
say N.
|
||||||
|
|
||||||
|
config ASYNC_TX_DMA
|
||||||
|
bool "Async_tx: Offload support for the async_tx api"
|
||||||
|
depends on DMA_ENGINE
|
||||||
|
help
|
||||||
|
This allows the async_tx api to take advantage of offload engines for
|
||||||
|
memcpy, memset, xor, and raid6 p+q operations. If your platform has
|
||||||
|
a dma engine that can perform raid operations and you have enabled
|
||||||
|
MD_RAID456 say Y.
|
||||||
|
|
||||||
|
If unsure, say N.
|
||||||
|
|
||||||
config DMATEST
|
config DMATEST
|
||||||
tristate "DMA Test client"
|
tristate "DMA Test client"
|
||||||
depends on DMA_ENGINE
|
depends on DMA_ENGINE
|
||||||
|
@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
|
|||||||
* published in the general-purpose allocator
|
* published in the general-purpose allocator
|
||||||
*/
|
*/
|
||||||
dma_cap_set(DMA_PRIVATE, device->cap_mask);
|
dma_cap_set(DMA_PRIVATE, device->cap_mask);
|
||||||
|
device->privatecnt++;
|
||||||
err = dma_chan_get(chan);
|
err = dma_chan_get(chan);
|
||||||
|
|
||||||
if (err == -ENODEV) {
|
if (err == -ENODEV) {
|
||||||
@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
|
|||||||
dma_chan_name(chan), err);
|
dma_chan_name(chan), err);
|
||||||
else
|
else
|
||||||
break;
|
break;
|
||||||
|
if (--device->privatecnt == 0)
|
||||||
|
dma_cap_clear(DMA_PRIVATE, device->cap_mask);
|
||||||
chan->private = NULL;
|
chan->private = NULL;
|
||||||
chan = NULL;
|
chan = NULL;
|
||||||
}
|
}
|
||||||
@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan)
|
|||||||
WARN_ONCE(chan->client_count != 1,
|
WARN_ONCE(chan->client_count != 1,
|
||||||
"chan reference count %d != 1\n", chan->client_count);
|
"chan reference count %d != 1\n", chan->client_count);
|
||||||
dma_chan_put(chan);
|
dma_chan_put(chan);
|
||||||
|
/* drop PRIVATE cap enabled by __dma_request_channel() */
|
||||||
|
if (--chan->device->privatecnt == 0)
|
||||||
|
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
|
||||||
chan->private = NULL;
|
chan->private = NULL;
|
||||||
mutex_unlock(&dma_list_mutex);
|
mutex_unlock(&dma_list_mutex);
|
||||||
}
|
}
|
||||||
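For context, a minimal sketch of the request/release sequence that this privatecnt accounting serves. It is illustrative only: it assumes a memcpy-capable engine, uses just the public dmaengine helpers, and abbreviates error handling.

/* Illustrative: request a private channel, use it, release it.  While the
 * channel is held the device carries DMA_PRIVATE; when the last private
 * reference goes away, privatecnt drops to zero and the capability is
 * cleared, returning the device to the general-purpose pool. */
#include <linux/dmaengine.h>

static int example_private_copy(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);	/* no filter function */
	if (!chan)
		return -ENODEV;

	/* ... issue transfers on 'chan' ... */

	dma_release_channel(chan);	/* drops the private reference */
	return 0;
}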
@ -602,6 +608,24 @@ void dmaengine_put(void)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(dmaengine_put);
|
EXPORT_SYMBOL(dmaengine_put);
|
||||||
|
|
||||||
|
static int get_dma_id(struct dma_device *device)
|
||||||
|
{
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
idr_retry:
|
||||||
|
if (!idr_pre_get(&dma_idr, GFP_KERNEL))
|
||||||
|
return -ENOMEM;
|
||||||
|
mutex_lock(&dma_list_mutex);
|
||||||
|
rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
|
||||||
|
mutex_unlock(&dma_list_mutex);
|
||||||
|
if (rc == -EAGAIN)
|
||||||
|
goto idr_retry;
|
||||||
|
else if (rc != 0)
|
||||||
|
return rc;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* dma_async_device_register - registers DMA devices found
|
* dma_async_device_register - registers DMA devices found
|
||||||
* @device: &dma_device
|
* @device: &dma_device
|
||||||
@ -640,27 +664,25 @@ int dma_async_device_register(struct dma_device *device)
|
|||||||
idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
|
idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
|
||||||
if (!idr_ref)
|
if (!idr_ref)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
atomic_set(idr_ref, 0);
|
rc = get_dma_id(device);
|
||||||
idr_retry:
|
if (rc != 0) {
|
||||||
if (!idr_pre_get(&dma_idr, GFP_KERNEL))
|
kfree(idr_ref);
|
||||||
return -ENOMEM;
|
|
||||||
mutex_lock(&dma_list_mutex);
|
|
||||||
rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
|
|
||||||
mutex_unlock(&dma_list_mutex);
|
|
||||||
if (rc == -EAGAIN)
|
|
||||||
goto idr_retry;
|
|
||||||
else if (rc != 0)
|
|
||||||
return rc;
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
atomic_set(idr_ref, 0);
|
||||||
|
|
||||||
/* represent channels in sysfs. Probably want devs too */
|
/* represent channels in sysfs. Probably want devs too */
|
||||||
list_for_each_entry(chan, &device->channels, device_node) {
|
list_for_each_entry(chan, &device->channels, device_node) {
|
||||||
|
rc = -ENOMEM;
|
||||||
chan->local = alloc_percpu(typeof(*chan->local));
|
chan->local = alloc_percpu(typeof(*chan->local));
|
||||||
if (chan->local == NULL)
|
if (chan->local == NULL)
|
||||||
continue;
|
goto err_out;
|
||||||
chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
|
chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
|
||||||
if (chan->dev == NULL) {
|
if (chan->dev == NULL) {
|
||||||
free_percpu(chan->local);
|
free_percpu(chan->local);
|
||||||
continue;
|
chan->local = NULL;
|
||||||
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
chan->chan_id = chancnt++;
|
chan->chan_id = chancnt++;
|
||||||
@ -677,6 +699,8 @@ int dma_async_device_register(struct dma_device *device)
|
|||||||
if (rc) {
|
if (rc) {
|
||||||
free_percpu(chan->local);
|
free_percpu(chan->local);
|
||||||
chan->local = NULL;
|
chan->local = NULL;
|
||||||
|
kfree(chan->dev);
|
||||||
|
atomic_dec(idr_ref);
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
chan->client_count = 0;
|
chan->client_count = 0;
|
||||||
@ -701,12 +725,23 @@ int dma_async_device_register(struct dma_device *device)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
list_add_tail_rcu(&device->global_node, &dma_device_list);
|
list_add_tail_rcu(&device->global_node, &dma_device_list);
|
||||||
|
if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
|
||||||
|
device->privatecnt++; /* Always private */
|
||||||
dma_channel_rebalance();
|
dma_channel_rebalance();
|
||||||
mutex_unlock(&dma_list_mutex);
|
mutex_unlock(&dma_list_mutex);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_out:
|
err_out:
|
||||||
|
/* if we never registered a channel just release the idr */
|
||||||
|
if (atomic_read(idr_ref) == 0) {
|
||||||
|
mutex_lock(&dma_list_mutex);
|
||||||
|
idr_remove(&dma_idr, device->dev_id);
|
||||||
|
mutex_unlock(&dma_list_mutex);
|
||||||
|
kfree(idr_ref);
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
list_for_each_entry(chan, &device->channels, device_node) {
|
list_for_each_entry(chan, &device->channels, device_node) {
|
||||||
if (chan->local == NULL)
|
if (chan->local == NULL)
|
||||||
continue;
|
continue;
|
||||||
@ -893,6 +928,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
|
|||||||
{
|
{
|
||||||
tx->chan = chan;
|
tx->chan = chan;
|
||||||
spin_lock_init(&tx->lock);
|
spin_lock_init(&tx->lock);
|
||||||
|
INIT_LIST_HEAD(&tx->tx_list);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
|
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
|
||||||
|
|
||||||
|
@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO);
|
|||||||
MODULE_PARM_DESC(max_channels,
|
MODULE_PARM_DESC(max_channels,
|
||||||
"Maximum number of channels to use (default: all)");
|
"Maximum number of channels to use (default: all)");
|
||||||
|
|
||||||
|
static unsigned int xor_sources = 3;
|
||||||
|
module_param(xor_sources, uint, S_IRUGO);
|
||||||
|
MODULE_PARM_DESC(xor_sources,
|
||||||
|
"Number of xor source buffers (default: 3)");
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Initialization patterns. All bytes in the source buffer has bit 7
|
* Initialization patterns. All bytes in the source buffer has bit 7
|
||||||
* set, all bytes in the destination buffer has bit 7 cleared.
|
* set, all bytes in the destination buffer has bit 7 cleared.
|
||||||
@ -59,8 +64,9 @@ struct dmatest_thread {
|
|||||||
struct list_head node;
|
struct list_head node;
|
||||||
struct task_struct *task;
|
struct task_struct *task;
|
||||||
struct dma_chan *chan;
|
struct dma_chan *chan;
|
||||||
u8 *srcbuf;
|
u8 **srcs;
|
||||||
u8 *dstbuf;
|
u8 **dsts;
|
||||||
|
enum dma_transaction_type type;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct dmatest_chan {
|
struct dmatest_chan {
|
||||||
@ -98,10 +104,12 @@ static unsigned long dmatest_random(void)
|
|||||||
return buf;
|
return buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
|
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
|
||||||
{
|
{
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
u8 *buf;
|
||||||
|
|
||||||
|
for (; (buf = *bufs); bufs++) {
|
||||||
for (i = 0; i < start; i++)
|
for (i = 0; i < start; i++)
|
||||||
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
|
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
|
||||||
for ( ; i < start + len; i++)
|
for ( ; i < start + len; i++)
|
||||||
@ -109,12 +117,16 @@ static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
|
|||||||
| (~i & PATTERN_COUNT_MASK);;
|
| (~i & PATTERN_COUNT_MASK);;
|
||||||
for ( ; i < test_buf_size; i++)
|
for ( ; i < test_buf_size; i++)
|
||||||
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
|
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
|
||||||
|
buf++;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
|
static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
|
||||||
{
|
{
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
u8 *buf;
|
||||||
|
|
||||||
|
for (; (buf = *bufs); bufs++) {
|
||||||
for (i = 0; i < start; i++)
|
for (i = 0; i < start; i++)
|
||||||
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
|
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
|
||||||
for ( ; i < start + len; i++)
|
for ( ; i < start + len; i++)
|
||||||
@ -123,6 +135,7 @@ static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
|
|||||||
for ( ; i < test_buf_size; i++)
|
for ( ; i < test_buf_size; i++)
|
||||||
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
|
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
|
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
|
||||||
unsigned int counter, bool is_srcbuf)
|
unsigned int counter, bool is_srcbuf)
|
||||||
@ -150,24 +163,31 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
|
|||||||
thread_name, index, expected, actual);
|
thread_name, index, expected, actual);
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned int dmatest_verify(u8 *buf, unsigned int start,
|
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
|
||||||
unsigned int end, unsigned int counter, u8 pattern,
|
unsigned int end, unsigned int counter, u8 pattern,
|
||||||
bool is_srcbuf)
|
bool is_srcbuf)
|
||||||
{
|
{
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
unsigned int error_count = 0;
|
unsigned int error_count = 0;
|
||||||
u8 actual;
|
u8 actual;
|
||||||
|
u8 expected;
|
||||||
|
u8 *buf;
|
||||||
|
unsigned int counter_orig = counter;
|
||||||
|
|
||||||
|
for (; (buf = *bufs); bufs++) {
|
||||||
|
counter = counter_orig;
|
||||||
for (i = start; i < end; i++) {
|
for (i = start; i < end; i++) {
|
||||||
actual = buf[i];
|
actual = buf[i];
|
||||||
if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
|
expected = pattern | (~counter & PATTERN_COUNT_MASK);
|
||||||
|
if (actual != expected) {
|
||||||
if (error_count < 32)
|
if (error_count < 32)
|
||||||
dmatest_mismatch(actual, pattern, i, counter,
|
dmatest_mismatch(actual, pattern, i,
|
||||||
is_srcbuf);
|
counter, is_srcbuf);
|
||||||
error_count++;
|
error_count++;
|
||||||
}
|
}
|
||||||
counter++;
|
counter++;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (error_count > 32)
|
if (error_count > 32)
|
||||||
pr_warning("%s: %u errors suppressed\n",
|
pr_warning("%s: %u errors suppressed\n",
|
||||||
@ -176,12 +196,17 @@ static unsigned int dmatest_verify(u8 *buf, unsigned int start,
|
|||||||
return error_count;
|
return error_count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void dmatest_callback(void *completion)
|
||||||
|
{
|
||||||
|
complete(completion);
|
||||||
|
}
|
||||||
|
|
||||||
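A condensed sketch of how this callback is wired into a descriptor in the hunks that follow; every name used here appears in the surrounding diff, and only the buffer and descriptor setup is elided.

/* Interrupt-driven completion instead of polling the copy status in a
 * loop: the callback fires complete(&cmp), and the test thread sleeps in
 * wait_for_completion_timeout() until then (or gives up after 3s). */
static int example_submit_and_wait(struct dma_chan *chan,
				   struct dma_async_tx_descriptor *tx)
{
	struct completion cmp;
	unsigned long tmo = msecs_to_jiffies(3000);
	enum dma_status status;
	dma_cookie_t cookie;

	init_completion(&cmp);
	tx->callback = dmatest_callback;	/* calls complete(&cmp) */
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	tmo = wait_for_completion_timeout(&cmp, tmo);
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (tmo == 0 || status != DMA_SUCCESS)
		return -EIO;	/* timed out or bad completion status */
	return 0;
}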
/*
|
/*
|
||||||
* This function repeatedly tests DMA transfers of various lengths and
|
* This function repeatedly tests DMA transfers of various lengths and
|
||||||
* offsets until it is told to exit by kthread_stop(). There may be
|
* offsets for a given operation type until it is told to exit by
|
||||||
* multiple threads running this function in parallel for a single
|
* kthread_stop(). There may be multiple threads running this function
|
||||||
* channel, and there may be multiple channels being tested in
|
* in parallel for a single channel, and there may be multiple channels
|
||||||
* parallel.
|
* being tested in parallel.
|
||||||
*
|
*
|
||||||
* Before each test, the source and destination buffer is initialized
|
* Before each test, the source and destination buffer is initialized
|
||||||
* with a known pattern. This pattern is different depending on
|
* with a known pattern. This pattern is different depending on
|
||||||
@ -201,25 +226,57 @@ static int dmatest_func(void *data)
|
|||||||
unsigned int total_tests = 0;
|
unsigned int total_tests = 0;
|
||||||
dma_cookie_t cookie;
|
dma_cookie_t cookie;
|
||||||
enum dma_status status;
|
enum dma_status status;
|
||||||
|
enum dma_ctrl_flags flags;
|
||||||
int ret;
|
int ret;
|
||||||
|
int src_cnt;
|
||||||
|
int dst_cnt;
|
||||||
|
int i;
|
||||||
|
|
||||||
thread_name = current->comm;
|
thread_name = current->comm;
|
||||||
|
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
|
|
||||||
if (!thread->srcbuf)
|
|
||||||
goto err_srcbuf;
|
|
||||||
thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
|
|
||||||
if (!thread->dstbuf)
|
|
||||||
goto err_dstbuf;
|
|
||||||
|
|
||||||
smp_rmb();
|
smp_rmb();
|
||||||
chan = thread->chan;
|
chan = thread->chan;
|
||||||
|
if (thread->type == DMA_MEMCPY)
|
||||||
|
src_cnt = dst_cnt = 1;
|
||||||
|
else if (thread->type == DMA_XOR) {
|
||||||
|
src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
|
||||||
|
dst_cnt = 1;
|
||||||
|
} else
|
||||||
|
goto err_srcs;
|
||||||
|
|
||||||
|
thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
|
||||||
|
if (!thread->srcs)
|
||||||
|
goto err_srcs;
|
||||||
|
for (i = 0; i < src_cnt; i++) {
|
||||||
|
thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
|
||||||
|
if (!thread->srcs[i])
|
||||||
|
goto err_srcbuf;
|
||||||
|
}
|
||||||
|
thread->srcs[i] = NULL;
|
||||||
|
|
||||||
|
thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
|
||||||
|
if (!thread->dsts)
|
||||||
|
goto err_dsts;
|
||||||
|
for (i = 0; i < dst_cnt; i++) {
|
||||||
|
thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
|
||||||
|
if (!thread->dsts[i])
|
||||||
|
goto err_dstbuf;
|
||||||
|
}
|
||||||
|
thread->dsts[i] = NULL;
|
||||||
|
|
||||||
|
set_user_nice(current, 10);
|
||||||
|
|
||||||
|
flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
|
||||||
|
|
||||||
while (!kthread_should_stop()) {
|
while (!kthread_should_stop()) {
|
||||||
struct dma_device *dev = chan->device;
|
struct dma_device *dev = chan->device;
|
||||||
struct dma_async_tx_descriptor *tx;
|
struct dma_async_tx_descriptor *tx = NULL;
|
||||||
dma_addr_t dma_src, dma_dest;
|
dma_addr_t dma_srcs[src_cnt];
|
||||||
|
dma_addr_t dma_dsts[dst_cnt];
|
||||||
|
struct completion cmp;
|
||||||
|
unsigned long tmo = msecs_to_jiffies(3000);
|
||||||
|
|
||||||
total_tests++;
|
total_tests++;
|
||||||
|
|
||||||
@ -227,22 +284,41 @@ static int dmatest_func(void *data)
|
|||||||
src_off = dmatest_random() % (test_buf_size - len + 1);
|
src_off = dmatest_random() % (test_buf_size - len + 1);
|
||||||
dst_off = dmatest_random() % (test_buf_size - len + 1);
|
dst_off = dmatest_random() % (test_buf_size - len + 1);
|
||||||
|
|
||||||
dmatest_init_srcbuf(thread->srcbuf, src_off, len);
|
dmatest_init_srcs(thread->srcs, src_off, len);
|
||||||
dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
|
dmatest_init_dsts(thread->dsts, dst_off, len);
|
||||||
|
|
||||||
dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off,
|
for (i = 0; i < src_cnt; i++) {
|
||||||
len, DMA_TO_DEVICE);
|
u8 *buf = thread->srcs[i] + src_off;
|
||||||
|
|
||||||
|
dma_srcs[i] = dma_map_single(dev->dev, buf, len,
|
||||||
|
DMA_TO_DEVICE);
|
||||||
|
}
|
||||||
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
|
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
|
||||||
dma_dest = dma_map_single(dev->dev, thread->dstbuf,
|
for (i = 0; i < dst_cnt; i++) {
|
||||||
test_buf_size, DMA_BIDIRECTIONAL);
|
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
|
||||||
|
test_buf_size,
|
||||||
|
DMA_BIDIRECTIONAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (thread->type == DMA_MEMCPY)
|
||||||
|
tx = dev->device_prep_dma_memcpy(chan,
|
||||||
|
dma_dsts[0] + dst_off,
|
||||||
|
dma_srcs[0], len,
|
||||||
|
flags);
|
||||||
|
else if (thread->type == DMA_XOR)
|
||||||
|
tx = dev->device_prep_dma_xor(chan,
|
||||||
|
dma_dsts[0] + dst_off,
|
||||||
|
dma_srcs, xor_sources,
|
||||||
|
len, flags);
|
||||||
|
|
||||||
tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off,
|
|
||||||
dma_src, len,
|
|
||||||
DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP);
|
|
||||||
if (!tx) {
|
if (!tx) {
|
||||||
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
|
for (i = 0; i < src_cnt; i++)
|
||||||
dma_unmap_single(dev->dev, dma_dest,
|
dma_unmap_single(dev->dev, dma_srcs[i], len,
|
||||||
test_buf_size, DMA_BIDIRECTIONAL);
|
DMA_TO_DEVICE);
|
||||||
|
for (i = 0; i < dst_cnt; i++)
|
||||||
|
dma_unmap_single(dev->dev, dma_dsts[i],
|
||||||
|
test_buf_size,
|
||||||
|
DMA_BIDIRECTIONAL);
|
||||||
pr_warning("%s: #%u: prep error with src_off=0x%x "
|
pr_warning("%s: #%u: prep error with src_off=0x%x "
|
||||||
"dst_off=0x%x len=0x%x\n",
|
"dst_off=0x%x len=0x%x\n",
|
||||||
thread_name, total_tests - 1,
|
thread_name, total_tests - 1,
|
||||||
@ -251,7 +327,10 @@ static int dmatest_func(void *data)
|
|||||||
failed_tests++;
|
failed_tests++;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
tx->callback = NULL;
|
|
||||||
|
init_completion(&cmp);
|
||||||
|
tx->callback = dmatest_callback;
|
||||||
|
tx->callback_param = &cmp;
|
||||||
cookie = tx->tx_submit(tx);
|
cookie = tx->tx_submit(tx);
|
||||||
|
|
||||||
if (dma_submit_error(cookie)) {
|
if (dma_submit_error(cookie)) {
|
||||||
@ -263,44 +342,50 @@ static int dmatest_func(void *data)
|
|||||||
failed_tests++;
|
failed_tests++;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
dma_async_memcpy_issue_pending(chan);
|
dma_async_issue_pending(chan);
|
||||||
|
|
||||||
do {
|
tmo = wait_for_completion_timeout(&cmp, tmo);
|
||||||
msleep(1);
|
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
|
||||||
status = dma_async_memcpy_complete(
|
|
||||||
chan, cookie, NULL, NULL);
|
|
||||||
} while (status == DMA_IN_PROGRESS);
|
|
||||||
|
|
||||||
if (status == DMA_ERROR) {
|
if (tmo == 0) {
|
||||||
pr_warning("%s: #%u: error during copy\n",
|
pr_warning("%s: #%u: test timed out\n",
|
||||||
thread_name, total_tests - 1);
|
thread_name, total_tests - 1);
|
||||||
failed_tests++;
|
failed_tests++;
|
||||||
continue;
|
continue;
|
||||||
|
} else if (status != DMA_SUCCESS) {
|
||||||
|
pr_warning("%s: #%u: got completion callback,"
|
||||||
|
" but status is \'%s\'\n",
|
||||||
|
thread_name, total_tests - 1,
|
||||||
|
status == DMA_ERROR ? "error" : "in progress");
|
||||||
|
failed_tests++;
|
||||||
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
|
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
|
||||||
dma_unmap_single(dev->dev, dma_dest,
|
for (i = 0; i < dst_cnt; i++)
|
||||||
test_buf_size, DMA_BIDIRECTIONAL);
|
dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
|
||||||
|
DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
error_count = 0;
|
error_count = 0;
|
||||||
|
|
||||||
pr_debug("%s: verifying source buffer...\n", thread_name);
|
pr_debug("%s: verifying source buffer...\n", thread_name);
|
||||||
error_count += dmatest_verify(thread->srcbuf, 0, src_off,
|
error_count += dmatest_verify(thread->srcs, 0, src_off,
|
||||||
0, PATTERN_SRC, true);
|
0, PATTERN_SRC, true);
|
||||||
error_count += dmatest_verify(thread->srcbuf, src_off,
|
error_count += dmatest_verify(thread->srcs, src_off,
|
||||||
src_off + len, src_off,
|
src_off + len, src_off,
|
||||||
PATTERN_SRC | PATTERN_COPY, true);
|
PATTERN_SRC | PATTERN_COPY, true);
|
||||||
error_count += dmatest_verify(thread->srcbuf, src_off + len,
|
error_count += dmatest_verify(thread->srcs, src_off + len,
|
||||||
test_buf_size, src_off + len,
|
test_buf_size, src_off + len,
|
||||||
PATTERN_SRC, true);
|
PATTERN_SRC, true);
|
||||||
|
|
||||||
pr_debug("%s: verifying dest buffer...\n",
|
pr_debug("%s: verifying dest buffer...\n",
|
||||||
thread->task->comm);
|
thread->task->comm);
|
||||||
error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
|
error_count += dmatest_verify(thread->dsts, 0, dst_off,
|
||||||
0, PATTERN_DST, false);
|
0, PATTERN_DST, false);
|
||||||
error_count += dmatest_verify(thread->dstbuf, dst_off,
|
error_count += dmatest_verify(thread->dsts, dst_off,
|
||||||
dst_off + len, src_off,
|
dst_off + len, src_off,
|
||||||
PATTERN_SRC | PATTERN_COPY, false);
|
PATTERN_SRC | PATTERN_COPY, false);
|
||||||
error_count += dmatest_verify(thread->dstbuf, dst_off + len,
|
error_count += dmatest_verify(thread->dsts, dst_off + len,
|
||||||
test_buf_size, dst_off + len,
|
test_buf_size, dst_off + len,
|
||||||
PATTERN_DST, false);
|
PATTERN_DST, false);
|
||||||
|
|
||||||
@ -319,10 +404,16 @@ static int dmatest_func(void *data)
|
|||||||
}
|
}
|
||||||
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
kfree(thread->dstbuf);
|
for (i = 0; thread->dsts[i]; i++)
|
||||||
|
kfree(thread->dsts[i]);
|
||||||
err_dstbuf:
|
err_dstbuf:
|
||||||
kfree(thread->srcbuf);
|
kfree(thread->dsts);
|
||||||
|
err_dsts:
|
||||||
|
for (i = 0; thread->srcs[i]; i++)
|
||||||
|
kfree(thread->srcs[i]);
|
||||||
err_srcbuf:
|
err_srcbuf:
|
||||||
|
kfree(thread->srcs);
|
||||||
|
err_srcs:
|
||||||
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
|
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
|
||||||
thread_name, total_tests, failed_tests, ret);
|
thread_name, total_tests, failed_tests, ret);
|
||||||
return ret;
|
return ret;
|
||||||
@ -344,11 +435,54 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
|
|||||||
kfree(dtc);
|
kfree(dtc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
|
||||||
|
{
|
||||||
|
struct dmatest_thread *thread;
|
||||||
|
struct dma_chan *chan = dtc->chan;
|
||||||
|
char *op;
|
||||||
|
unsigned int i;
|
||||||
|
|
||||||
|
if (type == DMA_MEMCPY)
|
||||||
|
op = "copy";
|
||||||
|
else if (type == DMA_XOR)
|
||||||
|
op = "xor";
|
||||||
|
else
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
for (i = 0; i < threads_per_chan; i++) {
|
||||||
|
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
|
||||||
|
if (!thread) {
|
||||||
|
pr_warning("dmatest: No memory for %s-%s%u\n",
|
||||||
|
dma_chan_name(chan), op, i);
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
thread->chan = dtc->chan;
|
||||||
|
thread->type = type;
|
||||||
|
smp_wmb();
|
||||||
|
thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
|
||||||
|
dma_chan_name(chan), op, i);
|
||||||
|
if (IS_ERR(thread->task)) {
|
||||||
|
pr_warning("dmatest: Failed to run thread %s-%s%u\n",
|
||||||
|
dma_chan_name(chan), op, i);
|
||||||
|
kfree(thread);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* srcbuf and dstbuf are allocated by the thread itself */
|
||||||
|
|
||||||
|
list_add_tail(&thread->node, &dtc->threads);
|
||||||
|
}
|
||||||
|
|
||||||
|
return i;
|
||||||
|
}
|
||||||
|
|
||||||
static int dmatest_add_channel(struct dma_chan *chan)
|
static int dmatest_add_channel(struct dma_chan *chan)
|
||||||
{
|
{
|
||||||
struct dmatest_chan *dtc;
|
struct dmatest_chan *dtc;
|
||||||
struct dmatest_thread *thread;
|
struct dma_device *dma_dev = chan->device;
|
||||||
unsigned int i;
|
unsigned int thread_count = 0;
|
||||||
|
unsigned int cnt;
|
||||||
|
|
||||||
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
|
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
|
||||||
if (!dtc) {
|
if (!dtc) {
|
||||||
@ -359,30 +493,17 @@ static int dmatest_add_channel(struct dma_chan *chan)
|
|||||||
dtc->chan = chan;
|
dtc->chan = chan;
|
||||||
INIT_LIST_HEAD(&dtc->threads);
|
INIT_LIST_HEAD(&dtc->threads);
|
||||||
|
|
||||||
for (i = 0; i < threads_per_chan; i++) {
|
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
|
||||||
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
|
cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
|
||||||
if (!thread) {
|
thread_count += cnt > 0 ?: 0;
|
||||||
pr_warning("dmatest: No memory for %s-test%u\n",
|
|
||||||
dma_chan_name(chan), i);
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
thread->chan = dtc->chan;
|
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
|
||||||
smp_wmb();
|
cnt = dmatest_add_threads(dtc, DMA_XOR);
|
||||||
thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
|
thread_count += cnt > 0 ?: 0;
|
||||||
dma_chan_name(chan), i);
|
|
||||||
if (IS_ERR(thread->task)) {
|
|
||||||
pr_warning("dmatest: Failed to run thread %s-test%u\n",
|
|
||||||
dma_chan_name(chan), i);
|
|
||||||
kfree(thread);
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* srcbuf and dstbuf are allocated by the thread itself */
|
pr_info("dmatest: Started %u threads using %s\n",
|
||||||
|
thread_count, dma_chan_name(chan));
|
||||||
list_add_tail(&thread->node, &dtc->threads);
|
|
||||||
}
|
|
||||||
|
|
||||||
pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
|
|
||||||
|
|
||||||
list_add_tail(&dtc->node, &dmatest_channels);
|
list_add_tail(&dtc->node, &dmatest_channels);
|
||||||
nr_channels++;
|
nr_channels++;
|
||||||
|
@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||||||
dwc_descriptor_complete(dwc, bad_desc);
|
dwc_descriptor_complete(dwc, bad_desc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* --------------------- Cyclic DMA API extensions -------------------- */
|
||||||
|
|
||||||
|
inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
|
||||||
|
{
|
||||||
|
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||||
|
return channel_readl(dwc, SAR);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dw_dma_get_src_addr);
|
||||||
|
|
||||||
|
inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
|
||||||
|
{
|
||||||
|
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||||
|
return channel_readl(dwc, DAR);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dw_dma_get_dst_addr);
|
||||||
|
|
||||||
|
/* called with dwc->lock held and all DMAC interrupts disabled */
|
||||||
|
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
|
||||||
|
u32 status_block, u32 status_err, u32 status_xfer)
|
||||||
|
{
|
||||||
|
if (status_block & dwc->mask) {
|
||||||
|
void (*callback)(void *param);
|
||||||
|
void *callback_param;
|
||||||
|
|
||||||
|
dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
|
||||||
|
channel_readl(dwc, LLP));
|
||||||
|
dma_writel(dw, CLEAR.BLOCK, dwc->mask);
|
||||||
|
|
||||||
|
callback = dwc->cdesc->period_callback;
|
||||||
|
callback_param = dwc->cdesc->period_callback_param;
|
||||||
|
if (callback) {
|
||||||
|
spin_unlock(&dwc->lock);
|
||||||
|
callback(callback_param);
|
||||||
|
spin_lock(&dwc->lock);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Error and transfer complete are highly unlikely, and will most
|
||||||
|
* likely be due to a configuration error by the user.
|
||||||
|
*/
|
||||||
|
if (unlikely(status_err & dwc->mask) ||
|
||||||
|
unlikely(status_xfer & dwc->mask)) {
|
||||||
|
int i;
|
||||||
|
|
||||||
|
dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
|
||||||
|
"interrupt, stopping DMA transfer\n",
|
||||||
|
status_xfer ? "xfer" : "error");
|
||||||
|
dev_err(chan2dev(&dwc->chan),
|
||||||
|
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
|
||||||
|
channel_readl(dwc, SAR),
|
||||||
|
channel_readl(dwc, DAR),
|
||||||
|
channel_readl(dwc, LLP),
|
||||||
|
channel_readl(dwc, CTL_HI),
|
||||||
|
channel_readl(dwc, CTL_LO));
|
||||||
|
|
||||||
|
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||||
|
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||||
|
cpu_relax();
|
||||||
|
|
||||||
|
/* make sure DMA does not restart by loading a new list */
|
||||||
|
channel_writel(dwc, LLP, 0);
|
||||||
|
channel_writel(dwc, CTL_LO, 0);
|
||||||
|
channel_writel(dwc, CTL_HI, 0);
|
||||||
|
|
||||||
|
dma_writel(dw, CLEAR.BLOCK, dwc->mask);
|
||||||
|
dma_writel(dw, CLEAR.ERROR, dwc->mask);
|
||||||
|
dma_writel(dw, CLEAR.XFER, dwc->mask);
|
||||||
|
|
||||||
|
for (i = 0; i < dwc->cdesc->periods; i++)
|
||||||
|
dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ------------------------------------------------------------------------- */
|
||||||
|
|
||||||
static void dw_dma_tasklet(unsigned long data)
|
static void dw_dma_tasklet(unsigned long data)
|
||||||
{
|
{
|
||||||
struct dw_dma *dw = (struct dw_dma *)data;
|
struct dw_dma *dw = (struct dw_dma *)data;
|
||||||
@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data)
|
|||||||
for (i = 0; i < dw->dma.chancnt; i++) {
|
for (i = 0; i < dw->dma.chancnt; i++) {
|
||||||
dwc = &dw->chan[i];
|
dwc = &dw->chan[i];
|
||||||
spin_lock(&dwc->lock);
|
spin_lock(&dwc->lock);
|
||||||
if (status_err & (1 << i))
|
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
|
||||||
|
dwc_handle_cyclic(dw, dwc, status_block, status_err,
|
||||||
|
status_xfer);
|
||||||
|
else if (status_err & (1 << i))
|
||||||
dwc_handle_error(dw, dwc);
|
dwc_handle_error(dw, dwc);
|
||||||
else if ((status_block | status_xfer) & (1 << i))
|
else if ((status_block | status_xfer) & (1 << i))
|
||||||
dwc_scan_descriptors(dw, dwc);
|
dwc_scan_descriptors(dw, dwc);
|
||||||
@ -826,7 +905,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||||||
dma_async_tx_descriptor_init(&desc->txd, chan);
|
dma_async_tx_descriptor_init(&desc->txd, chan);
|
||||||
desc->txd.tx_submit = dwc_tx_submit;
|
desc->txd.tx_submit = dwc_tx_submit;
|
||||||
desc->txd.flags = DMA_CTRL_ACK;
|
desc->txd.flags = DMA_CTRL_ACK;
|
||||||
INIT_LIST_HEAD(&desc->txd.tx_list);
|
|
||||||
desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
|
desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
|
||||||
sizeof(desc->lli), DMA_TO_DEVICE);
|
sizeof(desc->lli), DMA_TO_DEVICE);
|
||||||
dwc_desc_put(dwc, desc);
|
dwc_desc_put(dwc, desc);
|
||||||
@ -884,6 +962,257 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
|
|||||||
dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
|
dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* --------------------- Cyclic DMA API extensions -------------------- */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* dw_dma_cyclic_start - start the cyclic DMA transfer
|
||||||
|
* @chan: the DMA channel to start
|
||||||
|
*
|
||||||
|
* Must be called with soft interrupts disabled. Returns zero on success or
|
||||||
|
* -errno on failure.
|
||||||
|
*/
|
||||||
|
int dw_dma_cyclic_start(struct dma_chan *chan)
|
||||||
|
{
|
||||||
|
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||||
|
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
||||||
|
|
||||||
|
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
|
||||||
|
dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
|
||||||
|
return -ENODEV;
|
||||||
|
}
|
||||||
|
|
||||||
|
spin_lock(&dwc->lock);
|
||||||
|
|
||||||
|
/* assert channel is idle */
|
||||||
|
if (dma_readl(dw, CH_EN) & dwc->mask) {
|
||||||
|
dev_err(chan2dev(&dwc->chan),
|
||||||
|
"BUG: Attempted to start non-idle channel\n");
|
||||||
|
dev_err(chan2dev(&dwc->chan),
|
||||||
|
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
|
||||||
|
channel_readl(dwc, SAR),
|
||||||
|
channel_readl(dwc, DAR),
|
||||||
|
channel_readl(dwc, LLP),
|
||||||
|
channel_readl(dwc, CTL_HI),
|
||||||
|
channel_readl(dwc, CTL_LO));
|
||||||
|
spin_unlock(&dwc->lock);
|
||||||
|
return -EBUSY;
|
||||||
|
}
|
||||||
|
|
||||||
|
dma_writel(dw, CLEAR.BLOCK, dwc->mask);
|
||||||
|
dma_writel(dw, CLEAR.ERROR, dwc->mask);
|
||||||
|
dma_writel(dw, CLEAR.XFER, dwc->mask);
|
||||||
|
|
||||||
|
/* setup DMAC channel registers */
|
||||||
|
channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
|
||||||
|
channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
|
||||||
|
channel_writel(dwc, CTL_HI, 0);
|
||||||
|
|
||||||
|
channel_set_bit(dw, CH_EN, dwc->mask);
|
||||||
|
|
||||||
|
spin_unlock(&dwc->lock);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dw_dma_cyclic_start);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* dw_dma_cyclic_stop - stop the cyclic DMA transfer
|
||||||
|
* @chan: the DMA channel to stop
|
||||||
|
*
|
||||||
|
* Must be called with soft interrupts disabled.
|
||||||
|
*/
|
||||||
|
void dw_dma_cyclic_stop(struct dma_chan *chan)
|
||||||
|
{
|
||||||
|
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||||
|
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
||||||
|
|
||||||
|
spin_lock(&dwc->lock);
|
||||||
|
|
||||||
|
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||||
|
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||||
|
cpu_relax();
|
||||||
|
|
||||||
|
spin_unlock(&dwc->lock);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dw_dma_cyclic_stop);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* dw_dma_cyclic_prep - prepare the cyclic DMA transfer
|
||||||
|
* @chan: the DMA channel to prepare
|
||||||
|
* @buf_addr: physical DMA address where the buffer starts
|
||||||
|
* @buf_len: total number of bytes for the entire buffer
|
||||||
|
* @period_len: number of bytes for each period
|
||||||
|
* @direction: transfer direction, to or from device
|
||||||
|
*
|
||||||
|
* Must be called before trying to start the transfer. Returns a valid struct
|
||||||
|
* dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
|
||||||
|
*/
|
||||||
|
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
||||||
|
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
|
||||||
|
enum dma_data_direction direction)
|
||||||
|
{
|
||||||
|
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||||
|
struct dw_cyclic_desc *cdesc;
|
||||||
|
struct dw_cyclic_desc *retval = NULL;
|
||||||
|
struct dw_desc *desc;
|
||||||
|
struct dw_desc *last = NULL;
|
||||||
|
struct dw_dma_slave *dws = chan->private;
|
||||||
|
unsigned long was_cyclic;
|
||||||
|
unsigned int reg_width;
|
||||||
|
unsigned int periods;
|
||||||
|
unsigned int i;
|
||||||
|
|
||||||
|
spin_lock_bh(&dwc->lock);
|
||||||
|
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
|
||||||
|
spin_unlock_bh(&dwc->lock);
|
||||||
|
dev_dbg(chan2dev(&dwc->chan),
|
||||||
|
"queue and/or active list are not empty\n");
|
||||||
|
return ERR_PTR(-EBUSY);
|
||||||
|
}
|
||||||
|
|
||||||
|
was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
|
||||||
|
spin_unlock_bh(&dwc->lock);
|
||||||
|
if (was_cyclic) {
|
||||||
|
dev_dbg(chan2dev(&dwc->chan),
|
||||||
|
"channel already prepared for cyclic DMA\n");
|
||||||
|
return ERR_PTR(-EBUSY);
|
||||||
|
}
|
||||||
|
|
||||||
|
retval = ERR_PTR(-EINVAL);
|
||||||
|
reg_width = dws->reg_width;
|
||||||
|
periods = buf_len / period_len;
|
||||||
|
|
||||||
|
/* Check for too big/unaligned periods and unaligned DMA buffer. */
|
||||||
|
if (period_len > (DWC_MAX_COUNT << reg_width))
|
||||||
|
goto out_err;
|
||||||
|
if (unlikely(period_len & ((1 << reg_width) - 1)))
|
||||||
|
goto out_err;
|
||||||
|
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
|
||||||
|
goto out_err;
|
||||||
|
if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
|
||||||
|
goto out_err;
|
||||||
|
|
||||||
|
retval = ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
|
if (periods > NR_DESCS_PER_CHANNEL)
|
||||||
|
goto out_err;
|
||||||
|
|
||||||
|
cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
|
||||||
|
if (!cdesc)
|
||||||
|
goto out_err;
|
||||||
|
|
||||||
|
cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
|
||||||
|
if (!cdesc->desc)
|
||||||
|
goto out_err_alloc;
|
||||||
|
|
||||||
|
for (i = 0; i < periods; i++) {
|
||||||
|
desc = dwc_desc_get(dwc);
|
||||||
|
if (!desc)
|
||||||
|
goto out_err_desc_get;
|
||||||
|
|
||||||
|
switch (direction) {
|
||||||
|
case DMA_TO_DEVICE:
|
||||||
|
desc->lli.dar = dws->tx_reg;
|
||||||
|
desc->lli.sar = buf_addr + (period_len * i);
|
||||||
|
desc->lli.ctllo = (DWC_DEFAULT_CTLLO
|
||||||
|
| DWC_CTLL_DST_WIDTH(reg_width)
|
||||||
|
| DWC_CTLL_SRC_WIDTH(reg_width)
|
||||||
|
| DWC_CTLL_DST_FIX
|
||||||
|
| DWC_CTLL_SRC_INC
|
||||||
|
| DWC_CTLL_FC_M2P
|
||||||
|
| DWC_CTLL_INT_EN);
|
||||||
|
break;
|
||||||
|
case DMA_FROM_DEVICE:
|
||||||
|
desc->lli.dar = buf_addr + (period_len * i);
|
||||||
|
desc->lli.sar = dws->rx_reg;
|
||||||
|
desc->lli.ctllo = (DWC_DEFAULT_CTLLO
|
||||||
|
| DWC_CTLL_SRC_WIDTH(reg_width)
|
||||||
|
| DWC_CTLL_DST_WIDTH(reg_width)
|
||||||
|
| DWC_CTLL_DST_INC
|
||||||
|
| DWC_CTLL_SRC_FIX
|
||||||
|
| DWC_CTLL_FC_P2M
|
||||||
|
| DWC_CTLL_INT_EN);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
desc->lli.ctlhi = (period_len >> reg_width);
|
||||||
|
cdesc->desc[i] = desc;
|
||||||
|
|
||||||
|
if (last) {
|
||||||
|
last->lli.llp = desc->txd.phys;
|
||||||
|
dma_sync_single_for_device(chan2parent(chan),
|
||||||
|
last->txd.phys, sizeof(last->lli),
|
||||||
|
DMA_TO_DEVICE);
|
||||||
|
}
|
||||||
|
|
||||||
|
last = desc;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* lets make a cyclic list */
|
||||||
|
last->lli.llp = cdesc->desc[0]->txd.phys;
|
||||||
|
dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
|
||||||
|
sizeof(last->lli), DMA_TO_DEVICE);
|
||||||
|
|
||||||
|
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
|
||||||
|
"period %zu periods %d\n", buf_addr, buf_len,
|
||||||
|
period_len, periods);
|
||||||
|
|
||||||
|
cdesc->periods = periods;
|
||||||
|
dwc->cdesc = cdesc;
|
||||||
|
|
||||||
|
return cdesc;
|
||||||
|
|
||||||
|
out_err_desc_get:
|
||||||
|
while (i--)
|
||||||
|
dwc_desc_put(dwc, cdesc->desc[i]);
|
||||||
|
out_err_alloc:
|
||||||
|
kfree(cdesc);
|
||||||
|
out_err:
|
||||||
|
clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
|
||||||
|
return (struct dw_cyclic_desc *)retval;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dw_dma_cyclic_prep);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* dw_dma_cyclic_free - free a prepared cyclic DMA transfer
|
||||||
|
* @chan: the DMA channel to free
|
||||||
|
*/
|
||||||
|
void dw_dma_cyclic_free(struct dma_chan *chan)
|
||||||
|
{
|
||||||
|
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||||
|
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
||||||
|
struct dw_cyclic_desc *cdesc = dwc->cdesc;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
|
||||||
|
|
||||||
|
if (!cdesc)
|
||||||
|
return;
|
||||||
|
|
||||||
|
spin_lock_bh(&dwc->lock);
|
||||||
|
|
||||||
|
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||||
|
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||||
|
cpu_relax();
|
||||||
|
|
||||||
|
dma_writel(dw, CLEAR.BLOCK, dwc->mask);
|
||||||
|
dma_writel(dw, CLEAR.ERROR, dwc->mask);
|
||||||
|
dma_writel(dw, CLEAR.XFER, dwc->mask);
|
||||||
|
|
||||||
|
spin_unlock_bh(&dwc->lock);
|
||||||
|
|
||||||
|
for (i = 0; i < cdesc->periods; i++)
|
||||||
|
dwc_desc_put(dwc, cdesc->desc[i]);
|
||||||
|
|
||||||
|
kfree(cdesc->desc);
|
||||||
|
kfree(cdesc);
|
||||||
|
|
||||||
|
clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dw_dma_cyclic_free);
|
||||||
|
|
||||||
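Putting the new cyclic entry points together, a minimal usage sketch. It assumes 'chan' is a DesignWare slave channel whose chan->private already points at a configured struct dw_dma_slave, and that 'buf' is the DMA address of a ring buffer of buf_len bytes; error handling is abbreviated.

/* Illustrative: stream to a peripheral from a self-restarting ring of
 * buf_len / period_len blocks using the cyclic API added above. */
static int example_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
			     size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;
	int ret;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/* optionally get a callback at the end of every period */
	cdesc->period_callback = NULL;
	cdesc->period_callback_param = NULL;

	/* start/stop must run with soft interrupts disabled */
	local_bh_disable();
	ret = dw_dma_cyclic_start(chan);
	local_bh_enable();
	if (ret) {
		dw_dma_cyclic_free(chan);
		return ret;
	}

	/* ... keep the ring filled until the stream ends ... */

	local_bh_disable();
	dw_dma_cyclic_stop(chan);
	local_bh_enable();

	dw_dma_cyclic_free(chan);
	return 0;
}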
/*----------------------------------------------------------------------*/
|
/*----------------------------------------------------------------------*/
|
||||||
|
|
||||||
static void dw_dma_off(struct dw_dma *dw)
|
static void dw_dma_off(struct dw_dma *dw)
|
||||||
|
@ -126,6 +126,10 @@ struct dw_dma_regs {
|
|||||||
|
|
||||||
#define DW_REGLEN 0x400
|
#define DW_REGLEN 0x400
|
||||||
|
|
||||||
|
enum dw_dmac_flags {
|
||||||
|
DW_DMA_IS_CYCLIC = 0,
|
||||||
|
};
|
||||||
|
|
||||||
struct dw_dma_chan {
|
struct dw_dma_chan {
|
||||||
struct dma_chan chan;
|
struct dma_chan chan;
|
||||||
void __iomem *ch_regs;
|
void __iomem *ch_regs;
|
||||||
@ -134,10 +138,12 @@ struct dw_dma_chan {
|
|||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
|
|
||||||
/* these other elements are all protected by lock */
|
/* these other elements are all protected by lock */
|
||||||
|
unsigned long flags;
|
||||||
dma_cookie_t completed;
|
dma_cookie_t completed;
|
||||||
struct list_head active_list;
|
struct list_head active_list;
|
||||||
struct list_head queue;
|
struct list_head queue;
|
||||||
struct list_head free_list;
|
struct list_head free_list;
|
||||||
|
struct dw_cyclic_desc *cdesc;
|
||||||
|
|
||||||
unsigned int descs_allocated;
|
unsigned int descs_allocated;
|
||||||
};
|
};
|
||||||
@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
|
|||||||
return container_of(chan, struct dw_dma_chan, chan);
|
return container_of(chan, struct dw_dma_chan, chan);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
struct dw_dma {
|
struct dw_dma {
|
||||||
struct dma_device dma;
|
struct dma_device dma;
|
||||||
void __iomem *regs;
|
void __iomem *regs;
|
||||||
|
@ -354,7 +354,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
|
|||||||
dma_async_tx_descriptor_init(&desc_sw->async_tx,
|
dma_async_tx_descriptor_init(&desc_sw->async_tx,
|
||||||
&fsl_chan->common);
|
&fsl_chan->common);
|
||||||
desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
|
desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
|
||||||
INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
|
|
||||||
desc_sw->async_tx.phys = pdesc;
|
desc_sw->async_tx.phys = pdesc;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -693,7 +693,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
|
|||||||
desc_sw->async_tx.tx_submit = ioat2_tx_submit;
|
desc_sw->async_tx.tx_submit = ioat2_tx_submit;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
|
|
||||||
|
|
||||||
desc_sw->hw = desc;
|
desc_sw->hw = desc;
|
||||||
desc_sw->async_tx.phys = phys;
|
desc_sw->async_tx.phys = phys;
|
||||||
|
@ -498,7 +498,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
|
|||||||
slot->async_tx.tx_submit = iop_adma_tx_submit;
|
slot->async_tx.tx_submit = iop_adma_tx_submit;
|
||||||
INIT_LIST_HEAD(&slot->chain_node);
|
INIT_LIST_HEAD(&slot->chain_node);
|
||||||
INIT_LIST_HEAD(&slot->slot_node);
|
INIT_LIST_HEAD(&slot->slot_node);
|
||||||
INIT_LIST_HEAD(&slot->async_tx.tx_list);
|
|
||||||
hw_desc = (char *) iop_chan->device->dma_desc_pool;
|
hw_desc = (char *) iop_chan->device->dma_desc_pool;
|
||||||
slot->async_tx.phys =
|
slot->async_tx.phys =
|
||||||
(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
|
(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
|
||||||
|
@ -28,6 +28,9 @@
|
|||||||
#define FS_VF_IN_VALID 0x00000002
|
#define FS_VF_IN_VALID 0x00000002
|
||||||
#define FS_ENC_IN_VALID 0x00000001
|
#define FS_ENC_IN_VALID 0x00000001
|
||||||
|
|
||||||
|
static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
|
||||||
|
bool wait_for_stop);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* There can be only one, we could allocate it dynamically, but then we'd have
|
* There can be only one, we could allocate it dynamically, but then we'd have
|
||||||
* to add an extra parameter to some functions, and use something as ugly as
|
* to add an extra parameter to some functions, and use something as ugly as
|
||||||
@ -107,7 +110,7 @@ static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Enable / disable direct write to memory by the Camera Sensor Interface */
|
/* Enable direct write to memory by the Camera Sensor Interface */
|
||||||
static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
|
static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
|
||||||
{
|
{
|
||||||
uint32_t ic_conf, mask;
|
uint32_t ic_conf, mask;
|
||||||
@ -126,6 +129,7 @@ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
|
|||||||
idmac_write_icreg(ipu, ic_conf, IC_CONF);
|
idmac_write_icreg(ipu, ic_conf, IC_CONF);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Called under spin_lock_irqsave(&ipu_data.lock) */
|
||||||
static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
|
static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
|
||||||
{
|
{
|
||||||
uint32_t ic_conf, mask;
|
uint32_t ic_conf, mask;
|
||||||
@ -422,7 +426,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
|
|||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
dev_err(ipu_data.dev,
|
dev_err(ipu_data.dev,
|
||||||
"mxc ipu: unimplemented pixel format %d\n", pixel_fmt);
|
"mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -433,20 +437,20 @@ static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
|
|||||||
uint16_t burst_pixels)
|
uint16_t burst_pixels)
|
||||||
{
|
{
|
||||||
params->pp.npb = burst_pixels - 1;
|
params->pp.npb = burst_pixels - 1;
|
||||||
};
|
}
|
||||||
|
|
||||||
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
|
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
|
||||||
dma_addr_t buf0, dma_addr_t buf1)
|
dma_addr_t buf0, dma_addr_t buf1)
|
||||||
{
|
{
|
||||||
params->pp.eba0 = buf0;
|
params->pp.eba0 = buf0;
|
||||||
params->pp.eba1 = buf1;
|
params->pp.eba1 = buf1;
|
||||||
};
|
}
|
||||||
|
|
||||||
static void ipu_ch_param_set_rotation(union chan_param_mem *params,
|
static void ipu_ch_param_set_rotation(union chan_param_mem *params,
|
||||||
enum ipu_rotate_mode rotate)
|
enum ipu_rotate_mode rotate)
|
||||||
{
|
{
|
||||||
params->pp.bam = rotate;
|
params->pp.bam = rotate;
|
||||||
};
|
}
|
||||||
|
|
||||||
static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
|
static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
|
||||||
uint32_t num_words)
|
uint32_t num_words)
|
||||||
@ -571,7 +575,7 @@ static uint32_t dma_param_addr(uint32_t dma_ch)
|
|||||||
{
|
{
|
||||||
/* Channel Parameter Memory */
|
/* Channel Parameter Memory */
|
||||||
return 0x10000 | (dma_ch << 4);
|
return 0x10000 | (dma_ch << 4);
|
||||||
};
|
}
|
||||||
|
|
||||||
static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
|
static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
|
||||||
bool prio)
|
bool prio)
|
||||||
@ -611,7 +615,8 @@ static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* ipu_enable_channel() - enable an IPU channel.
|
* ipu_enable_channel() - enable an IPU channel.
|
||||||
* @channel: channel ID.
|
* @idmac: IPU DMAC context.
|
||||||
|
* @ichan: IDMAC channel.
|
||||||
* @return: 0 on success or negative error code on failure.
|
* @return: 0 on success or negative error code on failure.
|
||||||
*/
|
*/
|
||||||
static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
|
static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
|
||||||
@ -649,7 +654,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
|
* ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
|
||||||
* @channel: channel ID.
|
* @ichan: IDMAC channel.
|
||||||
* @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
|
* @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
|
||||||
* @width: width of buffer in pixels.
|
* @width: width of buffer in pixels.
|
||||||
* @height: height of buffer in pixels.
|
* @height: height of buffer in pixels.
|
||||||
@ -687,7 +692,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* IC channel's stride must be a multiple of 8 pixels */
|
/* IC channel's stride must be a multiple of 8 pixels */
|
||||||
if ((channel <= 13) && (stride % 8)) {
|
if ((channel <= IDMAC_IC_13) && (stride % 8)) {
|
||||||
dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
|
dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
@ -752,7 +757,7 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* ipu_update_channel_buffer() - update physical address of a channel buffer.
|
* ipu_update_channel_buffer() - update physical address of a channel buffer.
|
||||||
* @channel: channel ID.
|
* @ichan: IDMAC channel.
|
||||||
* @buffer_n: buffer number to update.
|
* @buffer_n: buffer number to update.
|
||||||
* 0 or 1 are the only valid values.
|
* 0 or 1 are the only valid values.
|
||||||
* @phyaddr: buffer physical address.
|
* @phyaddr: buffer physical address.
|
||||||
@ -760,9 +765,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
|
|||||||
* function will fail if the buffer is set to ready.
|
* function will fail if the buffer is set to ready.
|
||||||
*/
|
*/
|
||||||
/* Called under spin_lock(_irqsave)(&ichan->lock) */
|
/* Called under spin_lock(_irqsave)(&ichan->lock) */
|
||||||
static int ipu_update_channel_buffer(enum ipu_channel channel,
|
-static int ipu_update_channel_buffer(enum ipu_channel channel,
+static int ipu_update_channel_buffer(struct idmac_channel *ichan,
 				     int buffer_n, dma_addr_t phyaddr)
 {
+	enum ipu_channel channel = ichan->dma_chan.chan_id;
 	uint32_t reg;
 	unsigned long flags;
 
@@ -771,8 +777,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 	if (buffer_n == 0) {
 		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
 		if (reg & (1UL << channel)) {
-			spin_unlock_irqrestore(&ipu_data.lock, flags);
-			return -EACCES;
+			ipu_ic_disable_task(&ipu_data, channel);
+			ichan->status = IPU_CHANNEL_READY;
 		}
 
 		/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
@@ -782,8 +788,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 	} else {
 		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
 		if (reg & (1UL << channel)) {
-			spin_unlock_irqrestore(&ipu_data.lock, flags);
-			return -EACCES;
+			ipu_ic_disable_task(&ipu_data, channel);
+			ichan->status = IPU_CHANNEL_READY;
 		}
 
 	/* Check if double-buffering is already enabled */
@@ -804,6 +810,39 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
 	return 0;
 }
 
+/* Called under spin_lock_irqsave(&ichan->lock) */
+static int ipu_submit_buffer(struct idmac_channel *ichan,
+	struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
+{
+	unsigned int chan_id = ichan->dma_chan.chan_id;
+	struct device *dev = &ichan->dma_chan.dev->device;
+	int ret;
+
+	if (async_tx_test_ack(&desc->txd))
+		return -EINTR;
+
+	/*
+	 * On first invocation this shouldn't be necessary, the call to
+	 * ipu_init_channel_buffer() above will set addresses for us, so we
+	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
+	 * doing it again shouldn't hurt either.
+	 */
+	ret = ipu_update_channel_buffer(ichan, buf_idx,
+					sg_dma_address(sg));
+
+	if (ret < 0) {
+		dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
+			sg, chan_id, buf_idx);
+		return ret;
+	}
+
+	ipu_select_buffer(chan_id, buf_idx);
+	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
+		sg, chan_id, buf_idx);
+
+	return 0;
+}
+
 /* Called under spin_lock_irqsave(&ichan->lock) */
 static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
 				      struct idmac_tx_desc *desc)
@@ -815,20 +854,10 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
 		if (!ichan->sg[i]) {
 			ichan->sg[i] = sg;
 
-			/*
-			 * On first invocation this shouldn't be necessary, the
-			 * call to ipu_init_channel_buffer() above will set
-			 * addresses for us, so we could make it conditional
-			 * on status >= IPU_CHANNEL_ENABLED, but doing it again
-			 * shouldn't hurt either.
-			 */
-			ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
-							sg_dma_address(sg));
+			ret = ipu_submit_buffer(ichan, desc, sg, i);
 			if (ret < 0)
 				return ret;
 
-			ipu_select_buffer(ichan->dma_chan.chan_id, i);
-
 			sg = sg_next(sg);
 		}
 	}
@@ -842,19 +871,22 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct idmac_channel *ichan = to_idmac_chan(tx->chan);
 	struct idmac *idmac = to_idmac(tx->chan->device);
 	struct ipu *ipu = to_ipu(idmac);
+	struct device *dev = &ichan->dma_chan.dev->device;
 	dma_cookie_t cookie;
 	unsigned long flags;
+	int ret;
 
 	/* Sanity check */
 	if (!list_empty(&desc->list)) {
 		/* The descriptor doesn't belong to client */
-		dev_err(&ichan->dma_chan.dev->device,
-			"Descriptor %p not prepared!\n", tx);
+		dev_err(dev, "Descriptor %p not prepared!\n", tx);
 		return -EBUSY;
 	}
 
 	mutex_lock(&ichan->chan_mutex);
 
+	async_tx_clear_ack(tx);
+
 	if (ichan->status < IPU_CHANNEL_READY) {
 		struct idmac_video_param *video = &ichan->params.video;
 		/*
@@ -878,16 +910,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 		goto out;
 	}
 
-	/* ipu->lock can be taken under ichan->lock, but not v.v. */
-	spin_lock_irqsave(&ichan->lock, flags);
-
-	/* submit_buffers() atomically verifies and fills empty sg slots */
-	cookie = ipu_submit_channel_buffers(ichan, desc);
-
-	spin_unlock_irqrestore(&ichan->lock, flags);
-
-	if (cookie < 0)
-		goto out;
+	dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
 
 	cookie = ichan->dma_chan.cookie;
 
@@ -897,23 +920,39 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* from dmaengine.h: "last cookie value returned to client" */
 	ichan->dma_chan.cookie = cookie;
 	tx->cookie = cookie;
 
+	/* ipu->lock can be taken under ichan->lock, but not v.v. */
 	spin_lock_irqsave(&ichan->lock, flags);
 
 	list_add_tail(&desc->list, &ichan->queue);
+	/* submit_buffers() atomically verifies and fills empty sg slots */
+	ret = ipu_submit_channel_buffers(ichan, desc);
 
 	spin_unlock_irqrestore(&ichan->lock, flags);
 
-	if (ichan->status < IPU_CHANNEL_ENABLED) {
-		int ret = ipu_enable_channel(idmac, ichan);
 	if (ret < 0) {
 		cookie = ret;
+		goto dequeue;
+	}
+
+	if (ichan->status < IPU_CHANNEL_ENABLED) {
+		ret = ipu_enable_channel(idmac, ichan);
+		if (ret < 0) {
+			cookie = ret;
+			goto dequeue;
+		}
+	}
+
+	dump_idmac_reg(ipu);
+
+dequeue:
+	if (cookie < 0) {
 		spin_lock_irqsave(&ichan->lock, flags);
 		list_del_init(&desc->list);
 		spin_unlock_irqrestore(&ichan->lock, flags);
 		tx->cookie = cookie;
 		ichan->dma_chan.cookie = cookie;
 	}
-	}
-
-	dump_idmac_reg(ipu);
-
 out:
 	mutex_unlock(&ichan->chan_mutex);
@@ -944,8 +983,6 @@ static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
 		memset(txd, 0, sizeof(*txd));
 		dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
 		txd->tx_submit = idmac_tx_submit;
-		txd->chan = &ichan->dma_chan;
-		INIT_LIST_HEAD(&txd->tx_list);
 
 		list_add(&desc->list, &ichan->free_list);
 
@@ -1161,6 +1198,24 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
 	return 0;
 }
 
+static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
+	struct idmac_tx_desc **desc, struct scatterlist *sg)
+{
+	struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
+
+	if (sgnew)
+		/* next sg-element in this list */
+		return sgnew;
+
+	if ((*desc)->list.next == &ichan->queue)
+		/* No more descriptors on the queue */
+		return NULL;
+
+	/* Fetch next descriptor */
+	*desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
+	return (*desc)->sg;
+}
+
 /*
  * We have several possibilities here:
  * current BUF	next BUF
@@ -1176,23 +1231,46 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
 static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 {
 	struct idmac_channel *ichan = dev_id;
+	struct device *dev = &ichan->dma_chan.dev->device;
 	unsigned int chan_id = ichan->dma_chan.chan_id;
 	struct scatterlist **sg, *sgnext, *sgnew = NULL;
 	/* Next transfer descriptor */
-	struct idmac_tx_desc *desc = NULL, *descnew;
+	struct idmac_tx_desc *desc, *descnew;
 	dma_async_tx_callback callback;
 	void *callback_param;
 	bool done = false;
-	u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY),
-	    ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY),
-	    curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+	u32 ready0, ready1, curbuf, err;
+	unsigned long flags;
 
 	/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
 
-	pr_debug("IDMAC irq %d\n", irq);
+	dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
 
+	spin_lock_irqsave(&ipu_data.lock, flags);
+
+	ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
+	ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
+	curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+	err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
+
+	if (err & (1 << chan_id)) {
+		idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
+		spin_unlock_irqrestore(&ipu_data.lock, flags);
+		/*
+		 * Doing this
+		 * ichan->sg[0] = ichan->sg[1] = NULL;
+		 * you can force channel re-enable on the next tx_submit(), but
+		 * this is dirty - think about descriptors with multiple
+		 * sg elements.
+		 */
+		dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
+			 chan_id, ready0, ready1, curbuf);
+		return IRQ_HANDLED;
+	}
+	spin_unlock_irqrestore(&ipu_data.lock, flags);
+
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
 
 	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
 		     ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
 		int i = 100;
@@ -1207,19 +1285,23 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 		if (!i) {
 			spin_unlock(&ichan->lock);
-			dev_dbg(ichan->dma_chan.device->dev,
+			dev_dbg(dev,
 				"IRQ on active buffer on channel %x, active "
 				"%d, ready %x, %x, current %x!\n", chan_id,
 				ichan->active_buffer, ready0, ready1, curbuf);
 			return IRQ_NONE;
-		}
+		} else
+			dev_dbg(dev,
+				"Buffer deactivated on channel %x, active "
+				"%d, ready %x, %x, current %x, rest %d!\n", chan_id,
+				ichan->active_buffer, ready0, ready1, curbuf, i);
 	}
 
 	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
 		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
 		     )) {
 		spin_unlock(&ichan->lock);
-		dev_dbg(ichan->dma_chan.device->dev,
+		dev_dbg(dev,
 			"IRQ with active buffer still ready on channel %x, "
 			"active %d, ready %x, %x!\n", chan_id,
 			ichan->active_buffer, ready0, ready1);
@@ -1227,8 +1309,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	}
 
 	if (unlikely(list_empty(&ichan->queue))) {
+		ichan->sg[ichan->active_buffer] = NULL;
 		spin_unlock(&ichan->lock);
-		dev_err(ichan->dma_chan.device->dev,
+		dev_err(dev,
 			"IRQ without queued buffers on channel %x, active %d, "
 			"ready %x, %x!\n", chan_id,
 			ichan->active_buffer, ready0, ready1);
@@ -1243,40 +1326,44 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	sg = &ichan->sg[ichan->active_buffer];
 	sgnext = ichan->sg[!ichan->active_buffer];
 
+	if (!*sg) {
+		spin_unlock(&ichan->lock);
+		return IRQ_HANDLED;
+	}
+
+	desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
+	descnew = desc;
+
+	dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
+		irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
+
+	/* Find the descriptor of sgnext */
+	sgnew = idmac_sg_next(ichan, &descnew, *sg);
+	if (sgnext != sgnew)
+		dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
+
 	/*
 	 * if sgnext == NULL sg must be the last element in a scatterlist and
 	 * queue must be empty
 	 */
 	if (unlikely(!sgnext)) {
-		if (unlikely(sg_next(*sg))) {
-			dev_err(ichan->dma_chan.device->dev,
-				"Broken buffer-update locking on channel %x!\n",
-				chan_id);
-			/* We'll let the user catch up */
+		if (!WARN_ON(sg_next(*sg)))
+			dev_dbg(dev, "Underrun on channel %x\n", chan_id);
+		ichan->sg[!ichan->active_buffer] = sgnew;
+		if (unlikely(sgnew)) {
+			ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
 		} else {
-			/* Underrun */
+			spin_lock_irqsave(&ipu_data.lock, flags);
 			ipu_ic_disable_task(&ipu_data, chan_id);
-			dev_dbg(ichan->dma_chan.device->dev,
-				"Underrun on channel %x\n", chan_id);
+			spin_unlock_irqrestore(&ipu_data.lock, flags);
 			ichan->status = IPU_CHANNEL_READY;
 			/* Continue to check for complete descriptor */
 		}
 	}
 
-	desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
-
-	/* First calculate and submit the next sg element */
-	if (likely(sgnext))
-		sgnew = sg_next(sgnext);
-
-	if (unlikely(!sgnew)) {
-		/* Start a new scatterlist, if any queued */
-		if (likely(desc->list.next != &ichan->queue)) {
-			descnew = list_entry(desc->list.next,
-					     struct idmac_tx_desc, list);
-			sgnew = &descnew->sg[0];
-		}
-	}
+	/* Calculate and submit the next sg element */
+	sgnew = idmac_sg_next(ichan, &descnew, sgnew);
 
 	if (unlikely(!sg_next(*sg)) || !sgnext) {
 		/*
@@ -1289,17 +1376,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	*sg = sgnew;
 
-	if (likely(sgnew)) {
-		int ret;
-
-		ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer,
-						sg_dma_address(*sg));
-		if (ret < 0)
-			dev_err(ichan->dma_chan.device->dev,
-				"Failed to update buffer on channel %x buffer %d!\n",
-				chan_id, ichan->active_buffer);
-		else
-			ipu_select_buffer(chan_id, ichan->active_buffer);
+	if (likely(sgnew) &&
+	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
+		callback = desc->txd.callback;
+		callback_param = desc->txd.callback_param;
+		spin_unlock(&ichan->lock);
+		callback(callback_param);
+		spin_lock(&ichan->lock);
 	}
 
 	/* Flip the active buffer - even if update above failed */
@@ -1327,13 +1410,20 @@ static void ipu_gc_tasklet(unsigned long arg)
 		struct idmac_channel *ichan = ipu->channel + i;
 		struct idmac_tx_desc *desc;
 		unsigned long flags;
-		int j;
+		struct scatterlist *sg;
+		int j, k;
 
 		for (j = 0; j < ichan->n_tx_desc; j++) {
 			desc = ichan->desc + j;
 			spin_lock_irqsave(&ichan->lock, flags);
 			if (async_tx_test_ack(&desc->txd)) {
 				list_move(&desc->list, &ichan->free_list);
+				for_each_sg(desc->sg, sg, desc->sg_len, k) {
+					if (ichan->sg[0] == sg)
+						ichan->sg[0] = NULL;
+					else if (ichan->sg[1] == sg)
+						ichan->sg[1] = NULL;
+				}
 				async_tx_clear_ack(&desc->txd);
 			}
 			spin_unlock_irqrestore(&ichan->lock, flags);
@@ -1341,13 +1431,7 @@ static void ipu_gc_tasklet(unsigned long arg)
 	}
 }
 
-/*
- * At the time .device_alloc_chan_resources() method is called, we cannot know,
- * whether the client will accept the channel. Thus we must only check, if we
- * can satisfy client's request but the only real criterion to verify, whether
- * the client has accepted our offer is the client_count. That's why we have to
- * perform the rest of our allocation tasks on the first call to this function.
- */
+/* Allocate and initialise a transfer descriptor. */
 static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
 		struct scatterlist *sgl, unsigned int sg_len,
 		enum dma_data_direction direction, unsigned long tx_flags)
@@ -1358,8 +1442,8 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
 	unsigned long flags;
 
 	/* We only can handle these three channels so far */
-	if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 &&
-	    ichan->dma_chan.chan_id != IDMAC_IC_7)
+	if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
+	    chan->chan_id != IDMAC_IC_7)
 		return NULL;
 
 	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
@@ -1400,7 +1484,7 @@ static void idmac_issue_pending(struct dma_chan *chan)
 
 	/* This is not always needed, but doesn't hurt either */
 	spin_lock_irqsave(&ipu->lock, flags);
-	ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer);
+	ipu_select_buffer(chan->chan_id, ichan->active_buffer);
 	spin_unlock_irqrestore(&ipu->lock, flags);
 
 	/*
@@ -1432,8 +1516,7 @@ static void __idmac_terminate_all(struct dma_chan *chan)
 		struct idmac_tx_desc *desc = ichan->desc + i;
 		if (list_empty(&desc->list))
 			/* Descriptor was prepared, but not submitted */
-			list_add(&desc->list,
-				 &ichan->free_list);
+			list_add(&desc->list, &ichan->free_list);
 
 		async_tx_clear_ack(&desc->txd);
 	}
@@ -1458,6 +1541,28 @@ static void idmac_terminate_all(struct dma_chan *chan)
 	mutex_unlock(&ichan->chan_mutex);
 }
 
+#ifdef DEBUG
+static irqreturn_t ic_sof_irq(int irq, void *dev_id)
+{
+	struct idmac_channel *ichan = dev_id;
+	printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
+	       irq, ichan->dma_chan.chan_id);
+	disable_irq(irq);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ic_eof_irq(int irq, void *dev_id)
+{
+	struct idmac_channel *ichan = dev_id;
+	printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
+	       irq, ichan->dma_chan.chan_id);
+	disable_irq(irq);
+	return IRQ_HANDLED;
+}
+
+static int ic_sof = -EINVAL, ic_eof = -EINVAL;
+#endif
+
 static int idmac_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
@@ -1471,31 +1576,49 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
 	chan->cookie = 1;
 	ichan->completed = -ENXIO;
 
-	ret = ipu_irq_map(ichan->dma_chan.chan_id);
+	ret = ipu_irq_map(chan->chan_id);
 	if (ret < 0)
 		goto eimap;
 
 	ichan->eof_irq = ret;
-	ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
-			  ichan->eof_name, ichan);
-	if (ret < 0)
-		goto erirq;
+
+	/*
+	 * Important to first disable the channel, because maybe someone
+	 * used it before us, e.g., the bootloader
+	 */
+	ipu_disable_channel(idmac, ichan, true);
 
 	ret = ipu_init_channel(idmac, ichan);
 	if (ret < 0)
 		goto eichan;
 
+	ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
+			  ichan->eof_name, ichan);
+	if (ret < 0)
+		goto erirq;
+
+#ifdef DEBUG
+	if (chan->chan_id == IDMAC_IC_7) {
+		ic_sof = ipu_irq_map(69);
+		if (ic_sof > 0)
+			request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
+		ic_eof = ipu_irq_map(70);
+		if (ic_eof > 0)
+			request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
+	}
+#endif
+
 	ichan->status = IPU_CHANNEL_INITIALIZED;
 
-	dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n",
-		ichan->dma_chan.chan_id, ichan->eof_irq);
+	dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
+		chan->chan_id, ichan->eof_irq);
 
 	return ret;
 
-eichan:
-	free_irq(ichan->eof_irq, ichan);
 erirq:
-	ipu_irq_unmap(ichan->dma_chan.chan_id);
+	ipu_uninit_channel(idmac, ichan);
+eichan:
+	ipu_irq_unmap(chan->chan_id);
 eimap:
 	return ret;
 }
@@ -1510,8 +1633,22 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 	__idmac_terminate_all(chan);
 
 	if (ichan->status > IPU_CHANNEL_FREE) {
+#ifdef DEBUG
+		if (chan->chan_id == IDMAC_IC_7) {
+			if (ic_sof > 0) {
+				free_irq(ic_sof, ichan);
+				ipu_irq_unmap(69);
+				ic_sof = -EINVAL;
+			}
+			if (ic_eof > 0) {
+				free_irq(ic_eof, ichan);
+				ipu_irq_unmap(70);
+				ic_eof = -EINVAL;
+			}
+		}
+#endif
 		free_irq(ichan->eof_irq, ichan);
-		ipu_irq_unmap(ichan->dma_chan.chan_id);
+		ipu_irq_unmap(chan->chan_id);
 	}
 
 	ichan->status = IPU_CHANNEL_FREE;
@@ -1573,7 +1710,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 		dma_chan->device = &idmac->dma;
 		dma_chan->cookie = 1;
 		dma_chan->chan_id = i;
-		list_add_tail(&ichan->dma_chan.device_node, &dma->channels);
+		list_add_tail(&dma_chan->device_node, &dma->channels);
 	}
 
 	idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
@@ -1581,7 +1718,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 	return dma_async_device_register(&idmac->dma);
 }
 
-static void ipu_idmac_exit(struct ipu *ipu)
+static void __exit ipu_idmac_exit(struct ipu *ipu)
 {
 	int i;
 	struct idmac *idmac = &ipu->idmac;
@@ -1600,7 +1737,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
  * IPU common probe / remove
  */
 
-static int ipu_probe(struct platform_device *pdev)
+static int __init ipu_probe(struct platform_device *pdev)
 {
 	struct ipu_platform_data *pdata = pdev->dev.platform_data;
 	struct resource *mem_ipu, *mem_ic;
@@ -1700,7 +1837,7 @@ err_noirq:
 	return ret;
 }
 
-static int ipu_remove(struct platform_device *pdev)
+static int __exit ipu_remove(struct platform_device *pdev)
 {
 	struct ipu *ipu = platform_get_drvdata(pdev);
 
@@ -1725,7 +1862,7 @@ static struct platform_driver ipu_platform_driver = {
 		.name	= "ipu-core",
 		.owner	= THIS_MODULE,
 	},
-	.remove		= ipu_remove,
+	.remove		= __exit_p(ipu_remove),
 };
 
 static int __init ipu_init(void)
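(Aside, not part of the diff: the IDMAC entry points reworked above are only reachable through the generic dmaengine slave API. A minimal sketch of how a client, for example a framebuffer driver, would exercise them; every name other than the standard dmaengine calls is hypothetical.)

#include <linux/dmaengine.h>

/* Sketch only: queue one scatterlist on an IDMAC channel obtained via
 * dma_request_channel(). The prep call lands in idmac_prep_slave_sg(),
 * tx_submit() in idmac_tx_submit(), issue_pending() in idmac_issue_pending(). */
static int example_queue_frame(struct dma_chan *chan, struct scatterlist *sgl,
			       unsigned int sg_len, dma_async_tx_callback done,
			       void *arg)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	tx->callback = done;
	tx->callback_param = arg;
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	chan->device->device_issue_pending(chan);
	return 0;
}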
@@ -352,7 +352,7 @@ static struct irq_chip ipu_irq_chip = {
 };
 
 /* Install the IRQ handler */
-int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
+int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
 {
 	struct ipu_platform_data *pdata = dev->dev.platform_data;
 	unsigned int irq, irq_base, i;
@@ -632,7 +632,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 		slot->async_tx.tx_submit = mv_xor_tx_submit;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
-		INIT_LIST_HEAD(&slot->async_tx.tx_list);
 		hw_desc = (char *) mv_chan->device->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
@@ -21,6 +21,15 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
 /**
  * dma_chan_ref - object used to manage dma channels received from the
  * dmaengine core.
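(Aside, not from the patch: the point of the __async_inline annotation added above is that an engine-only helper can be written once and still vanish on CONFIG_HAS_DMA=n builds, where the macro degrades to __always_inline and the dead branch folds away. A hypothetical helper for illustration; only __async_inline itself and the standard dmaengine calls are real.)

#include <linux/async_tx.h>

/* Hypothetical helper: callers only pass a non-NULL chan when a DMA engine
 * is available, so with CONFIG_HAS_DMA=n this is force-inlined and the
 * engine path is eliminated as dead code at every call site. */
static __async_inline struct dma_async_tx_descriptor *
do_engine_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		 size_t len)
{
	return chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						     DMA_CTRL_ACK);
}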
@@ -23,9 +23,6 @@
 
 #include <linux/device.h>
 #include <linux/uio.h>
-#include <linux/kref.h>
-#include <linux/completion.h>
-#include <linux/rcupdate.h>
 #include <linux/dma-mapping.h>
 
 /**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
 struct dma_device {
 
 	unsigned int chancnt;
+	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
 	dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
 }
 #endif
 
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get()	dmaengine_get()
+#define async_dmaengine_put()	dmaengine_put()
+#define async_dma_find_channel(type) dma_find_channel(type)
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+	return NULL;
+}
+#endif
+
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
 	set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+	clear_bit(tx_type, dstp->bits);
+}
+
 #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
 static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
 {
@@ -74,4 +74,23 @@ struct dw_dma_slave {
 #define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
 
+/* DMA API extensions */
+struct dw_cyclic_desc {
+	struct dw_desc	**desc;
+	unsigned long	periods;
+	void		(*period_callback)(void *param);
+	void		*period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+		enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
 #endif /* DW_DMAC_H */
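(Aside, not from the patch: a rough sketch of how a client might drive the new dw_dmac cyclic interface declared above. Only the dw_dma_cyclic_* prototypes and struct dw_cyclic_desc are taken from the header; the callback, the period split, the ERR_PTR error convention and all other names are assumptions made for illustration.)

#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>

static void example_period_done(void *param)	/* hypothetical callback */
{
	/* consume the period that just completed, e.g. hand it to userspace */
}

/* Sketch: split a DMA-mapped buffer into four periods and run it in a loop
 * on a slave channel until example_stop_cyclic() is called. Assumes
 * dw_dma_cyclic_prep() reports failure via ERR_PTR(). */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, buf_len / 4,
				   DMA_FROM_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = example_period_done;
	cdesc->period_callback_param = chan;

	return dw_dma_cyclic_start(chan);
}

static void example_stop_cyclic(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);
}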