// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 *	otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
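
/*
 * Usage sketch (illustrative only, not part of this file): a driver managing
 * its buffer by hand pairs the directional allocator with
 * snd_dma_free_pages().  "card->dev" and the size below are placeholders.
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, card->dev,
 *				      DMA_TO_DEVICE, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	// ... use buf.area (CPU address) and buf.addr (DMA address) ...
 *	snd_dma_free_pages(&buf);
 */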

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type. When no space is left, this function reduces the size and
 * tries to allocate again. The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 *	otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
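
/*
 * Usage sketch (illustrative, assumed driver context): under memory pressure
 * the fallback helper may hand back less than requested, so read the actual
 * size from dmab.bytes instead of assuming the requested one.  "card->dev"
 * is a placeholder.
 *
 *	struct snd_dma_buffer buf;
 *
 *	if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, card->dev,
 *					 1024 * 1024, &buf) < 0)
 *		return -ENOMEM;
 *	dev_dbg(card->dev, "allocated %zu bytes\n", buf.bytes);
 */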

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the allocated buffer via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * The function returns the snd_dma_buffer object on success, or NULL on failure.
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
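
/*
 * Usage sketch (illustrative, assumed probe context): with devres management
 * no explicit free is needed on the error or removal paths; "pdev" stands in
 * for the caller's platform device.
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_FROM_DEVICE, 256 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	// buf is released automatically together with pdev->dev
 */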

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
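
/*
 * Usage sketch (illustrative): a CPU-side copy into a non-coherent buffer is
 * typically bracketed by the two sync modes; "ofs", "src" and "bytes" are
 * assumed caller variables.
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *	memcpy(dmab->area + ofs, src, bytes);
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 */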
#endif /* CONFIG_HAS_DMA */

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
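
/*
 * Usage sketch (illustrative): walking an SG buffer in physically contiguous
 * chunks, e.g. to build a descriptor list; program_descriptor() is a
 * hypothetical driver helper.
 *
 *	unsigned int ofs = 0, rest = bytes, chunk;
 *
 *	while (rest) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *		program_descriptor(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 */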

/*
 * Continuous pages allocator
 */
static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
	void *p = alloc_pages_exact(size, gfp);

	if (p)
		dmab->addr = page_to_phys(virt_to_page(p));
	return p;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	return __vmalloc(size, gfp);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if we fail to malloc, try to fetch memory traditionally.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_NORETRY | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
#ifdef CONFIG_X86
	if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return p;
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
#ifdef CONFIG_X86
/* On x86, share the same ops as the standard dev ops */
#define snd_dma_wc_ops	snd_dma_dev_ops
#else /* CONFIG_X86 */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};
#endif /* CONFIG_X86 */
/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
	if (!sgt)
		return NULL;
	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p)
		dmab->private_data = sgt;
	else
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	/* re-use vmalloc helpers for get_* ops */
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	size_t ofs;

	if (!p)
		return NULL;
	for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
		set_memory_uc(vmalloc_to_virt(p + ofs), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	size_t ofs;

	for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
		set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	/* FIXME: dma_mmap_noncontiguous() works? */
	return -ENOENT; /* continue with the default mmap handler */
}

const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
	return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				     dmab->dev.dir, DEFAULT_GFP);
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}