DMA-API: sound: fix dma mask handling in a lot of drivers
This code sequence is unsafe in modules:

	static u64 mask = DMA_BIT_MASK(something);
	...
	if (!dev->dma_mask)
		dev->dma_mask = &mask;

as, if a module is reloaded, the mask will be pointing at the original
module's mask address, and this can lead to oopses.  Moreover, they all
follow this with:

	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = mask;

where 'mask' is the same value as the statically defined mask, and this
bypasses the architecture's check on whether the DMA mask is possible.

Fix these issues by using the new dma_coerce_mask_and_coherent()
function.

Acked-by: Mark Brown <broonie@linaro.org>
Acked-by: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
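For reference, the dma_coerce_mask_and_coherent() helper used throughout this
commit (added earlier in the same series) behaves roughly like the sketch
below; it is shown only to illustrate why it fixes both problems: the
streaming DMA mask pointer ends up pointing at storage owned by the device
itself rather than at a static variable inside a module, and the requested
mask goes through the architecture's normal validation path.

	/* illustrative sketch, not a verbatim copy of the kernel header */
	static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
	{
		/* point the streaming DMA mask at device-owned storage */
		dev->dma_mask = &dev->coherent_dma_mask;
		/* validate and set both masks via the architecture helpers */
		return dma_set_mask_and_coherent(dev, mask);
	}

With this helper, each driver's "if (!dev->dma_mask)" boilerplate and its
static mask variable become unnecessary, as the per-driver hunks below show.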
parent fa6a8d6d65
commit c9bd5e690a
@@ -11,6 +11,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 
 #include <sound/core.h>
@@ -83,8 +84,6 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
         .mmap = pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = 0xffffffff;
-
 int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
                 struct snd_pcm **rpcm)
 {
@@ -100,10 +99,9 @@ int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
         pcm->private_data = client;
         pcm->private_free = pxa2xx_pcm_free_dma_buffers;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = 0xffffffff;
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                goto out;
 
         if (play) {
                 int stream = SNDRV_PCM_STREAM_PLAYBACK;
@@ -68,18 +68,15 @@ int atmel_pcm_mmap(struct snd_pcm_substream *substream,
 }
 EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
 
-static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
-
 int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        int ret = 0;
+        int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &atmel_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
@@ -415,19 +415,16 @@ static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
         }
 }
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        int ret = 0;
+        int ret;
 
         pr_debug("%s enter\n", __func__);
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &bf5xx_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
@@ -323,18 +323,16 @@ static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
         .silence = bf5xx_pcm_silence,
 };
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+        int ret;
 
         pr_debug("%s enter\n", __func__);
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &bf5xx_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
                         SNDRV_DMA_TYPE_DEV, card->dev, size, size);
@@ -844,18 +844,15 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
         }
 }
 
-static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
         int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &davinci_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = davinci_pcm_preallocate_dma_buffer(pcm,
@@ -298,14 +298,11 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
         int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &fsl_dma_dmamask;
-
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = fsl_dma_dmamask;
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
+        if (ret)
+                return ret;
 
         /* Some codecs have separate DAIs for playback and capture, so we
          * should allocate a DMA buffer only for the streams that are valid.
@@ -272,18 +272,16 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
         return 0;
 }
 
-static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        int ret = 0;
+        int ret;
 
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
+
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &imx_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = imx_pcm_preallocate_dma_buffer(pcm,
                         SNDRV_PCM_STREAM_PLAYBACK);
@@ -299,7 +299,6 @@ static struct snd_pcm_ops psc_dma_ops = {
         .hw_params = psc_dma_hw_params,
 };
 
-static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
 static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
@@ -307,15 +306,14 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
         struct snd_pcm *pcm = rtd->pcm;
         struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
         size_t size = psc_dma_hardware.buffer_bytes_max;
-        int rc = 0;
+        int rc;
 
         dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
                 card, dai, pcm);
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &psc_dma_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (rc)
+                return rc;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
@@ -297,19 +297,15 @@ static void jz4740_pcm_free(struct snd_pcm *pcm)
         }
 }
 
-static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        int ret = 0;
+        int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &jz4740_pcm_dmamask;
-
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = jz4740_pcm_preallocate_dma_buffer(pcm,
@@ -59,8 +59,6 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
         .fifo_size = 0,
 };
 
-static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
-
 static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
 {
         struct kirkwood_dma_data *priv = dev_id;
@@ -292,10 +290,9 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
         struct snd_pcm *pcm = rtd->pcm;
         int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &kirkwood_dma_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = kirkwood_dma_preallocate_dma_buffer(pcm,
@@ -314,16 +314,15 @@ static void nuc900_dma_free_dma_buffers(struct snd_pcm *pcm)
         snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
 static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
+        int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &nuc900_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                 card->dev, 4 * 1024, (4 * 1024) - 1);
@@ -156,8 +156,6 @@ static struct snd_pcm_ops omap_pcm_ops = {
         .mmap = omap_pcm_mmap,
 };
 
-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
-
 static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
         int stream)
 {
@@ -202,12 +200,11 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        int ret = 0;
+        int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &omap_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = omap_pcm_preallocate_dma_buffer(pcm,
@@ -87,18 +87,15 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
         .mmap = pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        int ret = 0;
+        int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
@@ -444,8 +444,6 @@ static void s6000_pcm_free(struct snd_pcm *pcm)
         snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
 {
         struct snd_card *card = runtime->card->snd_card;
@@ -456,10 +454,9 @@ static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
         params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
                         pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &s6000_pcm_dmamask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (res)
+                return res;
 
         if (params->dma_in) {
                 s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
@@ -406,20 +406,17 @@ static void dma_free_dma_buffers(struct snd_pcm *pcm)
         }
 }
 
-static u64 dma_mask = DMA_BIT_MASK(32);
-
 static int dma_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        int ret = 0;
+        int ret;
 
         pr_debug("Entered %s\n", __func__);
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &dma_mask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = preallocate_dma_buffer(pcm,
@@ -383,18 +383,15 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
         return 0;
 }
 
-static u64 idma_mask = DMA_BIT_MASK(32);
-
 static int idma_new(struct snd_soc_pcm_runtime *rtd)
 {
         struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
-        int ret = 0;
+        int ret;
 
-        if (!card->dev->dma_mask)
-                card->dev->dma_mask = &idma_mask;
-        if (!card->dev->coherent_dma_mask)
-                card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+        if (ret)
+                return ret;
 
         if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                 ret = preallocate_idma_buffer(pcm,