dmaengine: Remove site specific OOM error messages on kzalloc
If kzalloc() fails it will issue its own error message, including a dump_stack(), so remove the site-specific error messages.

Signed-off-by: Peter Griffin <peter.griffin@linaro.org>
Acked-by: Jon Hunter <jonathanh@nvidia.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
commit aef94fea97
parent 71f7e6cc55
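The change is mechanical and the same across every driver below: drop the allocation-failure print, and where that print was the only other statement inside the braces, drop the braces too. A minimal before/after sketch of the pattern (illustrative only; "foo" and "dev" are placeholder names, not taken from any driver in this patch):

	/* Before: the dev_err() duplicates the allocator's own failure report. */
	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo) {
		dev_err(dev, "failed to allocate foo\n");
		return -ENOMEM;
	}

	/*
	 * After: kzalloc() already emits a warning (including a dump_stack())
	 * on failure unless __GFP_NOWARN is passed, so just propagate -ENOMEM.
	 */
	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

None of the call sites touched here pass __GFP_NOWARN, so no failure information is lost by removing the per-site messages.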
@@ -1443,8 +1443,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
 	if (!dsg) {
 		pl08x_free_txd(pl08x, txd);
-		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
-				__func__);
 		return NULL;
 	}
 	list_add_tail(&dsg->node, &txd->dsg_list);
@@ -1901,11 +1899,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 	 */
 	for (i = 0; i < channels; i++) {
 		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
-		if (!chan) {
-			dev_err(&pl08x->adev->dev,
-				"%s no memory for channel\n", __func__);
+		if (!chan)
 			return -ENOMEM;
-		}
 
 		chan->host = pl08x;
 		chan->state = PL08X_CHAN_IDLE;
@@ -2360,9 +2355,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
 			GFP_KERNEL);
 	if (!pl08x->phy_chans) {
-		dev_err(&adev->dev, "%s failed to allocate "
-			"physical channel holders\n",
-			__func__);
 		ret = -ENOMEM;
 		goto out_no_phychans;
 	}
@@ -397,8 +397,6 @@ static int mpc52xx_bcom_probe(struct platform_device *op)
 	/* Get a clean struct */
 	bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
 	if (!bcom_eng) {
-		printk(KERN_ERR DRIVER_NAME ": "
-			"Can't allocate state structure\n");
 		rv = -ENOMEM;
 		goto error_sramclean;
 	}
@@ -1069,10 +1069,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
 	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
 			GFP_ATOMIC);
-	if (!edesc) {
-		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
+	if (!edesc)
 		return NULL;
-	}
 
 	edesc->pset_nr = sg_len;
 	edesc->residue = 0;
@@ -1173,10 +1171,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 
 	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
 			GFP_ATOMIC);
-	if (!edesc) {
-		dev_dbg(dev, "Failed to allocate a descriptor\n");
+	if (!edesc)
 		return NULL;
-	}
 
 	edesc->pset_nr = nslots;
 	edesc->residue = edesc->residue_stat = len;
@@ -1298,10 +1294,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 
 	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
 			GFP_ATOMIC);
-	if (!edesc) {
-		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
+	if (!edesc)
 		return NULL;
-	}
 
 	edesc->cyclic = 1;
 	edesc->pset_nr = nslots;
@@ -2207,10 +2201,8 @@ static int edma_probe(struct platform_device *pdev)
 		return ret;
 
 	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
-	if (!ecc) {
-		dev_err(dev, "Can't allocate controller\n");
+	if (!ecc)
 		return -ENOMEM;
-	}
 
 	ecc->dev = dev;
 	ecc->id = pdev->id;
@@ -1234,7 +1234,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
 	/* alloc channel */
 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 	if (!chan) {
-		dev_err(fdev->dev, "no free memory for DMA channels!\n");
 		err = -ENOMEM;
 		goto out_return;
 	}
@@ -1340,7 +1339,6 @@ static int fsldma_of_probe(struct platform_device *op)
 
 	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
 	if (!fdev) {
-		dev_err(&op->dev, "No enough memory for 'priv'\n");
 		err = -ENOMEM;
 		goto out_return;
 	}
@@ -425,10 +425,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
 
 	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
 	ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
-	if (!ds) {
-		dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+	if (!ds)
 		return NULL;
-	}
+
 	ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
 	ds->size = len;
 	ds->desc_num = num;
@@ -481,10 +480,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
 	}
 
 	ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
-	if (!ds) {
-		dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+	if (!ds)
 		return NULL;
-	}
+
 	ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
 	ds->desc_num = num;
 	num = 0;
@@ -551,10 +551,9 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
 
 	/* alloc channel */
 	tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
-	if (!tdmac) {
-		dev_err(tdev->dev, "no free memory for DMA channels!\n");
+	if (!tdmac)
 		return -ENOMEM;
-	}
+
 	if (irq)
 		tdmac->irq = irq;
 	tdmac->dev = tdev->dev;
@@ -574,10 +574,8 @@ static int moxart_probe(struct platform_device *pdev)
 	struct moxart_dmadev *mdc;
 
 	mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
-	if (!mdc) {
-		dev_err(dev, "can't allocate DMA container\n");
+	if (!mdc)
 		return -ENOMEM;
-	}
 
 	irq = irq_of_parse_and_map(node, 0);
 	if (irq == NO_IRQ) {
@@ -1300,10 +1300,9 @@ static int nbpf_probe(struct platform_device *pdev)
 
 	nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
 			    sizeof(nbpf->chan[0]), GFP_KERNEL);
-	if (!nbpf) {
-		dev_err(dev, "Memory allocation failed\n");
+	if (!nbpf)
 		return -ENOMEM;
-	}
+
 	dma_dev = &nbpf->dma_dev;
 	dma_dev->dev = dev;
 
@@ -2828,10 +2828,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
 	/* Allocate a new DMAC and its Channels */
 	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
-	if (!pl330) {
-		dev_err(&adev->dev, "unable to allocate mem\n");
+	if (!pl330)
 		return -ENOMEM;
-	}
 
 	pd = &pl330->ddma;
 	pd->dev = &adev->dev;
@@ -2890,7 +2888,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
 	if (!pl330->peripherals) {
 		ret = -ENOMEM;
-		dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
 		goto probe_err2;
 	}
 
@@ -4084,7 +4084,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
 	/* create a device */
 	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
 	if (!adev) {
-		dev_err(&ofdev->dev, "failed to allocate device\n");
 		initcode = PPC_ADMA_INIT_ALLOC;
 		ret = -ENOMEM;
 		goto err_adev_alloc;
@@ -4145,7 +4144,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
 	/* create a channel */
 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 	if (!chan) {
-		dev_err(&ofdev->dev, "can't allocate channel structure\n");
 		initcode = PPC_ADMA_INIT_CHANNEL;
 		ret = -ENOMEM;
 		goto err_chan_alloc;
@@ -1101,11 +1101,8 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
 	 */
 	for (i = 0; i < channels; i++) {
 		chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
-		if (!chan) {
-			dev_err(dmadev->dev,
-				"%s no memory for channel\n", __func__);
+		if (!chan)
 			return -ENOMEM;
-		}
 
 		chan->id = i;
 		chan->host = s3cdma;
@@ -532,11 +532,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 
 	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
 			       GFP_KERNEL);
-	if (!sh_chan) {
-		dev_err(sdev->dma_dev.dev,
-			"No free memory for allocating dma channels!\n");
+	if (!sh_chan)
 		return -ENOMEM;
-	}
 
 	schan = &sh_chan->shdma_chan;
 	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
@@ -732,10 +729,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
 
 	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
 			     GFP_KERNEL);
-	if (!shdev) {
-		dev_err(&pdev->dev, "Not enough memory\n");
+	if (!shdev)
 		return -ENOMEM;
-	}
 
 	dma_dev = &shdev->shdma_dev.dma_dev;
 
@@ -245,11 +245,8 @@ static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
 	int err;
 
 	sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
-	if (!sc) {
-		dev_err(sdev->dma_dev.dev,
-			"No free memory for allocating dma channels!\n");
+	if (!sc)
 		return -ENOMEM;
-	}
 
 	schan = &sc->shdma_chan;
 	schan->max_xfer_len = 64 * 1024 * 1024 - 1;
@@ -349,10 +346,8 @@ static int sudmac_probe(struct platform_device *pdev)
 	err = -ENOMEM;
 	su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
 			      GFP_KERNEL);
-	if (!su_dev) {
-		dev_err(&pdev->dev, "Not enough memory\n");
+	if (!su_dev)
 		return err;
-	}
 
 	dma_dev = &su_dev->shdma_dev.dma_dev;
 
@@ -854,10 +854,9 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 	int ret, i;
 
 	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
-	if (!sdma) {
-		dev_err(dev, "Memory exhausted!\n");
+	if (!sdma)
 		return -ENOMEM;
-	}
+
 	data = (struct sirfsoc_dmadata *)
 		(of_match_device(op->dev.driver->of_match_table,
 				 &op->dev)->data);
@@ -3237,10 +3237,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		       (num_phy_chans + num_log_chans + num_memcpy_chans) *
 		       sizeof(struct d40_chan), GFP_KERNEL);
 
-	if (base == NULL) {
-		d40_err(&pdev->dev, "Out of memory\n");
+	if (base == NULL)
 		goto failure;
-	}
 
 	base->rev = rev;
 	base->clk = clk;
@@ -300,10 +300,8 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
 
 	/* Allocate DMA desc */
 	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
-	if (!dma_desc) {
-		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
+	if (!dma_desc)
 		return NULL;
-	}
 
 	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
 	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
@@ -340,8 +338,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
 	spin_unlock_irqrestore(&tdc->lock, flags);
 
 	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);
-	if (!sg_req)
-		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
+
 	return sg_req;
 }
 
@@ -1319,10 +1316,8 @@ static int tegra_dma_probe(struct platform_device *pdev)
 
 	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
 			sizeof(struct tegra_dma_channel), GFP_KERNEL);
-	if (!tdma) {
-		dev_err(&pdev->dev, "Error: memory allocation failed\n");
+	if (!tdma)
 		return -ENOMEM;
-	}
 
 	tdma->dev = &pdev->dev;
 	tdma->chip_data = cdata;
@@ -337,18 +337,14 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
 	int err;
 
 	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
-	if (!td_desc) {
-		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+	if (!td_desc)
 		goto out;
-	}
 
 	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
 
 	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
-	if (!td_desc->desc_list) {
-		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+	if (!td_desc->desc_list)
 		goto err;
-	}
 
 	dma_async_tx_descriptor_init(&td_desc->txd, chan);
 	td_desc->txd.tx_submit = td_tx_submit;