remove dma_zalloc_coherent
Merge tag 'remove-dma_zalloc_coherent-5.0' of git://git.infradead.org/users/hch/dma-mapping

Pull dma_zalloc_coherent() removal from Christoph Hellwig:

 "We've always had a weird situation around dma_zalloc_coherent. To
  safely support mapping the allocations to userspace, major
  architectures like x86 and arm have always zeroed allocations from
  dma_alloc_coherent, but a couple of other architectures were missing
  that zeroing, either always or in corner cases.

  Later we grew another interface, dma_zalloc_coherent, to explicitly
  request zeroing, but it just added __GFP_ZERO to the allocation
  flags, which for some allocators that didn't end up using the page
  allocator was a no-op that still left the allocations un-zeroed.

  So for this merge window I fixed up all remaining architectures to
  zero the memory in dma_alloc_coherent, and made dma_zalloc_coherent a
  no-op wrapper around dma_alloc_coherent, which fixes all of the above
  issues. dma_zalloc_coherent is now pointless and can go away, and
  Luis helped me write a Coccinelle script and patch series to kill it,
  which I think we should apply now, just after -rc1, to finally settle
  these issues"

* tag 'remove-dma_zalloc_coherent-5.0' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: remove dma_zalloc_coherent()
  cross-tree: phase out dma_zalloc_coherent() on headers
  cross-tree: phase out dma_zalloc_coherent()
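The series' actual Coccinelle script is not reproduced on this page. As a minimal sketch of the kind of SmPL rule such a conversion uses (an assumption for illustration, covering only the plain four-argument call; the real script from the series may differ):

    // Hypothetical SmPL rule: rewrite dma_zalloc_coherent() calls to
    // dma_alloc_coherent(). This is a pure rename, which is only safe
    // because dma_alloc_coherent() now guarantees zeroed memory.
    @@
    expression dev, size, handle, gfp;
    @@
    -	dma_zalloc_coherent(dev, size, handle, gfp)
    +	dma_alloc_coherent(dev, size, handle, gfp)

Run through spatch across the tree, a rule like this performs exactly the textual substitution seen in the hunks below; the remaining churn is re-wrapping the argument lists to the one-character-shorter name.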
commit 66c56cfa64
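For context on why the conversion is safe: by this point dma_zalloc_coherent had already been reduced to a trivial pass-through. A sketch of roughly what the removed helper looked like, reconstructed from the description above rather than quoted from the patch:

    /* Reconstruction of the no-op wrapper described above, not the exact
     * removed code: once every architecture zeroes memory in
     * dma_alloc_coherent(), the "z" variant adds nothing. */
    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t gfp)
    {
        return dma_alloc_coherent(dev, size, dma_handle, gfp);
    }

Every hunk that follows is the same mechanical caller-side substitution.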
@@ -129,9 +129,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
     unsigned long flags;

     ch->desc = 0;
-    ch->desc_base = dma_zalloc_coherent(ch->dev,
-                                        LTQ_DESC_NUM * LTQ_DESC_SIZE,
-                                        &ch->phys, GFP_ATOMIC);
+    ch->desc_base = dma_alloc_coherent(ch->dev,
+                                       LTQ_DESC_NUM * LTQ_DESC_SIZE,
+                                       &ch->phys, GFP_ATOMIC);

     spin_lock_irqsave(&ltq_dma_lock, flags);
     ltq_dma_w32(ch->nr, LTQ_DMA_CS);

@@ -255,7 +255,7 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)

     chan->ring_size = ring_size;

-    chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev,
+    chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
                                          ring_size * sizeof(u64),
                                          &chan->ring_dma, GFP_KERNEL);

@@ -756,9 +756,10 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
     }

     /* Initialize outbound message descriptor ring */
-    rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev,
-                            rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-                            &rmu->msg_tx_ring.phys, GFP_KERNEL);
+    rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
+                                               rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+                                               &rmu->msg_tx_ring.phys,
+                                               GFP_KERNEL);
     if (!rmu->msg_tx_ring.virt) {
         rc = -ENOMEM;
         goto out_dma;

@@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap)
     if (!pp)
         return -ENOMEM;

-    mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
-                              GFP_KERNEL);
+    mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+                             GFP_KERNEL);
     if (!mem) {
         kfree(pp);
         return -ENOMEM;

@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)

 static int he_init_tpdrq(struct he_dev *he_dev)
 {
-    he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-                                             CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
-                                             &he_dev->tpdrq_phys, GFP_KERNEL);
+    he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+                                            CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+                                            &he_dev->tpdrq_phys,
+                                            GFP_KERNEL);
     if (he_dev->tpdrq_base == NULL) {
         hprintk("failed to alloc tpdrq\n");
         return -ENOMEM;

@@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
         goto out_free_rbpl_virt;
     }

-    he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-                                            CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
-                                            &he_dev->rbpl_phys, GFP_KERNEL);
+    he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+                                           CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+                                           &he_dev->rbpl_phys, GFP_KERNEL);
     if (he_dev->rbpl_base == NULL) {
         hprintk("failed to alloc rbpl_base\n");
         goto out_destroy_rbpl_pool;

@@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group)

     /* rx buffer ready queue */

-    he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-                                            CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-                                            &he_dev->rbrq_phys, GFP_KERNEL);
+    he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+                                           CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+                                           &he_dev->rbrq_phys, GFP_KERNEL);
     if (he_dev->rbrq_base == NULL) {
         hprintk("failed to allocate rbrq\n");
         goto out_free_rbpl;

@@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group)

     /* tx buffer ready queue */

-    he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-                                            CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-                                            &he_dev->tbrq_phys, GFP_KERNEL);
+    he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+                                           CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+                                           &he_dev->tbrq_phys, GFP_KERNEL);
     if (he_dev->tbrq_base == NULL) {
         hprintk("failed to allocate tbrq\n");
         goto out_free_rbpq_base;

@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev)
     /* 2.9.3.5  tail offset for each interrupt queue is located after the
                 end of the interrupt queue */

-    he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-                                           (CONFIG_IRQ_SIZE + 1)
-                                           * sizeof(struct he_irq),
-                                           &he_dev->irq_phys,
-                                           GFP_KERNEL);
+    he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+                                          (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
+                                          &he_dev->irq_phys, GFP_KERNEL);
     if (he_dev->irq_base == NULL) {
         hprintk("failed to allocate irq\n");
         return -ENOMEM;

@@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev)

     /* host status page */

-    he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-                                      sizeof(struct he_hsp),
-                                      &he_dev->hsp_phys, GFP_KERNEL);
+    he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
+                                     sizeof(struct he_hsp),
+                                     &he_dev->hsp_phys, GFP_KERNEL);
     if (he_dev->hsp == NULL) {
         hprintk("failed to allocate host status page\n");
         return -ENOMEM;

@@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
     scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
     if (!scq)
         return NULL;
-    scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
-                                    &scq->paddr, GFP_KERNEL);
+    scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+                                   &scq->paddr, GFP_KERNEL);
     if (scq->base == NULL) {
         kfree(scq);
         return NULL;

@@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
 {
     struct rsq_entry *rsqe;

-    card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
-                                         &card->rsq.paddr, GFP_KERNEL);
+    card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
+                                        &card->rsq.paddr, GFP_KERNEL);
     if (card->rsq.base == NULL) {
         printk("%s: can't allocate RSQ.\n", card->name);
         return -1;

@@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev)
     writel(0, SAR_REG_GP);

     /* Initialize RAW Cell Handle Register  */
-    card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
-                                             2 * sizeof(u32),
-                                             &card->raw_cell_paddr,
-                                             GFP_KERNEL);
+    card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
+                                            2 * sizeof(u32),
+                                            &card->raw_cell_paddr,
+                                            GFP_KERNEL);
     if (!card->raw_cell_hnd) {
         printk("%s: memory allocation failure.\n", card->name);
         deinit_card(card);

@@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
         "comp pci_alloc, total bytes %zd entries %d\n",
         SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);

-    skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
-                                 &skdev->cq_dma_address, GFP_KERNEL);
+    skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+                                &skdev->cq_dma_address, GFP_KERNEL);

     if (skcomp == NULL) {
         rc = -ENOMEM;

@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
  */
 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
 {
-    dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
-                                   sizeof(struct ce_gd) * PPC4XX_NUM_GD,
-                                   &dev->gdr_pa, GFP_ATOMIC);
+    dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+                                  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+                                  &dev->gdr_pa, GFP_ATOMIC);
     if (!dev->gdr)
         return -ENOMEM;

@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
     mcode->num_cores = is_ae ? 6 : 10;

     /*  Allocate DMAable space */
-    mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
-                                      &mcode->phys_base, GFP_KERNEL);
+    mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
+                                     &mcode->phys_base, GFP_KERNEL);
     if (!mcode->code) {
         dev_err(dev, "Unable to allocate space for microcode");
         ret = -ENOMEM;

@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,

         c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
             rem_q_size;
-        curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
-                              c_size + CPT_NEXT_CHUNK_PTR_SIZE,
-                              &curr->dma_addr, GFP_KERNEL);
+        curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
+                                              c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+                                              &curr->dma_addr,
+                                              GFP_KERNEL);
         if (!curr->head) {
             dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
                 i, queue->nchunks);

@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
     struct nitrox_device *ndev = cmdq->ndev;

     cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
-    cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
-                                             &cmdq->unalign_dma,
-                                             GFP_KERNEL);
+    cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
+                                            &cmdq->unalign_dma,
+                                            GFP_KERNEL);
     if (!cmdq->unalign_base)
         return -ENOMEM;

@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp)
         /* Page alignment satisfies our needs for N <= 128 */
         BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
         cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
-        cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
-                                           &cmd_q->qbase_dma,
-                                           GFP_KERNEL);
+        cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
+                                          &cmd_q->qbase_dma,
+                                          GFP_KERNEL);
         if (!cmd_q->qbase) {
             dev_err(dev, "unable to allocate command queue\n");
             ret = -ENOMEM;

@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
         memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
     } else {
         /* new key */
-        ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
-                                       &ctx->pkey, GFP_KERNEL);
+        ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+                                      &ctx->pkey, GFP_KERNEL);
         if (!ctx->key) {
             mutex_unlock(&ctx->lock);
             return -ENOMEM;

@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
     struct sec_queue_ring_db *ring_db = &queue->ring_db;
     int ret;

-    ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
-                                          &ring_cmd->paddr,
-                                          GFP_KERNEL);
+    ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
+                                         &ring_cmd->paddr, GFP_KERNEL);
     if (!ring_cmd->vaddr)
         return -ENOMEM;

@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
     mutex_init(&ring_cmd->lock);
     ring_cmd->callback = sec_alg_callback;

-    ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
-                                         &ring_cq->paddr,
-                                         GFP_KERNEL);
+    ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
+                                        &ring_cq->paddr, GFP_KERNEL);
     if (!ring_cq->vaddr) {
         ret = -ENOMEM;
         goto err_free_ring_cmd;
     }

-    ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
-                                         &ring_db->paddr,
-                                         GFP_KERNEL);
+    ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
+                                        &ring_db->paddr, GFP_KERNEL);
     if (!ring_db->vaddr) {
         ret = -ENOMEM;
         goto err_free_ring_cq;

@@ -260,9 +260,9 @@ static int setup_crypt_desc(void)
 {
     struct device *dev = &pdev->dev;
     BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
-    crypt_virt = dma_zalloc_coherent(dev,
-                                     NPE_QLEN * sizeof(struct crypt_ctl),
-                                     &crypt_phys, GFP_ATOMIC);
+    crypt_virt = dma_alloc_coherent(dev,
+                                    NPE_QLEN * sizeof(struct crypt_ctl),
+                                    &crypt_phys, GFP_ATOMIC);
     if (!crypt_virt)
         return -ENOMEM;
     return 0;

@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
         if (!ring[i])
             goto err_cleanup;

-        ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
-                                                MTK_DESC_RING_SZ,
-                                                &ring[i]->cmd_dma,
-                                                GFP_KERNEL);
+        ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
+                                               MTK_DESC_RING_SZ,
+                                               &ring[i]->cmd_dma,
+                                               GFP_KERNEL);
         if (!ring[i]->cmd_base)
             goto err_cleanup;

-        ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
-                                                MTK_DESC_RING_SZ,
-                                                &ring[i]->res_dma,
-                                                GFP_KERNEL);
+        ring[i]->res_base = dma_alloc_coherent(cryp->dev,
+                                               MTK_DESC_RING_SZ,
+                                               &ring[i]->res_dma,
+                                               GFP_KERNEL);
         if (!ring[i]->res_base)
             goto err_cleanup;

@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
                  dev_to_node(&GET_DEV(accel_dev)));
     if (!admin)
         return -ENOMEM;
-    admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                           &admin->phy_addr, GFP_KERNEL);
+    admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                          &admin->phy_addr, GFP_KERNEL);
     if (!admin->virt_addr) {
         dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
         kfree(admin);
         return -ENOMEM;
     }

-    admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
-                                               PAGE_SIZE,
-                                               &admin->const_tbl_addr,
-                                               GFP_KERNEL);
+    admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+                                              PAGE_SIZE,
+                                              &admin->const_tbl_addr,
+                                              GFP_KERNEL);
     if (!admin->virt_tbl_addr) {
         dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
         dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,

@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,

     dev = &GET_DEV(inst->accel_dev);
     ctx->inst = inst;
-    ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
-                                      &ctx->enc_cd_paddr,
-                                      GFP_ATOMIC);
+    ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+                                     &ctx->enc_cd_paddr,
+                                     GFP_ATOMIC);
     if (!ctx->enc_cd) {
         return -ENOMEM;
     }
-    ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
-                                      &ctx->dec_cd_paddr,
-                                      GFP_ATOMIC);
+    ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+                                     &ctx->dec_cd_paddr,
+                                     GFP_ATOMIC);
     if (!ctx->dec_cd) {
         goto out_free_enc;
     }

@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,

     dev = &GET_DEV(inst->accel_dev);
     ctx->inst = inst;
-    ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
-                                      &ctx->enc_cd_paddr,
-                                      GFP_ATOMIC);
+    ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+                                     &ctx->enc_cd_paddr,
+                                     GFP_ATOMIC);
     if (!ctx->enc_cd) {
         spin_unlock(&ctx->lock);
         return -ENOMEM;
     }
-    ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
-                                      &ctx->dec_cd_paddr,
-                                      GFP_ATOMIC);
+    ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+                                     &ctx->dec_cd_paddr,
+                                     GFP_ATOMIC);
     if (!ctx->dec_cd) {
         spin_unlock(&ctx->lock);
         goto out_free_enc;

@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
     } else {
         int shift = ctx->p_size - req->src_len;

-        qat_req->src_align = dma_zalloc_coherent(dev,
-                                                 ctx->p_size,
-                                                 &qat_req->in.dh.in.b,
-                                                 GFP_KERNEL);
+        qat_req->src_align = dma_alloc_coherent(dev,
+                                                ctx->p_size,
+                                                &qat_req->in.dh.in.b,
+                                                GFP_KERNEL);
         if (unlikely(!qat_req->src_align))
             return ret;

@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
             goto unmap_src;

     } else {
-        qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
-                                                 &qat_req->out.dh.r,
-                                                 GFP_KERNEL);
+        qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
+                                                &qat_req->out.dh.r,
+                                                GFP_KERNEL);
         if (unlikely(!qat_req->dst_align))
             goto unmap_src;
     }

@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
         return -EINVAL;

     ctx->p_size = params->p_size;
-    ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+    ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
     if (!ctx->p)
         return -ENOMEM;
     memcpy(ctx->p, params->p, ctx->p_size);

@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
         return 0;
     }

-    ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+    ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
     if (!ctx->g)
         return -ENOMEM;
     memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,

@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
     if (ret < 0)
         goto err_clear_ctx;

-    ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
-                                  GFP_KERNEL);
+    ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+                                 GFP_KERNEL);
     if (!ctx->xa) {
         ret = -ENOMEM;
         goto err_clear_ctx;

@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
     } else {
         int shift = ctx->key_sz - req->src_len;

-        qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
-                                                 &qat_req->in.rsa.enc.m,
-                                                 GFP_KERNEL);
+        qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+                                                &qat_req->in.rsa.enc.m,
+                                                GFP_KERNEL);
         if (unlikely(!qat_req->src_align))
             return ret;

@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
             goto unmap_src;

     } else {
-        qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
-                                                 &qat_req->out.rsa.enc.c,
-                                                 GFP_KERNEL);
+        qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+                                                &qat_req->out.rsa.enc.c,
+                                                GFP_KERNEL);
         if (unlikely(!qat_req->dst_align))
             goto unmap_src;

@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
     } else {
         int shift = ctx->key_sz - req->src_len;

-        qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
-                                                 &qat_req->in.rsa.dec.c,
-                                                 GFP_KERNEL);
+        qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
+                                                &qat_req->in.rsa.dec.c,
+                                                GFP_KERNEL);
         if (unlikely(!qat_req->src_align))
             return ret;

@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
             goto unmap_src;

     } else {
-        qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
-                                                 &qat_req->out.rsa.dec.m,
-                                                 GFP_KERNEL);
+        qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
+                                                &qat_req->out.rsa.dec.m,
+                                                GFP_KERNEL);
         if (unlikely(!qat_req->dst_align))
             goto unmap_src;

@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
         goto err;

     ret = -ENOMEM;
-    ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+    ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
     if (!ctx->n)
         goto err;

@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
         return -EINVAL;
     }

-    ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+    ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
     if (!ctx->e)
         return -ENOMEM;

@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
         goto err;

     ret = -ENOMEM;
-    ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+    ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
     if (!ctx->d)
         goto err;

@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
     qat_rsa_drop_leading_zeros(&ptr, &len);
     if (!len)
         goto err;
-    ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+    ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
     if (!ctx->p)
         goto err;
     memcpy(ctx->p + (half_key_sz - len), ptr, len);

@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
     qat_rsa_drop_leading_zeros(&ptr, &len);
     if (!len)
         goto free_p;
-    ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+    ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
     if (!ctx->q)
         goto free_p;
     memcpy(ctx->q + (half_key_sz - len), ptr, len);

@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
     qat_rsa_drop_leading_zeros(&ptr, &len);
     if (!len)
         goto free_q;
-    ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
-                                  GFP_KERNEL);
+    ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+                                 GFP_KERNEL);
     if (!ctx->dp)
         goto free_q;
     memcpy(ctx->dp + (half_key_sz - len), ptr, len);

@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
     qat_rsa_drop_leading_zeros(&ptr, &len);
     if (!len)
         goto free_dp;
-    ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
-                                  GFP_KERNEL);
+    ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+                                 GFP_KERNEL);
     if (!ctx->dq)
         goto free_dp;
     memcpy(ctx->dq + (half_key_sz - len), ptr, len);

@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
     qat_rsa_drop_leading_zeros(&ptr, &len);
     if (!len)
         goto free_dq;
-    ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
-                                    GFP_KERNEL);
+    ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+                                   GFP_KERNEL);
     if (!ctx->qinv)
         goto free_dq;
     memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
 {
     int ret = -EBUSY;

-    sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
-                                    GFP_NOWAIT);
+    sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+                                   GFP_NOWAIT);
     if (!sdma->bd0) {
         ret = -ENOMEM;
         goto out;

@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
     u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
     int ret = 0;

-    desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
-                                   GFP_NOWAIT);
+    desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
+                                  GFP_NOWAIT);
     if (!desc->bd) {
         ret = -ENOMEM;
         goto out;

@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
      * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
      */
     pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
-    ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
-                                    &ring->tphys, GFP_NOWAIT);
+    ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+                                   &ring->tphys, GFP_NOWAIT);
     if (!ring->txd)
         return -ENOMEM;

@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
     struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
     int ret;

-    mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
-                                        CCW_BLOCK_SIZE,
-                                        &mxs_chan->ccw_phys, GFP_KERNEL);
+    mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
+                                       CCW_BLOCK_SIZE,
+                                       &mxs_chan->ccw_phys, GFP_KERNEL);
     if (!mxs_chan->ccw) {
         ret = -ENOMEM;
         goto err_alloc;

@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
     ring->size = ret;

     /* Allocate memory for DMA ring descriptor */
-    ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
-                                           &ring->desc_paddr, GFP_KERNEL);
+    ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
+                                          &ring->desc_paddr, GFP_KERNEL);
     if (!ring->desc_vaddr) {
         chan_err(chan, "Failed to allocate ring desc\n");
         return -ENOMEM;

@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
      */
     if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
         /* Allocate the buffer descriptors. */
-        chan->seg_v = dma_zalloc_coherent(chan->dev,
-                                          sizeof(*chan->seg_v) *
-                                          XILINX_DMA_NUM_DESCS,
-                                          &chan->seg_p, GFP_KERNEL);
+        chan->seg_v = dma_alloc_coherent(chan->dev,
+                                         sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
+                                         &chan->seg_p, GFP_KERNEL);
         if (!chan->seg_v) {
             dev_err(chan->dev,
                 "unable to allocate channel %d descriptors\n",

@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
          * so allocating a desc segment during channel allocation for
          * programming tail descriptor.
          */
-        chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
-                                sizeof(*chan->cyclic_seg_v),
-                                &chan->cyclic_seg_p, GFP_KERNEL);
+        chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
+                                                sizeof(*chan->cyclic_seg_v),
+                                                &chan->cyclic_seg_p,
+                                                GFP_KERNEL);
         if (!chan->cyclic_seg_v) {
             dev_err(chan->dev,
                 "unable to allocate desc segment for cyclic DMA\n");

@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
         list_add_tail(&desc->node, &chan->free_list);
     }

-    chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
-                            (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
-                            &chan->desc_pool_p, GFP_KERNEL);
+    chan->desc_pool_v = dma_alloc_coherent(chan->dev,
+                                           (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+                                           &chan->desc_pool_p, GFP_KERNEL);
     if (!chan->desc_pool_v)
         return -ENOMEM;

@@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
         return NULL;

     dmah->size = size;
-    dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
-                                      GFP_KERNEL | __GFP_COMP);
+    dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+                                     &dmah->busaddr,
+                                     GFP_KERNEL | __GFP_COMP);

     if (dmah->vaddr == NULL) {
         kfree(dmah);

@@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
         return NULL;

     sbuf->size = size;
-    sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
-                                   &sbuf->dma_addr, GFP_ATOMIC);
+    sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
+                                  &sbuf->dma_addr, GFP_ATOMIC);
     if (!sbuf->sb)
         goto bail;

@@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,

     if (!sghead) {
         for (i = 0; i < pages; i++) {
-            pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
-                                                 pbl->pg_size,
-                                                 &pbl->pg_map_arr[i],
-                                                 GFP_KERNEL);
+            pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+                                                pbl->pg_size,
+                                                &pbl->pg_map_arr[i],
+                                                GFP_KERNEL);
             if (!pbl->pg_arr[i])
                 goto fail;
             pbl->pg_count++;

@@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
     if (!wq->sq)
         goto err3;

-    wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev),
-                                    depth * sizeof(union t3_wr),
-                                    &(wq->dma_addr), GFP_KERNEL);
+    wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
+                                   depth * sizeof(union t3_wr),
+                                   &(wq->dma_addr), GFP_KERNEL);
     if (!wq->queue)
         goto err4;

@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
     wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
         T4_RQT_ENTRY_SHIFT;

-    wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev,
-                                    wq->memsize, &wq->dma_addr,
-                                    GFP_KERNEL);
+    wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
+                                   &wq->dma_addr, GFP_KERNEL);
     if (!wq->queue)
         goto err_free_rqtpool;

@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
             goto done;

     /* allocate dummy tail memory for all receive contexts */
-    dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
-        &dd->pcidev->dev, sizeof(u64),
-        &dd->rcvhdrtail_dummy_dma,
-        GFP_KERNEL);
+    dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+                                                     sizeof(u64),
+                                                     &dd->rcvhdrtail_dummy_dma,
+                                                     GFP_KERNEL);

     if (!dd->rcvhdrtail_dummy_kvaddr) {
         dd_dev_err(dd, "cannot allocate dummy tail memory\n");

@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
             gfp_flags = GFP_KERNEL;
         else
             gfp_flags = GFP_USER;
-        rcd->rcvhdrq = dma_zalloc_coherent(
-            &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
-            gfp_flags | __GFP_COMP);
+        rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+                                          &rcd->rcvhdrq_dma,
+                                          gfp_flags | __GFP_COMP);

         if (!rcd->rcvhdrq) {
             dd_dev_err(dd,

@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)

         if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
             HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
-            rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
-                &dd->pcidev->dev, PAGE_SIZE,
-                &rcd->rcvhdrqtailaddr_dma, gfp_flags);
+            rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+                                                        PAGE_SIZE,
+                                                        &rcd->rcvhdrqtailaddr_dma,
+                                                        gfp_flags);
             if (!rcd->rcvhdrtail_kvaddr)
                 goto bail_free;
         }

@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
     while (alloced_bytes < rcd->egrbufs.size &&
            rcd->egrbufs.alloced < rcd->egrbufs.count) {
         rcd->egrbufs.buffers[idx].addr =
-            dma_zalloc_coherent(&dd->pcidev->dev,
-                                rcd->egrbufs.rcvtid_size,
-                                &rcd->egrbufs.buffers[idx].dma,
-                                gfp_flags);
+            dma_alloc_coherent(&dd->pcidev->dev,
+                               rcd->egrbufs.rcvtid_size,
+                               &rcd->egrbufs.buffers[idx].dma,
+                               gfp_flags);
         if (rcd->egrbufs.buffers[idx].addr) {
             rcd->egrbufs.buffers[idx].len =
                 rcd->egrbufs.rcvtid_size;

@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
         int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);

         set_dev_node(&dd->pcidev->dev, i);
-        dd->cr_base[i].va = dma_zalloc_coherent(
-                    &dd->pcidev->dev,
-                    bytes,
-                    &dd->cr_base[i].dma,
-                    GFP_KERNEL);
+        dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+                                               bytes,
+                                               &dd->cr_base[i].dma,
+                                               GFP_KERNEL);
         if (!dd->cr_base[i].va) {
             set_dev_node(&dd->pcidev->dev, dd->node);
             dd_dev_err(dd,

@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
         timer_setup(&sde->err_progress_check_timer,
                 sdma_err_progress_check, 0);

-        sde->descq = dma_zalloc_coherent(
-            &dd->pcidev->dev,
-            descq_cnt * sizeof(u64[2]),
-            &sde->descq_phys,
-            GFP_KERNEL
-        );
+        sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+                                        descq_cnt * sizeof(u64[2]),
+                                        &sde->descq_phys, GFP_KERNEL);
         if (!sde->descq)
             goto bail;
         sde->tx_ring =

@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)

     dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
     /* Allocate memory for DMA of head registers to memory */
-    dd->sdma_heads_dma = dma_zalloc_coherent(
-        &dd->pcidev->dev,
-        dd->sdma_heads_size,
-        &dd->sdma_heads_phys,
-        GFP_KERNEL
-    );
+    dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+                                            dd->sdma_heads_size,
+                                            &dd->sdma_heads_phys,
+                                            GFP_KERNEL);
     if (!dd->sdma_heads_dma) {
         dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
         goto bail;
     }

     /* Allocate memory for pad */
-    dd->sdma_pad_dma = dma_zalloc_coherent(
-        &dd->pcidev->dev,
-        sizeof(u32),
-        &dd->sdma_pad_phys,
-        GFP_KERNEL
-    );
+    dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+                                          &dd->sdma_pad_phys, GFP_KERNEL);
     if (!dd->sdma_pad_dma) {
         dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
         goto bail;

@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
         buf->npages = 1 << order;
         buf->page_shift = page_shift;
         /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
-        buf->direct.buf = dma_zalloc_coherent(dev,
-                                              size, &t, GFP_KERNEL);
+        buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+                                             GFP_KERNEL);
         if (!buf->direct.buf)
             return -ENOMEM;

@@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
             return -ENOMEM;

         for (i = 0; i < buf->nbufs; ++i) {
-            buf->page_list[i].buf = dma_zalloc_coherent(dev,
-                                                      page_size, &t,
-                                                      GFP_KERNEL);
+            buf->page_list[i].buf = dma_alloc_coherent(dev,
+                                                       page_size,
+                                                       &t,
+                                                       GFP_KERNEL);

             if (!buf->page_list[i].buf)
                 goto err_free;

@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
             eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
             size = (eq->entries - eqe_alloc) * eq->eqe_size;
         }
-        eq->buf[i] = dma_zalloc_coherent(dev, size,
+        eq->buf[i] = dma_alloc_coherent(dev, size,
                         &(eq->buf_dma[i]),
                         GFP_KERNEL);
         if (!eq->buf[i])

@@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
                     size = (eq->entries - eqe_alloc)
                            * eq->eqe_size;
                 }
-                eq->buf[idx] = dma_zalloc_coherent(dev, size,
-                                                   &(eq->buf_dma[idx]),
-                                                   GFP_KERNEL);
+                eq->buf[idx] = dma_alloc_coherent(dev, size,
+                                                  &(eq->buf_dma[idx]),
+                                                  GFP_KERNEL);
                 if (!eq->buf[idx])
                     goto err_dma_alloc_buf;

@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
         goto free_cmd_mbox;
     }

-    eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz,
+    eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
                         &(eq->buf_list->map),
                         GFP_KERNEL);
     if (!eq->buf_list->buf) {

@@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
     if (!mem)
         return I40IW_ERR_PARAM;
     mem->size = ALIGN(size, alignment);
-    mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
-                                  (dma_addr_t *)&mem->pa, GFP_KERNEL);
+    mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
+                                 (dma_addr_t *)&mem->pa, GFP_KERNEL);
     if (!mem->va)
         return I40IW_ERR_NO_MEMORY;
     return 0;

@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
     page = dev->db_tab->page + end;

 alloc:
-    page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
-                                       &page->mapping, GFP_KERNEL);
+    page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
+                                      MTHCA_ICM_PAGE_SIZE, &page->mapping,
+                                      GFP_KERNEL);
     if (!page->db_rec) {
         ret = -ENOMEM;
         goto out;

@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
     q->len = len;
     q->entry_size = entry_size;
     q->size = len * entry_size;
-    q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
-                                &q->dma, GFP_KERNEL);
+    q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+                               GFP_KERNEL);
     if (!q->va)
         return -ENOMEM;
     return 0;

@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
         return -ENOMEM;
     ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
             OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
-    cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+    cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
     if (!cq->va) {
         status = -ENOMEM;
         goto mem_err;

@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
     qp->sq.max_cnt = max_wqe_allocated;
     len = (hw_pages * hw_page_size);

-    qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+    qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
     if (!qp->sq.va)
         return -EINVAL;
     qp->sq.len = len;

@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
     qp->rq.max_cnt = max_rqe_allocated;
     len = (hw_pages * hw_page_size);

-    qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+    qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
     if (!qp->rq.va)
         return -ENOMEM;
     qp->rq.pa = pa;

@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
     if (dev->attr.ird == 0)
         return 0;

-    qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
-                                       GFP_KERNEL);
+    qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+                                      GFP_KERNEL);
     if (!qp->ird_q_va)
         return -ENOMEM;
     ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,

@@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
     mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
               sizeof(struct ocrdma_rdma_stats_resp));

-    mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
-                                  &mem->pa, GFP_KERNEL);
+    mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+                                 &mem->pa, GFP_KERNEL);
     if (!mem->va) {
         pr_err("%s: stats mbox allocation failed\n", __func__);
         return false;

@@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
     INIT_LIST_HEAD(&ctx->mm_head);
     mutex_init(&ctx->mm_list_lock);

-    ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
-                                         &ctx->ah_tbl.pa, GFP_KERNEL);
+    ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+                                        &ctx->ah_tbl.pa, GFP_KERNEL);
     if (!ctx->ah_tbl.va) {
         kfree(ctx);
         return ERR_PTR(-ENOMEM);

@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
         return -ENOMEM;

     for (i = 0; i < mr->num_pbls; i++) {
-        va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+        va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
         if (!va) {
             ocrdma_free_mr_pbl_tbl(dev, mr);
             status = -ENOMEM;

@@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
         return ERR_PTR(-ENOMEM);

     for (i = 0; i < pbl_info->num_pbls; i++) {
-        va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
-                                 &pa, flags);
+        va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
+                                flags);
         if (!va)
             goto err;

@@ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
     dev_info(&pdev->dev, "device version %d, driver version %d\n",
          dev->dsr_version, PVRDMA_VERSION);

-    dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
-                                   &dev->dsrbase, GFP_KERNEL);
+    dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+                                  &dev->dsrbase, GFP_KERNEL);
     if (!dev->dsr) {
         dev_err(&pdev->dev, "failed to allocate shared region\n");
         ret = -ENOMEM;

@@ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev)
         return -ENOMEM;
     ts->pdev = pdev;

-    ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
-                                         GFP_KERNEL);
+    ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
+                                        GFP_KERNEL);
     if (!ts->fw_regs_va) {
         dev_err(dev, "failed to dma_alloc_coherent\n");
         return -ENOMEM;

@@ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)

     spin_lock_init(&dom->pgtlock);

-    dom->pgt_va = dma_zalloc_coherent(data->dev,
-                    M2701_IOMMU_PGT_SIZE,
-                    &dom->pgt_pa, GFP_KERNEL);
+    dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
+                                     &dom->pgt_pa, GFP_KERNEL);
     if (!dom->pgt_va)
         return -ENOMEM;

@@ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
 {
     struct device *dev = &cio2->pci_dev->dev;

-    q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
-                                  GFP_KERNEL);
+    q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
+                                 GFP_KERNEL);
     if (!q->fbpt)
         return -ENOMEM;

@@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
     struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
     struct device *dev = &ctx->dev->plat_dev->dev;

-    mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+    mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
     if (!mem->va) {
         mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
                  size);

@@ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
     if (get_order(size) >= MAX_ORDER)
         return NULL;

-    return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
-                               GFP_KERNEL);
+    return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
+                              GFP_KERNEL);
 }

 void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,

@@ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host)
          * Use zalloc to zero the reserved high 32-bits of 128-bit
          * descriptors so that they never need to be written.
          */
-        buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
-                                  host->adma_table_sz, &dma, GFP_KERNEL);
+        buf = dma_alloc_coherent(mmc_dev(mmc),
+                                 host->align_buffer_sz + host->adma_table_sz,
+                                 &dma, GFP_KERNEL);
         if (!buf) {
             pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
                 mmc_hostname(mmc));

@@ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev)
     }

     /* Allocate TX descriptor ring in coherent memory */
-    greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
-                                            &greth->tx_bd_base_phys,
-                                            GFP_KERNEL);
+    greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+                                           &greth->tx_bd_base_phys,
+                                           GFP_KERNEL);
     if (!greth->tx_bd_base) {
         err = -ENOMEM;
         goto error3;
     }

     /* Allocate RX descriptor ring in coherent memory */
-    greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
-                                            &greth->rx_bd_base_phys,
-                                            GFP_KERNEL);
+    greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+                                           &greth->rx_bd_base_phys,
+                                           GFP_KERNEL);
     if (!greth->rx_bd_base) {
         err = -ENOMEM;
         goto error4;

@@ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev)
     size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;

     for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
-        descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr,
-                                    GFP_KERNEL);
+        descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
+                                   GFP_KERNEL);
         if (!descs) {
             netdev_err(sdev->netdev,
                    "failed to allocate status descriptors\n");

@@ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev)
     struct slic_shmem_data *sm_data;
     dma_addr_t paddr;

-    sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
-                                  &paddr, GFP_KERNEL);
+    sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
+                                 &paddr, GFP_KERNEL);
     if (!sm_data) {
         dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
         return -ENOMEM;

@@ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev)
     int err = 0;
     u8 *mac[2];

-    eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
-                                 &paddr, GFP_KERNEL);
+    eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
+                                &paddr, GFP_KERNEL);
     if (!eeprom)
         return -ENOMEM;

@@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
     struct ena_com_admin_sq *sq = &queue->sq;
     u16 size = ADMIN_SQ_SIZE(queue->q_depth);

-    sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
-                                      GFP_KERNEL);
+    sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+                                     GFP_KERNEL);

     if (!sq->entries) {
         pr_err("memory allocation failed");

@@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
     struct ena_com_admin_cq *cq = &queue->cq;
    u16 size = ADMIN_CQ_SIZE(queue->q_depth);

-    cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
-                                      GFP_KERNEL);
+    cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+                                     GFP_KERNEL);

     if (!cq->entries) {
         pr_err("memory allocation failed");

@@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,

     dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
     size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
-    aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
-                                        GFP_KERNEL);
+    aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+                                       GFP_KERNEL);

     if (!aenq->entries) {
         pr_err("memory allocation failed");

@@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
     dev_node = dev_to_node(ena_dev->dmadev);
     set_dev_node(ena_dev->dmadev, ctx->numa_node);
     io_sq->desc_addr.virt_addr =
-        dma_zalloc_coherent(ena_dev->dmadev, size,
-                            &io_sq->desc_addr.phys_addr,
-                            GFP_KERNEL);
+        dma_alloc_coherent(ena_dev->dmadev, size,
+                           &io_sq->desc_addr.phys_addr,
+                           GFP_KERNEL);
     set_dev_node(ena_dev->dmadev, dev_node);
     if (!io_sq->desc_addr.virt_addr) {
         io_sq->desc_addr.virt_addr =
-            dma_zalloc_coherent(ena_dev->dmadev, size,
-                                &io_sq->desc_addr.phys_addr,
-                                GFP_KERNEL);
+            dma_alloc_coherent(ena_dev->dmadev, size,
+                               &io_sq->desc_addr.phys_addr,
+                               GFP_KERNEL);
     }

     if (!io_sq->desc_addr.virt_addr) {

@@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
     prev_node = dev_to_node(ena_dev->dmadev);
     set_dev_node(ena_dev->dmadev, ctx->numa_node);
     io_cq->cdesc_addr.virt_addr =
-        dma_zalloc_coherent(ena_dev->dmadev, size,
-                            &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+        dma_alloc_coherent(ena_dev->dmadev, size,
+                           &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
     set_dev_node(ena_dev->dmadev, prev_node);
     if (!io_cq->cdesc_addr.virt_addr) {
         io_cq->cdesc_addr.virt_addr =
-            dma_zalloc_coherent(ena_dev->dmadev, size,
-                                &io_cq->cdesc_addr.phys_addr,
-                                GFP_KERNEL);
+            dma_alloc_coherent(ena_dev->dmadev, size,
+                               &io_cq->cdesc_addr.phys_addr,
+                               GFP_KERNEL);
     }

     if (!io_cq->cdesc_addr.virt_addr) {

@@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
     struct ena_rss *rss = &ena_dev->rss;

     rss->hash_key =
-        dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
-                            &rss->hash_key_dma_addr, GFP_KERNEL);
+        dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+                           &rss->hash_key_dma_addr, GFP_KERNEL);

     if (unlikely(!rss->hash_key))
         return -ENOMEM;

@@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
     struct ena_rss *rss = &ena_dev->rss;

     rss->hash_ctrl =
-        dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
-                            &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+        dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+                           &rss->hash_ctrl_dma_addr, GFP_KERNEL);

     if (unlikely(!rss->hash_ctrl))
         return -ENOMEM;

@@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
         sizeof(struct ena_admin_rss_ind_table_entry);

     rss->rss_ind_tbl =
-        dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
-                            &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+        dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+                           &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
     if (unlikely(!rss->rss_ind_tbl))
         goto mem_err1;

@@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)

     spin_lock_init(&mmio_read->lock);
     mmio_read->read_resp =
-        dma_zalloc_coherent(ena_dev->dmadev,
-                            sizeof(*mmio_read->read_resp),
-                            &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+        dma_alloc_coherent(ena_dev->dmadev,
+                           sizeof(*mmio_read->read_resp),
+                           &mmio_read->read_resp_dma_addr, GFP_KERNEL);
     if (unlikely(!mmio_read->read_resp))
         goto err;

@@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
     struct ena_host_attribute *host_attr = &ena_dev->host_attr;

     host_attr->host_info =
-        dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
-                            &host_attr->host_info_dma_addr, GFP_KERNEL);
+        dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+                           &host_attr->host_info_dma_addr, GFP_KERNEL);
     if (unlikely(!host_attr->host_info))
         return -ENOMEM;

@@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
     struct ena_host_attribute *host_attr = &ena_dev->host_attr;

     host_attr->debug_area_virt_addr =
-        dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
-                            &host_attr->debug_area_dma_addr, GFP_KERNEL);
+        dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
+                           &host_attr->debug_area_dma_addr,
+                           GFP_KERNEL);
     if (unlikely(!host_attr->debug_area_virt_addr)) {
         host_attr->debug_area_size = 0;
         return -ENOMEM;

@@ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
     }

     /* Packet buffers should be 64B aligned */
-    pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
-                                  GFP_ATOMIC);
+    pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+                                 GFP_ATOMIC);
     if (unlikely(!pkt_buf)) {
         dev_kfree_skb_any(skb);
         return NETDEV_TX_OK;

@@ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
     ring->ndev = ndev;

     size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
-    ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
-                                          GFP_KERNEL);
+    ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
+                                         GFP_KERNEL);
     if (!ring->desc_addr)
         goto err;

@@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx)
                 alx->num_txq +
                 sizeof(struct alx_rrd) * alx->rx_ringsz +
                 sizeof(struct alx_rfd) * alx->rx_ringsz;
-    alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
-                                            alx->descmem.size,
-                                            &alx->descmem.dma,
-                                            GFP_KERNEL);
+    alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
+                                           alx->descmem.size,
+                                           &alx->descmem.dma, GFP_KERNEL);
     if (!alx->descmem.virt)
         return -ENOMEM;

@@ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
         sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
         8 * 4;

-    ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
-                                            &ring_header->dma, GFP_KERNEL);
+    ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
+                                           &ring_header->dma, GFP_KERNEL);
     if (unlikely(!ring_header->desc)) {
         dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
         goto err_nomem;

@@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev)

     /* allocate rx dma ring */
     size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
-    p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+    p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
     if (!p) {
         ret = -ENOMEM;
         goto out_freeirq_tx;

@@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev)

     /* allocate tx dma ring */
     size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
-    p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+    p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
     if (!p) {
         ret = -ENOMEM;
         goto out_free_rx_ring;

@@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev)

     /* allocate rx dma ring */
     size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
-    p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+    p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
     if (!p) {
         dev_err(kdev, "cannot allocate rx ring %u\n", size);
         ret = -ENOMEM;

@@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev)

     /* allocate tx dma ring */
     size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
-    p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+    p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
     if (!p) {
         dev_err(kdev, "cannot allocate tx ring\n");
         ret = -ENOMEM;

@@ -1506,8 +1506,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
     /* We just need one DMA descriptor which is DMA-able, since writing to
      * the port will allocate a new descriptor in its internal linked-list
      */
-    p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
-                            GFP_KERNEL);
+    p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+                           GFP_KERNEL);
     if (!p) {
         netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
         return -ENOMEM;

@@ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)

         /* Alloc ring of descriptors */
         size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
-        ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
-                                             &ring->dma_base,
-                                             GFP_KERNEL);
+        ring->cpu_base = dma_alloc_coherent(dma_dev, size,
+                                            &ring->dma_base,
+                                            GFP_KERNEL);
         if (!ring->cpu_base) {
             dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
                 ring->mmio_base);

@@ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)

         /* Alloc ring of descriptors */
         size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
-        ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
-                                             &ring->dma_base,
-                                             GFP_KERNEL);
+        ring->cpu_base = dma_alloc_coherent(dma_dev, size,
+                                            &ring->dma_base,
+                                            GFP_KERNEL);
         if (!ring->cpu_base) {
             dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
                 ring->mmio_base);

@@ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev)
                BNX2_SBLK_MSIX_ALIGN_SIZE);
     bp->status_stats_size = status_blk_size +
                 sizeof(struct statistics_block);
-    status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
-                                     &bp->status_blk_mapping, GFP_KERNEL);
&bp->status_blk_mapping, GFP_KERNEL);
|
||||
status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
|
||||
&bp->status_blk_mapping, GFP_KERNEL);
|
||||
if (!status_blk)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@@ -2081,7 +2081,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 			    bool is_pf);
 
 #define BNX2X_ILT_ZALLOC(x, y, size)				\
-	x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
+	x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
 
 #define BNX2X_ILT_FREE(x, y, size)				\
 	do {							\
@@ -52,7 +52,7 @@ extern int bnx2x_num_queues;
 
 #define BNX2X_PCI_ALLOC(y, size)					\
 ({									\
-	void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+	void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 	if (x)								\
 		DP(NETIF_MSG_HW,					\
 		   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n",	\
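The BNX2X_PCI_ALLOC hunk renames the call inside a GCC statement-expression macro, where the last expression becomes the macro's value. A userspace analogy of that construct (illustrative only; calloc stands in for the zeroing allocator, and all names are invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* A GCC/clang statement expression evaluates to its final
     * expression, so the pointer can be logged and still returned. */
    #define PCI_ALLOC_SKETCH(size)                                  \
    ({                                                              \
            void *__p = calloc(1, (size));                          \
            if (__p)                                                \
                    fprintf(stderr, "alloc: virtual %p\n", __p);    \
            __p;                                                    \
    })

    int main(void)
    {
            void *buf = PCI_ALLOC_SKETCH(4096);

            free(buf);
            return 0;
    }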
@@ -3449,10 +3449,10 @@ alloc_ext_stats:
 		goto alloc_tx_ext_stats;
 
 	bp->hw_rx_port_stats_ext =
-		dma_zalloc_coherent(&pdev->dev,
-				    sizeof(struct rx_port_stats_ext),
-				    &bp->hw_rx_port_stats_ext_map,
-				    GFP_KERNEL);
+		dma_alloc_coherent(&pdev->dev,
+				   sizeof(struct rx_port_stats_ext),
+				   &bp->hw_rx_port_stats_ext_map,
+				   GFP_KERNEL);
 	if (!bp->hw_rx_port_stats_ext)
 		return 0;
 
@@ -3462,10 +3462,10 @@ alloc_tx_ext_stats:
 
 	if (bp->hwrm_spec_code >= 0x10902) {
 		bp->hw_tx_port_stats_ext =
-			dma_zalloc_coherent(&pdev->dev,
-					    sizeof(struct tx_port_stats_ext),
-					    &bp->hw_tx_port_stats_ext_map,
-					    GFP_KERNEL);
+			dma_alloc_coherent(&pdev->dev,
+					   sizeof(struct tx_port_stats_ext),
+					   &bp->hw_tx_port_stats_ext_map,
+					   GFP_KERNEL);
 	}
 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
 }
@@ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
 
 	n = IEEE_8021QAZ_MAX_TCS;
 	data_len = sizeof(*data) + sizeof(*fw_app) * n;
-	data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
-				   GFP_KERNEL);
+	data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
+				  GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 		return -EFAULT;
 	}
 
-	data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
-					&data_dma_addr, GFP_KERNEL);
+	data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
+				       &data_dma_addr, GFP_KERNEL);
 	if (!data_addr)
 		return -ENOMEM;
 
@@ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
 		if (!i && tg3_flag(tp, ENABLE_RSS))
 			continue;
 
-		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
-						    TG3_RX_RCB_RING_BYTES(tp),
-						    &tnapi->rx_rcb_mapping,
-						    GFP_KERNEL);
+		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+						   TG3_RX_RCB_RING_BYTES(tp),
+						   &tnapi->rx_rcb_mapping,
+						   GFP_KERNEL);
 		if (!tnapi->rx_rcb)
 			goto err_out;
 	}
@@ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 {
 	int i;
 
-	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
-					   sizeof(struct tg3_hw_stats),
-					   &tp->stats_mapping, GFP_KERNEL);
+	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+					  sizeof(struct tg3_hw_stats),
+					  &tp->stats_mapping, GFP_KERNEL);
 	if (!tp->hw_stats)
 		goto err_out;
 
@@ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 		struct tg3_napi *tnapi = &tp->napi[i];
 		struct tg3_hw_status *sblk;
 
-		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
-						       TG3_HW_STATUS_SIZE,
-						       &tnapi->status_mapping,
-						       GFP_KERNEL);
+		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+						      TG3_HW_STATUS_SIZE,
+						      &tnapi->status_mapping,
+						      GFP_KERNEL);
 		if (!tnapi->hw_status)
 			goto err_out;
 
@@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
 	dmem->q_len = q_len;
 	dmem->size = (desc_size * q_len) + align_bytes;
 	/* Save address, need it while freeing */
-	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+	dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
 						&dmem->dma, GFP_KERNEL);
 	if (!dmem->unalign_base)
 		return -ENOMEM;
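The nicvf hunk keeps dmem->unalign_base because the allocation is padded by align_bytes and a rounded-up pointer is what the hardware sees; the original base is still needed for the eventual free. A userspace sketch of that over-allocate-then-align pattern (hypothetical names, power-of-two alignment assumed):

    #include <stdint.h>
    #include <stdlib.h>

    struct q_desc_mem_sketch {
            void *unalign_base;     /* pointer to pass to free() later */
            void *base;             /* aligned pointer used for the ring */
    };

    static int alloc_aligned_sketch(struct q_desc_mem_sketch *m,
                                    size_t len, size_t align)
    {
            /* Over-allocate so an aligned run of 'len' bytes always fits. */
            m->unalign_base = calloc(1, len + align);
            if (!m->unalign_base)
                    return -1;

            /* Round the address up to the next 'align' boundary. */
            m->base = (void *)(((uintptr_t)m->unalign_base + align - 1) &
                               ~((uintptr_t)align - 1));
            return 0;
    }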
@@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
 {
 	size_t len = nelem * elem_size;
 	void *s = NULL;
-	void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
 
 	if (!p)
 		return NULL;
@@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
 {
 	size_t len = nelem * elem_size + stat_size;
 	void *s = NULL;
-	void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL);
+	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
 
 	if (!p)
 		return NULL;
@@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
 	 * Allocate the hardware ring and PCI DMA bus address space for said.
 	 */
 	size_t hwlen = nelem * hwsize + stat_size;
-	void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
 
 	if (!hwring)
 		return NULL;
@@ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
 	total_size = buf_len;
 
 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
-	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					     get_fat_cmd.size,
-					     &get_fat_cmd.dma, GFP_ATOMIC);
+	get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					    get_fat_cmd.size,
+					    &get_fat_cmd.dma, GFP_ATOMIC);
 	if (!get_fat_cmd.va)
 		return -ENOMEM;
 
@@ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
 		return -EINVAL;
 
 	cmd.size = sizeof(struct be_cmd_resp_port_type);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
 		return -ENOMEM;
@@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter,
 
 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
 				+ LANCER_FW_DOWNLOAD_CHUNK;
-	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
-					   &flash_cmd.dma, GFP_KERNEL);
+	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+					  GFP_KERNEL);
 	if (!flash_cmd.va)
 		return -ENOMEM;
 
@@ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
 	}
 
 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
-	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
-					   GFP_KERNEL);
+	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+					  GFP_KERNEL);
 	if (!flash_cmd.va)
 		return -ENOMEM;
 
@@ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 		goto err;
 	}
 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 
 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
-	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					     attribs_cmd.size,
-					     &attribs_cmd.dma, GFP_ATOMIC);
+	attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					    attribs_cmd.size,
+					    &attribs_cmd.dma, GFP_ATOMIC);
 	if (!attribs_cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
@@ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
-	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-						  get_mac_list_cmd.size,
-						  &get_mac_list_cmd.dma,
-						  GFP_ATOMIC);
+	get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+						 get_mac_list_cmd.size,
+						 &get_mac_list_cmd.dma,
+						 GFP_ATOMIC);
 
 	if (!get_mac_list_cmd.va) {
 		dev_err(&adapter->pdev->dev,
@@ -3829,8 +3829,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_KERNEL);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
@@ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					    extfat_cmd.size, &extfat_cmd.dma,
-					    GFP_ATOMIC);
+	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					   extfat_cmd.size, &extfat_cmd.dma,
+					   GFP_ATOMIC);
 	if (!extfat_cmd.va)
 		return -ENOMEM;
 
@@ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 
 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					    extfat_cmd.size, &extfat_cmd.dma,
-					    GFP_ATOMIC);
+	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					   extfat_cmd.size, &extfat_cmd.dma,
+					   GFP_ATOMIC);
 
 	if (!extfat_cmd.va) {
 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_ATOMIC);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_ATOMIC);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 	int status = 0;
 
 	read_cmd.size = LANCER_READ_FILE_CHUNK;
-	read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
-					  &read_cmd.dma, GFP_ATOMIC);
+	read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
+					 &read_cmd.dma, GFP_ATOMIC);
 
 	if (!read_cmd.va) {
 		dev_err(&adapter->pdev->dev,
@@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	}
 
 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+	cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
 	};
 
 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-	ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					    ddrdma_cmd.size, &ddrdma_cmd.dma,
-					    GFP_KERNEL);
+	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					   ddrdma_cmd.size, &ddrdma_cmd.dma,
+					   GFP_KERNEL);
 	if (!ddrdma_cmd.va)
 		return -ENOMEM;
 
@@ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev,
 
 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-	eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					    eeprom_cmd.size, &eeprom_cmd.dma,
-					    GFP_KERNEL);
+	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					   eeprom_cmd.size, &eeprom_cmd.dma,
+					   GFP_KERNEL);
 
 	if (!eeprom_cmd.va)
 		return -ENOMEM;
 
@@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
-				      GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+				     &mem->dma, GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 	return 0;
@@ -5766,9 +5766,9 @@ static int be_drv_init(struct be_adapter *adapter)
 	int status = 0;
 
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
-						 &mbox_mem_alloc->dma,
-						 GFP_KERNEL);
+	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
+						&mbox_mem_alloc->dma,
+						GFP_KERNEL);
 	if (!mbox_mem_alloc->va)
 		return -ENOMEM;
 
@@ -5777,8 +5777,8 @@ static int be_drv_init(struct be_adapter *adapter)
 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 
 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
-	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
-					    &rx_filter->dma, GFP_KERNEL);
+	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
+					   &rx_filter->dma, GFP_KERNEL);
 	if (!rx_filter->va) {
 		status = -ENOMEM;
 		goto free_mbox;
@@ -5792,8 +5792,8 @@ static int be_drv_init(struct be_adapter *adapter)
 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
 	else
 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
-	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
-					    &stats_cmd->dma, GFP_KERNEL);
+	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
+					   &stats_cmd->dma, GFP_KERNEL);
 	if (!stats_cmd->va) {
 		status = -ENOMEM;
 		goto free_rx_filter;
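Note how the benet hunks above mix GFP_ATOMIC and GFP_KERNEL: the flag mirrors the calling context rather than anything about the buffer itself. A kernel-style sketch of that choice (hypothetical helper, illustration only):

    #include <linux/dma-mapping.h>

    /* GFP_KERNEL may sleep; GFP_ATOMIC is for contexts that must not,
     * e.g. while holding a spinlock. */
    static void *ctx_dma_alloc(struct device *dev, size_t len,
                               dma_addr_t *handle, bool may_sleep)
    {
            return dma_alloc_coherent(dev, len, handle,
                                      may_sleep ? GFP_KERNEL : GFP_ATOMIC);
    }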
@@ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
 		return -ENOMEM;
 
 	/* Allocate descriptors */
-	priv->rxdes = dma_zalloc_coherent(priv->dev,
-					  MAX_RX_QUEUE_ENTRIES *
-					  sizeof(struct ftgmac100_rxdes),
-					  &priv->rxdes_dma, GFP_KERNEL);
+	priv->rxdes = dma_alloc_coherent(priv->dev,
+					 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
+					 &priv->rxdes_dma, GFP_KERNEL);
 	if (!priv->rxdes)
 		return -ENOMEM;
-	priv->txdes = dma_zalloc_coherent(priv->dev,
-					  MAX_TX_QUEUE_ENTRIES *
-					  sizeof(struct ftgmac100_txdes),
-					  &priv->txdes_dma, GFP_KERNEL);
+	priv->txdes = dma_alloc_coherent(priv->dev,
+					 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
+					 &priv->txdes_dma, GFP_KERNEL);
 	if (!priv->txdes)
 		return -ENOMEM;
 
@@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
 {
 	int i;
 
-	priv->descs = dma_zalloc_coherent(priv->dev,
-					  sizeof(struct ftmac100_descs),
-					  &priv->descs_dma_addr,
-					  GFP_KERNEL);
+	priv->descs = dma_alloc_coherent(priv->dev,
+					 sizeof(struct ftmac100_descs),
+					 &priv->descs_dma_addr, GFP_KERNEL);
 	if (!priv->descs)
 		return -ENOMEM;
 
@@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
 
 	for (i = 0; i < QUEUE_NUMS; i++) {
 		size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
-		virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
-						GFP_KERNEL);
+		virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+					       GFP_KERNEL);
 		if (virt_addr == NULL)
 			goto error_free_pool;
 
@@ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 {
 	int size = ring->desc_num * sizeof(ring->desc[0]);
 
-	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
-					 &ring->desc_dma_addr,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
+					&ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclge_desc);
 
-	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
-					 size, &ring->desc_dma_addr,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+					&ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclgevf_desc);
 
-	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
-					 size, &ring->desc_dma_addr,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+					&ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
 	u8 *cmd_vaddr;
 	int err = 0;
 
-	cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
-					&cmd_paddr, GFP_KERNEL);
+	cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
+				       &cmd_paddr, GFP_KERNEL);
 	if (!cmd_vaddr) {
 		dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
 		return -ENOMEM;
@@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
 	dma_addr_t node_paddr;
 	int err;
 
-	node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
-				   &node_paddr, GFP_KERNEL);
+	node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
+				  GFP_KERNEL);
 	if (!node) {
 		dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
 		return -ENOMEM;
@@ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
 	if (!chain->cell_ctxt)
 		return -ENOMEM;
 
-	chain->wb_status = dma_zalloc_coherent(&pdev->dev,
-					       sizeof(*chain->wb_status),
-					       &chain->wb_status_paddr,
-					       GFP_KERNEL);
+	chain->wb_status = dma_alloc_coherent(&pdev->dev,
+					      sizeof(*chain->wb_status),
+					      &chain->wb_status_paddr,
+					      GFP_KERNEL);
 	if (!chain->wb_status) {
 		dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
 		return -ENOMEM;
@@ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq)
 	}
 
 	for (pg = 0; pg < eq->num_pages; pg++) {
-		eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
-							eq->page_size,
-							&eq->dma_addr[pg],
-							GFP_KERNEL);
+		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
+						       eq->page_size,
+						       &eq->dma_addr[pg],
+						       GFP_KERNEL);
 		if (!eq->virt_addr[pg]) {
 			err = -ENOMEM;
 			goto err_dma_alloc;
@@ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
 		goto err_sq_db;
 	}
 
-	ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
-					   &func_to_io->ci_dma_base,
-					   GFP_KERNEL);
+	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
+					  &func_to_io->ci_dma_base,
+					  GFP_KERNEL);
 	if (!ci_addr_base) {
 		dev_err(&pdev->dev, "Failed to allocate CI area\n");
 		err = -ENOMEM;
@@ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq)
 		goto err_cqe_dma_arr_alloc;
 
 	for (i = 0; i < wq->q_depth; i++) {
-		rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
-						 sizeof(*rq->cqe[i]),
-						 &rq->cqe_dma[i], GFP_KERNEL);
+		rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
+						sizeof(*rq->cqe[i]),
+						&rq->cqe_dma[i], GFP_KERNEL);
 		if (!rq->cqe[i])
 			goto err_cqe_alloc;
 	}
@@ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
 
 	/* HW requirements: Must be at least 32 bit */
 	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
-	rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
-					       &rq->pi_dma_addr, GFP_KERNEL);
+	rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
+					      &rq->pi_dma_addr, GFP_KERNEL);
 	if (!rq->pi_virt_addr) {
 		dev_err(&pdev->dev, "Failed to allocate PI address\n");
 		err = -ENOMEM;
@@ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
 	struct pci_dev *pdev = hwif->pdev;
 	dma_addr_t dma_addr;
 
-	*vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
-				     GFP_KERNEL);
+	*vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
+				    GFP_KERNEL);
 	if (!*vaddr) {
 		dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
 		return -ENOMEM;
@@ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
 		u64 *paddr = &wq->block_vaddr[i];
 		dma_addr_t dma_addr;
 
-		*vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
-					     &dma_addr, GFP_KERNEL);
+		*vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
+					    &dma_addr, GFP_KERNEL);
 		if (!*vaddr) {
 			dev_err(&pdev->dev, "Failed to allocate wq page\n");
 			goto err_alloc_wq_pages;
@@ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev)
 	bd_size = sizeof(struct mal_descriptor) *
 		(NUM_TX_BUFF * mal->num_tx_chans +
 		 NUM_RX_BUFF * mal->num_rx_chans);
-	mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
-					   GFP_KERNEL);
+	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+					  GFP_KERNEL);
 	if (mal->bd_virt == NULL) {
 		err = -ENOMEM;
 		goto fail_unmap;
@@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
-	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
-					 GFP_KERNEL);
+	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					GFP_KERNEL);
 	if (!txdr->desc) {
 		ret_val = 2;
 		goto err_nomem;
@@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	}
 
 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-					 GFP_KERNEL);
+	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					GFP_KERNEL);
 	if (!rxdr->desc) {
 		ret_val = 6;
 		goto err_nomem;
@@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
 
 	mem->size = ALIGN(size, alignment);
-	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
-				      &mem->pa, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+				     GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 
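The i40e and ixgb hunks round the requested length up with ALIGN() before allocating, so the buffer is a whole multiple of the caller's alignment. The macro is plain bit arithmetic; a userspace sketch with the same math (assuming a power-of-two boundary):

    #include <stdio.h>

    /* Same arithmetic as the kernel's ALIGN(): round x up to the next
     * multiple of the power-of-two a. */
    #define ALIGN_SKETCH(x, a)      (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long size = 3000;

            printf("%lu\n", ALIGN_SKETCH(size, 4096UL)); /* prints 4096 */
            return 0;
    }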
@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
|
||||
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
|
||||
txdr->size = ALIGN(txdr->size, 4096);
|
||||
|
||||
txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
|
||||
GFP_KERNEL);
|
||||
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
|
||||
GFP_KERNEL);
|
||||
if (!txdr->desc) {
|
||||
vfree(txdr->buffer_info);
|
||||
return -ENOMEM;
|
||||
@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
|
||||
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
|
||||
rxdr->size = ALIGN(rxdr->size, 4096);
|
||||
|
||||
rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
|
||||
GFP_KERNEL);
|
||||
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!rxdr->desc) {
|
||||
vfree(rxdr->buffer_info);
|
||||
|
@ -2044,9 +2044,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
|
||||
u32 txq_dma;
|
||||
|
||||
/* Allocate memory for TX descriptors */
|
||||
aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
|
||||
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
|
||||
&aggr_txq->descs_dma, GFP_KERNEL);
|
||||
aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
|
||||
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
|
||||
&aggr_txq->descs_dma, GFP_KERNEL);
|
||||
if (!aggr_txq->descs)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -64,7 +64,7 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q,
|
||||
|
||||
qmem->entry_sz = entry_sz;
|
||||
qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
|
||||
qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
|
||||
qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
|
||||
&qmem->iova, GFP_KERNEL);
|
||||
if (!qmem->base)
|
||||
return -ENOMEM;
|
||||
|
@ -557,9 +557,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
|
||||
* table is full.
|
||||
*/
|
||||
if (!pep->htpr) {
|
||||
pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
|
||||
HASH_ADDR_TABLE_SIZE,
|
||||
&pep->htpr_dma, GFP_KERNEL);
|
||||
pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
|
||||
HASH_ADDR_TABLE_SIZE,
|
||||
&pep->htpr_dma, GFP_KERNEL);
|
||||
if (!pep->htpr)
|
||||
return -ENOMEM;
|
||||
} else {
|
||||
@ -1044,9 +1044,9 @@ static int rxq_init(struct net_device *dev)
|
||||
pep->rx_desc_count = 0;
|
||||
size = pep->rx_ring_size * sizeof(struct rx_desc);
|
||||
pep->rx_desc_area_size = size;
|
||||
pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
|
||||
&pep->rx_desc_dma,
|
||||
GFP_KERNEL);
|
||||
pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
|
||||
&pep->rx_desc_dma,
|
||||
GFP_KERNEL);
|
||||
if (!pep->p_rx_desc_area)
|
||||
goto out;
|
||||
|
||||
@ -1103,9 +1103,9 @@ static int txq_init(struct net_device *dev)
|
||||
pep->tx_desc_count = 0;
|
||||
size = pep->tx_ring_size * sizeof(struct tx_desc);
|
||||
pep->tx_desc_area_size = size;
|
||||
pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
|
||||
&pep->tx_desc_dma,
|
||||
GFP_KERNEL);
|
||||
pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
|
||||
&pep->tx_desc_dma,
|
||||
GFP_KERNEL);
|
||||
if (!pep->p_tx_desc_area)
|
||||
goto out;
|
||||
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
|
||||
|
@ -598,10 +598,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
|
||||
dma_addr_t dma_addr;
|
||||
int i;
|
||||
|
||||
eth->scratch_ring = dma_zalloc_coherent(eth->dev,
|
||||
cnt * sizeof(struct mtk_tx_dma),
|
||||
ð->phy_scratch_ring,
|
||||
GFP_ATOMIC);
|
||||
eth->scratch_ring = dma_alloc_coherent(eth->dev,
|
||||
cnt * sizeof(struct mtk_tx_dma),
|
||||
ð->phy_scratch_ring,
|
||||
GFP_ATOMIC);
|
||||
if (unlikely(!eth->scratch_ring))
|
||||
return -ENOMEM;
|
||||
|
||||
@ -1213,8 +1213,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
|
||||
if (!ring->buf)
|
||||
goto no_tx_mem;
|
||||
|
||||
ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
|
||||
&ring->phys, GFP_ATOMIC);
|
||||
ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
|
||||
&ring->phys, GFP_ATOMIC);
|
||||
if (!ring->dma)
|
||||
goto no_tx_mem;
|
||||
|
||||
@ -1310,9 +1310,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ring->dma = dma_zalloc_coherent(eth->dev,
|
||||
rx_dma_size * sizeof(*ring->dma),
|
||||
&ring->phys, GFP_ATOMIC);
|
||||
ring->dma = dma_alloc_coherent(eth->dev,
|
||||
rx_dma_size * sizeof(*ring->dma),
|
||||
&ring->phys, GFP_ATOMIC);
|
||||
if (!ring->dma)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -584,8 +584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
|
||||
buf->npages = 1;
|
||||
buf->page_shift = get_order(size) + PAGE_SHIFT;
|
||||
buf->direct.buf =
|
||||
dma_zalloc_coherent(&dev->persist->pdev->dev,
|
||||
size, &t, GFP_KERNEL);
|
||||
dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
|
||||
GFP_KERNEL);
|
||||
if (!buf->direct.buf)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -624,8 +624,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
|
||||
|
||||
for (i = 0; i < buf->nbufs; ++i) {
|
||||
buf->page_list[i].buf =
|
||||
dma_zalloc_coherent(&dev->persist->pdev->dev,
|
||||
PAGE_SIZE, &t, GFP_KERNEL);
|
||||
dma_alloc_coherent(&dev->persist->pdev->dev,
|
||||
PAGE_SIZE, &t, GFP_KERNEL);
|
||||
if (!buf->page_list[i].buf)
|
||||
goto err_free;
|
||||
|
||||
|
@@ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
 	mutex_lock(&priv->alloc_mutex);
 	original_node = dev_to_node(&dev->pdev->dev);
 	set_dev_node(&dev->pdev->dev, node);
-	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
-					 dma_handle, GFP_KERNEL);
+	cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
+					GFP_KERNEL);
 	set_dev_node(&dev->pdev->dev, original_node);
 	mutex_unlock(&priv->alloc_mutex);
 	return cpu_handle;
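The mlx5 helper above briefly retargets the device's NUMA node so the coherent buffer is placed on the requested node, then restores it; the mutex keeps the swap atomic with respect to other allocators. A kernel-style sketch of the same pattern (hypothetical function name, illustration only):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/mutex.h>

    static void *alloc_coherent_on_node(struct device *dev, size_t size,
                                        dma_addr_t *handle, int node,
                                        struct mutex *lock)
    {
            int original_node;
            void *buf;

            mutex_lock(lock);
            original_node = dev_to_node(dev);
            set_dev_node(dev, node);        /* steer the allocation */
            buf = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
            set_dev_node(dev, original_node);
            mutex_unlock(lock);
            return buf;
    }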
@@ -1789,8 +1789,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 {
 	struct device *ddev = &dev->pdev->dev;
 
-	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
-						 &cmd->alloc_dma, GFP_KERNEL);
+	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+						&cmd->alloc_dma, GFP_KERNEL);
 	if (!cmd->cmd_alloc_buf)
 		return -ENOMEM;
 
@@ -1804,9 +1804,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 
 	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
 			  cmd->alloc_dma);
-	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
-						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
-						 &cmd->alloc_dma, GFP_KERNEL);
+	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
+						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+						&cmd->alloc_dma, GFP_KERNEL);
 	if (!cmd->cmd_alloc_buf)
 		return -ENOMEM;
 
@@ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
 	for (i = 0; i < mgp->num_slices; i++) {
 		ss = &mgp->ss[i];
 		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
-		ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
-							&ss->rx_done.bus,
-							GFP_KERNEL);
+		ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+						       &ss->rx_done.bus,
+						       GFP_KERNEL);
 		if (ss->rx_done.entry == NULL)
 			goto abort;
 		bytes = sizeof(*ss->fw_stats);
@@ -2170,9 +2170,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 	tx_ring->cnt = dp->txd_cnt;
 
 	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
-	tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
-					    &tx_ring->dma,
-					    GFP_KERNEL | __GFP_NOWARN);
+	tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+					   &tx_ring->dma,
+					   GFP_KERNEL | __GFP_NOWARN);
 	if (!tx_ring->txds) {
 		netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
 			    tx_ring->cnt);
@@ -2328,9 +2328,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
 
 	rx_ring->cnt = dp->rxd_cnt;
 	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
-	rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
-					    &rx_ring->dma,
-					    GFP_KERNEL | __GFP_NOWARN);
+	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+					   &rx_ring->dma,
+					   GFP_KERNEL | __GFP_NOWARN);
 	if (!rx_ring->rxds) {
 		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
 			    rx_ring->cnt);
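Two details worth noting in the nfp hunks: the ring size comes from array_size(), which saturates to SIZE_MAX on multiplication overflow so the allocation then fails cleanly, and __GFP_NOWARN suppresses the generic allocation-failure splat because the driver prints its own friendlier netdev_warn(). A kernel-style sketch combining both (hypothetical helper):

    #include <linux/dma-mapping.h>
    #include <linux/overflow.h>

    static void *alloc_ring_quiet(struct device *dev, size_t nelem,
                                  size_t elem_size, dma_addr_t *handle)
    {
            /* array_size() returns SIZE_MAX if nelem * elem_size overflows. */
            size_t bytes = array_size(nelem, elem_size);

            return dma_alloc_coherent(dev, bytes, handle,
                                      GFP_KERNEL | __GFP_NOWARN);
    }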
@@ -287,9 +287,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
 	priv->rx_bd_ci = 0;
 
 	/* Allocate the Tx and Rx buffer descriptors. */
-	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
-					    &priv->tx_bd_p, GFP_KERNEL);
+	priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					   sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+					   &priv->tx_bd_p, GFP_KERNEL);
 	if (!priv->tx_bd_v)
 		goto out;
 
@@ -299,9 +299,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
 	if (!priv->tx_skb)
 		goto out;
 
-	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
-					    &priv->rx_bd_p, GFP_KERNEL);
+	priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					   sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+					   &priv->rx_bd_p, GFP_KERNEL);
 	if (!priv->rx_bd_v)
 		goto out;
 
@@ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
 
 	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
 	rx_ring->rx_buff_pool =
-		dma_zalloc_coherent(&pdev->dev, size,
-				    &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
+		dma_alloc_coherent(&pdev->dev, size,
+				   &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
 	if (!rx_ring->rx_buff_pool)
 		return -ENOMEM;
 
@@ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 
 	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 
-	tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
-					    &tx_ring->dma, GFP_KERNEL);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc) {
 		vfree(tx_ring->buffer_info);
 		return -ENOMEM;
@@ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
 		return -ENOMEM;
 
 	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
-	rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
-					    &rx_ring->dma, GFP_KERNEL);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 	if (!rx_ring->desc) {
 		vfree(rx_ring->buffer_info);
 		return -ENOMEM;
@@ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
 	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
 		goto out_ring_desc;
 
-	ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
-					    RX_RING_SIZE * sizeof(u64),
-					    &ring->buf_dma, GFP_KERNEL);
+	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+					   RX_RING_SIZE * sizeof(u64),
+					   &ring->buf_dma, GFP_KERNEL);
 	if (!ring->buffers)
 		goto out_ring_desc;
 
@@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 		u32 size = min_t(u32, total_size, psz);
 		void **p_virt = &p_mngr->t2[i].p_virt;
 
-		*p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
-					      size, &p_mngr->t2[i].p_phys,
-					      GFP_KERNEL);
+		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+					     &p_mngr->t2[i].p_phys,
+					     GFP_KERNEL);
 		if (!p_mngr->t2[i].p_virt) {
 			rc = -ENOMEM;
 			goto t2_fail;
@@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 		u32 size;
 
 		size = min_t(u32, sz_left, p_blk->real_size_in_page);
-		p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
-					     &p_phys, GFP_KERNEL);
+		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+					    &p_phys, GFP_KERNEL);
 		if (!p_virt)
 			return -ENOMEM;
 
@@ -2306,9 +2306,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		goto out0;
 	}
 
-	p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
-				     p_blk->real_size_in_page, &p_phys,
-				     GFP_KERNEL);
+	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				    p_blk->real_size_in_page, &p_phys,
+				    GFP_KERNEL);
 	if (!p_virt) {
 		rc = -ENOMEM;
 		goto out1;
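The qed hunks allocate a large table as a sequence of page-sized coherent chunks, clamping the final chunk with min_t(). A userspace sketch of that chunking loop (invented names, illustration only):

    #include <stddef.h>

    /* Split 'total' bytes into chunks of at most 'psz'; returns the
     * number of chunks recorded, or -1 if 'lens' is too small. */
    static int split_chunks(size_t total, size_t psz,
                            size_t *lens, size_t max_chunks)
    {
            size_t i = 0;

            while (total) {
                    size_t size = total < psz ? total : psz; /* min_t() */

                    if (i == max_chunks)
                            return -1;
                    lens[i++] = size;
                    total -= size;
            }
            return (int)i;
    }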
Some files were not shown because too many files have changed in this diff.