RDMA: Add ib_virt_dma_to_page()
Make it clearer what is going on by adding a function to go back from the "virtual" dma_addr to a kva, and another to go back to a struct page. These are used in the ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib). Call them instead of a naked cast and virt_to_page() when working with dma_addr values encoded by the various ib_map functions.

This also fixes the virt_to_page() casting problem Linus Walleij has been chasing.

Cc: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/0-v2-05ea785520ed+10-ib_virt_page_jgg@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
commit 8d7c7c0eeb
parent b2b1ddc457
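To make the marshalling concrete before reading the diff: in ib_uses_virt_dma() mode the "dma_addr" produced by the ib_map functions is simply the kernel virtual address stored in a u64, so going back to a pointer is a cast, and going back to a struct page is virt_to_page() on that pointer. The sketch below is a minimal, runnable user-space model of that convention; it is illustrative only (virt_map_single(), virt_dma_to_ptr(), and the main() scaffolding are invented for this example, not kernel APIs), and the real helpers appear in the include/rdma/ib_verbs.h hunk at the end of this diff.

/*
 * User-space model of the "virtual DMA" convention used by
 * ib_uses_virt_dma() drivers (siw, rxe, hfi, qib). The names
 * virt_map_single() and virt_dma_to_ptr() are hypothetical
 * stand-ins; only the casting idiom matches the kernel helpers.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* "Map" a buffer: the dma_addr is just the address widened to 64 bits. */
static uint64_t virt_map_single(void *va)
{
	return (uintptr_t)va;
}

/* The inverse, modeling ib_virt_dma_to_ptr(): narrow and cast back. */
static void *virt_dma_to_ptr(uint64_t dma_addr)
{
	return (void *)(uintptr_t)dma_addr;
}

int main(void)
{
	char src[16] = "hello";
	char dst[16];

	/* A transport routine sees only the u64 dma_addr... */
	uint64_t dma_addr = virt_map_single(src);

	/* ...and converts back to a pointer when it needs the memory. */
	memcpy(dst, virt_dma_to_ptr(dma_addr), sizeof(dst));
	printf("%s\n", dst); /* prints "hello" */
	return 0;
}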
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -210,10 +210,10 @@ err1:
 	return err;
 }
 
-static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
+static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
 {
 	struct rxe_mr *mr = to_rmr(ibmr);
-	struct page *page = virt_to_page(iova & mr->page_mask);
+	struct page *page = ib_virt_dma_to_page(dma_addr);
 	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
 	int err;
 
@@ -279,16 +279,16 @@ static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
 	return 0;
 }
 
-static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
+static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
 			    unsigned int length, enum rxe_mr_copy_dir dir)
 {
-	unsigned int page_offset = iova & (PAGE_SIZE - 1);
+	unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
 	unsigned int bytes;
 	struct page *page;
 	u8 *va;
 
 	while (length) {
-		page = virt_to_page(iova & mr->page_mask);
+		page = ib_virt_dma_to_page(dma_addr);
 		bytes = min_t(unsigned int, length,
 			      PAGE_SIZE - page_offset);
 		va = kmap_local_page(page);
@@ -300,7 +300,7 @@ static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
 
 		kunmap_local(va);
 		page_offset = 0;
-		iova += bytes;
+		dma_addr += bytes;
 		addr += bytes;
 		length -= bytes;
 	}
@@ -488,7 +488,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
-		page = virt_to_page(iova & PAGE_MASK);
+		page = ib_virt_dma_to_page(iova);
 	} else {
 		unsigned long index;
 		int err;
@@ -545,7 +545,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
-		page = virt_to_page(iova & PAGE_MASK);
+		page = ib_virt_dma_to_page(iova);
 	} else {
 		unsigned long index;
 		int err;
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -760,7 +760,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
 	int i;
 
 	for (i = 0; i < ibwr->num_sge; i++, sge++) {
-		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+		memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
 		p += sge->length;
 	}
 }
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -139,7 +139,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
 			break;
 
 		bytes = min(bytes, len);
-		if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+		if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) ==
 		    bytes) {
 			copied += bytes;
 			offset += bytes;
@@ -487,7 +487,7 @@ int siw_proc_send(struct siw_qp *qp)
 		mem_p = *mem;
 		if (mem_p->mem_obj == NULL)
 			rv = siw_rx_kva(srx,
-				(void *)(uintptr_t)(sge->laddr + frx->sge_off),
+				ib_virt_dma_to_ptr(sge->laddr + frx->sge_off),
 				sge_bytes);
 		else if (!mem_p->is_pbl)
 			rv = siw_rx_umem(srx, mem_p->umem,
@@ -852,7 +852,7 @@ int siw_proc_rresp(struct siw_qp *qp)
 
 	if (mem_p->mem_obj == NULL)
 		rv = siw_rx_kva(srx,
-			(void *)(uintptr_t)(sge->laddr + wqe->processed),
+			ib_virt_dma_to_ptr(sge->laddr + wqe->processed),
 			bytes);
 	else if (!mem_p->is_pbl)
 		rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
 	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
 
 	if (paddr)
-		return virt_to_page((void *)(uintptr_t)paddr);
+		return ib_virt_dma_to_page(paddr);
 
 	return NULL;
 }
@@ -56,8 +56,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
 
 	if (!mem->mem_obj) {
 		/* Kernel client using kva */
-		memcpy(paddr,
-		       (const void *)(uintptr_t)sge->laddr, bytes);
+		memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes);
 	} else if (c_tx->in_syscall) {
 		if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
 				   bytes))
@@ -477,7 +476,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				 * or memory region with assigned kernel buffer
 				 */
 				iov[seg].iov_base =
-					(void *)(uintptr_t)(sge->laddr + sge_off);
+					ib_virt_dma_to_ptr(sge->laddr + sge_off);
 				iov[seg].iov_len = sge_len;
 
 				if (do_crc)
@@ -537,19 +536,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				 * Cast to an uintptr_t to preserve all 64 bits
 				 * in sge->laddr.
 				 */
-				uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
+				u64 va = sge->laddr + sge_off;
 
-				/*
-				 * virt_to_page() takes a (void *) pointer
-				 * so cast to a (void *) meaning it will be 64
-				 * bits on a 64 bit platform and 32 bits on a
-				 * 32 bit platform.
-				 */
-				page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
+				page_array[seg] = ib_virt_dma_to_page(va);
 				if (do_crc)
 					crypto_shash_update(
 						c_tx->mpa_crc_hd,
-						(void *)va,
+						ib_virt_dma_to_ptr(va),
 						plen);
 			}
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -660,7 +660,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
 			bytes = -EINVAL;
 			break;
 		}
-		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
+		memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
 		       core_sge->length);
 
 		kbuf += core_sge->length;
@@ -1523,7 +1523,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 		}
 		siw_dbg_mem(mem,
 			    "sge[%d], size %u, addr 0x%p, total %lu\n",
-			    i, pble->size, (void *)(uintptr_t)pble->addr,
+			    i, pble->size, ib_virt_dma_to_ptr(pble->addr),
 			    pbl_size);
 	}
 	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4035,6 +4035,31 @@ static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
 	return dma_pci_p2pdma_supported(dev->dma_device);
 }
 
+/**
+ * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
+ * going through the dma_addr marshalling.
+ */
+static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
+{
+	/* virt_dma mode maps the kvs's directly into the dma addr */
+	return (void *)(uintptr_t)dma_addr;
+}
+
+/**
+ * ib_virt_dma_to_page - Convert a dma_addr to a struct page
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() device to get back to the struct page after going
+ * through the dma_addr marshalling.
+ */
+static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
+{
+	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
+}
+
 /**
  * ib_dma_mapping_error - check a DMA addr for error
  * @dev: The device for which the dma_addr was created
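Taken together, the rxe and siw hunks converge on a single idiom: recover the struct page behind the virtual dma_addr, map it, and copy. The condensed kernel-style sketch below restates that pattern; copy_from_virt_dma() is a hypothetical wrapper modeled on rxe_mr_copy_dma() above, not a function added by this commit.

/*
 * Hypothetical wrapper restating the copy pattern from
 * rxe_mr_copy_dma() above; not part of this commit. Walks a
 * virtual dma_addr page by page using the new helpers.
 */
static void copy_from_virt_dma(u64 dma_addr, u8 *dst, unsigned int length)
{
	while (length) {
		unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
		unsigned int bytes = min_t(unsigned int, length,
					   PAGE_SIZE - page_offset);
		struct page *page = ib_virt_dma_to_page(dma_addr);
		u8 *va = kmap_local_page(page);

		memcpy(dst, va + page_offset, bytes);
		kunmap_local(va);

		dma_addr += bytes;
		dst += bytes;
		length -= bytes;
	}
}

Centralizing the u64-to-pointer conversion in one inline helper is what resolves the virt_to_page() casting problem mentioned in the commit message: the cast through uintptr_t now happens in exactly one place instead of being open-coded at every call site.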