RDMA: Use the sg_table directly and remove the opencoded version from umem
This allows using the normal sg_table APIs and makes all the code
cleaner. Remove sg_head, sg_nents and nmap from ib_umem.

Link: https://lore.kernel.org/r/20210824142531.3877007-4-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 3e302dbc67
commit 79fbd3e124
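Every driver hunk below repeats the same mechanical conversion: the open-coded sgl/nmap/sg_nents triple is dropped in favour of the sg_table embedded in umem->sgt_append.sgt, walked with the stock sg_table iterators. A minimal sketch of the resulting pattern, assuming an already pinned and DMA-mapped umem; the helper name walk_umem_dma is hypothetical and not part of the patch:

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

static void walk_umem_dma(struct ib_umem *umem)
{
	struct scatterlist *sg;
	unsigned int i;

	/* Before: for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) */
	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		dma_addr_t addr = sg_dma_address(sg);

		pr_debug("dma block %pad, len %u\n", &addr, sg_dma_len(sg));
	}
}
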
drivers/infiniband/core/umem.c

@@ -51,11 +51,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	struct scatterlist *sg;
 	unsigned int i;
 
-	if (umem->nmap > 0)
-		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
-				DMA_BIDIRECTIONAL);
+	if (dirty)
+		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
+					   DMA_BIDIRECTIONAL, 0);
 
-	for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
+	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
 		unpin_user_page_range_dirty_lock(sg_page(sg),
 			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
 
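Note the two different walkers in this hunk: the unmap path hands the whole table to ib_dma_unmap_sgtable_attrs(), while the unpin loop uses for_each_sgtable_sg(), which visits every CPU page entry rather than only the DMA-mapped ones. The distinction comes from include/linux/scatterlist.h; quoting the definitions for reference:

/* CPU-side walker: all orig_nents entries, i.e. every pinned page */
#define for_each_sgtable_sg(sgt, sg, i)		\
	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)

/* DMA-side walker: only the nents entries that were actually mapped */
#define for_each_sgtable_dma_sg(sgt, sg, i)	\
	for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
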
@@ -111,7 +111,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
 	/* offset into first SGL */
 	pgoff = umem->address & ~PAGE_MASK;
 
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
 		/* Walk SGL and reduce max page size if VA/PA bits differ
 		 * for any address.
 		 */
@@ -121,7 +121,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
 		 * the maximum possible page size as the low bits of the iova
 		 * must be zero when starting the next chunk.
 		 */
-		if (i != (umem->nmap - 1))
+		if (i != (umem->sgt_append.sgt.nents - 1))
 			mask |= va;
 		pgoff = 0;
 	}
@@ -231,30 +231,19 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 				&umem->sgt_append, page_list, pinned, 0,
 				pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
 				npages, GFP_KERNEL);
-		umem->sg_nents = umem->sgt_append.sgt.nents;
 		if (ret) {
-			memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt,
-			       sizeof(umem->sgt_append.sgt));
 			unpin_user_pages_dirty_lock(page_list, pinned, 0);
 			goto umem_release;
 		}
 	}
 
-	memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt,
-	       sizeof(umem->sgt_append.sgt));
 	if (access & IB_ACCESS_RELAXED_ORDERING)
 		dma_attr |= DMA_ATTR_WEAK_ORDERING;
 
-	umem->nmap =
-		ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
-				    DMA_BIDIRECTIONAL, dma_attr);
-
-	if (!umem->nmap) {
-		ret = -ENOMEM;
+	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
+				       DMA_BIDIRECTIONAL, dma_attr);
+	if (ret)
 		goto umem_release;
-	}
 
 	ret = 0;
 	goto out;
 
 umem_release:
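Worth noting in the hunk above: ib_dma_map_sgtable_attrs() follows the dma_map_sgtable() convention of returning 0 or a negative errno and recording the mapped entry count in the table itself, so the old "nmap == 0 means -ENOMEM" dance disappears. The resulting caller shape, as a sketch with device, umem and dma_attr assumed set up as in ib_umem_get():

	/* 0 on success, negative errno on failure; on success the mapped
	 * count is available in umem->sgt_append.sgt.nents.
	 */
	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
				       DMA_BIDIRECTIONAL, dma_attr);
	if (ret)
		goto umem_release;	/* ret is already a negative errno */
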
@@ -314,7 +303,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
 		return -EINVAL;
 	}
 
-	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
+	ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
+				 umem->sgt_append.sgt.orig_nents, dst, length,
 				 offset + ib_umem_offset(umem));
 
 	if (ret < 0)
drivers/infiniband/core/umem_dmabuf.c

@@ -55,9 +55,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 		cur += sg_dma_len(sg);
 	}
 
-	umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
-	umem_dmabuf->umem.sg_head.nents = nmap;
-	umem_dmabuf->umem.nmap = nmap;
+	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
+	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
 	umem_dmabuf->sgt = sgt;
 
 wait_fence:
drivers/infiniband/hw/hns/hns_roce_db.c

@@ -42,8 +42,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
 
 found:
 	offset = virt - page_addr;
-	db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
-	db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
+	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
+	db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
 	db->u.user_page = page;
 	refcount_inc(&page->refcount);
 
drivers/infiniband/hw/irdma/verbs.c

@@ -2235,7 +2235,7 @@ static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
 	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
 
 	if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
-		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
+		iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
 
 	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
 		*pbl = rdma_block_iter_dma_address(&biter);
drivers/infiniband/hw/mlx4/doorbell.c

@@ -75,7 +75,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
 	list_add(&page->list, &context->db_page_list);
 
 found:
-	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+		  (virt & ~PAGE_MASK);
 	db->u.user_page = page;
 	++page->refcnt;
 
drivers/infiniband/hw/mlx4/mr.c

@@ -200,7 +200,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 	mtt_shift = mtt->page_shift;
 	mtt_size = 1ULL << mtt_shift;
 
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
 		if (cur_start_addr + len == sg_dma_address(sg)) {
 			/* still the same block */
 			len += sg_dma_len(sg);
@@ -273,7 +273,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 
 	*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
 
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
 		/*
 		 * Initialization - save the first chunk start as the
 		 * current_block_start - block means contiguous pages.
drivers/infiniband/hw/mlx5/doorbell.c

@@ -78,7 +78,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
 	list_add(&page->list, &context->db_page_list);
 
 found:
-	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+		  (virt & ~PAGE_MASK);
 	db->u.user_page = page;
 	++page->refcnt;
 
drivers/infiniband/hw/mlx5/mr.c

@@ -1226,7 +1226,8 @@ int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 	orig_sg_length = sg.length;
 
 	cur_mtt = mtt;
-	rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
+	rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter,
+			     mr->umem->sgt_append.sgt.nents,
 			     BIT(mr->page_shift)) {
 		if (cur_mtt == (void *)mtt + sg.length) {
 			dma_sync_single_for_device(ddev, sg.addr, sg.length,
drivers/infiniband/hw/qedr/verbs.c

@@ -1481,7 +1481,7 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
 		return PTR_ERR(srq->prod_umem);
 	}
 
-	sg = srq->prod_umem->sg_head.sgl;
+	sg = srq->prod_umem->sgt_append.sgt.sgl;
 	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
 
 	return 0;
drivers/infiniband/sw/rdmavt/mr.c

@@ -410,7 +410,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mr.page_shift = PAGE_SHIFT;
 	m = 0;
 	n = 0;
-	for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+	for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
 		void *vaddr;
 
 		vaddr = page_address(sg_page_iter_page(&sg_iter));
drivers/infiniband/sw/rxe/rxe_mr.c

@@ -143,7 +143,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	if (length > 0) {
 		buf = map[0]->buf;
 
-		for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+		for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
 			if (num_buf >= RXE_BUF_PER_MAP) {
 				map++;
 				buf = map[0]->buf;
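The rdmavt and rxe hunks above use the page-granular walker, which likewise takes the sg_table directly and, like the release path, spans all CPU pages. Its definition, again from include/linux/scatterlist.h, for reference:

#define for_each_sgtable_page(sgt, piter, pgoffset)	\
	for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
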
include/rdma/ib_umem.h

@@ -26,10 +26,7 @@ struct ib_umem {
 	u32 is_odp : 1;
 	u32 is_dmabuf : 1;
 	struct work_struct work;
+	struct sg_append_table sgt_append;
-	struct sg_table sg_head;
-	int nmap;
-	unsigned int sg_nents;
-	struct sg_append_table sgt_append;
 };
 
 struct ib_umem_dmabuf {
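With the struct trimmed, the removed fields map onto the embedded sg_table as the substitutions throughout the hunks show. A sketch, with umem assumed to be a mapped struct ib_umem pointer:

	struct sg_table *sgt = &umem->sgt_append.sgt;

	/* umem->sg_head.sgl -> sgt->sgl        (first scatterlist entry) */
	/* umem->nmap        -> sgt->nents      (DMA-mapped entries)      */
	/* umem->sg_nents    -> sgt->orig_nents (CPU page entries)        */
	pr_debug("dma entries %u, cpu entries %u\n",
		 sgt->nents, sgt->orig_nents);
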
@@ -57,7 +54,7 @@ static inline int ib_umem_offset(struct ib_umem *umem)
 static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
 					       unsigned long pgsz)
 {
-	return (sg_dma_address(umem->sg_head.sgl) + ib_umem_offset(umem)) &
+	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
 	       (pgsz - 1);
 }
 
@@ -78,7 +75,8 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
 					      struct ib_umem *umem,
 					      unsigned long pgsz)
 {
-	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+				umem->sgt_append.sgt.nents, pgsz);
 }
 
 /**
@@ -129,7 +127,7 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
 					unsigned long pgsz_bitmap,
 					u64 pgoff_bitmask)
 {
-	struct scatterlist *sg = umem->sg_head.sgl;
+	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
 	dma_addr_t dma_addr;
 
 	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
include/rdma/ib_verbs.h

@@ -4057,6 +4057,34 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 					   dma_attrs);
 }
 
+/**
+ * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The sg_table object describing the buffer
+ * @direction: The direction of the DMA
+ * @attrs: Optional DMA attributes for the map operation
+ */
+static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
+					   struct sg_table *sgt,
+					   enum dma_data_direction direction,
+					   unsigned long dma_attrs)
+{
+	if (ib_uses_virt_dma(dev)) {
+		ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
+		return 0;
+	}
+	return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
+}
+
+static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
+					      struct sg_table *sgt,
+					      enum dma_data_direction direction,
+					      unsigned long dma_attrs)
+{
+	if (!ib_uses_virt_dma(dev))
+		dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
+}
+
 /**
  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
  * @dev: The device for which the DMA addresses are to be created
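A hypothetical caller of the two new wrappers, pairing map and unmap and passing DMA_ATTR_WEAK_ORDERING the way ib_umem_get() does for IB_ACCESS_RELAXED_ORDERING; the function name map_sgt_for_device is illustrative only:

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

static int map_sgt_for_device(struct ib_device *dev, struct sg_table *sgt,
			      bool relaxed)
{
	unsigned long attrs = relaxed ? DMA_ATTR_WEAK_ORDERING : 0;
	int ret;

	ret = ib_dma_map_sgtable_attrs(dev, sgt, DMA_BIDIRECTIONAL, attrs);
	if (ret)
		return ret;	/* negative errno, nothing to unwind */

	/* ... program the HW with the sgt->nents DMA-mapped entries ... */

	ib_dma_unmap_sgtable_attrs(dev, sgt, DMA_BIDIRECTIONAL, attrs);
	return 0;
}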