drm: drop drm_[cm]alloc* helpers
Now that the drm_[cm]alloc* helpers are simple one-line wrappers around kvmalloc_array() and drm_free_large() is just an alias for kvfree(), we can drop them and replace all users with the native forms. This should not introduce any functional change.

Changes since v1
- fix typo in drivers/gpu//drm/etnaviv/etnaviv_gem.c - noticed by the 0day build robot

Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Michal Hocko <mhocko@suse.com>
[danvet: Fixup vgem which grew another user very recently.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Christian König <christian.koenig@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170517122312.GK18247@dhcp22.suse.cz
commit 2098105ec6
parent c4f51dc872
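For reference, by this point the dropped helpers had already been reduced to one-line wrappers; a sketch of what they amounted to, reconstructed from the description above rather than copied verbatim from the drm_mem_util.h header:

static inline void *drm_calloc_large(size_t nmemb, size_t size)
{
        /* zeroed, overflow-checked array allocation */
        return kvmalloc_array(nmemb, size, GFP_KERNEL | __GFP_ZERO);
}

static inline void *drm_malloc_ab(size_t nmemb, size_t size)
{
        return kvmalloc_array(nmemb, size, GFP_KERNEL);
}

static inline void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
{
        /* caller-supplied gfp flags, e.g. GFP_TEMPORARY */
        return kvmalloc_array(nmemb, size, gfp);
}

static inline void drm_free_large(void *ptr)
{
        /* kvfree() handles both kmalloc'ed and vmalloc'ed memory */
        kvfree(ptr);
}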
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -96,7 +96,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
         int r;
         unsigned long total_size = 0;
 
-        array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
+        array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
         if (!array)
                 return -ENOMEM;
         memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
@@ -148,7 +148,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
         for (i = 0; i < list->num_entries; ++i)
                 amdgpu_bo_unref(&list->array[i].robj);
 
-        drm_free_large(list->array);
+        kvfree(list->array);
 
         list->gds_obj = gds_obj;
         list->gws_obj = gws_obj;
@@ -163,7 +163,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 error_free:
         while (i--)
                 amdgpu_bo_unref(&array[i].robj);
-        drm_free_large(array);
+        kvfree(array);
         return r;
 }
 
@@ -224,7 +224,7 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
                 amdgpu_bo_unref(&list->array[i].robj);
 
         mutex_destroy(&list->lock);
-        drm_free_large(list->array);
+        kvfree(list->array);
         kfree(list);
 }
 
@@ -244,8 +244,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
         int r;
 
-        info = drm_malloc_ab(args->in.bo_number,
-                             sizeof(struct drm_amdgpu_bo_list_entry));
+        info = kvmalloc_array(args->in.bo_number,
+                              sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
         if (!info)
                 return -ENOMEM;
 
@@ -311,11 +311,11 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
         memset(args, 0, sizeof(*args));
         args->out.list_handle = handle;
-        drm_free_large(info);
+        kvfree(info);
 
         return 0;
 
 error_free:
-        drm_free_large(info);
+        kvfree(info);
         return r;
 }
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -194,7 +194,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                 size = p->chunks[i].length_dw;
                 cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
 
-                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
                 if (p->chunks[i].kdata == NULL) {
                         ret = -ENOMEM;
                         i--;
@@ -247,7 +247,7 @@ free_all_kdata:
         i = p->nchunks - 1;
 free_partial_kdata:
         for (; i >= 0; i--)
-                drm_free_large(p->chunks[i].kdata);
+                kvfree(p->chunks[i].kdata);
         kfree(p->chunks);
         p->chunks = NULL;
         p->nchunks = 0;
@@ -505,7 +505,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                         return r;
 
                 if (binding_userptr) {
-                        drm_free_large(lobj->user_pages);
+                        kvfree(lobj->user_pages);
                         lobj->user_pages = NULL;
                 }
         }
@@ -571,7 +571,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                 release_pages(e->user_pages,
                                               e->robj->tbo.ttm->num_pages,
                                               false);
-                                drm_free_large(e->user_pages);
+                                kvfree(e->user_pages);
                                 e->user_pages = NULL;
                         }
 
@@ -601,8 +601,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                 list_for_each_entry(e, &need_pages, tv.head) {
                         struct ttm_tt *ttm = e->robj->tbo.ttm;
 
-                        e->user_pages = drm_calloc_large(ttm->num_pages,
-                                                         sizeof(struct page*));
+                        e->user_pages = kvmalloc_array(ttm->num_pages,
+                                                       sizeof(struct page*),
+                                                       GFP_KERNEL | __GFP_ZERO);
                         if (!e->user_pages) {
                                 r = -ENOMEM;
                                 DRM_ERROR("calloc failure in %s\n", __func__);
@@ -612,7 +613,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                         r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
                         if (r) {
                                 DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
-                                drm_free_large(e->user_pages);
+                                kvfree(e->user_pages);
                                 e->user_pages = NULL;
                                 goto error_free_pages;
                         }
@@ -708,7 +709,7 @@ error_free_pages:
                         release_pages(e->user_pages,
                                       e->robj->tbo.ttm->num_pages,
                                       false);
-                        drm_free_large(e->user_pages);
+                        kvfree(e->user_pages);
                 }
         }
 
@@ -761,7 +762,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
                 amdgpu_bo_list_put(parser->bo_list);
 
         for (i = 0; i < parser->nchunks; i++)
-                drm_free_large(parser->chunks[i].kdata);
+                kvfree(parser->chunks[i].kdata);
         kfree(parser->chunks);
         if (parser->job)
                 amdgpu_job_free(parser->job);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -279,8 +279,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
         if (!parent->entries) {
                 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
 
-                parent->entries = drm_calloc_large(num_entries,
-                                                   sizeof(struct amdgpu_vm_pt));
+                parent->entries = kvmalloc_array(num_entries,
+                                                 sizeof(struct amdgpu_vm_pt),
+                                                 GFP_KERNEL | __GFP_ZERO);
                 if (!parent->entries)
                         return -ENOMEM;
                 memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
@@ -2198,7 +2199,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
                 for (i = 0; i <= level->last_entry_used; i++)
                         amdgpu_vm_free_levels(&level->entries[i]);
 
-        drm_free_large(level->entries);
+        kvfree(level->entries);
 }
 
 /**
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -521,7 +521,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
         npages = obj->size >> PAGE_SHIFT;
 
-        pages = drm_malloc_ab(npages, sizeof(struct page *));
+        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
         if (pages == NULL)
                 return ERR_PTR(-ENOMEM);
 
@@ -546,7 +546,7 @@ fail:
         while (i--)
                 put_page(pages[i]);
 
-        drm_free_large(pages);
+        kvfree(pages);
         return ERR_CAST(p);
 }
 EXPORT_SYMBOL(drm_gem_get_pages);
@@ -582,7 +582,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                         put_page(pages[i]);
         }
 
-        drm_free_large(pages);
+        kvfree(pages);
 }
 EXPORT_SYMBOL(drm_gem_put_pages);
 
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,7 +748,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
         uintptr_t ptr;
         unsigned int flags = 0;
 
-        pvec = drm_malloc_ab(npages, sizeof(struct page *));
+        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
         if (!pvec)
                 return ERR_PTR(-ENOMEM);
 
@@ -772,7 +772,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 
         if (ret < 0) {
                 release_pages(pvec, pinned, 0);
-                drm_free_large(pvec);
+                kvfree(pvec);
                 return ERR_PTR(ret);
         }
 
@@ -823,7 +823,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
         mm = get_task_mm(etnaviv_obj->userptr.task);
         pinned = 0;
         if (mm == current->mm) {
-                pvec = drm_malloc_ab(npages, sizeof(struct page *));
+                pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
                 if (!pvec) {
                         mmput(mm);
                         return -ENOMEM;
@@ -832,7 +832,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
                 pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
                                                !etnaviv_obj->userptr.ro, pvec);
                 if (pinned < 0) {
-                        drm_free_large(pvec);
+                        kvfree(pvec);
                         mmput(mm);
                         return pinned;
                 }
@@ -845,7 +845,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
         }
 
         release_pages(pvec, pinned, 0);
-        drm_free_large(pvec);
+        kvfree(pvec);
 
         work = kmalloc(sizeof(*work), GFP_KERNEL);
         if (!work) {
@@ -879,7 +879,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
                 int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
                 release_pages(etnaviv_obj->pages, npages, 0);
-                drm_free_large(etnaviv_obj->pages);
+                kvfree(etnaviv_obj->pages);
         }
         put_task_struct(etnaviv_obj->userptr.task);
 }
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -87,7 +87,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
          * ours, just free the array we allocated:
          */
         if (etnaviv_obj->pages)
-                drm_free_large(etnaviv_obj->pages);
+                kvfree(etnaviv_obj->pages);
 
         drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
 }
@@ -128,7 +128,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
         npages = size / PAGE_SIZE;
 
         etnaviv_obj->sgt = sgt;
-        etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+        etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
         if (!etnaviv_obj->pages) {
                 ret = -ENOMEM;
                 goto fail;
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -343,9 +343,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
          * Copy the command submission and bo array to kernel space in
          * one go, and do this outside of any locks.
          */
-        bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
-        relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
-        stream = drm_malloc_ab(1, args->stream_size);
+        bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
+        relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
+        stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
         cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
                                     ALIGN(args->stream_size, 8) + 8,
                                     args->nr_bos);
@@ -487,11 +487,11 @@ err_submit_cmds:
         if (cmdbuf)
                 etnaviv_cmdbuf_free(cmdbuf);
         if (stream)
-                drm_free_large(stream);
+                kvfree(stream);
         if (bos)
-                drm_free_large(bos);
+                kvfree(bos);
         if (relocs)
-                drm_free_large(relocs);
+                kvfree(relocs);
 
         return ret;
 }
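One conversion above is worth a note: etnaviv's stream buffer is a plain byte buffer, so it is converted with a count of 1. With nmemb == 1 the overflow check is trivially satisfied and kvmalloc_array() degenerates to a plain kvmalloc(); the two requests below are equivalent (illustrative fragment, not part of the patch):

/* equivalent ways to ask for args->stream_size bytes */
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
stream = kvmalloc(args->stream_size, GFP_KERNEL);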
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -59,7 +59,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
 
         nr_pages = exynos_gem->size >> PAGE_SHIFT;
 
-        exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+        exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+                                           GFP_KERNEL | __GFP_ZERO);
         if (!exynos_gem->pages) {
                 DRM_ERROR("failed to allocate pages.\n");
                 return -ENOMEM;
@@ -101,7 +102,7 @@ err_dma_free:
         dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                        exynos_gem->dma_addr, exynos_gem->dma_attrs);
 err_free:
-        drm_free_large(exynos_gem->pages);
+        kvfree(exynos_gem->pages);
 
         return ret;
 }
@@ -122,7 +123,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
                         (dma_addr_t)exynos_gem->dma_addr,
                         exynos_gem->dma_attrs);
 
-        drm_free_large(exynos_gem->pages);
+        kvfree(exynos_gem->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -559,7 +560,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
         exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
 
         npages = exynos_gem->size >> PAGE_SHIFT;
-        exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
+        exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
         if (!exynos_gem->pages) {
                 ret = -ENOMEM;
                 goto err;
@@ -588,7 +589,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
         return &exynos_gem->base;
 
 err_free_large:
-        drm_free_large(exynos_gem->pages);
+        kvfree(exynos_gem->pages);
 err:
         drm_gem_object_release(&exynos_gem->base);
         kfree(exynos_gem);
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -229,7 +229,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
         int ret;
 
         total = READ_ONCE(dev_priv->mm.object_count);
-        objects = drm_malloc_ab(total, sizeof(*objects));
+        objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
         if (!objects)
                 return -ENOMEM;
 
@@ -274,7 +274,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 
         mutex_unlock(&dev->struct_mutex);
 out:
-        drm_free_large(objects);
+        kvfree(objects);
         return ret;
 }
 
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2504,7 +2504,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 
         if (n_pages > ARRAY_SIZE(stack_pages)) {
                 /* Too big for stack -- allocate temporary array instead */
-                pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+                pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY);
                 if (!pages)
                         return NULL;
         }
@@ -2526,7 +2526,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
         addr = vmap(pages, n_pages, 0, pgprot);
 
         if (pages != stack_pages)
-                drm_free_large(pages);
+                kvfree(pages);
 
         return addr;
 }
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1019,11 +1019,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
         for (i = 0; i < count; i++)
                 total += exec[i].relocation_count;
 
-        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
-        reloc = drm_malloc_ab(total, sizeof(*reloc));
+        reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
+        reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
         if (reloc == NULL || reloc_offset == NULL) {
-                drm_free_large(reloc);
-                drm_free_large(reloc_offset);
+                kvfree(reloc);
+                kvfree(reloc_offset);
                 mutex_lock(&dev->struct_mutex);
                 return -ENOMEM;
         }
@@ -1099,8 +1099,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
          */
 
 err:
-        drm_free_large(reloc);
-        drm_free_large(reloc_offset);
+        kvfree(reloc);
+        kvfree(reloc_offset);
         return ret;
 }
 
@@ -1859,13 +1859,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
         }
 
         /* Copy in the exec list from userland */
-        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
-        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+        exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL);
+        exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL);
         if (exec_list == NULL || exec2_list == NULL) {
                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                           args->buffer_count);
-                drm_free_large(exec_list);
-                drm_free_large(exec2_list);
+                kvfree(exec_list);
+                kvfree(exec2_list);
                 return -ENOMEM;
         }
         ret = copy_from_user(exec_list,
@@ -1874,8 +1874,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
         if (ret != 0) {
                 DRM_DEBUG("copy %d exec entries failed %d\n",
                           args->buffer_count, ret);
-                drm_free_large(exec_list);
-                drm_free_large(exec2_list);
+                kvfree(exec_list);
+                kvfree(exec2_list);
                 return -EFAULT;
         }
 
@@ -1924,8 +1924,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                 }
         }
 
-        drm_free_large(exec_list);
-        drm_free_large(exec2_list);
+        kvfree(exec_list);
+        kvfree(exec2_list);
         return ret;
 }
 
@@ -1943,7 +1943,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                 return -EINVAL;
         }
 
-        exec2_list = drm_malloc_gfp(args->buffer_count,
+        exec2_list = kvmalloc_array(args->buffer_count,
                                     sizeof(*exec2_list),
                                     GFP_TEMPORARY);
         if (exec2_list == NULL) {
@@ -1957,7 +1957,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
         if (ret != 0) {
                 DRM_DEBUG("copy %d exec entries failed %d\n",
                           args->buffer_count, ret);
-                drm_free_large(exec2_list);
+                kvfree(exec2_list);
                 return -EFAULT;
         }
 
@@ -1984,6 +1984,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                 }
         }
 
-        drm_free_large(exec2_list);
+        kvfree(exec2_list);
         return ret;
 }
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3102,7 +3102,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
         int ret = -ENOMEM;
 
         /* Allocate a temporary list of source pages for random access. */
-        page_addr_list = drm_malloc_gfp(n_pages,
+        page_addr_list = kvmalloc_array(n_pages,
                                         sizeof(dma_addr_t),
                                         GFP_TEMPORARY);
         if (!page_addr_list)
@@ -3135,14 +3135,14 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
         DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
                       obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
 
-        drm_free_large(page_addr_list);
+        kvfree(page_addr_list);
 
         return st;
 
 err_sg_alloc:
         kfree(st);
 err_st_alloc:
-        drm_free_large(page_addr_list);
+        kvfree(page_addr_list);
 
         DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
                       obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
         ret = -ENOMEM;
         pinned = 0;
 
-        pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
+        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY);
         if (pvec != NULL) {
                 struct mm_struct *mm = obj->userptr.mm->mm;
                 unsigned int flags = 0;
@@ -555,7 +555,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
         mutex_unlock(&obj->mm.lock);
 
         release_pages(pvec, pinned, 0);
-        drm_free_large(pvec);
+        kvfree(pvec);
 
         i915_gem_object_put(obj);
         put_task_struct(work->task);
@@ -642,7 +642,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
         pinned = 0;
 
         if (mm == current->mm) {
-                pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+                pvec = kvmalloc_array(num_pages, sizeof(struct page *),
                                       GFP_TEMPORARY |
                                       __GFP_NORETRY |
                                       __GFP_NOWARN);
@@ -669,7 +669,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
         if (IS_ERR(pages))
                 release_pages(pvec, pinned, 0);
-        drm_free_large(pvec);
+        kvfree(pvec);
 
         return pages;
 }
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -117,7 +117,7 @@ static int igt_random_insert_remove(void *arg)
 
         mock_engine_reset(engine);
 
-        waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+        waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
         if (!waiters)
                 goto out_engines;
 
@@ -169,7 +169,7 @@ out_order:
 out_bitmap:
         kfree(bitmap);
 out_waiters:
-        drm_free_large(waiters);
+        kvfree(waiters);
 out_engines:
         mock_engine_flush(engine);
         return err;
@@ -187,7 +187,7 @@ static int igt_insert_complete(void *arg)
 
         mock_engine_reset(engine);
 
-        waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+        waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
         if (!waiters)
                 goto out_engines;
 
@@ -254,7 +254,7 @@ static int igt_insert_complete(void *arg)
 out_bitmap:
         kfree(bitmap);
 out_waiters:
-        drm_free_large(waiters);
+        kvfree(waiters);
 out_engines:
         mock_engine_flush(engine);
         return err;
@@ -368,7 +368,7 @@ static int igt_wakeup(void *arg)
 
         mock_engine_reset(engine);
 
-        waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+        waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
         if (!waiters)
                 goto out_engines;
 
@@ -454,7 +454,7 @@ out_waiters:
                 put_task_struct(waiters[n].tsk);
         }
 
-        drm_free_large(waiters);
+        kvfree(waiters);
 out_engines:
         mock_engine_flush(engine);
         return err;
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -50,13 +50,13 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
         struct page **p;
         int ret, i;
 
-        p = drm_malloc_ab(npages, sizeof(struct page *));
+        p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
         if (!p)
                 return ERR_PTR(-ENOMEM);
 
         ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
         if (ret) {
-                drm_free_large(p);
+                kvfree(p);
                 return ERR_PTR(ret);
         }
 
@@ -127,7 +127,7 @@ static void put_pages(struct drm_gem_object *obj)
                         drm_gem_put_pages(obj, msm_obj->pages, true, false);
                 else {
                         drm_mm_remove_node(msm_obj->vram_node);
-                        drm_free_large(msm_obj->pages);
+                        kvfree(msm_obj->pages);
                 }
 
                 msm_obj->pages = NULL;
@@ -707,7 +707,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
                  * ours, just free the array we allocated:
                  */
                 if (msm_obj->pages)
-                        drm_free_large(msm_obj->pages);
+                        kvfree(msm_obj->pages);
 
                 drm_prime_gem_destroy(obj, msm_obj->sgt);
         } else {
@@ -863,7 +863,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
         msm_obj = to_msm_bo(obj);
         msm_obj->sgt = sgt;
-        msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+        msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
         if (!msm_obj->pages) {
                 ret = -ENOMEM;
                 goto fail;
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -87,7 +87,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
         p->dma_reloc_idx = 0;
         /* FIXME: we assume that each relocs use 4 dwords */
         p->nrelocs = chunk->length_dw / 4;
-        p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
+        p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
+                                   GFP_KERNEL | __GFP_ZERO);
         if (p->relocs == NULL) {
                 return -ENOMEM;
         }
@@ -341,7 +342,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                         continue;
                 }
 
-                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
                 size *= sizeof(uint32_t);
                 if (p->chunks[i].kdata == NULL) {
                         return -ENOMEM;
@@ -440,10 +441,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
                 }
         }
         kfree(parser->track);
-        drm_free_large(parser->relocs);
-        drm_free_large(parser->vm_bos);
+        kvfree(parser->relocs);
+        kvfree(parser->vm_bos);
         for (i = 0; i < parser->nchunks; i++)
-                drm_free_large(parser->chunks[i].kdata);
+                kvfree(parser->chunks[i].kdata);
         kfree(parser->chunks);
         kfree(parser->chunks_array);
         radeon_ib_free(parser->rdev, &parser->ib);
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -587,7 +587,7 @@ error_unreserve:
         ttm_eu_backoff_reservation(&ticket, &list);
 
 error_free:
-        drm_free_large(vm_bos);
+        kvfree(vm_bos);
 
         if (r && r != -ERESTARTSYS)
                 DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -314,7 +314,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
         }
 
         /* and then save the content of the ring */
-        *data = drm_malloc_ab(size, sizeof(uint32_t));
+        *data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
         if (!*data) {
                 mutex_unlock(&rdev->ring_lock);
                 return 0;
@@ -356,7 +356,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
         }
 
         radeon_ring_unlock_commit(rdev, ring, false);
-        drm_free_large(data);
+        kvfree(data);
         return 0;
 }
 
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -132,8 +132,8 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
         struct radeon_bo_list *list;
         unsigned i, idx;
 
-        list = drm_malloc_ab(vm->max_pde_used + 2,
-                             sizeof(struct radeon_bo_list));
+        list = kvmalloc_array(vm->max_pde_used + 2,
+                              sizeof(struct radeon_bo_list), GFP_KERNEL);
         if (!list)
                 return NULL;
 
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,7 +39,6 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <drm/drm_cache.h>
-#include <drm/drm_mem_util.h>
 #include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
@@ -53,14 +52,16 @@
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-        ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
+        ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
+                                    GFP_KERNEL | __GFP_ZERO);
 }
 
 static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
-                                          sizeof(*ttm->ttm.pages) +
-                                          sizeof(*ttm->dma_address));
+        ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
+                                        sizeof(*ttm->ttm.pages) +
+                                        sizeof(*ttm->dma_address),
+                                        GFP_KERNEL | __GFP_ZERO);
         ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
 }
 
@@ -208,7 +209,7 @@ EXPORT_SYMBOL(ttm_tt_init);
 
 void ttm_tt_fini(struct ttm_tt *ttm)
 {
-        drm_free_large(ttm->pages);
+        kvfree(ttm->pages);
         ttm->pages = NULL;
 }
 EXPORT_SYMBOL(ttm_tt_fini);
@@ -243,7 +244,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 {
         struct ttm_tt *ttm = &ttm_dma->ttm;
 
-        drm_free_large(ttm->pages);
+        kvfree(ttm->pages);
         ttm->pages = NULL;
         ttm_dma->dma_address = NULL;
 }
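The ttm_dma_tt_alloc_page_directory() hunk above preserves the existing trick of carving two parallel arrays out of one allocation; a simplified sketch of the pattern, with hypothetical local names:

struct page **pages;
dma_addr_t *dma_address;
unsigned long num_pages = 64;        /* hypothetical count */

/* One overflow-checked allocation sized for num_pages page pointers
 * immediately followed by num_pages DMA addresses; a later
 * kvfree(pages) releases both arrays at once. */
pages = kvmalloc_array(num_pages,
                       sizeof(*pages) + sizeof(*dma_address),
                       GFP_KERNEL | __GFP_ZERO);
if (pages)
        dma_address = (dma_addr_t *)(pages + num_pages);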
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -228,7 +228,7 @@ static int udl_prime_create(struct drm_device *dev,
                 return -ENOMEM;
 
         obj->sg = sg;
-        obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
         if (obj->pages == NULL) {
                 DRM_ERROR("obj pages is NULL %d\n", npages);
                 return -ENOMEM;
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -146,7 +146,7 @@ int udl_gem_get_pages(struct udl_gem_object *obj)
 void udl_gem_put_pages(struct udl_gem_object *obj)
 {
         if (obj->base.import_attach) {
-                drm_free_large(obj->pages);
+                kvfree(obj->pages);
                 obj->pages = NULL;
                 return;
         }
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -660,14 +660,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
                 return -EINVAL;
         }
 
-        exec->bo = drm_calloc_large(exec->bo_count,
-                                    sizeof(struct drm_gem_cma_object *));
+        exec->bo = kvmalloc_array(exec->bo_count,
+                                  sizeof(struct drm_gem_cma_object *),
+                                  GFP_KERNEL | __GFP_ZERO);
         if (!exec->bo) {
                 DRM_ERROR("Failed to allocate validated BO pointers\n");
                 return -ENOMEM;
         }
 
-        handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+        handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
         if (!handles) {
                 ret = -ENOMEM;
                 DRM_ERROR("Failed to allocate incoming GEM handles\n");
@@ -699,7 +700,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
         spin_unlock(&file_priv->table_lock);
 
 fail:
-        drm_free_large(handles);
+        kvfree(handles);
         return ret;
 }
 
@@ -737,7 +738,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
          * read the contents back for validation, and I think the
          * bo->vaddr is uncached access.
          */
-        temp = drm_malloc_ab(temp_size, 1);
+        temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
         if (!temp) {
                 DRM_ERROR("Failed to allocate storage for copying "
                           "in bin/render CLs.\n");
@@ -812,7 +813,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
         ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
 
 fail:
-        drm_free_large(temp);
+        kvfree(temp);
         return ret;
 }
 
@@ -832,7 +833,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
         if (exec->bo) {
                 for (i = 0; i < exec->bo_count; i++)
                         drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
-                drm_free_large(exec->bo);
+                kvfree(exec->bo);
         }
 
         while (!list_empty(&exec->unref_list)) {
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -51,7 +51,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
         struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
-        drm_free_large(vgem_obj->pages);
+        kvfree(vgem_obj->pages);
 
         if (obj->import_attach)
                 drm_prime_gem_destroy(obj, vgem_obj->table);
@@ -328,7 +328,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
         npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
 
         obj->table = sg;
-        obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+        obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
         if (!obj->pages) {
                 __vgem_gem_destroy(obj);
                 return ERR_PTR(-ENOMEM);
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -120,13 +120,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
         INIT_LIST_HEAD(&validate_list);
         if (exbuf->num_bo_handles) {
 
-                bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
-                                           sizeof(uint32_t));
-                buflist = drm_calloc_large(exbuf->num_bo_handles,
-                                           sizeof(struct ttm_validate_buffer));
+                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
+                                            sizeof(uint32_t), GFP_KERNEL);
+                buflist = kvmalloc_array(exbuf->num_bo_handles,
+                                         sizeof(struct ttm_validate_buffer),
+                                         GFP_KERNEL | __GFP_ZERO);
                 if (!bo_handles || !buflist) {
-                        drm_free_large(bo_handles);
-                        drm_free_large(buflist);
+                        kvfree(bo_handles);
+                        kvfree(buflist);
                         return -ENOMEM;
                 }
 
@@ -134,16 +135,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                 if (copy_from_user(bo_handles, user_bo_handles,
                                    exbuf->num_bo_handles * sizeof(uint32_t))) {
                         ret = -EFAULT;
-                        drm_free_large(bo_handles);
-                        drm_free_large(buflist);
+                        kvfree(bo_handles);
+                        kvfree(buflist);
                         return ret;
                 }
 
                 for (i = 0; i < exbuf->num_bo_handles; i++) {
                         gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
                         if (!gobj) {
-                                drm_free_large(bo_handles);
-                                drm_free_large(buflist);
+                                kvfree(bo_handles);
+                                kvfree(buflist);
                                 return -ENOENT;
                         }
 
@@ -152,7 +153,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
                         list_add(&buflist[i].head, &validate_list);
                 }
-                drm_free_large(bo_handles);
+                kvfree(bo_handles);
         }
 
         ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -172,7 +173,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
         /* fence the command bo */
         virtio_gpu_unref_list(&validate_list);
-        drm_free_large(buflist);
+        kvfree(buflist);
         dma_fence_put(&fence->f);
         return 0;
 
@@ -180,7 +181,7 @@ out_unresv:
         ttm_eu_backoff_reservation(&ticket, &validate_list);
 out_free:
         virtio_gpu_unref_list(&validate_list);
-        drm_free_large(buflist);
+        kvfree(buflist);
         return ret;
 }
 
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -70,7 +70,6 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_global.h>
 #include <drm/drm_hashtab.h>
-#include <drm/drm_mem_util.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_os_linux.h>
 #include <drm/drm_sarea.h>
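For readers unfamiliar with the replacements: kvmalloc_array() performs an overflow-checked multiply, tries the slab allocator first and falls back to vmalloc() for requests it cannot satisfy contiguously, and kvfree() releases memory from either path. A minimal usage sketch (hypothetical driver code, not part of this patch):

#include <linux/mm.h>        /* kvmalloc_array(), kvfree() */

static int example_get_page_array(unsigned long npages)
{
        struct page **pages;

        /* overflow-checked npages * sizeof(*pages) allocation */
        pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* ... fill and use pages[] ... */

        kvfree(pages);        /* correct for kmalloc or vmalloc backing */
        return 0;
}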