Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

From: Lucas Stach <l.stach@pengutronix.de>
"not much to de-stage this time. Changes from Philipp and Souptick to
use memset32 more and switch the fault handler to the new vm_fault_t
and two small fixes for issues that can be hit in rare corner cases
from me."

Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1533563808.2809.7.camel@pengutronix.de
Committed by Dave Airlie on 2018-08-08 06:07:06 +10:00
commit 569f0a8694

7 changed files with 37 additions and 45 deletions

drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -18,6 +18,7 @@
 #include <linux/time64.h>
 #include <linux/types.h>
 #include <linux/sizes.h>
+#include <linux/mm_types.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -53,7 +54,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_fault *vmf);
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);

drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -169,31 +169,30 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return obj->ops->mmap(obj, vma);
 }
 
-int etnaviv_gem_fault(struct vm_fault *vmf)
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct page **pages, *page;
 	pgoff_t pgoff;
-	int ret;
+	int err;
 
 	/*
 	 * Make sure we don't parallel update on a fault, nor move or remove
-	 * something from beneath our feet. Note that vm_insert_page() is
+	 * something from beneath our feet. Note that vmf_insert_page() is
 	 * specifically coded to take care of this, so we don't have to.
 	 */
-	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
-	if (ret)
-		goto out;
+	err = mutex_lock_interruptible(&etnaviv_obj->lock);
+	if (err)
+		return VM_FAULT_NOPAGE;
 
 	/* make sure we have pages attached now */
 	pages = etnaviv_gem_get_pages(etnaviv_obj);
 	mutex_unlock(&etnaviv_obj->lock);
 	if (IS_ERR(pages)) {
-		ret = PTR_ERR(pages);
-		goto out;
+		err = PTR_ERR(pages);
+		return vmf_error(err);
 	}
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
@@ -204,25 +203,7 @@ int etnaviv_gem_fault(struct vm_fault *vmf)
 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
 
-	ret = vm_insert_page(vma, vmf->address, page);
-
-out:
-	switch (ret) {
-	case -EAGAIN:
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		/*
-		 * EBUSY is ok: this just means that another thread
-		 * already did the job.
-		 */
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	default:
-		return VM_FAULT_SIGBUS;
-	}
+	return vmf_insert_page(vma, vmf->address, page);
 }
 
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)

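Note on the conversion above: vmf_insert_page() and vmf_error() are core-MM
helpers that return a vm_fault_t directly, which is what lets the patch drop
the open-coded errno-to-VM_FAULT_* switch. A minimal sketch of the resulting
handler shape, with hypothetical example_* names that are not part of this
patch:

	/* Sketch only: example_fault() and example_get_page() are made up. */
	static vm_fault_t example_fault(struct vm_fault *vmf)
	{
		struct page *page;
		int err = example_get_page(vmf->vma, vmf->pgoff, &page);

		if (err)
			/* vmf_error() maps an errno to VM_FAULT_OOM/VM_FAULT_SIGBUS */
			return vmf_error(err);

		/* returns VM_FAULT_NOPAGE on success, a VM_FAULT_* code on failure */
		return vmf_insert_page(vmf->vma, vmf->address, page);
	}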
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -388,9 +388,9 @@ static void submit_cleanup(struct kref *kref)
 		dma_fence_put(submit->in_fence);
 	if (submit->out_fence) {
 		/* first remove from IDR, so fence can not be found anymore */
-		mutex_lock(&submit->gpu->fence_idr_lock);
+		mutex_lock(&submit->gpu->fence_lock);
 		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-		mutex_unlock(&submit->gpu->fence_idr_lock);
+		mutex_unlock(&submit->gpu->fence_lock);
 		dma_fence_put(submit->out_fence);
 	}
 	kfree(submit->pmrs);

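The comment "first remove from IDR, so fence can not be found anymore" is the
ordering constraint here: the fence id is visible to userspace and lookups
resolve it under the same gpu->fence_lock, so the entry has to leave the IDR
before the final dma_fence_put() can drop the last reference. A hedged sketch
of the lookup side this protects (example_fence_lookup() is hypothetical; the
driver's real lookup lives elsewhere):

	/* Sketch only: shows why IDR removal must precede the final put. */
	static struct dma_fence *example_fence_lookup(struct etnaviv_gpu *gpu,
						      u32 id)
	{
		struct dma_fence *fence;

		mutex_lock(&gpu->fence_lock);
		fence = idr_find(&gpu->fence_idr, id);
		if (fence)
			/* returns NULL if the refcount already hit zero */
			fence = dma_fence_get_rcu(fence);
		mutex_unlock(&gpu->fence_lock);

		return fence;
	}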
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 
 free_buffer:
 	etnaviv_cmdbuf_free(&gpu->buffer);
+	gpu->buffer.suballoc = NULL;
 destroy_iommu:
 	etnaviv_iommu_destroy(gpu->mmu);
 	gpu->mmu = NULL;
@@ -1726,7 +1727,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
-	mutex_init(&gpu->fence_idr_lock);
+	mutex_init(&gpu->fence_lock);
 
 	/* Map registers: */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

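The added gpu->buffer.suballoc = NULL; is one of the corner-case fixes from
the pull message: it mirrors the gpu->mmu = NULL; under the next label,
clearing the pointer after the command buffer is freed on the init error path
so later teardown or suspend code can tell the buffer is gone instead of
touching a stale suballocation. A hedged sketch of the consumer-side guard
this enables (example_gpu_suspend() and example_save_gpu_state() are
hypothetical, assuming the suspend path checks the pointer before use):

	/* Sketch only: NULL-ing the pointer turns a use-after-free into a no-op. */
	static int example_gpu_suspend(struct etnaviv_gpu *gpu)
	{
		if (!gpu->buffer.suballoc)
			return 0;	/* init failed; nothing to quiesce */

		example_save_gpu_state(gpu);	/* hypothetical */
		return 0;
	}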
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -118,7 +118,7 @@ struct etnaviv_gpu {
 	u32 idle_mask;
 
 	/* Fencing support */
-	struct mutex fence_idr_lock;
+	struct mutex fence_lock;
 	struct idr fence_idr;
 	u32 next_fence;
 	u32 active_fence;

drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -119,8 +119,7 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
 
 static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 {
-	u32 *p;
-	int ret, i;
+	int ret;
 
 	/* allocate scratch page */
 	etnaviv_domain->base.bad_page_cpu =
@@ -131,9 +130,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 		ret = -ENOMEM;
 		goto fail_mem;
 	}
-	p = etnaviv_domain->base.bad_page_cpu;
-	for (i = 0; i < SZ_4K / 4; i++)
-		*p++ = 0xdead55aa;
+
+	memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
+		 SZ_4K / sizeof(u32));
 
 	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
 					       SZ_4K, &etnaviv_domain->pta_dma,

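memset32() (declared in linux/string.h) fills a buffer with a repeating 32-bit
pattern and takes its count in 32-bit words, hence SZ_4K / sizeof(u32) for the
4 KiB scratch page; it replaces the open-coded loop one-for-one and can fall
back to an architecture-optimized implementation. A minimal sketch of the
equivalence, assuming page_cpu points at a 4 KiB buffer (hypothetical name):

	u32 *p = page_cpu;
	size_t i;

	/* open-coded variant, as removed by the patch */
	for (i = 0; i < SZ_4K / sizeof(u32); i++)
		*p++ = 0xdead55aa;

	/* helper variant: same effect; the count is in u32 words, not bytes */
	memset32(page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));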
drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -140,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
 int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 			   struct etnaviv_gem_submit *submit)
 {
-	int ret;
+	int ret = 0;
+
+	/*
+	 * Hold the fence lock across the whole operation to avoid jobs being
+	 * pushed out of order with regard to their sched fence seqnos as
+	 * allocated in drm_sched_job_init.
+	 */
+	mutex_lock(&submit->gpu->fence_lock);
 
 	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
 				 submit->cmdbuf.ctx);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-	mutex_lock(&submit->gpu->fence_idr_lock);
 	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
 						submit->out_fence, 0,
 						INT_MAX, GFP_KERNEL);
-	mutex_unlock(&submit->gpu->fence_idr_lock);
-	if (submit->out_fence_id < 0)
-		return -ENOMEM;
+	if (submit->out_fence_id < 0) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	/* the scheduler holds on to the job now */
 	kref_get(&submit->refcount);
 
 	drm_sched_entity_push_job(&submit->sched_job, sched_entity);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&submit->gpu->fence_lock);
+
+	return ret;
 }
 
 int etnaviv_sched_init(struct etnaviv_gpu *gpu)
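The widened lock is the second corner-case fix: drm_sched_job_init() assigns
the job's scheduler fence seqno, so seqno allocation and the actual push must
be atomic with respect to other submitters, or jobs can reach the entity queue
out of seqno order. A short sketch of the interleaving the old code allowed,
with two concurrent submitters A and B:

	/*
	 * Before this change (lock held only around the IDR):
	 *
	 *   A: drm_sched_job_init()          -> fence seqno 1
	 *   B: drm_sched_job_init()          -> fence seqno 2
	 *   B: drm_sched_entity_push_job()
	 *   A: drm_sched_entity_push_job()
	 *
	 * Jobs arrive in the entity queue as (2, 1). Holding
	 * gpu->fence_lock from before drm_sched_job_init() until after
	 * drm_sched_entity_push_job() makes init+push atomic per submit,
	 * so push order always matches seqno order.
	 */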