commit d32e2c6de7

Merge tag 'drm-msm-fixes-2018-08-10' of git://people.freedesktop.org/~robclark/linux into drm-next

Some small msm fixes.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGuZE0VEpatrtxGZtUB6FaQYr6Gf07UVpMsD15ook+5_WQ@mail.gmail.com
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
@@ -20,6 +21,7 @@
 #include <linux/pm_opp.h>
 #include <linux/nvmem-consumer.h>
 #include <linux/iopoll.h>
+#include <linux/slab.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
 #include "a5xx_gpu.h"
@@ -92,12 +94,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
 		ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
 				mem_region, mem_phys, mem_size, NULL);
 	} else {
-		char newname[strlen("qcom/") + strlen(fwname) + 1];
+		char *newname;
 
-		sprintf(newname, "qcom/%s", fwname);
+		newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
 
 		ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
 				mem_region, mem_phys, mem_size, NULL);
+		kfree(newname);
 	}
 	if (ret)
 		goto out;
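The a5xx change above replaces a stack VLA, sized by the length of a caller-supplied firmware name, with a heap allocation via kasprintf()/kfree(), which is what the new kernel.h and slab.h includes are for. Below is a minimal userspace sketch of the same pattern, with POSIX asprintf() standing in for the kernel's kasprintf(); the prefixed_name() helper is hypothetical, not part of the patch.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* Before the fix, a VLA sized by an untrusted string length lived on
 * the stack; afterwards the buffer is heap-allocated, sized exactly,
 * and freed by the caller, so a long name cannot blow the stack. */
static char *prefixed_name(const char *fwname)
{
	char *newname;

	/* asprintf() allocates and formats in one step, like kasprintf();
	 * it returns -1 (leaving newname undefined) on failure. */
	if (asprintf(&newname, "qcom/%s", fwname) < 0)
		return NULL;
	return newname;
}

int main(void)
{
	char *name = prefixed_name("a530_zap.mdt");

	if (!name)
		return EXIT_FAILURE;
	printf("%s\n", name);	/* prints: qcom/a530_zap.mdt */
	free(name);
	return EXIT_SUCCESS;
}

Allocating and formatting in one call also keeps the buffer size from drifting out of sync with the format string, which is the usual failure mode of the strlen()-plus-sprintf() arrangement being removed.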
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -18,7 +18,9 @@
  */
 
 #include <linux/ascii85.h>
+#include <linux/kernel.h>
 #include <linux/pm_opp.h>
+#include <linux/slab.h>
 #include "adreno_gpu.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -71,10 +73,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 {
 	struct drm_device *drm = adreno_gpu->base.dev;
 	const struct firmware *fw = NULL;
-	char newname[strlen("qcom/") + strlen(fwname) + 1];
+	char *newname;
 	int ret;
 
-	sprintf(newname, "qcom/%s", fwname);
+	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+	if (!newname)
+		return ERR_PTR(-ENOMEM);
 
 	/*
 	 * Try first to load from qcom/$fwfile using a direct load (to avoid
@@ -88,11 +92,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 			dev_info(drm->dev, "loaded %s from new location\n",
 				 newname);
 			adreno_gpu->fwloc = FW_LOCATION_NEW;
-			return fw;
+			goto out;
 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
 			dev_err(drm->dev, "failed to load %s: %d\n",
 				newname, ret);
-			return ERR_PTR(ret);
+			fw = ERR_PTR(ret);
+			goto out;
 		}
 	}
 
@@ -107,11 +112,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 			dev_info(drm->dev, "loaded %s from legacy location\n",
 				 newname);
 			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
-			return fw;
+			goto out;
 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
 			dev_err(drm->dev, "failed to load %s: %d\n",
 				fwname, ret);
-			return ERR_PTR(ret);
+			fw = ERR_PTR(ret);
+			goto out;
 		}
 	}
 
@@ -127,16 +133,20 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 			dev_info(drm->dev, "loaded %s with helper\n",
 				 newname);
 			adreno_gpu->fwloc = FW_LOCATION_HELPER;
-			return fw;
+			goto out;
 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
 			dev_err(drm->dev, "failed to load %s: %d\n",
 				newname, ret);
-			return ERR_PTR(ret);
+			fw = ERR_PTR(ret);
+			goto out;
 		}
 	}
 
 	dev_err(drm->dev, "failed to load %s\n", fwname);
-	return ERR_PTR(-ENOENT);
+	fw = ERR_PTR(-ENOENT);
+out:
+	kfree(newname);
+	return fw;
 }
 
 static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
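In adreno_request_fw() the VLA removal forces an ownership change: newname is now heap memory that must be freed on every path, so each early return becomes a jump to a single out: label that does the kfree(). A compressed userspace sketch of that single-exit shape, with a hypothetical try_load() standing in for the request_firmware variants the real function tries in turn:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the firmware loaders tried in turn. */
static int try_load(const char *name)
{
	return strcmp(name, "qcom/known.fw") == 0 ? 0 : -ENOENT;
}

static int request_named(const char *base)
{
	int ret;
	char *newname = malloc(strlen("qcom/") + strlen(base) + 1);

	if (!newname)
		return -ENOMEM;		/* nothing to clean up yet */
	sprintf(newname, "qcom/%s", base);

	ret = try_load(newname);
	if (ret == 0)
		goto out;		/* success path shares the cleanup */

	ret = -ENOENT;			/* not found in any location */
out:
	free(newname);			/* single exit: no path leaks newname */
	return ret;
}

int main(void)
{
	printf("known: %d\n", request_named("known.fw"));	/* 0 */
	printf("other: %d\n", request_named("other.fw"));	/* -ENOENT */
	return 0;
}

Note the one asymmetry the real function keeps: the kasprintf() failure check may still return directly, because at that point there is nothing to free.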
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -421,7 +421,7 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
 
 	ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
 	if (ret) {
-		DRM_ERROR("diable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+		DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
 			DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 			irq->irq_idx, ret);
 	}
@@ -2444,6 +2444,8 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+		if (!phys)
+			continue;
 
 		switch (event) {
 		case MSM_ENC_COMMIT_DONE:
@@ -2461,7 +2463,7 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
 			return -EINVAL;
 		};
 
-		if (phys && fn_wait) {
+		if (fn_wait) {
 			DPU_ATRACE_BEGIN("wait_for_completion_event");
 			ret = fn_wait(phys);
 			DPU_ATRACE_END("wait_for_completion_event");
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -121,7 +121,7 @@ void dpu_power_resource_deinit(struct platform_device *pdev,
 	mutex_lock(&phandle->phandle_lock);
 	list_for_each_entry_safe(curr_client, next_client,
 			&phandle->power_client_clist, list) {
-		pr_err("cliend:%s-%d still registered with refcount:%d\n",
+		pr_err("client:%s-%d still registered with refcount:%d\n",
 			curr_client->name, curr_client->id,
 			curr_client->refcount);
 		curr_client->active = false;
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -263,7 +263,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 			struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_fault *vmf);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -219,7 +219,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
 
-int msm_gem_fault(struct vm_fault *vmf)
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
@@ -227,15 +227,18 @@ int msm_gem_fault(struct vm_fault *vmf)
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
-	int ret;
+	int err;
+	vm_fault_t ret;
 
 	/*
 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
 	 * a reference on obj. So, we dont need to hold one here.
 	 */
-	ret = mutex_lock_interruptible(&msm_obj->lock);
-	if (ret)
+	err = mutex_lock_interruptible(&msm_obj->lock);
+	if (err) {
+		ret = VM_FAULT_NOPAGE;
 		goto out;
+	}
 
 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 		mutex_unlock(&msm_obj->lock);
@@ -245,7 +248,7 @@ int msm_gem_fault(struct vm_fault *vmf)
 	/* make sure we have pages attached now */
 	pages = get_pages(obj);
 	if (IS_ERR(pages)) {
-		ret = PTR_ERR(pages);
+		ret = vmf_error(PTR_ERR(pages));
 		goto out_unlock;
 	}
 
@@ -257,27 +260,11 @@ int msm_gem_fault(struct vm_fault *vmf)
 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
-	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
+	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 out_unlock:
 	mutex_unlock(&msm_obj->lock);
 out:
-	switch (ret) {
-	case -EAGAIN:
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		/*
-		 * EBUSY is ok: this just means that another thread
-		 * already did the job.
-		 */
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	default:
-		return VM_FAULT_SIGBUS;
-	}
+	return ret;
 }
 
 /** get mmap offset */
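The msm_gem_fault() hunks are part of the tree-wide vm_fault_t conversion: errno-style integers (err) and VM_FAULT_* codes (ret) no longer share one variable, vmf_error() translates the former into the latter, and vmf_insert_mixed() returns a fault code directly, which is why the errno-to-VM_FAULT switch at the end of the function could be deleted outright. A small userspace model of the split; the type, constants, and helpers below are illustrative stand-ins, not the kernel's real definitions.

#include <errno.h>
#include <stdio.h>

/* A distinct type for fault codes, so errno values and VM_FAULT_*
 * bits can no longer be mixed up in one 'int ret'. The values here
 * are made up for the demo. */
typedef unsigned int vm_fault_t;

#define VM_FAULT_NOPAGE	0x0001u
#define VM_FAULT_OOM	0x0002u
#define VM_FAULT_SIGBUS	0x0004u

/* Mirrors what the kernel's vmf_error() helper does: map -ENOMEM to
 * an OOM fault and every other error to SIGBUS. */
static vm_fault_t vmf_error(int err)
{
	return err == -ENOMEM ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
}

/* Hypothetical pinning step that fails errno-style. */
static int get_pages(void)
{
	return -ENOMEM;
}

static vm_fault_t fault_handler(void)
{
	int err;		/* errno-style results live here... */
	vm_fault_t ret;		/* ...fault codes live here */

	err = get_pages();
	if (err < 0) {
		ret = vmf_error(err);	/* explicit, audited translation */
		return ret;
	}
	return VM_FAULT_NOPAGE;
}

int main(void)
{
	printf("fault code: 0x%x\n", fault_handler());	/* 0x2 == OOM */
	return 0;
}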