drm fixes for 4.8-rc5
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJXyRRiAAoJEAx081l5xIa+1T0QAI5Al5CF6pEaQkiDywJkQV9f
O20m97uknFOikFk8XlwSX1V+3aY5emNmJ/bcD4iKweptmeoZzLmuU0QPkaZNb5jq
9zyjatJcjwCFO+fHifrL3jfZjwq5Hn9L9RD+aDfKZeMJrD5ds0BIqxW00ensxOEz
w7oT85b7OHhWThvDkXbTcAVCcO7Aj0lpqxtj10ejbI+yzfmju2YCKKpWTusMskub
9xTcCvsKD1GylRyATN/sA/fbEN820lP6ySPU++zge3ObXmPAt95BeIDcLaFd0WA3
bSU6ms30Fg5NuSpE9rOkcbsLjAvGQAbVrGzeMgTY+7SkmreJH21JmjKeKois+bb6
pQc1eePeDxr0sitj5ItS9DE3Ar/KH7aY5WfsK3iU6+TOzimceo6GeYzz/zoeX8Lc
NWEX9K7WmvIH4d4/r7D9j43Q23GOPftT1X07EEy+GOoLWIFZZczwfTp3DCqVDwhF
wCedbAhidn8NDTd0eWck6qS1XCtUMH6u4gC933+Kub4Q3eS/9YRIlizRe3/qzwGw
WcNniZ4ZCm6/6BWftP+c/lTaMH33ui7I6ZIhB0BG1K3SSPik/fT4W2+vGElXkFdb
nYapYej+4Mpyq7dSdp4SRekt2ZHnXDdYVuDmMld74oy5O5G7jHHG8QimaveHITrz
cKxoQTcjA8rPPKex1EnU
=QKse
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-4.8-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Contains fixes for imx, amdgpu, vc4, msm and one nouveau ACPI fix"

* tag 'drm-fixes-for-4.8-rc5' of git://people.freedesktop.org/~airlied/linux:
  drm/amdgpu: record error code when ring test failed
  drm/amd/amdgpu: compute ring test fail during S4 on CI
  drm/amd/amdgpu: sdma resume fail during S4 on CI
  drm/nouveau/acpi: use DSM if bridge does not support D3cold
  drm/imx: fix crtc vblank state regression
  drm/imx: Add active plane reconfiguration support
  drm/msm: protect against faults from copy_from_user() in submit ioctl
  drm/msm: fix use of copy_from_user() while holding spinlock
  drm/vc4: Fix oops when userspace hands in a bad BO.
  drm/vc4: Fix overflow mem unreferencing when the binner runs dry.
  drm/vc4: Free hang state before destroying BO cache.
  drm/vc4: Fix handling of a pm_runtime_get_sync() success case.
  drm/vc4: Use drm_malloc_ab to fix large rendering jobs.
  drm/vc4: Use drm_free_large() on handles to match its allocation.
commit b0be76bf54
@@ -280,7 +280,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
         unsigned i;
-        int r;
+        int r, ret = 0;
 
         for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                 struct amdgpu_ring *ring = adev->rings[i];
@@ -301,10 +301,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                         } else {
                                 /* still not good, but we can live with it */
                                 DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+                                ret = r;
                         }
                 }
         }
-        return 0;
+        return ret;
 }
 
 /*
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
 
 MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
 MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1037,6 +1038,8 @@ static int cik_sdma_resume(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+        cik_sdma_soft_reset(handle);
+
         return cik_sdma_hw_init(adev);
 }
 
@@ -2755,8 +2755,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
         u64 wb_gpu_addr;
         u32 *buf;
         struct bonaire_mqd *mqd;
-
-        gfx_v7_0_cp_compute_enable(adev, true);
+        struct amdgpu_ring *ring;
 
         /* fix up chicken bits */
         tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2791,7 +2790,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
         /* init the queues. Just two for now. */
         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-                struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+                ring = &adev->gfx.compute_ring[i];
 
                 if (ring->mqd_obj == NULL) {
                         r = amdgpu_bo_create(adev,
@@ -2970,6 +2969,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
                 amdgpu_bo_unreserve(ring->mqd_obj);
 
                 ring->ready = true;
+        }
+
+        gfx_v7_0_cp_compute_enable(adev, true);
+
+        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                ring = &adev->gfx.compute_ring[i];
+
                 r = amdgpu_ring_test_ring(ring);
                 if (r)
                         ring->ready = false;
@@ -171,10 +171,34 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
         drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
 }
 
+static int imx_drm_atomic_check(struct drm_device *dev,
+                                struct drm_atomic_state *state)
+{
+        int ret;
+
+        ret = drm_atomic_helper_check_modeset(dev, state);
+        if (ret)
+                return ret;
+
+        ret = drm_atomic_helper_check_planes(dev, state);
+        if (ret)
+                return ret;
+
+        /*
+         * Check modeset again in case crtc_state->mode_changed is
+         * updated in plane's ->atomic_check callback.
+         */
+        ret = drm_atomic_helper_check_modeset(dev, state);
+        if (ret)
+                return ret;
+
+        return ret;
+}
+
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
         .fb_create = drm_fb_cma_create,
         .output_poll_changed = imx_drm_output_poll_changed,
-        .atomic_check = drm_atomic_helper_check,
+        .atomic_check = imx_drm_atomic_check,
         .atomic_commit = drm_atomic_helper_commit,
 };
 
@@ -76,6 +76,8 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
                 crtc->state->event = NULL;
         }
         spin_unlock_irq(&crtc->dev->event_lock);
+
+        drm_crtc_vblank_off(crtc);
 }
 
 static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -175,6 +177,8 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
 static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
 {
+        drm_crtc_vblank_on(crtc);
+
         spin_lock_irq(&crtc->dev->event_lock);
         if (crtc->state->event) {
                 WARN_ON(drm_crtc_vblank_get(crtc));
@@ -319,13 +319,14 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                 return -EINVAL;
 
         /*
-         * since we cannot touch active IDMAC channels, we do not support
-         * resizing the enabled plane or changing its format
+         * We support resizing active plane or changing its format by
+         * forcing CRTC mode change and disabling-enabling plane in plane's
+         * ->atomic_update callback.
          */
         if (old_fb && (state->src_w != old_state->src_w ||
                        state->src_h != old_state->src_h ||
                        fb->pixel_format != old_fb->pixel_format))
-                return -EINVAL;
+                crtc_state->mode_changed = true;
 
         eba = drm_plane_state_to_eba(state);
 
@@ -336,7 +337,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                 return -EINVAL;
 
         if (old_fb && fb->pitches[0] != old_fb->pitches[0])
-                return -EINVAL;
+                crtc_state->mode_changed = true;
 
         switch (fb->pixel_format) {
         case DRM_FORMAT_YUV420:
@@ -372,7 +373,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                         return -EINVAL;
 
                 if (old_fb && old_fb->pitches[1] != fb->pitches[1])
-                        return -EINVAL;
+                        crtc_state->mode_changed = true;
         }
 
         return 0;
@@ -392,8 +393,14 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
         enum ipu_color_space ics;
 
         if (old_state->fb) {
-                ipu_plane_atomic_set_base(ipu_plane, old_state);
-                return;
+                struct drm_crtc_state *crtc_state = state->crtc->state;
+
+                if (!crtc_state->mode_changed) {
+                        ipu_plane_atomic_set_base(ipu_plane, old_state);
+                        return;
+                }
+
+                ipu_disable_plane(plane);
         }
 
         switch (ipu_plane->dp_flow) {
@@ -157,6 +157,12 @@ struct msm_drm_private {
         struct shrinker shrinker;
 
         struct msm_vblank_ctrl vblank_ctrl;
+
+        /* task holding struct_mutex.. currently only used in submit path
+         * to detect and reject faults from copy_from_user() for submit
+         * ioctl.
+         */
+        struct task_struct *struct_mutex_task;
 };
 
 struct msm_format {
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         struct drm_gem_object *obj = vma->vm_private_data;
         struct drm_device *dev = obj->dev;
+        struct msm_drm_private *priv = dev->dev_private;
         struct page **pages;
         unsigned long pfn;
         pgoff_t pgoff;
         int ret;
 
+        /* This should only happen if userspace tries to pass a mmap'd
+         * but unfaulted gem bo vaddr into submit ioctl, triggering
+         * a page fault while struct_mutex is already held.  This is
+         * not a valid use-case so just bail.
+         */
+        if (priv->struct_mutex_task == current)
+                return VM_FAULT_SIGBUS;
+
         /* Make sure we don't parallel update on a fault, nor move or remove
          * something from beneath our feet
          */
@@ -64,6 +64,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
         kfree(submit);
 }
 
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+        if (access_ok(VERIFY_READ, from, n))
+                return __copy_from_user_inatomic(to, from, n);
+        return -EFAULT;
+}
+
 static int submit_lookup_objects(struct msm_gem_submit *submit,
                 struct drm_msm_gem_submit *args, struct drm_file *file)
 {
@@ -71,6 +79,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
         int ret = 0;
 
         spin_lock(&file->table_lock);
+        pagefault_disable();
 
         for (i = 0; i < args->nr_bos; i++) {
                 struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +93,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                  */
                 submit->bos[i].flags = 0;
 
-                ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-                if (ret) {
-                        ret = -EFAULT;
-                        goto out_unlock;
+                ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+                if (unlikely(ret)) {
+                        pagefault_enable();
+                        spin_unlock(&file->table_lock);
+                        ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+                        if (ret)
+                                goto out;
+                        spin_lock(&file->table_lock);
+                        pagefault_disable();
                 }
 
                 if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +141,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
         }
 
 out_unlock:
-        submit->nr_bos = i;
+        pagefault_enable();
         spin_unlock(&file->table_lock);
 
+out:
+        submit->nr_bos = i;
+
         return ret;
 }
 
@@ -377,6 +394,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
         if (ret)
                 return ret;
 
+        priv->struct_mutex_task = current;
+
         submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
         if (!submit) {
                 ret = -ENOMEM;
@@ -468,6 +487,7 @@ out:
         if (ret)
                 msm_gem_submit_free(submit);
 out_unlock:
+        priv->struct_mutex_task = NULL;
         mutex_unlock(&dev->struct_mutex);
         return ret;
 }
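Context for the two msm changes above: copy_from_user() may take a page fault and sleep, which is not allowed while file->table_lock (a spinlock) is held. The fix therefore attempts a non-faulting copy with page faults disabled and, only when that copy would fault, drops the lock for a regular copy_from_user() before re-taking it. Below is a minimal sketch of that pattern with hypothetical my_table/my_entry names, not the driver's own structures, using the 4.8-era access_ok(VERIFY_READ, ...) form shown in the diff.

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical types, only to illustrate the locking pattern. */
struct my_entry { u32 handle; u32 flags; };
struct my_table { spinlock_t lock; };

static int read_entry_locked(struct my_table *t, const void __user *uptr,
                             struct my_entry *entry)
{
        spin_lock(&t->lock);
        pagefault_disable();

        if (!access_ok(VERIFY_READ, uptr, sizeof(*entry)) ||
            __copy_from_user_inatomic(entry, uptr, sizeof(*entry))) {
                /* The copy would fault: leave atomic context before retrying. */
                pagefault_enable();
                spin_unlock(&t->lock);

                if (copy_from_user(entry, uptr, sizeof(*entry)))
                        return -EFAULT;         /* a real fault, give up */

                spin_lock(&t->lock);
                pagefault_disable();
        }

        /* ... use *entry while the lock is still held ... */

        pagefault_enable();
        spin_unlock(&t->lock);
        return 0;
}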
@@ -225,6 +225,17 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
         if (!parent_pdev)
                 return false;
 
+        if (!parent_pdev->bridge_d3) {
+                /*
+                 * Parent PCI bridge is currently not power managed.
+                 * Since userspace can change these afterwards to be on
+                 * the safe side we stick with _DSM and prevent usage of
+                 * _PR3 from the bridge.
+                 */
+                pci_d3cold_disable(pdev);
+                return false;
+        }
+
         parent_adev = ACPI_COMPANION(&parent_pdev->dev);
         if (!parent_adev)
                 return false;
@@ -57,21 +57,21 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
         switch (args->param) {
         case DRM_VC4_PARAM_V3D_IDENT0:
                 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-                if (ret)
+                if (ret < 0)
                         return ret;
                 args->value = V3D_READ(V3D_IDENT0);
                 pm_runtime_put(&vc4->v3d->pdev->dev);
                 break;
         case DRM_VC4_PARAM_V3D_IDENT1:
                 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-                if (ret)
+                if (ret < 0)
                         return ret;
                 args->value = V3D_READ(V3D_IDENT1);
                 pm_runtime_put(&vc4->v3d->pdev->dev);
                 break;
         case DRM_VC4_PARAM_V3D_IDENT2:
                 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-                if (ret)
+                if (ret < 0)
                         return ret;
                 args->value = V3D_READ(V3D_IDENT2);
                 pm_runtime_put(&vc4->v3d->pdev->dev);
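The `if (ret)` to `if (ret < 0)` change above follows the pm_runtime_get_sync() calling convention: it returns a negative errno on failure, but may return 1 rather than 0 when the device was already active, so treating any non-zero value as an error turns a normal success case into a failure and skips the register read. A short, generic sketch of that convention follows, written against a plain struct device rather than the vc4 code.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * pm_runtime_get_sync() returns:
 *   < 0  error (the usage count has still been incremented)
 *     0  device was resumed for this call
 *     1  device was already active
 * Only negative values are failures.
 */
static int do_with_device_powered(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                /* Many callers drop the reference taken by the failed get. */
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... access the hardware here ... */

        pm_runtime_put(dev);
        return 0;
}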
@@ -321,6 +321,15 @@ vc4_first_render_job(struct vc4_dev *vc4)
                                 struct vc4_exec_info, head);
 }
 
+static inline struct vc4_exec_info *
+vc4_last_render_job(struct vc4_dev *vc4)
+{
+        if (list_empty(&vc4->render_job_list))
+                return NULL;
+        return list_last_entry(&vc4->render_job_list,
+                               struct vc4_exec_info, head);
+}
+
 /**
  * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
  * setup parameters.
@@ -534,8 +534,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
                 return -EINVAL;
         }
 
-        exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
-                           GFP_KERNEL);
+        exec->bo = drm_calloc_large(exec->bo_count,
+                                    sizeof(struct drm_gem_cma_object *));
         if (!exec->bo) {
                 DRM_ERROR("Failed to allocate validated BO pointers\n");
                 return -ENOMEM;
@@ -572,8 +572,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
         spin_unlock(&file_priv->table_lock);
 
 fail:
-        kfree(handles);
-        return 0;
+        drm_free_large(handles);
+        return ret;
 }
 
 static int
@@ -608,7 +608,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
          * read the contents back for validation, and I think the
          * bo->vaddr is uncached access.
          */
-        temp = kmalloc(temp_size, GFP_KERNEL);
+        temp = drm_malloc_ab(temp_size, 1);
         if (!temp) {
                 DRM_ERROR("Failed to allocate storage for copying "
                           "in bin/render CLs.\n");
@@ -675,7 +675,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
         ret = vc4_validate_shader_recs(dev, exec);
 
 fail:
-        kfree(temp);
+        drm_free_large(temp);
         return ret;
 }
 
@@ -688,7 +688,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
         if (exec->bo) {
                 for (i = 0; i < exec->bo_count; i++)
                         drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
-                kfree(exec->bo);
+                drm_free_large(exec->bo);
         }
 
         while (!list_empty(&exec->unref_list)) {
@@ -942,8 +942,8 @@ vc4_gem_destroy(struct drm_device *dev)
                 vc4->overflow_mem = NULL;
         }
 
-        vc4_bo_cache_destroy(dev);
-
         if (vc4->hang_state)
                 vc4_free_hang_state(dev, vc4->hang_state);
+
+        vc4_bo_cache_destroy(dev);
 }
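The vc4_gem.c hunks above swap kcalloc()/kmalloc() for drm_calloc_large()/drm_malloc_ab() because the sizes involved (BO tables and bin/render CLs) scale with userspace input: kmalloc() requires physically contiguous memory and becomes unreliable for large jobs, while the drm_*_large helpers of this era fall back to vmalloc() once the request exceeds a page, and drm_free_large() frees either kind. Roughly, the helpers behave like the simplified sketch below; these are illustrative functions, not the verbatim drmP.h definitions.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Simplified sketch of the drm_malloc_ab()/drm_free_large() idea. */
static void *sketch_malloc_ab(size_t nmemb, size_t size)
{
        if (size != 0 && nmemb > SIZE_MAX / size)
                return NULL;                    /* multiplication would overflow */

        if (nmemb * size <= PAGE_SIZE)
                return kmalloc(nmemb * size, GFP_KERNEL);

        /* Large request: do not demand physically contiguous pages. */
        return vmalloc(nmemb * size);
}

static void sketch_free_large(void *ptr)
{
        kvfree(ptr);    /* handles both kmalloc() and vmalloc() memory */
}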
@@ -83,8 +83,10 @@ vc4_overflow_mem_work(struct work_struct *work)
 
         spin_lock_irqsave(&vc4->job_lock, irqflags);
         current_exec = vc4_first_bin_job(vc4);
+        if (!current_exec)
+                current_exec = vc4_last_render_job(vc4);
         if (current_exec) {
-                vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+                vc4->overflow_mem->seqno = current_exec->seqno;
                 list_add_tail(&vc4->overflow_mem->unref_head,
                               &current_exec->unref_list);
                 vc4->overflow_mem = NULL;