2019-05-29 07:12:36 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2010-12-15 07:14:24 +10:00
|
|
|
/*
|
|
|
|
|
* Copyright (C) 2012 Red Hat
|
|
|
|
|
*
|
|
|
|
|
* based in parts on udlfb.c:
|
|
|
|
|
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
|
|
|
|
|
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
|
|
|
|
|
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
|
|
|
|
|
*/
|
2019-07-16 08:42:09 +02:00
|
|
|
|
|
|
|
|
#include <linux/moduleparam.h>
|
2012-07-30 14:06:55 +10:00
|
|
|
#include <linux/dma-buf.h>
|
2010-12-15 07:14:24 +10:00
|
|
|
|
2012-10-02 18:01:07 +01:00
|
|
|
#include <drm/drm_crtc_helper.h>
|
2019-07-16 08:42:09 +02:00
|
|
|
#include <drm/drm_drv.h>
|
|
|
|
|
#include <drm/drm_fourcc.h>
|
2019-11-07 10:43:06 +01:00
|
|
|
#include <drm/drm_gem_shmem_helper.h>
|
2019-07-16 08:42:09 +02:00
|
|
|
#include <drm/drm_modeset_helper.h>
|
|
|
|
|
|
|
|
|
|
#include "udl_drv.h"
|
2010-12-15 07:14:24 +10:00
|
|
|
|
|
|
|
|
/* Alignment helpers: round a damage-rect coordinate up/down to 'a' bytes. */
#define DL_ALIGN_UP(x, a) ALIGN(x, a)
#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)

/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)

/** Read the green component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)

/** Read the blue component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)

/** Return red/green component of a 16 bpp colour number. */
#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)

/** Return green/blue component of a 16 bpp colour number. */
#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)

/** Return 8 bpp colour number from red, green and blue components. */
#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)

/*
 * NOTE(review): dead code below — rgb8()/rgb16() are compiled out and have
 * no callers in this file.  Kept for reference; consider deleting.
 */
#if 0
/* Pack a 32 bpp colour into the device's 8 bpp format. */
static uint8_t rgb8(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return DLO_RGB8(red, grn, blu);
}

/* Pack a 32 bpp colour into the device's 16 bpp (RGB565-style) format. */
static uint16_t rgb16(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
}
#endif
|
|
|
|
|
|
|
|
|
|
/*
 * udl_handle_damage - transfer a damaged rectangle of @fb to the device.
 * @fb: udl framebuffer wrapping a shmem GEM object
 * @x, @y: top-left corner of the damaged rect (pixels)
 * @width, @height: extent of the damaged rect (pixels)
 *
 * Renders the rect line by line into URB transfer buffers via
 * udl_render_hline() and submits them over USB.  Returns 0 on success
 * (including several soft-failure cases that are deliberately swallowed),
 * or -EINVAL if the aligned rect falls outside the framebuffer.
 */
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
		      int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct udl_device *udl = to_udl(dev);
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;
	int log_bpp;

	/* log_bpp is only valid if cpp[0] is a power of two. */
	BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
	log_bpp = __ffs(fb->base.format->cpp[0]);

	/* Nothing to do until the fb has been activated on a crtc. */
	if (!fb->active_16)
		return 0;

	/* Lazily vmap the shmem object; the mapping is cached in shmem->vaddr. */
	if (!fb->shmem->vaddr) {
		void *vaddr;

		vaddr = drm_gem_shmem_vmap(&fb->shmem->base);
		if (IS_ERR(vaddr)) {
			/* Soft failure: drop the damage rather than error out. */
			DRM_ERROR("failed to vmap fb\n");
			return 0;
		}
	}

	/* Widen the rect so each line starts/ends on a word boundary. */
	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height))
		return -EINVAL;

	start_cycles = get_cycles();

	/* May block waiting for a free URB; NULL means shutdown/timeout. */
	urb = udl_get_urb(dev);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	for (i = y; i < y + height ; i++) {
		const int line_offset = fb->base.pitches[0] * i;
		const int byte_offset = line_offset + (x << log_bpp);
		const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
		/*
		 * udl_render_hline() may submit full URBs and swap in a new
		 * one via &urb/&cmd; on failure we still account the stats.
		 */
		if (udl_render_hline(dev, log_bpp, &urb,
				     (char *) fb->shmem->vaddr,
				     &cmd, byte_offset, dev_byte_offset,
				     width << log_bpp,
				     &bytes_identical, &bytes_sent))
			goto error;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len;
		/* 0xAF presumably terminates the command stream — device protocol. */
		if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
			*cmd++ = 0xAF;
		len = cmd - (char *) urb->transfer_buffer;
		/* NOTE(review): submit result is ignored; 'ret' is never read. */
		ret = udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		/* Nothing was written: return the URB to the free pool. */
		udl_urb_completion(urb);

error:
	/* Accounting for debugfs/statistics, updated on all exits past here. */
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);

	return 0;
}
|
|
|
|
|
|
|
|
|
|
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
|
|
|
|
|
struct drm_file *file,
|
|
|
|
|
unsigned flags, unsigned color,
|
|
|
|
|
struct drm_clip_rect *clips,
|
|
|
|
|
unsigned num_clips)
|
|
|
|
|
{
|
|
|
|
|
struct udl_framebuffer *ufb = to_udl_fb(fb);
|
2019-11-07 10:43:06 +01:00
|
|
|
struct dma_buf_attachment *import_attach;
|
2010-12-15 07:14:24 +10:00
|
|
|
int i;
|
2012-07-30 14:06:55 +10:00
|
|
|
int ret = 0;
|
2010-12-15 07:14:24 +10:00
|
|
|
|
2013-12-04 14:13:58 +02:00
|
|
|
drm_modeset_lock_all(fb->dev);
|
|
|
|
|
|
2010-12-15 07:14:24 +10:00
|
|
|
if (!ufb->active_16)
|
2013-12-04 14:13:58 +02:00
|
|
|
goto unlock;
|
2010-12-15 07:14:24 +10:00
|
|
|
|
2019-11-07 10:43:06 +01:00
|
|
|
import_attach = ufb->shmem->base.import_attach;
|
|
|
|
|
|
|
|
|
|
if (import_attach) {
|
|
|
|
|
ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
|
2012-07-30 14:06:55 +10:00
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
|
if (ret)
|
2013-12-04 14:13:58 +02:00
|
|
|
goto unlock;
|
2012-07-30 14:06:55 +10:00
|
|
|
}
|
|
|
|
|
|
2010-12-15 07:14:24 +10:00
|
|
|
for (i = 0; i < num_clips; i++) {
|
2012-07-30 14:06:55 +10:00
|
|
|
ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
|
2010-12-15 07:14:24 +10:00
|
|
|
clips[i].x2 - clips[i].x1,
|
|
|
|
|
clips[i].y2 - clips[i].y1);
|
2012-07-30 14:06:55 +10:00
|
|
|
if (ret)
|
2014-01-20 19:52:29 +01:00
|
|
|
break;
|
2010-12-15 07:14:24 +10:00
|
|
|
}
|
2012-07-30 14:06:55 +10:00
|
|
|
|
2019-11-07 10:43:06 +01:00
|
|
|
if (import_attach)
|
|
|
|
|
ret = dma_buf_end_cpu_access(import_attach->dmabuf,
|
dma-buf, drm, ion: Propagate error code from dma_buf_start_cpu_access()
Drivers, especially i915.ko, can fail during the initial migration of a
dma-buf for CPU access. However, the error code from the driver was not
being propagated back to ioctl and so userspace was blissfully ignorant
of the failure. Rendering corruption ensues.
Whilst fixing the ioctl to return the error code from
dma_buf_start_cpu_access(), also do the same for
dma_buf_end_cpu_access(). For most drivers, dma_buf_end_cpu_access()
cannot fail. i915.ko however, as most drivers would, wants to avoid being
uninterruptible (as would be required to guarrantee no failure when
flushing the buffer to the device). As userspace already has to handle
errors from the SYNC_IOCTL, take advantage of this to be able to restart
the syscall across signals.
This fixes a coherency issue for i915.ko as well as reducing the
uninterruptible hold upon its BKL, the struct_mutex.
Fixes commit c11e391da2a8fe973c3c2398452000bed505851e
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Thu Feb 11 20:04:51 2016 -0200
dma-buf: Add ioctls to allow userspace to flush
Testcase: igt/gem_concurrent_blit/*dmabuf*interruptible
Testcase: igt/prime_mmap_coherency/ioctl-errors
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tiago Vignatti <tiago.vignatti@intel.com>
Cc: Stéphane Marchesin <marcheu@chromium.org>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
CC: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: intel-gfx@lists.freedesktop.org
Cc: devel@driverdev.osuosl.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1458331359-2634-1-git-send-email-chris@chris-wilson.co.uk
2016-03-18 20:02:39 +00:00
|
|
|
DMA_FROM_DEVICE);
|
2013-12-04 14:13:58 +02:00
|
|
|
|
|
|
|
|
unlock:
|
|
|
|
|
drm_modeset_unlock_all(fb->dev);
|
|
|
|
|
|
2012-07-30 14:06:55 +10:00
|
|
|
return ret;
|
2010-12-15 07:14:24 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
|
|
|
|
{
|
|
|
|
|
struct udl_framebuffer *ufb = to_udl_fb(fb);
|
|
|
|
|
|
2019-11-07 10:43:06 +01:00
|
|
|
if (ufb->shmem)
|
|
|
|
|
drm_gem_object_put_unlocked(&ufb->shmem->base);
|
2010-12-15 07:14:24 +10:00
|
|
|
|
|
|
|
|
drm_framebuffer_cleanup(fb);
|
|
|
|
|
kfree(ufb);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Framebuffer vtable: teardown plus DIRTYFB flushing for this USB device. */
static const struct drm_framebuffer_funcs udlfb_funcs = {
	.destroy = udl_user_framebuffer_destroy,
	.dirty = udl_user_framebuffer_dirty,
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
udl_framebuffer_init(struct drm_device *dev,
|
|
|
|
|
struct udl_framebuffer *ufb,
|
2015-11-11 19:11:29 +02:00
|
|
|
const struct drm_mode_fb_cmd2 *mode_cmd,
|
2019-11-07 10:43:06 +01:00
|
|
|
struct drm_gem_shmem_object *shmem)
|
2010-12-15 07:14:24 +10:00
|
|
|
{
|
|
|
|
|
int ret;
|
|
|
|
|
|
2019-11-07 10:43:06 +01:00
|
|
|
ufb->shmem = shmem;
|
drm: Pass 'dev' to drm_helper_mode_fill_fb_struct()
Pass the drm_device to drm_helper_mode_fill_fb_struct() so that we can
populate fb->dev early. Will make it easier to use the fb before we
register it.
@@
identifier fb, mode_cmd;
@@
void drm_helper_mode_fill_fb_struct(
+ struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd
);
@@
identifier fb, mode_cmd;
@@
void drm_helper_mode_fill_fb_struct(
+ struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd
)
{ ... }
@@
function func;
identifier dev;
expression E1, E2;
@@
func(struct drm_device *dev, ...)
{
...
drm_helper_mode_fill_fb_struct(
+ dev,
E1, E2);
...
}
@@
expression E1, E2;
@@
drm_helper_mode_fill_fb_struct(
+ dev,
E1, E2);
v2: Rerun spatch due to code changes
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1481748539-18283-1-git-send-email-ville.syrjala@linux.intel.com
2016-12-14 22:48:59 +02:00
|
|
|
drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd);
|
2012-12-13 23:38:38 +01:00
|
|
|
ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
|
2010-12-15 07:14:24 +10:00
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct drm_framebuffer *
|
|
|
|
|
udl_fb_user_fb_create(struct drm_device *dev,
|
|
|
|
|
struct drm_file *file,
|
2015-11-11 19:11:29 +02:00
|
|
|
const struct drm_mode_fb_cmd2 *mode_cmd)
|
2010-12-15 07:14:24 +10:00
|
|
|
{
|
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
|
struct udl_framebuffer *ufb;
|
|
|
|
|
int ret;
|
2011-12-21 11:23:44 +00:00
|
|
|
uint32_t size;
|
2010-12-15 07:14:24 +10:00
|
|
|
|
2016-05-09 11:04:54 +01:00
|
|
|
obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
|
2010-12-15 07:14:24 +10:00
|
|
|
if (obj == NULL)
|
|
|
|
|
return ERR_PTR(-ENOENT);
|
|
|
|
|
|
2011-12-21 11:23:44 +00:00
|
|
|
size = mode_cmd->pitches[0] * mode_cmd->height;
|
|
|
|
|
size = ALIGN(size, PAGE_SIZE);
|
|
|
|
|
|
|
|
|
|
if (size > obj->size) {
|
|
|
|
|
DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
|
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
}
|
|
|
|
|
|
2010-12-15 07:14:24 +10:00
|
|
|
ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
|
|
|
|
|
if (ufb == NULL)
|
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
2019-11-07 10:43:06 +01:00
|
|
|
ret = udl_framebuffer_init(dev, ufb, mode_cmd,
|
|
|
|
|
to_drm_gem_shmem_obj(obj));
|
2010-12-15 07:14:24 +10:00
|
|
|
if (ret) {
|
|
|
|
|
kfree(ufb);
|
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
}
|
|
|
|
|
return &ufb->base;
|
|
|
|
|
}
|