drm: omapdrm: Remove legacy buffer synchronization support

The omapdrm driver uses a custom API to synchronize with the SGX GPU.
This is unusable as such in the mainline kernel as the API is only
partially implemented and requires additional out-of-tree patches.
Furthermore, as no SGX driver is available in the mainline kernel, the
API can't be considered as a stable mainline API.

Now that the driver supports synchronization through fences, remove
legacy buffer synchronization support. The two userspace ioctls are
turned into no-ops to avoid breaking userspace and will be removed in
the future.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
This commit is contained in:
Laurent Pinchart 2017-05-09 01:27:11 +03:00 committed by Tomi Valkeinen
parent a9e6f9f7d6
commit d6f544f6bf
4 changed files with 6 additions and 270 deletions

View File

@@ -365,53 +365,6 @@ static int ioctl_gem_new(struct drm_device *dev, void *data,
&args->handle);
}
/*
 * ioctl_gem_cpu_prep - prepare a GEM buffer for CPU access (legacy SGX API)
 * @dev: DRM device
 * @data: ioctl payload, a struct drm_omap_gem_cpu_prep
 * @file_priv: DRM file of the calling process
 *
 * Waits for completion of any pending hw operation on the buffer that
 * conflicts with @args->op, then records the start of a CPU access of
 * that kind.  Returns 0 on success, -ENOENT for an unknown handle, or a
 * negative error code from the sync core.
 */
static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_omap_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	/* Block until conflicting hw ops complete ... */
	ret = omap_gem_op_sync(obj, args->op);
	/* ... then account the start of the CPU access. */
	if (!ret)
		ret = omap_gem_op_start(obj, args->op);

	/* Drop the reference taken by the handle lookup above. */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
/*
 * ioctl_gem_cpu_fini - end a CPU access to a GEM buffer (legacy SGX API)
 * @dev: DRM device
 * @data: ioctl payload, a struct drm_omap_gem_cpu_fini
 * @file_priv: DRM file of the calling process
 *
 * Records the end of a CPU access previously started by
 * ioctl_gem_cpu_prep(), possibly releasing hw waiters.  Returns 0 on
 * success or -ENOENT for an unknown handle.
 *
 * Fix: the original contained a vestigial "ret = 0; if (!ret)" sequence
 * (a placeholder for a cache flush that was never implemented); the dead
 * condition is removed while keeping the XXX note for the missing flush.
 */
static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_omap_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	/* XXX flushy, flushy: no CPU cache maintenance is performed here,
	 * only the sync bookkeeping is updated.
	 */
	ret = omap_gem_op_finish(obj, args->op);

	/* Drop the reference taken by the handle lookup above. */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -440,9 +393,11 @@ static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] =
DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep,
/* Deprecated, to be removed. */
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, drm_noop,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini,
/* Deprecated, to be removed. */
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, drm_noop,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info,
DRM_AUTH | DRM_RENDER_ALLOW),

View File

@@ -184,11 +184,6 @@ int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int omap_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int omap_gem_fault(struct vm_fault *vmf);
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
void (*fxn)(void *arg), void *arg);
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync(struct drm_gem_object *obj,

View File

@@ -101,19 +101,6 @@ struct omap_gem_object {
* Virtual address, if mapped.
*/
void *vaddr;
/**
* sync-object allocated on demand (if needed)
*
* Per-buffer sync-object for tracking pending and completed hw/dma
* read and write operations.
*/
struct {
uint32_t write_pending;
uint32_t write_complete;
uint32_t read_pending;
uint32_t read_complete;
} *sync;
};
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
@@ -1070,205 +1057,6 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif
/* -----------------------------------------------------------------------------
* Buffer Synchronization
*/
static DEFINE_SPINLOCK(sync_lock);
/*
 * omap_gem_sync_waiter - one pending waiter on a buffer's sync counters
 *
 * Instances sit on the global 'waiters' list until the buffer's
 * completed-op counters reach the recorded targets, at which point
 * notify() fires and the waiter is freed (see sync_op_update()).
 */
struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	/* which access kind(s) this waiter represents (read/write) */
	enum omap_gem_op op;
	/* counter values that must be reached before the waiter is released */
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};
/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
* the read and/or write target count is achieved which can call a user
* callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
* cpu access), etc.
*/
static LIST_HEAD(waiters);
/* Return true while @waiter must keep waiting: a reader waits for the
 * writes it saw pending to complete, a writer waits for the reads it
 * saw pending to complete.
 */
static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	bool read_blocked;
	bool write_blocked;

	read_blocked = (waiter->op & OMAP_GEM_READ) &&
		       omap_obj->sync->write_complete < waiter->write_target;
	write_blocked = (waiter->op & OMAP_GEM_WRITE) &&
			omap_obj->sync->read_complete < waiter->read_target;

	return read_blocked || write_blocked;
}
/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
pr_err("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \
} while (0)
/* Walk the global waiter list and release every waiter whose target
 * counters have been reached: unlink it, invoke its notify() callback
 * and free it.
 *
 * Caller must hold sync_lock (notify() is documented to run with the
 * lock held).
 */
static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;

	/* _safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}
/* Common implementation of omap_gem_op_start()/omap_gem_op_finish():
 * bump the per-buffer pending (@start == true) or completed
 * (@start == false) counters for the access kinds in @op.  Completion
 * re-evaluates the waiter list so satisfied waiters are released.
 *
 * Returns 0 on success or -ENOMEM if the lazy sync-object allocation
 * fails.
 */
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		/* GFP_ATOMIC: we cannot sleep while holding sync_lock */
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		/* a completion may satisfy queued waiters */
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* Mark the start of a hw read and/or write operation on @obj, bumping
 * the corresponding pending counter(s).  Returns 0 or -ENOMEM.
 */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}
/* Mark the completion of a hw read and/or write operation on @obj; may
 * release waiters queued by omap_gem_op_sync()/omap_gem_op_async().
 * Returns 0 or -ENOMEM.
 */
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
/* Single wait queue shared by all omap_gem_op_sync() sleepers. */
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

/* notify() callback used by omap_gem_op_sync(); runs with sync_lock
 * held.  Clears the sleeping task pointer *before* waking, so woken
 * sleepers observe their wait condition (waiter_task == NULL) as true.
 */
static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}
/* Block (interruptibly) until every hw operation on @obj that conflicts
 * with @op and was pending at call time has completed.  Returns 0 when
 * the targets are reached (or there was nothing to wait for), -ENOMEM
 * on allocation failure, or -ERESTARTSYS-style error from the
 * interrupted wait.
 */
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	/* no sync object means no hw op was ever recorded: nothing to wait for */
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		/* wait only for ops pending right now, not for future ones */
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			/* sync_notify() clears waiter_task when the targets hit */
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		/* no-op when the waiter was consumed (set NULL) above */
		kfree(waiter);
	}
	return ret;
}
/* Call fxn(arg) once all hw ops on @obj conflicting with @op and pending
 * at call time have finished: immediately from this context if nothing
 * conflicts, otherwise asynchronously.  fxn() can be called from any
 * context.
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 *
 * Returns 0 on success or -ENOMEM if the waiter allocation fails.
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* no sync object means no hw op was ever recorded: run fxn now */
	if (omap_obj->sync) {
		/* NOTE(review): GFP_ATOMIC although no lock is held here —
		 * presumably so callers in atomic context are safe; confirm.
		 */
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		/* wait only for ops pending right now, not for future ones */
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			/* fxn() will fire later via sync_op_update() */
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
/* -----------------------------------------------------------------------------
* Constructor & Destructor
*/
@@ -1308,8 +1096,6 @@ void omap_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, omap_obj->sgt);
}
kfree(omap_obj->sync);
drm_gem_object_release(obj);
kfree(omap_obj);

View File

@@ -106,8 +106,8 @@ struct drm_omap_gem_info {
#define DRM_OMAP_GET_PARAM 0x00
#define DRM_OMAP_SET_PARAM 0x01
#define DRM_OMAP_GEM_NEW 0x03
#define DRM_OMAP_GEM_CPU_PREP 0x04
#define DRM_OMAP_GEM_CPU_FINI 0x05
#define DRM_OMAP_GEM_CPU_PREP 0x04 /* Deprecated, to be removed */
#define DRM_OMAP_GEM_CPU_FINI 0x05 /* Deprecated, to be removed */
#define DRM_OMAP_GEM_INFO 0x06
#define DRM_OMAP_NUM_IOCTLS 0x07