drm/i915: Move frontbuffer CS write tracking from ggtt vma to object

I tried to avoid having to track the write for every VMA by only
tracking writes to the ggtt. However, for the purposes of frontbuffer
tracking this is insufficient, as we need to invalidate around writes not
just to the ggtt but to all aliased ppgtt views of the framebuffer. By
moving the critical section to the object and only doing so for
framebuffer writes we can reduce the tracking even further by only
watching framebuffers and not vma.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161116190704.5293-1-chris@chris-wilson.co.uk
Tested-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Author: Chris Wilson
Date:   2016-11-16 19:07:04 +00:00
Parent: d806e6828b
Commit: 5b8c8aec8e

7 changed files with 19 additions and 20 deletions
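In short: the per-vma last_write tracker goes away, framebuffer objects gain
an obj->frontbuffer_write tracker that is armed only when
intel_fb_obj_invalidate() reports that the object really is a frontbuffer,
and the tracker's retire callback flushes the object once the last CS write
completes. Below is a minimal userspace sketch of that pattern, not the
kernel code: the request machinery is collapsed into a direct retire() call,
and every name other than frontbuffer_write/frontbuffer_retire is a
simplified stand-in.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* same trick as the kernel macro: recover the container from a member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gem_request {
	unsigned int seqno;	/* stand-in for drm_i915_gem_request */
};

struct gem_active {
	struct gem_request *request;
	void (*retire)(struct gem_active *active, struct gem_request *rq);
};

struct gem_object {
	unsigned int frontbuffer_bits;	/* non-zero only for framebuffers */
	struct gem_active frontbuffer_write;
};

/* stand-in for intel_fb_obj_flush(obj, true, ORIGIN_CS) */
static void fb_obj_flush(struct gem_object *obj)
{
	printf("flush frontbuffer bits %#x\n", obj->frontbuffer_bits);
}

/* mirrors frontbuffer_retire(): the embedded tracker gives back the object */
static void frontbuffer_retire(struct gem_active *active,
			       struct gem_request *rq)
{
	struct gem_object *obj =
		container_of(active, struct gem_object, frontbuffer_write);

	(void)rq;
	fb_obj_flush(obj);
}

/* mirrors the new bool-returning intel_fb_obj_invalidate() */
static bool fb_obj_invalidate(struct gem_object *obj)
{
	return obj->frontbuffer_bits != 0;
}

int main(void)
{
	struct gem_object fb = {
		.frontbuffer_bits = 0x1,
		.frontbuffer_write = { .retire = frontbuffer_retire },
	};
	struct gem_request rq = { .seqno = 1 };

	/* move_to_active with EXEC_OBJECT_WRITE: arm the tracker only
	 * if the object is actually a frontbuffer */
	if (fb_obj_invalidate(&fb))
		fb.frontbuffer_write.request = &rq;

	/* request completion: the tracker retires, flushing the object */
	if (fb.frontbuffer_write.request) {
		fb.frontbuffer_write.retire(&fb.frontbuffer_write, &rq);
		fb.frontbuffer_write.request = NULL;
	}

	return 0;
}

The point of the container_of() pattern, here as in the patch, is that the
tracker embedded in the object lets the retire callback recover the object
without any extra backpointer, and because only framebuffers arm the
tracker, writes to ordinary objects cost nothing.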

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c

@@ -3886,6 +3886,16 @@ out:
 	return err;
 }
 
+static void
+frontbuffer_retire(struct i915_gem_active *active,
+		   struct drm_i915_gem_request *request)
+{
+	struct drm_i915_gem_object *obj =
+		container_of(active, typeof(*obj), frontbuffer_write);
+
+	intel_fb_obj_flush(obj, true, ORIGIN_CS);
+}
+
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops)
 {
@@ -3903,6 +3913,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->resv = &obj->__builtin_resv;
 
 	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
+	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
 
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);

--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -1276,9 +1276,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 
 	if (flags & EXEC_OBJECT_WRITE) {
-		i915_gem_active_set(&vma->last_write, req);
-
-		intel_fb_obj_invalidate(obj, ORIGIN_CS);
+		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+			i915_gem_active_set(&obj->frontbuffer_write, req);
 
 		/* update for the implicit flush after a batch */
 		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h

@@ -103,6 +103,7 @@ struct drm_i915_gem_object {
 
 	atomic_t frontbuffer_bits;
 	unsigned int frontbuffer_ggtt_origin; /* write once */
+	struct i915_gem_active frontbuffer_write;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	unsigned int tiling_and_stride;

--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c

@@ -886,8 +886,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 
 	for (i = 0; i < I915_NUM_ENGINES; i++)
 		err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
-	err->wseqno = __active_get_seqno(&vma->last_write);
-	err->engine = __active_get_engine_id(&vma->last_write);
+	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
+	err->engine = __active_get_engine_id(&obj->frontbuffer_write);
 
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;

--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c

@@ -68,16 +68,6 @@ i915_vma_retire(struct i915_gem_active *active,
 	}
 }
 
-static void
-i915_ggtt_retire__write(struct i915_gem_active *active,
-			struct drm_i915_gem_request *request)
-{
-	struct i915_vma *vma =
-		container_of(active, struct i915_vma, last_write);
-
-	intel_fb_obj_flush(vma->obj, true, ORIGIN_CS);
-}
-
 static struct i915_vma *
 __i915_vma_create(struct drm_i915_gem_object *obj,
 		  struct i915_address_space *vm,
@@ -96,8 +86,6 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&vma->exec_list);
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
 		init_request_active(&vma->last_read[i], i915_vma_retire);
-	init_request_active(&vma->last_write,
-			    i915_is_ggtt(vm) ? i915_ggtt_retire__write : NULL);
 	init_request_active(&vma->last_fence, NULL);
 	list_add(&vma->vm_link, &vm->unbound_list);
 	vma->vm = vm;

--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h

@@ -80,7 +80,6 @@ struct i915_vma {
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
-	struct i915_gem_active last_write;
 	struct i915_gem_active last_fence;
 
 	/**

--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h

@@ -53,16 +53,17 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
-static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 					   enum fb_op_origin origin)
 {
 	unsigned int frontbuffer_bits;
 
 	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
 	if (!frontbuffer_bits)
-		return;
+		return false;
 
 	__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+	return true;
 }
 
 /**