/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_context.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
|
2019-03-08 13:25:19 +00:00
|
|
|
|
|
|
|
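/*
 * Each i915_gem_context keeps one intel_context per engine, tracked in an
 * rbtree (see intel_context_lookup()) and allocated from a global slab.
 * Pinning an intel_context instantiates its HW state via the engine's
 * context ops (ce->ops) and keeps that state resident until the matching
 * unpin.
 */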
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

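/*
 * Find the intel_context for (ctx, engine), if one has already been created.
 * The tree is keyed by engine pointer and protected by ctx->hw_contexts_lock;
 * returns NULL if no context has yet been instantiated for this engine.
 */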
struct intel_context *
intel_context_lookup(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce = NULL;
	struct rb_node *p;

	spin_lock(&ctx->hw_contexts_lock);
	p = ctx->hw_contexts.rb_node;
	while (p) {
		struct intel_context *this =
			rb_entry(p, struct intel_context, node);

		if (this->engine == engine) {
			GEM_BUG_ON(this->gem_context != ctx);
			ce = this;
			break;
		}

		if (this->engine < engine)
			p = p->rb_right;
		else
			p = p->rb_left;
	}
	spin_unlock(&ctx->hw_contexts_lock);

	return ce;
}

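/*
 * Insert ce into ctx->hw_contexts. If an intel_context for this engine
 * already exists, the existing node is returned instead of ce; the caller
 * detects that it lost the race by comparing the return value against the
 * ce it tried to insert.
 */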
struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       struct intel_context *ce)
{
	struct rb_node **p, *parent;
	int err = 0;

	spin_lock(&ctx->hw_contexts_lock);

	parent = NULL;
	p = &ctx->hw_contexts.rb_node;
	while (*p) {
		struct intel_context *this;

		parent = *p;
		this = rb_entry(parent, struct intel_context, node);

		if (this->engine == engine) {
			err = -EEXIST;
			ce = this;
			break;
		}

		if (this->engine < engine)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	if (!err) {
		rb_link_node(&ce->node, parent, p);
		rb_insert_color(&ce->node, &ctx->hw_contexts);
	}

	spin_unlock(&ctx->hw_contexts_lock);

	return ce;
}

void __intel_context_remove(struct intel_context *ce)
{
	struct i915_gem_context *ctx = ce->gem_context;

	spin_lock(&ctx->hw_contexts_lock);
	rb_erase(&ce->node, &ctx->hw_contexts);
	spin_unlock(&ctx->hw_contexts_lock);
}

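/*
 * Lookup-or-create: the fast path is a plain lookup; otherwise allocate and
 * initialise a new intel_context and try to insert it. If we race with a
 * concurrent creator, keep their node and free our speculative allocation.
 */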
static struct intel_context *
intel_context_instance(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine)
{
	struct intel_context *ce, *pos;

	ce = intel_context_lookup(ctx, engine);
	if (likely(ce))
		return ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);

	pos = __intel_context_insert(ctx, engine, ce);
	if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
		intel_context_free(ce);

	GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
	return pos;
}

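/*
 * Look up (or create) the intel_context for (ctx, engine) and return it with
 * ce->pin_mutex held, so the caller can inspect or modify the context while
 * serialised against concurrent pinning. The caller is responsible for
 * dropping ce->pin_mutex when finished.
 */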
struct intel_context *
intel_context_pin_lock(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine)
	__acquires(ce->pin_mutex)
{
	struct intel_context *ce;

	ce = intel_context_instance(ctx, engine);
	if (IS_ERR(ce))
		return ce;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return ERR_PTR(-EINTR);

	return ce;
}

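/*
 * Pin the (ctx, engine) context for use: on the first pin the backend
 * (ce->ops->pin) is called under a runtime PM wakeref to set up the HW
 * state, and references on both the GEM context and the intel_context are
 * taken; subsequent pins are a lockless pin_count increment. A minimal
 * usage sketch (error handling and request construction elided):
 *
 *	ce = intel_context_pin(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *	... build and submit requests against ce ...
 *	intel_context_unpin(ce);
 */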
struct intel_context *
intel_context_pin(struct i915_gem_context *ctx,
		  struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_instance(ctx, engine);
	if (IS_ERR(ce))
		return ce;

	if (likely(atomic_inc_not_zero(&ce->pin_count)))
		return ce;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return ERR_PTR(-EINTR);

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;

		err = 0;
		with_intel_runtime_pm(ce->engine->i915, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		i915_gem_context_get(ctx);
		GEM_BUG_ON(ce->gem_context != ctx);

		mutex_lock(&ctx->mutex);
		list_add(&ce->active_link, &ctx->active_engines);
		mutex_unlock(&ctx->mutex);

		intel_context_get(ce);
		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return ce;

err:
	mutex_unlock(&ce->pin_mutex);
	return ERR_PTR(err);
}

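/*
 * Drop a pin. The last unpin takes ce->pin_mutex, calls the backend unpin,
 * removes the context from its GEM context's active_engines list and
 * releases the references taken at first pin. A temporary self-reference is
 * held so the context cannot be freed while we are still touching it.
 */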
void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/* We may be called from inside intel_context_pin() to evict another */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		ce->ops->unpin(ce);

		mutex_lock(&ce->gem_context->mutex);
		list_del(&ce->active_link);
		mutex_unlock(&ce->gem_context->mutex);

		i915_gem_context_put(ce->gem_context);
		intel_context_put(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}

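/*
 * Retirement callback for ce->active_tracker: once the tracked request has
 * been retired, the context no longer needs to stay pinned on its behalf,
 * so drop that pin.
 */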
static void intel_context_retire(struct i915_active_request *active,
				 struct i915_request *rq)
{
	struct intel_context *ce =
		container_of(active, typeof(*ce), active_tracker);

	intel_context_unpin(ce);
}

void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	kref_init(&ce->ref);

	ce->gem_context = ctx;
	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_request_init(&ce->active_tracker,
				 NULL, intel_context_retire);
}

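/*
 * Global slab bookkeeping: the intel_context cache is registered with
 * i915_globals so that it is shrunk via the .shrink callback and destroyed
 * via .exit along with the driver's other global caches.
 */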
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

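/*
 * Take and release an engine PM wakeref around the period in which the
 * context is actively used on its engine, so that engine (and GT) power
 * management is driven by per-engine activity rather than a single global
 * GEM wakeref.
 */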
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_engine_pm_put(ce->engine);
}