drm/i915: intel_ring_initialized() must be simple and inline
Based on Chris Wilson's patch from 6 months ago, rebased and adapted.
The current implementation of intel_ring_initialized() is too heavyweight;
it's a non-inlined function that chases several levels of pointers. This
wouldn't matter too much if it were rarely called, but it's used inside
the iterator test of for_each_ring() and is therefore called quite
frequently. So let's make it simple and inline ...
The idea here is to use ring->dev as an indicator showing which engines
have been initialised and are therefore to be included in iterations that
use for_each_ring(). This allows us to avoid multiple memory references
and a (non-inlined) function call on each iteration of each such loop.
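For reference, the reason this check is hot: for_each_ring() re-evaluates the "initialised" test on every iteration of every loop over the engines. A rough sketch of that macro is shown below; the exact definition lives in i915_drv.h and may differ in detail, so treat it as illustrative only, not as part of this patch:

    /* Illustrative sketch, not part of this patch: roughly how
     * for_each_ring() is defined in i915_drv.h around this time.
     * The loop condition calls intel_ring_initialized() once per
     * engine, which is why the check wants to be a trivial inline
     * test (ring->dev != NULL) rather than an out-of-line function
     * chasing several pointers.
     */
    #define for_each_ring(ring__, dev_priv__, i__) \
            for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
                    if (((ring__) = &(dev_priv__)->ring[(i__)]), \
                        intel_ring_initialized((ring__)))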
Fixes regression from
commit 48d823878d
Author: Oscar Mateo <oscar.mateo@intel.com>
Date: Thu Jul 24 17:04:23 2014 +0100
drm/i915/bdw: Generic logical ring init and cleanup
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1449586956-32360-2-git-send-email-david.s.gordon@intel.com
commit b0366a54b4
parent 54a4196188
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1894,8 +1894,10 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 
 	dev_priv = ring->dev->dev_private;
 
-	intel_logical_ring_stop(ring);
-	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+	if (ring->buffer) {
+		intel_logical_ring_stop(ring);
+		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+	}
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -1909,6 +1911,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 	}
 
 	lrc_destroy_wa_ctx_obj(ring);
+	ring->dev = NULL;
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@@ -1931,11 +1934,11 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
-		return ret;
+		goto error;
 
 	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
 	if (ret)
-		return ret;
+		goto error;
 
 	/* As this is the default context, always pin it */
 	ret = intel_lr_context_do_pin(
@@ -1946,9 +1949,13 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 		DRM_ERROR(
 			"Failed to pin and map ringbuffer %s: %d\n",
 			ring->name, ret);
-		return ret;
+		goto error;
 	}
 
 	return 0;
+
+error:
+	intel_logical_ring_cleanup(ring);
+	return ret;
 }
 
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,23 +34,6 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-bool
-intel_ring_initialized(struct intel_engine_cs *ring)
-{
-	struct drm_device *dev = ring->dev;
-
-	if (!dev)
-		return false;
-
-	if (i915.enable_execlists) {
-		struct intel_context *dctx = ring->default_context;
-		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
-
-		return ringbuf->obj;
-	} else
-		return ring->buffer && ring->buffer->obj;
-}
-
 int __intel_ring_space(int head, int tail, int size)
 {
 	int space = head - tail;
@@ -2168,8 +2151,10 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	init_waitqueue_head(&ring->irq_queue);
 
 	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
-	if (IS_ERR(ringbuf))
-		return PTR_ERR(ringbuf);
+	if (IS_ERR(ringbuf)) {
+		ret = PTR_ERR(ringbuf);
+		goto error;
+	}
 	ring->buffer = ringbuf;
 
 	if (I915_NEED_GFX_HWS(dev)) {
@@ -2198,8 +2183,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 error:
-	intel_ringbuffer_free(ringbuf);
-	ring->buffer = NULL;
+	intel_cleanup_ring_buffer(ring);
 	return ret;
 }
 
@@ -2212,12 +2196,14 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	dev_priv = to_i915(ring->dev);
 
-	intel_stop_ring_buffer(ring);
-	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+	if (ring->buffer) {
+		intel_stop_ring_buffer(ring);
+		WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	intel_unpin_ringbuffer_obj(ring->buffer);
-	intel_ringbuffer_free(ring->buffer);
-	ring->buffer = NULL;
+		intel_unpin_ringbuffer_obj(ring->buffer);
+		intel_ringbuffer_free(ring->buffer);
+		ring->buffer = NULL;
+	}
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2226,6 +2212,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);
+	ring->dev = NULL;
 }
 
 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -350,7 +350,11 @@ struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
-bool intel_ring_initialized(struct intel_engine_cs *ring);
+static inline bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+	return ring->dev != NULL;
+}
 
 static inline unsigned
 intel_ring_flag(struct intel_engine_cs *ring)