drm/i915: Use drm_i915_gem_object as the preferred type
A glorified s/obj_priv/obj/ with a net reduction of over a 100 lines and many characters!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
commit 05394f3975
parent 185cbcb304
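The change is mechanical: `struct drm_i915_gem_object` embeds its `struct drm_gem_object` as `base`, so functions take the i915 wrapper directly and the `obj_priv = to_intel_bo(obj)` lookup disappears; the base object stays reachable as `obj->base` wherever the DRM core API still wants it. A minimal before/after sketch of the pattern (the function `show_offset()` is a made-up stand-in, not code from this commit):

/* Before: take the base GEM object and look up the i915 wrapper. */
static u32 show_offset(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	return obj_priv->gtt_offset;
}

/* After: take the i915 object directly; use obj->base (e.g.
 * &obj->base, obj->base.size) where core helpers need the base. */
static u32 show_offset(struct drm_i915_gem_object *obj)
{
	return obj->gtt_offset;
}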
@@ -87,19 +87,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}

-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
-if (obj_priv->user_pin_count > 0)
+if (obj->user_pin_count > 0)
return "P";
-else if (obj_priv->pin_count > 0)
+else if (obj->pin_count > 0)
return "p";
else
return " ";
}

-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
-switch (obj_priv->tiling_mode) {
+switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
@@ -140,7 +140,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct list_head *head;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;

@@ -175,12 +175,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}

total_obj_size = total_gtt_size = count = 0;
-list_for_each_entry(obj_priv, head, mm_list) {
+list_for_each_entry(obj, head, mm_list) {
seq_printf(m, " ");
-describe_obj(m, obj_priv);
+describe_obj(m, obj);
seq_printf(m, "\n");
-total_obj_size += obj_priv->base.size;
-total_gtt_size += obj_priv->gtt_space->size;
+total_obj_size += obj->base.size;
+total_gtt_size += obj->gtt_space->size;
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -251,14 +251,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "%d prepares\n", work->pending);

if (work->old_fb_obj) {
-struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
-if(obj_priv)
-seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+struct drm_i915_gem_object *obj = work->old_fb_obj;
+if (obj)
+seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
if (work->pending_flip_obj) {
-struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
-if(obj_priv)
-seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+struct drm_i915_gem_object *obj = work->pending_flip_obj;
+if (obj)
+seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -421,17 +421,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
-struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

seq_printf(m, "Fenced object[%2d] = ", i);
if (obj == NULL)
seq_printf(m, "unused");
else
-describe_obj(m, to_intel_bo(obj));
+describe_obj(m, obj);
seq_printf(m, "\n");
}
mutex_unlock(&dev->struct_mutex);

return 0;
}

@@ -465,14 +465,14 @@ static int i915_hws_info(struct seq_file *m, void *data)

static void i915_dump_object(struct seq_file *m,
struct io_mapping *mapping,
-struct drm_i915_gem_object *obj_priv)
+struct drm_i915_gem_object *obj)
{
int page, page_count, i;

-page_count = obj_priv->base.size / PAGE_SIZE;
+page_count = obj->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
u32 *mem = io_mapping_map_wc(mapping,
-obj_priv->gtt_offset + page * PAGE_SIZE);
+obj->gtt_offset + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
io_mapping_unmap(mem);
@@ -484,25 +484,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
-struct drm_gem_object *obj;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;
int ret;

ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;

-list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-obj = &obj_priv->base;
-if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-seq_printf(m, "--- gtt_offset = 0x%08x\n",
-obj_priv->gtt_offset);
-i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
}
}

mutex_unlock(&dev->struct_mutex);

return 0;
}

@@ -525,7 +521,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
if (ret)
return ret;

-if (!ring->gem_object) {
+if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
u8 *virt = ring->virtual_start;
@@ -983,7 +979,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
-describe_obj(m, to_intel_bo(fb->obj));
+describe_obj(m, fb->obj);
seq_printf(m, "\n");

list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -995,7 +991,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
-describe_obj(m, to_intel_bo(fb->obj));
+describe_obj(m, fb->obj);
seq_printf(m, "\n");
}
@@ -157,7 +157,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}

if (init->ring_size != 0) {
-if (dev_priv->render_ring.gem_object != NULL) {
+if (dev_priv->render_ring.obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
@@ -32,7 +32,6 @@

#include "i915_reg.h"
#include "intel_bios.h"
-#include "i915_trace.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
@@ -90,7 +89,7 @@ struct drm_i915_gem_phys_object {
int id;
struct page **page_list;
drm_dma_handle_t *handle;
-struct drm_gem_object *cur_obj;
+struct drm_i915_gem_object *cur_obj;
};

struct mem_block {
@@ -125,7 +124,7 @@ struct drm_i915_master_private {
#define I915_FENCE_REG_NONE -1

struct drm_i915_fence_reg {
-struct drm_gem_object *obj;
+struct drm_i915_gem_object *obj;
struct list_head lru_list;
bool gpu;
};
@@ -280,9 +279,9 @@ typedef struct drm_i915_private {
uint32_t counter;
unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
-struct drm_gem_object *seqno_obj;
-struct drm_gem_object *pwrctx;
-struct drm_gem_object *renderctx;
+struct drm_i915_gem_object *seqno_obj;
+struct drm_i915_gem_object *pwrctx;
+struct drm_i915_gem_object *renderctx;

struct resource mch_res;

@@ -690,14 +689,14 @@ typedef struct drm_i915_private {
u8 fmax;
u8 fstart;

-u64 last_count1;
-unsigned long last_time1;
-u64 last_count2;
-struct timespec last_time2;
-unsigned long gfx_power;
-int c_m;
-int r_t;
-u8 corr;
+u64 last_count1;
+unsigned long last_time1;
+u64 last_count2;
+struct timespec last_time2;
+unsigned long gfx_power;
+int c_m;
+int r_t;
+u8 corr;
spinlock_t *mchdev_lock;

enum no_fbc_reason no_fbc_reason;
@@ -711,7 +710,6 @@ typedef struct drm_i915_private {
struct intel_fbdev *fbdev;
} drm_i915_private_t;

-/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object base;

@@ -918,7 +916,7 @@ enum intel_chip_family {
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
@@ -947,6 +945,8 @@ enum intel_chip_family {
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)

+#include "i915_trace.h"

extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
@@ -1085,14 +1085,15 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-size_t size);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
+int i915_gem_object_pin(struct drm_i915_gem_object *obj,
+uint32_t alignment,
bool map_and_fenceable);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

/**
@@ -1104,14 +1105,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}

-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+int i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+int i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
bool interruptible);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
@@ -1131,23 +1132,23 @@ int i915_do_wait_request(struct drm_device *dev,
bool interruptible,
struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
bool pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
-struct drm_gem_object *obj,
+struct drm_i915_gem_object *obj,
int id,
int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
-struct drm_gem_object *obj);
+struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int i915_gem_gtt_bind_object(struct drm_gem_object *obj);
-void i915_gem_gtt_unbind_object(struct drm_gem_object *obj);
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);

/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1157,19 +1158,20 @@ int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);

/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);

/* i915_debugfs.c */
@@ -1251,10 +1253,10 @@ extern void intel_display_print_error_state(struct seq_file *m,
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
-if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \
== NULL) \
-LOCK_TEST_WITH_RETURN(dev, file_priv); \
+LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
[File diff suppressed because it is too large]
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
}

void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
+i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;

-DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;

@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
-i915_gem_dump_page(obj_priv->pages[page],
+i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
-obj_priv->gtt_offset +
+obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,

#if WATCH_COHERENCY
void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
-struct drm_device *dev = obj->dev;
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;

DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
-__func__, obj, obj_priv->gtt_offset, handle,
+__func__, obj, obj->gtt_offset, handle,
obj->size / 1024);

-gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
-obj->size);
+gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;

-backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
+backing_map = kmap_atomic(obj->pages[page], KM_USER0);

if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
-(int)(obj_priv->gtt_offset +
+(int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
@@ -32,12 +32,11 @@
#include "i915_drm.h"

static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
-struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
-list_add(&obj_priv->evict_list, unwind);
-drm_gem_object_reference(&obj_priv->base);
-return drm_mm_scan_add_block(obj_priv->gtt_space);
+list_add(&obj->evict_list, unwind);
+drm_gem_object_reference(&obj->base);
+return drm_mm_scan_add_block(obj->gtt_space);
}

int
@@ -46,7 +45,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;
int ret = 0;

i915_gem_retire_requests(dev);
@@ -96,42 +95,42 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

/* First see if there is a large enough contiguous idle region... */
-list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-if (mark_free(obj_priv, &unwind_list))
+list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+if (mark_free(obj, &unwind_list))
goto found;
}

/* Now merge in the soon-to-be-expired objects... */
-list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
-if (obj_priv->base.write_domain || obj_priv->pin_count)
+if (obj->base.write_domain || obj->pin_count)
continue;

-if (mark_free(obj_priv, &unwind_list))
+if (mark_free(obj, &unwind_list))
goto found;
}

/* Finally add anything with a pending flush (in order of retirement) */
-list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-if (obj_priv->pin_count)
+list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+if (obj->pin_count)
continue;

-if (mark_free(obj_priv, &unwind_list))
+if (mark_free(obj, &unwind_list))
goto found;
}
-list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-if (! obj_priv->base.write_domain || obj_priv->pin_count)
+list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+if (! obj->base.write_domain || obj->pin_count)
continue;

-if (mark_free(obj_priv, &unwind_list))
+if (mark_free(obj, &unwind_list))
goto found;
}

/* Nothing found, clean up and bail out! */
-list_for_each_entry(obj_priv, &unwind_list, evict_list) {
-ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+list_for_each_entry(obj, &unwind_list, evict_list) {
+ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
-drm_gem_object_unreference(&obj_priv->base);
+drm_gem_object_unreference(&obj->base);
}

/* We expect the caller to unpin, evict all and try again, or give up.
@@ -145,26 +144,26 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
-obj_priv = list_first_entry(&unwind_list,
-struct drm_i915_gem_object,
-evict_list);
-if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-list_move(&obj_priv->evict_list, &eviction_list);
+obj = list_first_entry(&unwind_list,
+struct drm_i915_gem_object,
+evict_list);
+if (drm_mm_scan_remove_block(obj->gtt_space)) {
+list_move(&obj->evict_list, &eviction_list);
continue;
}
-list_del(&obj_priv->evict_list);
-drm_gem_object_unreference(&obj_priv->base);
+list_del(&obj->evict_list);
+drm_gem_object_unreference(&obj->base);
}

/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
-obj_priv = list_first_entry(&eviction_list,
-struct drm_i915_gem_object,
-evict_list);
+obj = list_first_entry(&eviction_list,
+struct drm_i915_gem_object,
+evict_list);
if (ret == 0)
-ret = i915_gem_object_unbind(&obj_priv->base);
-list_del(&obj_priv->evict_list);
-drm_gem_object_unreference(&obj_priv->base);
+ret = i915_gem_object_unbind(obj);
+list_del(&obj->evict_list);
+drm_gem_object_unreference(&obj->base);
}

return ret;
@@ -203,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-int ret = i915_gem_object_unbind(&obj->base);
+int ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
@@ -32,71 +32,67 @@
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;

-list_for_each_entry(obj_priv,
-&dev_priv->mm.gtt_list,
-gtt_list) {
+list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (dev_priv->mm.gtt->needs_dmar) {
-BUG_ON(!obj_priv->sg_list);
+BUG_ON(!obj->sg_list);

-intel_gtt_insert_sg_entries(obj_priv->sg_list,
-obj_priv->num_sg,
-obj_priv->gtt_space->start
+intel_gtt_insert_sg_entries(obj->sg_list,
+obj->num_sg,
+obj->gtt_space->start
>> PAGE_SHIFT,
-obj_priv->agp_type);
+obj->agp_type);
} else
-intel_gtt_insert_pages(obj_priv->gtt_space->start
+intel_gtt_insert_pages(obj->gtt_space->start
>> PAGE_SHIFT,
-obj_priv->base.size >> PAGE_SHIFT,
-obj_priv->pages,
-obj_priv->agp_type);
+obj->base.size >> PAGE_SHIFT,
+obj->pages,
+obj->agp_type);
}

/* Be paranoid and flush the chipset cache. */
intel_gtt_chipset_flush();
}

-int i915_gem_gtt_bind_object(struct drm_gem_object *obj)
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
-struct drm_device *dev = obj->dev;
+struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;

if (dev_priv->mm.gtt->needs_dmar) {
-ret = intel_gtt_map_memory(obj_priv->pages,
-obj->size >> PAGE_SHIFT,
-&obj_priv->sg_list,
-&obj_priv->num_sg);
+ret = intel_gtt_map_memory(obj->pages,
+obj->base.size >> PAGE_SHIFT,
+&obj->sg_list,
+&obj->num_sg);
if (ret != 0)
return ret;

-intel_gtt_insert_sg_entries(obj_priv->sg_list, obj_priv->num_sg,
-obj_priv->gtt_space->start
->> PAGE_SHIFT,
-obj_priv->agp_type);
+intel_gtt_insert_sg_entries(obj->sg_list,
+obj->num_sg,
+obj->gtt_space->start >> PAGE_SHIFT,
+obj->agp_type);
} else
-intel_gtt_insert_pages(obj_priv->gtt_space->start >> PAGE_SHIFT,
-obj->size >> PAGE_SHIFT,
-obj_priv->pages,
-obj_priv->agp_type);
+intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+obj->base.size >> PAGE_SHIFT,
+obj->pages,
+obj->agp_type);

return 0;
}

-void i915_gem_gtt_unbind_object(struct drm_gem_object *obj)
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
-struct drm_device *dev = obj->dev;
+struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

if (dev_priv->mm.gtt->needs_dmar) {
-intel_gtt_unmap_memory(obj_priv->sg_list, obj_priv->num_sg);
-obj_priv->sg_list = NULL;
-obj_priv->num_sg = 0;
+intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+obj->sg_list = NULL;
+obj->num_sg = 0;
}

-intel_gtt_clear_range(obj_priv->gtt_space->start >> PAGE_SHIFT,
-obj->size >> PAGE_SHIFT);
+intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+obj->base.size >> PAGE_SHIFT);
}
@@ -234,25 +234,24 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)

/* Is the current GTT allocation valid for the change in tiling? */
static bool
-i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode)
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 size;

if (tiling_mode == I915_TILING_NONE)
return true;

-if (INTEL_INFO(obj->dev)->gen >= 4)
+if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;

-if (!obj_priv->gtt_space)
+if (!obj->gtt_space)
return true;

-if (INTEL_INFO(obj->dev)->gen == 3) {
-if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+if (INTEL_INFO(obj->base.dev)->gen == 3) {
+if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false;
} else {
-if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+if (obj->gtt_offset & ~I830_FENCE_START_MASK)
return false;
}

@@ -260,18 +259,18 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode)
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
-if (INTEL_INFO(obj->dev)->gen == 3)
+if (INTEL_INFO(obj->base.dev)->gen == 3)
size = 1024*1024;
else
size = 512*1024;

-while (size < obj_priv->base.size)
+while (size < obj->base.size)
size <<= 1;

-if (obj_priv->gtt_space->size != size)
+if (obj->gtt_space->size != size)
return false;

-if (obj_priv->gtt_offset & (size - 1))
+if (obj->gtt_offset & (size - 1))
return false;

return true;
@@ -283,30 +282,29 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode)
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
-struct drm_file *file_priv)
+struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
-struct drm_gem_object *obj;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;
int ret;

ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;

-obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
-obj_priv = to_intel_bo(obj);

-if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
-drm_gem_object_unreference_unlocked(obj);
+if (!i915_tiling_ok(dev,
+args->stride, obj->base.size, args->tiling_mode)) {
+drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}

-if (obj_priv->pin_count) {
-drm_gem_object_unreference_unlocked(obj);
+if (obj->pin_count) {
+drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}

@@ -340,8 +338,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}

mutex_lock(&dev->struct_mutex);
-if (args->tiling_mode != obj_priv->tiling_mode ||
-args->stride != obj_priv->stride) {
+if (args->tiling_mode != obj->tiling_mode ||
+args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
@@ -349,22 +347,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
if (!i915_gem_object_fence_ok(obj, args->tiling_mode))
ret = i915_gem_object_unbind(obj);
-else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+else if (obj->fence_reg != I915_FENCE_REG_NONE)
ret = i915_gem_object_put_fence_reg(obj, true);
else
i915_gem_release_mmap(obj);

if (ret != 0) {
-args->tiling_mode = obj_priv->tiling_mode;
-args->stride = obj_priv->stride;
+args->tiling_mode = obj->tiling_mode;
+args->stride = obj->stride;
goto err;
}

-obj_priv->tiling_mode = args->tiling_mode;
-obj_priv->stride = args->stride;
+obj->tiling_mode = args->tiling_mode;
+obj->stride = args->stride;
}
err:
-drm_gem_object_unreference(obj);
+drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);

return ret;
@@ -375,22 +373,20 @@ err:
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
-struct drm_file *file_priv)
+struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
-struct drm_gem_object *obj;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;

-obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
-obj_priv = to_intel_bo(obj);

mutex_lock(&dev->struct_mutex);

-args->tiling_mode = obj_priv->tiling_mode;
-switch (obj_priv->tiling_mode) {
+args->tiling_mode = obj->tiling_mode;
+switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -410,7 +406,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

-drm_gem_object_unreference(obj);
+drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);

return 0;
@@ -440,46 +436,44 @@ i915_gem_swizzle_page(struct page *page)
}

void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
-struct drm_device *dev = obj->dev;
+struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-int page_count = obj->size >> PAGE_SHIFT;
+int page_count = obj->base.size >> PAGE_SHIFT;
int i;

if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;

-if (obj_priv->bit_17 == NULL)
+if (obj->bit_17 == NULL)
return;

for (i = 0; i < page_count; i++) {
-char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
-(test_bit(i, obj_priv->bit_17) != 0)) {
-i915_gem_swizzle_page(obj_priv->pages[i]);
-set_page_dirty(obj_priv->pages[i]);
+(test_bit(i, obj->bit_17) != 0)) {
+i915_gem_swizzle_page(obj->pages[i]);
+set_page_dirty(obj->pages[i]);
}
}
}

void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
-struct drm_device *dev = obj->dev;
+struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-int page_count = obj->size >> PAGE_SHIFT;
+int page_count = obj->base.size >> PAGE_SHIFT;
int i;

if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;

-if (obj_priv->bit_17 == NULL) {
-obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+if (obj->bit_17 == NULL) {
+obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
-if (obj_priv->bit_17 == NULL) {
+if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
@@ -487,9 +481,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
}

for (i = 0; i < page_count; i++) {
-if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
-__set_bit(i, obj_priv->bit_17);
+if (page_to_phys(obj->pages[i]) & (1 << 17))
+__set_bit(i, obj->bit_17);
else
-__clear_bit(i, obj_priv->bit_17);
+__clear_bit(i, obj->bit_17);
}
}
@@ -423,28 +423,23 @@ static void i915_error_work_func(struct work_struct *work)
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
-struct drm_gem_object *src)
+struct drm_i915_gem_object *src)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
-struct drm_i915_gem_object *src_priv;
int page, page_count;
u32 reloc_offset;

-if (src == NULL)
+if (src == NULL || src->pages == NULL)
return NULL;

-src_priv = to_intel_bo(src);
-if (src_priv->pages == NULL)
-return NULL;

-page_count = src->size / PAGE_SIZE;
+page_count = src->base.size / PAGE_SIZE;

dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;

-reloc_offset = src_priv->gtt_offset;
+reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
void __iomem *s;
@@ -466,7 +461,7 @@ i915_error_object_create(struct drm_device *dev,
reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
-dst->gtt_offset = src_priv->gtt_offset;
+dst->gtt_offset = src->gtt_offset;

return dst;

@@ -598,9 +593,9 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
-struct drm_gem_object *batchbuffer[2];
+struct drm_i915_gem_object *batchbuffer[2];
unsigned long flags;
u32 bbaddr;
int count;
@@ -668,34 +663,30 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[0] = NULL;
batchbuffer[1] = NULL;
count = 0;
-list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-struct drm_gem_object *obj = &obj_priv->base;

+list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (batchbuffer[0] == NULL &&
-bbaddr >= obj_priv->gtt_offset &&
-bbaddr < obj_priv->gtt_offset + obj->size)
+bbaddr >= obj->gtt_offset &&
+bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;

if (batchbuffer[1] == NULL &&
-error->acthd >= obj_priv->gtt_offset &&
-error->acthd < obj_priv->gtt_offset + obj->size)
+error->acthd >= obj->gtt_offset &&
+error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;

count++;
}
/* Scan the other lists for completeness for those bizarre errors. */
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-struct drm_gem_object *obj = &obj_priv->base;

+list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (batchbuffer[0] == NULL &&
-bbaddr >= obj_priv->gtt_offset &&
-bbaddr < obj_priv->gtt_offset + obj->size)
+bbaddr >= obj->gtt_offset &&
+bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;

if (batchbuffer[1] == NULL &&
-error->acthd >= obj_priv->gtt_offset &&
-error->acthd < obj_priv->gtt_offset + obj->size)
+error->acthd >= obj->gtt_offset &&
+error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;

if (batchbuffer[0] && batchbuffer[1])
@@ -703,17 +694,15 @@ static void i915_capture_error_state(struct drm_device *dev)
}
}
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-struct drm_gem_object *obj = &obj_priv->base;

+list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (batchbuffer[0] == NULL &&
-bbaddr >= obj_priv->gtt_offset &&
-bbaddr < obj_priv->gtt_offset + obj->size)
+bbaddr >= obj->gtt_offset &&
+bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;

if (batchbuffer[1] == NULL &&
-error->acthd >= obj_priv->gtt_offset &&
-error->acthd < obj_priv->gtt_offset + obj->size)
+error->acthd >= obj->gtt_offset &&
+error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;

if (batchbuffer[0] && batchbuffer[1])
@@ -732,14 +721,14 @@ static void i915_capture_error_state(struct drm_device *dev)

/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
-dev_priv->render_ring.gem_object);
+dev_priv->render_ring.obj);

/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
error->pinned_bo = NULL;

error->active_bo_count = count;
-list_for_each_entry(obj_priv, &dev_priv->mm.pinned_list, mm_list)
+list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
count++;
error->pinned_bo_count = count - error->active_bo_count;

@@ -948,7 +937,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
unsigned long flags;
bool stall_detected;
@@ -967,13 +956,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}

/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
-obj_priv = to_intel_bo(work->pending_flip_obj);
+obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
-stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
-stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -6,6 +6,7 @@
#include <linux/tracepoint.h>

#include <drm/drmP.h>
+#include "i915_drv.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@

TRACE_EVENT(i915_gem_object_create,

-TP_PROTO(struct drm_gem_object *obj),
+TP_PROTO(struct drm_i915_gem_object *obj),

TP_ARGS(obj),

TP_STRUCT__entry(
-__field(struct drm_gem_object *, obj)
+__field(struct drm_i915_gem_object *, obj)
__field(u32, size)
),

TP_fast_assign(
__entry->obj = obj;
-__entry->size = obj->size;
+__entry->size = obj->base.size;
),

TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,12 +36,12 @@ TRACE_EVENT(i915_gem_object_create,

TRACE_EVENT(i915_gem_object_bind,

-TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset, bool mappable),
+TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),

TP_ARGS(obj, gtt_offset, mappable),

TP_STRUCT__entry(
-__field(struct drm_gem_object *, obj)
+__field(struct drm_i915_gem_object *, obj)
__field(u32, gtt_offset)
__field(bool, mappable)
),
@@ -58,20 +59,20 @@ TRACE_EVENT(i915_gem_object_bind,

TRACE_EVENT(i915_gem_object_change_domain,

-TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),

TP_ARGS(obj, old_read_domains, old_write_domain),

TP_STRUCT__entry(
-__field(struct drm_gem_object *, obj)
+__field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),

TP_fast_assign(
__entry->obj = obj;
-__entry->read_domains = obj->read_domains | (old_read_domains << 16);
-__entry->write_domain = obj->write_domain | (old_write_domain << 16);
+__entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
+__entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
),

TP_printk("obj=%p, read=%04x, write=%04x",
@@ -81,12 +82,12 @@ TRACE_EVENT(i915_gem_object_change_domain,

TRACE_EVENT(i915_gem_object_get_fence,

-TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+TP_PROTO(struct drm_i915_gem_object *obj, int fence, int tiling_mode),

TP_ARGS(obj, fence, tiling_mode),

TP_STRUCT__entry(
-__field(struct drm_gem_object *, obj)
+__field(struct drm_i915_gem_object *, obj)
__field(int, fence)
__field(int, tiling_mode)
),
@@ -103,12 +104,12 @@ TRACE_EVENT(i915_gem_object_get_fence,

DECLARE_EVENT_CLASS(i915_gem_object,

-TP_PROTO(struct drm_gem_object *obj),
+TP_PROTO(struct drm_i915_gem_object *obj),

TP_ARGS(obj),

TP_STRUCT__entry(
-__field(struct drm_gem_object *, obj)
+__field(struct drm_i915_gem_object *, obj)
),

TP_fast_assign(
@@ -120,21 +121,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,

-TP_PROTO(struct drm_gem_object *obj),
+TP_PROTO(struct drm_i915_gem_object *obj),

TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,

-TP_PROTO(struct drm_gem_object *obj),
+TP_PROTO(struct drm_i915_gem_object *obj),

TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,

-TP_PROTO(struct drm_gem_object *obj),
+TP_PROTO(struct drm_i915_gem_object *obj),

TP_ARGS(obj)
);
@@ -266,13 +267,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
);

TRACE_EVENT(i915_flip_request,
-TP_PROTO(int plane, struct drm_gem_object *obj),
+TP_PROTO(int plane, struct drm_i915_gem_object *obj),

TP_ARGS(plane, obj),

TP_STRUCT__entry(
__field(int, plane)
-__field(struct drm_gem_object *, obj)
+__field(struct drm_i915_gem_object *, obj)
),

TP_fast_assign(
@@ -284,13 +285,13 @@ TRACE_EVENT(i915_flip_request,
);

TRACE_EVENT(i915_flip_complete,
-TP_PROTO(int plane, struct drm_gem_object *obj),
+TP_PROTO(int plane, struct drm_i915_gem_object *obj),

TP_ARGS(plane, obj),

TP_STRUCT__entry(
__field(int, plane)
-__field(struct drm_gem_object *, obj)
+__field(struct drm_i915_gem_object *, obj)
),

TP_fast_assign(
@@ -1066,13 +1066,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i;
u32 fbc_ctl, fbc_ctl2;

if (fb->pitch == dev_priv->cfb_pitch &&
-obj_priv->fence_reg == dev_priv->cfb_fence &&
+obj->fence_reg == dev_priv->cfb_fence &&
intel_crtc->plane == dev_priv->cfb_plane &&
I915_READ(FBC_CONTROL) & FBC_CTL_EN)
return;
@@ -1086,7 +1086,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)

/* FBC_CTL wants 64B units */
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-dev_priv->cfb_fence = obj_priv->fence_reg;
+dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

@@ -1096,7 +1096,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)

/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
-if (obj_priv->tiling_mode != I915_TILING_NONE)
+if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1107,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
-if (obj_priv->tiling_mode != I915_TILING_NONE)
+if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);

@@ -1150,7 +1150,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1159,7 +1159,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-dev_priv->cfb_fence == obj_priv->fence_reg &&
+dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1170,12 +1170,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}

dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-dev_priv->cfb_fence = obj_priv->fence_reg;
+dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_y = crtc->y;

dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
-if (obj_priv->tiling_mode != I915_TILING_NONE) {
+if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1221,7 +1221,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1230,9 +1230,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-dev_priv->cfb_fence == obj_priv->fence_reg &&
+dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
-dev_priv->cfb_offset == obj_priv->gtt_offset &&
+dev_priv->cfb_offset == obj->gtt_offset &&
dev_priv->cfb_y == crtc->y)
return;

@@ -1242,14 +1242,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}

dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-dev_priv->cfb_fence = obj_priv->fence_reg;
+dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
-dev_priv->cfb_offset = obj_priv->gtt_offset;
+dev_priv->cfb_offset = obj->gtt_offset;
dev_priv->cfb_y = crtc->y;

dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
-if (obj_priv->tiling_mode != I915_TILING_NONE) {
+if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1260,7 +1260,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

@@ -1345,7 +1345,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;

DRM_DEBUG_KMS("\n");

@@ -1384,9 +1384,9 @@ static void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
-obj_priv = to_intel_bo(intel_fb->obj);
+obj = intel_fb->obj;

-if (intel_fb->obj->size > dev_priv->cfb_size) {
+if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1410,7 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
-if (obj_priv->tiling_mode != I915_TILING_X) {
+if (obj->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
@@ -1433,14 +1433,13 @@ out_disable:

int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
-struct drm_gem_object *obj,
+struct drm_i915_gem_object *obj,
bool pipelined)
{
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
int ret;

-switch (obj_priv->tiling_mode) {
+switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
@@ -1474,7 +1473,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
-if (obj_priv->tiling_mode != I915_TILING_NONE) {
+if (obj->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj, false);
if (ret)
goto err_unpin;
@@ -1496,8 +1495,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
-struct drm_i915_gem_object *obj_priv;
-struct drm_gem_object *obj;
+struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
u32 dspcntr;
@@ -1514,7 +1512,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,

intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
-obj_priv = to_intel_bo(obj);

reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
@@ -1539,7 +1536,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
-if (obj_priv->tiling_mode != I915_TILING_NONE)
+if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -1551,7 +1548,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,

I915_WRITE(reg, dspcntr);

-Start = obj_priv->gtt_offset;
+Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1605,18 +1602,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,

if (old_fb) {
struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;

wait_event(dev_priv->pending_flip_queue,
-atomic_read(&obj_priv->pending_flip) == 0);
+atomic_read(&obj->pending_flip) == 0);

/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
*/
-ret = i915_gem_object_flush_gpu(obj_priv, false);
+ret = i915_gem_object_flush_gpu(obj, false);
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -2010,16 +2006,16 @@ static void intel_clear_scanline_wait(struct drm_device *dev)

static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv;

if (crtc->fb == NULL)
return;

-obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private;
wait_event(dev_priv->pending_flip_queue,
-atomic_read(&obj_priv->pending_flip) == 0);
+atomic_read(&obj->pending_flip) == 0);
}

static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4333,15 +4329,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}

static int intel_crtc_cursor_set(struct drm_crtc *crtc,
-struct drm_file *file_priv,
+struct drm_file *file,
uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-struct drm_gem_object *bo;
-struct drm_i915_gem_object *obj_priv;
+struct drm_i915_gem_object *obj;
uint32_t addr;
int ret;

@@ -4351,7 +4346,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
-bo = NULL;
+obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
@@ -4362,13 +4357,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
bo = drm_gem_object_lookup(dev, file_priv, handle);
|
||||
if (!bo)
|
||||
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
|
||||
if (!obj)
|
||||
return -ENOENT;
|
||||
|
||||
obj_priv = to_intel_bo(bo);
|
||||
|
||||
if (bo->size < width * height * 4) {
|
||||
if (obj->base.size < width * height * 4) {
|
||||
DRM_ERROR("buffer is to small\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
@ -4377,29 +4370,29 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
/* we only need to pin inside GTT if cursor is non-phy */
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (!dev_priv->info->cursor_needs_physical) {
|
||||
ret = i915_gem_object_pin(bo, PAGE_SIZE, true);
|
||||
ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to pin cursor bo\n");
|
||||
goto fail_locked;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_set_to_gtt_domain(bo, 0);
|
||||
ret = i915_gem_object_set_to_gtt_domain(obj, 0);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to move cursor bo into the GTT\n");
|
||||
goto fail_unpin;
|
||||
}
|
||||
|
||||
addr = obj_priv->gtt_offset;
|
||||
addr = obj->gtt_offset;
|
||||
} else {
|
||||
int align = IS_I830(dev) ? 16 * 1024 : 256;
|
||||
ret = i915_gem_attach_phys_object(dev, bo,
|
||||
ret = i915_gem_attach_phys_object(dev, obj,
|
||||
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
|
||||
align);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to attach phys object\n");
|
||||
goto fail_locked;
|
||||
}
|
||||
addr = obj_priv->phys_obj->handle->busaddr;
|
||||
addr = obj->phys_obj->handle->busaddr;
|
||||
}
|
||||
|
||||
if (IS_GEN2(dev))
|
||||
@ -4408,17 +4401,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
finish:
|
||||
if (intel_crtc->cursor_bo) {
|
||||
if (dev_priv->info->cursor_needs_physical) {
|
||||
if (intel_crtc->cursor_bo != bo)
|
||||
if (intel_crtc->cursor_bo != obj)
|
||||
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
|
||||
} else
|
||||
i915_gem_object_unpin(intel_crtc->cursor_bo);
|
||||
drm_gem_object_unreference(intel_crtc->cursor_bo);
|
||||
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
intel_crtc->cursor_addr = addr;
|
||||
intel_crtc->cursor_bo = bo;
|
||||
intel_crtc->cursor_bo = obj;
|
||||
intel_crtc->cursor_width = width;
|
||||
intel_crtc->cursor_height = height;
|
||||
|
||||
@ -4426,11 +4419,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
|
||||
return 0;
|
||||
fail_unpin:
|
||||
i915_gem_object_unpin(bo);
|
||||
i915_gem_object_unpin(obj);
|
||||
fail_locked:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
fail:
|
||||
drm_gem_object_unreference_unlocked(bo);
|
||||
drm_gem_object_unreference_unlocked(&obj->base);
|
||||
return ret;
|
||||
}
|
||||
|
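
For orientation: the mechanical s/obj_priv/obj/ above works because the driver-private object embeds the core GEM object as its first member, so the two views convert in both directions without any lookup. A minimal sketch of that relationship, assuming the i915 definitions of this era (to_intel_bo is the driver's container_of helper):

/* drm_i915_gem_object wraps the refcounted core GEM object. */
struct drm_i915_gem_object {
	struct drm_gem_object base;	/* core GEM object */
	/* ... driver state: tiling_mode, gtt_offset, pin_count, ... */
};

/* Core pointer -> driver pointer: a constant-offset conversion. */
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

With driver code holding struct drm_i915_gem_object * directly, core GEM calls take &obj->base and the per-call to_intel_bo() conversions disappear, which is where the commit's net line reduction comes from.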
@ -4890,7 +4883,7 @@ static void intel_idle_update(struct work_struct *work)
* buffer), we'll also mark the display as busy, so we know to increase its
* clock frequency.
*/
void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
@ -4971,8 +4964,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)

mutex_lock(&work->dev->struct_mutex);
i915_gem_object_unpin(work->old_fb_obj);
drm_gem_object_unreference(work->pending_flip_obj);
drm_gem_object_unreference(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@ -4983,7 +4976,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
struct timeval now;
unsigned long flags;
@ -5015,10 +5008,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,

spin_unlock_irqrestore(&dev->event_lock, flags);

obj_priv = to_intel_bo(work->old_fb_obj);
obj = work->old_fb_obj;
atomic_clear_mask(1 << intel_crtc->plane,
&obj_priv->pending_flip.counter);
if (atomic_read(&obj_priv->pending_flip) == 0)
&obj->pending_flip.counter);
if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);

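For readers tracking the flip accounting: pending_flip is an atomic bitmask with one bit per display plane, armed when a flip is queued and cleared on completion, and sleepers on pending_flip_queue wait for it to drain to zero. A condensed restatement of the protocol, built only from lines that appear in the surrounding hunks:

/* queue side (intel_crtc_page_flip): mark the outgoing buffer busy */
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

/* completion side (above): clear our plane's bit, wake idle-waiters */
atomic_clear_mask(1 << intel_crtc->plane, &obj->pending_flip.counter);
if (atomic_read(&obj->pending_flip) == 0)
	wake_up(&dev_priv->pending_flip_queue);

/* waiter side (intel_pipe_set_base and friends) */
wait_event(dev_priv->pending_flip_queue,
	   atomic_read(&obj->pending_flip) == 0);
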
@ -5065,8 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
@ -5105,8 +5097,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_work;

/* Reference the objects for the scheduled work. */
drm_gem_object_reference(work->old_fb_obj);
drm_gem_object_reference(obj);
drm_gem_object_reference(&work->old_fb_obj->base);
drm_gem_object_reference(&obj->base);

crtc->fb = fb;

@ -5134,7 +5126,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
}

work->pending_flip_obj = obj;
obj_priv = to_intel_bo(obj);

work->enable_stall_check = true;

@ -5148,15 +5139,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane,
&to_intel_bo(work->old_fb_obj)->pending_flip);
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

switch (INTEL_INFO(dev)->gen) {
case 2:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset + offset);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;

@ -5164,7 +5154,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset + offset);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;

@ -5177,7 +5167,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
OUT_RING(obj->gtt_offset | obj->tiling_mode);

/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@ -5191,8 +5181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
case 6:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch | obj_priv->tiling_mode);
OUT_RING(obj_priv->gtt_offset);
OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj->gtt_offset);

pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@ -5208,8 +5198,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;

cleanup_objs:
drm_gem_object_unreference(work->old_fb_obj);
drm_gem_object_unreference(obj);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
cleanup_work:
mutex_unlock(&dev->struct_mutex);

@ -5295,7 +5285,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
}

int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv)
struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@ -5440,19 +5430,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(intel_fb->obj);
drm_gem_object_unreference_unlocked(&intel_fb->obj->base);

kfree(intel_fb);
}

static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
struct drm_file *file,
unsigned int *handle)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_gem_object *object = intel_fb->obj;
struct drm_i915_gem_object *obj = intel_fb->obj;

return drm_gem_handle_create(file_priv, object, handle);
return drm_gem_handle_create(file, &obj->base, handle);
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
@ -5463,12 +5453,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj)
struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;

if (obj_priv->tiling_mode == I915_TILING_Y)
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;

if (mode_cmd->pitch & 63)
@ -5500,11 +5489,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj;
struct intel_framebuffer *intel_fb;
int ret;

obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
if (!obj)
return ERR_PTR(-ENOENT);

@ -5512,10 +5501,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (!intel_fb)
return ERR_PTR(-ENOMEM);

ret = intel_framebuffer_init(dev, intel_fb,
mode_cmd, obj);
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
}
@ -5528,10 +5516,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};

static struct drm_gem_object *
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
struct drm_gem_object *ctx;
struct drm_i915_gem_object *ctx;
int ret;

ctx = i915_gem_alloc_object(dev, 4096);
@ -5559,7 +5547,7 @@ intel_alloc_context_page(struct drm_device *dev)
err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
drm_gem_object_unreference(ctx);
drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
@ -5886,20 +5874,17 @@ void intel_init_clock_gating(struct drm_device *dev)
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (dev_priv->renderctx) {
struct drm_i915_gem_object *obj_priv;
obj_priv = to_intel_bo(dev_priv->renderctx);
if (obj_priv) {
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_SET_CONTEXT);
OUT_RING(obj_priv->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
}
struct drm_i915_gem_object *obj = dev_priv->renderctx;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_SET_CONTEXT);
OUT_RING(obj->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
}
} else
DRM_DEBUG_KMS("Failed to allocate render context."
@ -5907,22 +5892,11 @@ void intel_init_clock_gating(struct drm_device *dev)
}

if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_i915_gem_object *obj_priv = NULL;

if (dev_priv->pwrctx == NULL)
dev_priv->pwrctx = intel_alloc_context_page(dev);
if (dev_priv->pwrctx) {
obj_priv = to_intel_bo(dev_priv->pwrctx);
} else {
struct drm_gem_object *pwrctx;

pwrctx = intel_alloc_context_page(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
obj_priv = to_intel_bo(pwrctx);
}
}

if (obj_priv) {
I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
struct drm_i915_gem_object *obj = dev_priv->pwrctx;
I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
I915_WRITE(MCHBAR_RENDER_STANDBY,
I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
}
@ -6197,23 +6171,25 @@ void intel_modeset_cleanup(struct drm_device *dev)
dev_priv->display.disable_fbc(dev);

if (dev_priv->renderctx) {
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj = dev_priv->renderctx;

obj_priv = to_intel_bo(dev_priv->renderctx);
I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
I915_READ(CCID);
i915_gem_object_unpin(dev_priv->renderctx);
drm_gem_object_unreference(dev_priv->renderctx);
I915_WRITE(CCID, obj->gtt_offset &~ CCID_EN);
POSTING_READ(CCID);

i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
dev_priv->renderctx = NULL;
}

if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj = dev_priv->pwrctx;

obj_priv = to_intel_bo(dev_priv->pwrctx);
I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
I915_READ(PWRCTXA);
i915_gem_object_unpin(dev_priv->pwrctx);
drm_gem_object_unreference(dev_priv->pwrctx);
I915_WRITE(PWRCTXA, obj->gtt_offset &~ PWRCTX_EN);
POSTING_READ(PWRCTXA);

i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
dev_priv->pwrctx = NULL;
}

if (IS_IRONLAKE_M(dev))

@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)

struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj;
};

struct intel_fbdev {
@ -166,7 +166,7 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;

struct drm_gem_object *cursor_bo;
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
struct drm_gem_object *old_fb_obj;
struct drm_gem_object *pending_flip_obj;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
bool enable_stall_check;
@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
extern void intel_mark_busy(struct drm_device *dev,
struct drm_i915_gem_object *obj);
extern void intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
@ -299,13 +300,13 @@ extern void ironlake_disable_drps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);

extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_gem_object *obj,
struct drm_i915_gem_object *obj,
bool pipelined);

extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj);
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);

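The header changes above are what make the rename stick: once these structs store driver-typed pointers, the use sites stop converting. A before/after sketch of a typical access, assuming the intel_crtc layout shown in the hunk (the cursor code earlier in this diff is the real instance):

/* before: the member held a core pointer, so every driver-private
 * access paid a to_intel_bo() conversion first */
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_crtc->cursor_bo);
addr = obj_priv->gtt_offset;

/* after: the member is already driver-typed; only core GEM calls
 * need the embedded base spelled out */
addr = intel_crtc->cursor_bo->gtt_offset;
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
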

@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;

@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,

size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
fbo = i915_gem_alloc_object(dev, size);
if (!fbo) {
obj = i915_gem_alloc_object(dev, size);
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
obj_priv = to_intel_bo(fbo);

mutex_lock(&dev->struct_mutex);

/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
ret = intel_pin_and_fence_fb_obj(dev, obj, false);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,

info->par = ifbdev;

ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
goto out_unpin;

@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);

info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;

info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
size);
info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
@ -168,7 +165,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,

DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
obj_priv->gtt_offset, fbo);
obj->gtt_offset, obj);


mutex_unlock(&dev->struct_mutex);
@ -176,9 +173,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
return 0;

out_unpin:
i915_gem_object_unpin(fbo);
i915_gem_object_unpin(obj);
out_unref:
drm_gem_object_unreference(fbo);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
out:
return ret;
@ -225,7 +222,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,

drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
drm_gem_object_unreference_unlocked(ifb->obj);
drm_gem_object_unreference_unlocked(&ifb->obj->base);
ifb->obj = NULL;
}
}

@ -376,24 +376,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay,

static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
struct drm_gem_object *obj = &overlay->old_vid_bo->base;
struct drm_i915_gem_object *obj = overlay->old_vid_bo;

i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
drm_gem_object_unreference(&obj->base);

overlay->old_vid_bo = NULL;
}

static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj = overlay->vid_bo;

/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
obj = &overlay->vid_bo->base;

i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
drm_gem_object_unreference(&obj->base);
overlay->vid_bo = NULL;

overlay->crtc->overlay = NULL;
@ -764,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
}

static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_gem_object *new_bo,
struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
int ret, tmp_width;
struct overlay_registers *regs;
bool scale_changed = false;
struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
struct drm_device *dev = overlay->dev;

BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@ -825,7 +823,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
regs->OSTRIDE = params->stride_Y;

if (params->format & I915_OVERLAY_YUV_PLANAR) {
@ -839,8 +837,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
regs->OSTRIDE |= params->stride_UV << 16;
}

@ -857,7 +855,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;

overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = to_intel_bo(new_bo);
overlay->vid_bo = new_bo;

return 0;

@ -970,7 +968,7 @@ static int check_overlay_scaling(struct put_image_params *rec)

static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
struct drm_gem_object *new_bo)
struct drm_i915_gem_object *new_bo)
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
@ -1055,7 +1053,7 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;

tmp = rec->stride_Y*rec->src_height;
if (rec->offset_Y + tmp > new_bo->size)
if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
break;

@ -1066,12 +1064,12 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;

tmp = rec->stride_Y * rec->src_height;
if (rec->offset_Y + tmp > new_bo->size)
if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;

tmp = rec->stride_UV * (rec->src_height / uv_vscale);
if (rec->offset_U + tmp > new_bo->size ||
rec->offset_V + tmp > new_bo->size)
if (rec->offset_U + tmp > new_bo->base.size ||
rec->offset_V + tmp > new_bo->base.size)
return -EINVAL;
break;
}
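
The size checks above all enforce one invariant: every source plane must end inside the backing object, whose byte size is now reached as new_bo->base.size. A hedged restatement as a helper (an editorial sketch; check_overlay_src() open-codes this per pixel format):

/* True when a plane of 'rows' rows at 'stride' bytes per row, starting
 * 'offset' bytes in, fits a buffer of 'bo_size' bytes; the u64 widening
 * keeps the multiply from overflowing on 32-bit. */
static bool overlay_plane_fits(u32 offset, u32 stride, u32 rows, u64 bo_size)
{
	return (u64)offset + (u64)stride * rows <= bo_size;
}

For planar YUV the Y plane is checked with the full src_height while the U and V planes use src_height / uv_vscale, exactly the pair of bounds in the hunk above.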
@ -1114,7 +1112,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct intel_overlay *overlay;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
struct drm_gem_object *new_bo;
struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
int ret;

@ -1153,8 +1151,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));

new_bo = drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle);
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
if (!new_bo) {
ret = -ENOENT;
goto out_free;
@ -1245,7 +1243,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
drm_gem_object_unreference_unlocked(new_bo);
drm_gem_object_unreference_unlocked(&new_bo->base);
out_free:
kfree(params);

@ -1398,7 +1396,7 @@ void intel_setup_overlay(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_gem_object *reg_bo;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers *regs;
int ret;

@ -1413,7 +1411,7 @@ void intel_setup_overlay(struct drm_device *dev)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);
overlay->reg_bo = reg_bo;

if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
@ -1423,14 +1421,14 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
overlay->flip_addr = overlay->reg_bo->gtt_offset;
overlay->flip_addr = reg_bo->gtt_offset;

ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@ -1462,7 +1460,7 @@ void intel_setup_overlay(struct drm_device *dev)
out_unpin_bo:
i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
out_free:
kfree(overlay);
return;
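
The two branches in intel_setup_overlay() above are the overlay's two addressing modes, condensed here as a sketch of the logic already shown (no new behavior):

/* old chips want a physical address for the register page, newer ones
 * a GTT offset; flip_addr carries whichever the hardware expects */
overlay->flip_addr = OVERLAY_NEEDS_PHYSICAL(dev)
	? reg_bo->phys_obj->handle->busaddr	/* physical backing page */
	: reg_bo->gtt_offset;			/* GTT offset of the pinned bo */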

@ -139,7 +139,7 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
static int init_ring_common(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object);
struct drm_i915_gem_object *obj = ring->obj;
u32 head;

/* Stop the ring if it's running. */
@ -148,7 +148,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->write_tail(ring, 0);

/* Initialize the ring. */
I915_WRITE_START(ring, obj_priv->gtt_offset);
I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;

/* G45 ring initialization fails to reset head to zero */
@ -178,7 +178,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)

/* If the head is still not zero, the ring is dead */
if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
I915_READ_START(ring) != obj_priv->gtt_offset ||
I915_READ_START(ring) != obj->gtt_offset ||
(I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
@ -514,17 +514,15 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj;

obj = ring->status_page.obj;
if (obj == NULL)
return;
obj_priv = to_intel_bo(obj);

kunmap(obj_priv->pages[0]);
kunmap(obj->pages[0]);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;

memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@ -534,8 +532,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj;
int ret;

obj = i915_gem_alloc_object(dev, 4096);
@ -544,16 +541,15 @@ static int init_status_page(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
obj->agp_type = AGP_USER_CACHED_MEMORY;

ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
goto err_unref;
}

ring->status_page.gfx_addr = obj_priv->gtt_offset;
ring->status_page.page_addr = kmap(obj_priv->pages[0]);
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
@ -570,7 +566,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(obj);
drm_gem_object_unreference(&obj->base);
err:
return ret;
}
@ -578,8 +574,7 @@ err:
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj;
int ret;

ring->dev = dev;
@ -600,15 +595,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
goto err_hws;
}

ring->gem_object = obj;
ring->obj = obj;

ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret)
goto err_unref;

obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
ring->map.offset = dev->agp->base + obj->gtt_offset;
ring->map.type = 0;
ring->map.flags = 0;
ring->map.mtrr = 0;
@ -632,8 +626,8 @@ err_unmap:
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
drm_gem_object_unreference(obj);
ring->gem_object = NULL;
drm_gem_object_unreference(&obj->base);
ring->obj = NULL;
err_hws:
cleanup_status_page(ring);
return ret;
@ -644,7 +638,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv;
int ret;

if (ring->gem_object == NULL)
if (ring->obj == NULL)
return;

/* Disable the ring buffer. The ring must be idle at this point */
@ -654,9 +648,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)

drm_core_ioremapfree(&ring->map, ring->dev);

i915_gem_object_unpin(ring->gem_object);
drm_gem_object_unreference(ring->gem_object);
ring->gem_object = NULL;
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;

if (ring->cleanup)
ring->cleanup(ring);
@ -902,11 +896,11 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
u32 *ptr;
int ret;

obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096));
obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL)
return -ENOMEM;

ret = i915_gem_object_pin(&obj->base, 4096, true);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
@ -917,9 +911,9 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
*ptr++ = MI_NOOP;
kunmap(obj->pages[0]);

ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) {
i915_gem_object_unpin(&obj->base);
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}

@ -4,7 +4,7 @@
struct intel_hw_status_page {
u32 __iomem *page_addr;
unsigned int gfx_addr;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj;
};

#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
@ -32,7 +32,7 @@ struct intel_ring_buffer {
u32 mmio_base;
void *virtual_start;
struct drm_device *dev;
struct drm_gem_object *gem_object;
struct drm_i915_gem_object *obj;

unsigned int head;
unsigned int tail;