Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon/kms: fix bo's fence association
  drm/radeon/kms: fix indirect buffer management V2
  drm/edid: Fix interlaced detailed timings to be frame size, not field.
  drm/vmwgfx: Use fb handover mechanism instead of stealth mode.
  drm/radeon/kms: use udelay for short delays
  drm/nouveau: Force TV encoder DPMS reinit after resume.
  drm/nouveau: use mutex for vbios lock
This commit is contained in: commit 86404ab60d
drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
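Note: a worked example of the conversion the quirk above performs, as a standalone userspace model (not driver code). The input numbers are illustrative: a 1080i detailed timing that the EDID encoded as 1920x540 field size.

/* Minimal userspace model of drm_mode_do_interlace_quirk for a 1080i
 * detailed timing encoded as field height (1920x540).  The timing
 * values below are illustrative, not read from a real EDID block. */
#include <stdio.h>

int main(void)
{
        /* field-height vertical timings as parsed from the descriptor */
        int hdisplay = 1920;
        int vdisplay = 540, vsync_start = 542, vsync_end = 547, vtotal = 562;

        /* 1920x1080 is on the CEA interlaced list and vdisplay == 1080/2,
         * so convert the vertical timings from field size to frame size */
        if (hdisplay == 1920 && vdisplay == 1080 / 2) {
                vdisplay *= 2;
                vsync_start *= 2;
                vsync_end *= 2;
                vtotal *= 2;
                vtotal |= 1;    /* an interlaced frame has an odd line total */
        }
        printf("%dx%di: vsync %d-%d vtotal %d\n",
               hdisplay, vdisplay, vsync_start, vsync_end, vtotal);
        return 0;
}

This prints "1920x1080i: vsync 1084-1094 vtotal 1125", i.e. the familiar 1125-line total of 1080i.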
drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	struct init_exec iexec = { true, false };
-	unsigned long flags;
 
-	spin_lock_irqsave(&bios->lock, flags);
+	mutex_lock(&bios->lock);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
-	spin_unlock_irqrestore(&bios->lock, flags);
+	mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
 	struct nvbios *bios = &dev_priv->VBIOS;
 
 	memset(bios, 0, sizeof(struct nvbios));
-	spin_lock_init(&bios->lock);
+	mutex_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))
drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,7 +205,7 @@ struct nvbios {
 	struct drm_device *dev;
 	struct nouveau_bios_info pub;
 
-	spinlock_t lock;
+	struct mutex lock;
 
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;
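Note: the motivation for the spinlock-to-mutex switch is that the init-table parser can sleep (VBIOS init scripts contain delay opcodes), and sleeping while holding a spinlock taken with interrupts disabled is forbidden. A minimal userspace sketch of the pattern, using pthreads as a stand-in for kernel locking; all names here are illustrative, not the nouveau code.

/* Sketch: a mutex may be held across a sleep, a spinlock with IRQs
 * off may not.  parse_init_table() stands in for the VBIOS parser. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t bios_lock = PTHREAD_MUTEX_INITIALIZER;

static void parse_init_table(void)
{
        /* stand-in for an INIT_* delay opcode inside a VBIOS script */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 2 * 1000 * 1000 };
        nanosleep(&ts, NULL);
}

int main(void)
{
        pthread_mutex_lock(&bios_lock);   /* sleeping under a mutex is fine */
        parse_init_table();
        pthread_mutex_unlock(&bios_lock);
        printf("init table parsed under mutex\n");
        return 0;
}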
drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
 			       nouveau_encoder(encoder)->restore.output);
 
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
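Note: the single added line relies on the driver's DPMS caching: the encoder skips reprogramming when the requested DPMS mode equals the cached one, so after resume (when hardware state is lost but the cache still says "on") the cache must be reset to a sentinel. A sketch of that pattern; NV_DPMS_CLEARED is the driver's sentinel, everything else is an illustrative model.

/* Model of the DPMS-cache invalidation this hunk depends on. */
#include <stdio.h>

#define NV_DPMS_CLEARED 0x80

struct tv_encoder { int last_dpms; };

static void tv_dpms(struct tv_encoder *enc, int mode)
{
        if (enc->last_dpms == mode)     /* cached: nothing to do */
                return;
        enc->last_dpms = mode;
        printf("programming TV encoder for DPMS mode %d\n", mode);
}

int main(void)
{
        struct tv_encoder enc = { .last_dpms = 0 /* DPMS on */ };

        tv_dpms(&enc, 0);                /* skipped: cache says already on */
        enc.last_dpms = NV_DPMS_CLEARED; /* what restore() now does */
        tv_dpms(&enc, 0);                /* reprograms after resume */
        return 0;
}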
drivers/gpu/drm/radeon/atom.c
@@ -643,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t count = U8((*ptr)++);
 	SDEBUG("   count: %d\n", count);
 	if (arg == ATOM_UNIT_MICROSEC)
-		schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+		udelay(count);
 	else
 		schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
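Note on the bug being fixed: usecs_to_jiffies() rounds up to at least one whole jiffy, so a microsecond-unit ATOM delay slept for 1/HZ seconds, i.e. milliseconds. Since count is a uint8_t (at most 255 us), a busy-wait udelay() is the right tool. A standalone program reproducing the rounding, with a simplified round-up version of the kernel helper:

/* Why schedule_timeout_uninterruptible(usecs_to_jiffies(n)) was wrong
 * for short delays: even 10 us becomes a full jiffy. */
#include <stdio.h>

static unsigned long usecs_to_jiffies(unsigned int us, unsigned int hz)
{
        /* round up, as the kernel helper effectively does */
        return ((unsigned long)us * hz + 999999) / 1000000;
}

int main(void)
{
        const unsigned int hz[] = { 100, 250, 1000 };
        const unsigned int count = 10;  /* a typical ATOM_UNIT_MICROSEC arg */

        for (int i = 0; i < 3; i++)
                printf("HZ=%-4u: %u us -> %lu jiffy(ies) = %.1f ms\n",
                       hz[i], count, usecs_to_jiffies(count, hz[i]),
                       usecs_to_jiffies(count, hz[i]) * 1000.0 / hz[i]);
        return 0;
}

At HZ=100 a requested 10 us delay became 10 ms, a thousandfold error.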
drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
 	struct list_head	list;
-	unsigned long		idx;
+	unsigned		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
-	uint32_t	*ptr;
+	uint32_t		*ptr;
 	uint32_t		length_dw;
+	bool			free;
 };
 
 /*
@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
 	struct mutex		mutex;
 	struct radeon_bo	*robj;
-	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	unsigned		head_id;
 };
 
 struct radeon_cp {
drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 				      &p->validated);
 		}
 	}
-	return radeon_bo_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (error && parser->ib) {
-		radeon_bo_list_unvalidate(&parser->validated,
-					  parser->ib->fence);
-	} else {
-		radeon_bo_list_unreserve(&parser->validated);
+	if (!error && parser->ib) {
+		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
 	}
+	radeon_bo_list_unreserve(&parser->validated);
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
 			mutex_lock(&parser->rdev->ddev->struct_mutex);
drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
 	int r;
 
 	r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
-		if (fence) {
-			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-			bo->tbo.sync_obj = radeon_fence_ref(fence);
-			bo->tbo.sync_obj_arg = NULL;
-		}
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
 	}
 	return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
 	struct radeon_bo_list *lobj;
-	struct radeon_fence *old_fence;
+	struct radeon_bo *bo;
+	struct radeon_fence *old_fence = NULL;
 
-	if (fence)
-		list_for_each_entry(lobj, head, list) {
-			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-			if (old_fence == fence) {
-				lobj->bo->tbo.sync_obj = NULL;
-				radeon_fence_unref(&old_fence);
-			}
+	list_for_each_entry(lobj, head, list) {
+		bo = lobj->bo;
+		spin_lock(&bo->tbo.lock);
+		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+		bo->tbo.sync_obj = radeon_fence_ref(fence);
+		bo->tbo.sync_obj_arg = NULL;
+		spin_unlock(&bo->tbo.lock);
+		if (old_fence) {
+			radeon_fence_unref(&old_fence);
 		}
-	radeon_bo_list_unreserve(head);
+	}
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
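Note: the new radeon_bo_list_fence() attaches the command-stream fence to each validated buffer after submission rather than at validation time, fixing the bo/fence association named in the merge summary. A userspace model of the reference-counted swap it performs per buffer; the types and names below are simplified stand-ins, not the driver's.

/* Model of the fence swap: take a reference on the new fence, publish
 * it as the buffer's sync object, then drop the old fence's reference. */
#include <stdio.h>
#include <stdlib.h>

struct fence { int refs; int seq; };

static struct fence *fence_ref(struct fence *f) { f->refs++; return f; }
static void fence_unref(struct fence **f)
{
        if (*f && --(*f)->refs == 0)
                free(*f);
        *f = NULL;
}

struct bo { struct fence *sync_obj; };

static void bo_fence(struct bo *bo, struct fence *fence)
{
        struct fence *old = bo->sync_obj;       /* may be NULL */

        bo->sync_obj = fence_ref(fence);        /* new waiters see this */
        if (old)
                fence_unref(&old);
}

int main(void)
{
        struct fence *f = calloc(1, sizeof(*f));
        struct bo bo = { .sync_obj = NULL };

        f->refs = 1; f->seq = 42;
        bo_fence(&bo, f);                       /* after CS emission */
        printf("bo fenced with seq %d (refs=%d)\n",
               bo.sync_obj->seq, bo.sync_obj->refs);
        fence_unref(&f);                        /* submitter's reference */
        fence_unref(&bo.sync_obj);              /* buffer's reference */
        return 0;
}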
drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
-	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
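Note: the rewritten allocator drops the bitmap and scheduled list and treats the pool as a ring of RADEON_IB_POOL_SIZE entries (a power of two), scanned circularly from head_id with a cheap index mask and a per-entry free flag. A standalone model mirroring the loop above; the surrounding kernel types are simplified here.

/* Model of the circular IB scan in the new radeon_ib_get(). */
#include <stdio.h>
#include <stdbool.h>

#define IB_POOL_SIZE 16         /* must be a power of 2 */

struct ib { unsigned idx; bool free; };

static struct ib pool[IB_POOL_SIZE];
static unsigned head_id;

static struct ib *ib_get(void)
{
        struct ib *nib = NULL;
        unsigned i, c;

        for (i = head_id, c = 0; c < IB_POOL_SIZE; c++, i++) {
                i &= (IB_POOL_SIZE - 1);        /* cheap modulo */
                if (pool[i].free) {
                        nib = &pool[i];
                        break;
                }
        }
        if (!nib)
                return NULL;                    /* all in flight: -EBUSY */
        head_id = (nib->idx + 1) & (IB_POOL_SIZE - 1);
        nib->free = false;
        return nib;
}

int main(void)
{
        for (unsigned i = 0; i < IB_POOL_SIZE; i++)
                pool[i] = (struct ib){ .idx = i, .free = true };

        printf("got IB %u\n", ib_get()->idx);   /* 0 */
        printf("got IB %u\n", ib_get()->idx);   /* 1: scan starts at head_id */
        return 0;
}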
@@ -114,18 +101,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
-		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
@@ -135,7 +111,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
@@ -148,7 +124,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
@@ -164,7 +141,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	if (rdev->ib_pool.robj)
 		return 0;
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
 			     true, RADEON_GEM_DOMAIN_GTT,
 			     &rdev->ib_pool.robj);
@@ -195,9 +171,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +190,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {
@@ -363,7 +338,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		 */
 
 		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any. Entering stealth mode.\n");
+			 "Ignore above error if any.\n");
 		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 			goto out_no_device;
 		}
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-	} else {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			goto out_no_device;
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-		vmw_fb_init(dev_priv);
 	}
+	ret = vmw_request_device(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_device;
+	vmw_kms_init(dev_priv);
+	vmw_overlay_init(dev_priv);
+	vmw_fb_init(dev_priv);
 
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
-	if (!dev_priv->stealth) {
-		vmw_fb_close(dev_priv);
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
-		vmw_release_device(dev_priv);
-		pci_release_regions(dev->pdev);
-	} else {
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
+	vmw_fb_close(dev_priv);
+	vmw_kms_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+	vmw_release_device(dev_priv);
+	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
-	}
+	else
+		pci_release_regions(dev->pdev);
 
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
 	int ret = 0;
 
 	DRM_INFO("Master set.\n");
-	if (dev_priv->stealth) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-	if (dev_priv->stealth) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_release_device(dev_priv);
-	}
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-	if (!dev_priv->stealth)
-		vmw_fb_on(dev_priv);
+	vmw_fb_on(dev_priv);
 }
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	info->pixmap.scan_align = 1;
 #endif
 
+	info->aperture_base = vmw_priv->vram_start;
+	info->aperture_size = vmw_priv->vram_size;
+
 	/*
 	 * Dirty & Deferred IO
 	 */
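Note: filling in info->aperture_base/size is what enables the fb handover named in the commit message: when vmwgfx registers its framebuffer, the fbdev core can see that a generic driver's (e.g. vesafb's) aperture overlaps the one being registered and kick the generic driver out, which is what lets the stealth-mode special cases above be removed. A sketch of the overlap test, with made-up addresses.

/* Model of an aperture-overlap check used for framebuffer handover. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct aperture { uint64_t base, size; };

static bool apertures_overlap(struct aperture a, struct aperture b)
{
        return a.base < b.base + b.size && b.base < a.base + a.size;
}

int main(void)
{
        struct aperture vesafb = { 0xe0000000, 8  << 20 };
        struct aperture vmwgfx = { 0xe0000000, 64 << 20 };

        if (apertures_overlap(vesafb, vmwgfx))
                printf("conflicting generic framebuffer: hand over to vmwgfx\n");
        return 0;
}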