mirror of
https://github.com/torvalds/linux.git
synced 2024-11-15 00:21:59 +00:00
062b389c87
It seems that some R6XX/R7XX silently ignore HDP flush when programmed through ring, this patch adds back an ioctl callback to allow R6XX/R7XX hw to perform such flush through MMIO in order to fix a regression. For more details see: http://bugzilla.kernel.org/show_bug.cgi?id=15186 Signed-off-by: Jerome Glisse <jglisse@redhat.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
364 lines
9.2 KiB
C
364 lines
9.2 KiB
C
/*
|
|
* Copyright 2008 Advanced Micro Devices, Inc.
|
|
* Copyright 2008 Red Hat Inc.
|
|
* Copyright 2009 Jerome Glisse.
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
* to deal in the Software without restriction, including without limitation
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice shall be included in
|
|
* all copies or substantial portions of the Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
* OTHER DEALINGS IN THE SOFTWARE.
|
|
*
|
|
* Authors: Dave Airlie
|
|
* Alex Deucher
|
|
* Jerome Glisse
|
|
*/
|
|
#include "drmP.h"
|
|
#include "drm.h"
|
|
#include "radeon_drm.h"
|
|
#include "radeon.h"
|
|
|
|
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* Nothing to do here: per-object driver state is attached later,
	 * in radeon_gem_object_create(), via gobj->driver_private. */
	return 0;
}
|
|
|
|
void radeon_gem_object_free(struct drm_gem_object *gobj)
|
|
{
|
|
struct radeon_bo *robj = gobj->driver_private;
|
|
|
|
gobj->driver_private = NULL;
|
|
if (robj) {
|
|
radeon_bo_unref(&robj);
|
|
}
|
|
}
|
|
|
|
/**
 * radeon_gem_object_create - allocate a GEM object backed by a radeon bo.
 * @rdev: radeon device
 * @size: object size in bytes
 * @alignment: requested alignment, clamped to at least PAGE_SIZE
 * @initial_domain: RADEON_GEM_DOMAIN_* placement for the bo
 * @discardable: unused here, kept for the caller-facing interface
 * @kernel: true for kernel-internal (pinned style) allocations
 * @obj: return slot for the new GEM object, NULL on failure
 *
 * Returns 0 on success or a negative error code.
 */
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	gobj = drm_gem_object_alloc(rdev->ddev, size);
	if (!gobj) {
		return -ENOMEM;
	}
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	/* NOTE(review): alignment is clamped above but not passed to
	 * radeon_bo_create(); presumably the bo layer page-aligns
	 * internally — confirm against radeon_object.c. */
	r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			/* was "%u" for the signed alignment — fixed to %d */
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %d, %d)\n",
				  size, initial_domain, alignment, r);
		/* Drop the GEM object again; unreference needs struct_mutex. */
		mutex_lock(&rdev->ddev->struct_mutex);
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
		return r;
	}
	gobj->driver_private = robj;
	*obj = gobj;
	return 0;
}
|
|
|
|
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
|
|
uint64_t *gpu_addr)
|
|
{
|
|
struct radeon_bo *robj = obj->driver_private;
|
|
int r;
|
|
|
|
r = radeon_bo_reserve(robj, false);
|
|
if (unlikely(r != 0))
|
|
return r;
|
|
r = radeon_bo_pin(robj, pin_domain, gpu_addr);
|
|
radeon_bo_unreserve(robj);
|
|
return r;
|
|
}
|
|
|
|
void radeon_gem_object_unpin(struct drm_gem_object *obj)
|
|
{
|
|
struct radeon_bo *robj = obj->driver_private;
|
|
int r;
|
|
|
|
r = radeon_bo_reserve(robj, false);
|
|
if (likely(r == 0)) {
|
|
radeon_bo_unpin(robj);
|
|
radeon_bo_unreserve(robj);
|
|
}
|
|
}
|
|
|
|
/**
 * radeon_gem_set_domain - transition a GEM object toward a domain.
 * @gobj: GEM object to operate on
 * @rdomain: requested read domains
 * @wdomain: requested write domain (takes precedence over @rdomain)
 *
 * Currently only implements the CPU case: waits for the bo to go idle
 * so the CPU can safely access it.  Returns 0 or a negative error.
 */
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gobj->driver_private;
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing, but warn: caller passed no domain at all.
		 * (message typo "withou" fixed) */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object !\n");
			return r;
		}
	}
	return 0;
}
|
|
|
|
int radeon_gem_init(struct radeon_device *rdev)
|
|
{
|
|
INIT_LIST_HEAD(&rdev->gem.objects);
|
|
return 0;
|
|
}
|
|
|
|
/* Tear down GEM state: force-delete any bos still alive on the device. */
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}
|
|
|
|
|
|
/*
|
|
* GEM ioctls.
|
|
*/
|
|
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *filp)
|
|
{
|
|
struct radeon_device *rdev = dev->dev_private;
|
|
struct drm_radeon_gem_info *args = data;
|
|
|
|
args->vram_size = rdev->mc.real_vram_size;
|
|
args->vram_visible = rdev->mc.real_vram_size;
|
|
if (rdev->stollen_vga_memory)
|
|
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
|
|
if (rdev->fbdev_rbo)
|
|
args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
|
|
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
|
|
RADEON_IB_POOL_SIZE*64*1024;
|
|
return 0;
|
|
}
|
|
|
|
/* DRM_RADEON_GEM_PREAD stub: not implemented, always fails with -ENOSYS. */
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
|
|
|
|
/* DRM_RADEON_GEM_PWRITE stub: not implemented, always fails with -ENOSYS. */
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
|
|
|
|
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *filp)
|
|
{
|
|
struct radeon_device *rdev = dev->dev_private;
|
|
struct drm_radeon_gem_create *args = data;
|
|
struct drm_gem_object *gobj;
|
|
uint32_t handle;
|
|
int r;
|
|
|
|
/* create a gem object to contain this object in */
|
|
args->size = roundup(args->size, PAGE_SIZE);
|
|
r = radeon_gem_object_create(rdev, args->size, args->alignment,
|
|
args->initial_domain, false,
|
|
false, &gobj);
|
|
if (r) {
|
|
return r;
|
|
}
|
|
r = drm_gem_handle_create(filp, gobj, &handle);
|
|
if (r) {
|
|
mutex_lock(&dev->struct_mutex);
|
|
drm_gem_object_unreference(gobj);
|
|
mutex_unlock(&dev->struct_mutex);
|
|
return r;
|
|
}
|
|
mutex_lock(&dev->struct_mutex);
|
|
drm_gem_object_handle_unreference(gobj);
|
|
mutex_unlock(&dev->struct_mutex);
|
|
args->handle = handle;
|
|
return 0;
|
|
}
|
|
|
|
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *filp)
|
|
{
|
|
/* transition the BO to a domain -
|
|
* just validate the BO into a certain domain */
|
|
struct drm_radeon_gem_set_domain *args = data;
|
|
struct drm_gem_object *gobj;
|
|
struct radeon_bo *robj;
|
|
int r;
|
|
|
|
/* for now if someone requests domain CPU -
|
|
* just make sure the buffer is finished with */
|
|
|
|
/* just do a BO wait for now */
|
|
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
|
if (gobj == NULL) {
|
|
return -EINVAL;
|
|
}
|
|
robj = gobj->driver_private;
|
|
|
|
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
|
|
|
|
mutex_lock(&dev->struct_mutex);
|
|
drm_gem_object_unreference(gobj);
|
|
mutex_unlock(&dev->struct_mutex);
|
|
return r;
|
|
}
|
|
|
|
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *filp)
|
|
{
|
|
struct drm_radeon_gem_mmap *args = data;
|
|
struct drm_gem_object *gobj;
|
|
struct radeon_bo *robj;
|
|
|
|
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
|
if (gobj == NULL) {
|
|
return -EINVAL;
|
|
}
|
|
robj = gobj->driver_private;
|
|
args->addr_ptr = radeon_bo_mmap_offset(robj);
|
|
mutex_lock(&dev->struct_mutex);
|
|
drm_gem_object_unreference(gobj);
|
|
mutex_unlock(&dev->struct_mutex);
|
|
return 0;
|
|
}
|
|
|
|
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *filp)
|
|
{
|
|
struct drm_radeon_gem_busy *args = data;
|
|
struct drm_gem_object *gobj;
|
|
struct radeon_bo *robj;
|
|
int r;
|
|
uint32_t cur_placement = 0;
|
|
|
|
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
|
if (gobj == NULL) {
|
|
return -EINVAL;
|
|
}
|
|
robj = gobj->driver_private;
|
|
r = radeon_bo_wait(robj, &cur_placement, true);
|
|
switch (cur_placement) {
|
|
case TTM_PL_VRAM:
|
|
args->domain = RADEON_GEM_DOMAIN_VRAM;
|
|
break;
|
|
case TTM_PL_TT:
|
|
args->domain = RADEON_GEM_DOMAIN_GTT;
|
|
break;
|
|
case TTM_PL_SYSTEM:
|
|
args->domain = RADEON_GEM_DOMAIN_CPU;
|
|
default:
|
|
break;
|
|
}
|
|
mutex_lock(&dev->struct_mutex);
|
|
drm_gem_object_unreference(gobj);
|
|
mutex_unlock(&dev->struct_mutex);
|
|
return r;
|
|
}
|
|
|
|
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *filp)
|
|
{
|
|
struct drm_radeon_gem_wait_idle *args = data;
|
|
struct drm_gem_object *gobj;
|
|
struct radeon_bo *robj;
|
|
int r;
|
|
|
|
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
|
if (gobj == NULL) {
|
|
return -EINVAL;
|
|
}
|
|
robj = gobj->driver_private;
|
|
r = radeon_bo_wait(robj, NULL, false);
|
|
/* callback hw specific functions if any */
|
|
if (robj->rdev->asic->ioctl_wait_idle)
|
|
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
|
|
mutex_lock(&dev->struct_mutex);
|
|
drm_gem_object_unreference(gobj);
|
|
mutex_unlock(&dev->struct_mutex);
|
|
return r;
|
|
}
|
|
|
|
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *filp)
|
|
{
|
|
struct drm_radeon_gem_set_tiling *args = data;
|
|
struct drm_gem_object *gobj;
|
|
struct radeon_bo *robj;
|
|
int r = 0;
|
|
|
|
DRM_DEBUG("%d \n", args->handle);
|
|
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
|
if (gobj == NULL)
|
|
return -EINVAL;
|
|
robj = gobj->driver_private;
|
|
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
|
|
mutex_lock(&dev->struct_mutex);
|
|
drm_gem_object_unreference(gobj);
|
|
mutex_unlock(&dev->struct_mutex);
|
|
return r;
|
|
}
|
|
|
|
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
|
|
struct drm_file *filp)
|
|
{
|
|
struct drm_radeon_gem_get_tiling *args = data;
|
|
struct drm_gem_object *gobj;
|
|
struct radeon_bo *rbo;
|
|
int r = 0;
|
|
|
|
DRM_DEBUG("\n");
|
|
gobj = drm_gem_object_lookup(dev, filp, args->handle);
|
|
if (gobj == NULL)
|
|
return -EINVAL;
|
|
rbo = gobj->driver_private;
|
|
r = radeon_bo_reserve(rbo, false);
|
|
if (unlikely(r != 0))
|
|
goto out;
|
|
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
|
|
radeon_bo_unreserve(rbo);
|
|
out:
|
|
mutex_lock(&dev->struct_mutex);
|
|
drm_gem_object_unreference(gobj);
|
|
mutex_unlock(&dev->struct_mutex);
|
|
return r;
|
|
}
|