mirror of https://github.com/torvalds/linux.git (synced 2024-11-20 02:51:44 +00:00)
721604a15b
Virtual address spaces are per DRM client (opener of /dev/drm). Clients are in
charge of their virtual address space: they map BOs into it by calling the
DRM_RADEON_GEM_VA ioctl. The first 16M of the virtual address space is
reserved by the kernel.

Once we use a 2-level page table we should be able to have a small VRAM
memory footprint for each page table (there would be one pt for all of GART,
one for all of VRAM, and then one first level for each virtual address
space).

The plan includes using the sub allocator for a common VM page table area and
using memcpy to copy VM page tables in and out, or using a GART object and
copying things in and out using DMA.

v2: agd5f fixes:
- Add vram base offset for vram pages. The GPU physical address of a vram
  page is FB_OFFSET + page offset. FB_OFFSET is 0 on discrete cards and the
  physical bus address of the stolen memory on integrated chips.
- VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR covers all vmids >= 1

v3: agd5f:
- integrate with the semaphore/multi-ring stuff

v4:
- rebase on top of the TTM DMA & multi-ring stuff
- userspace is now in charge of the address space
- no more CS-specific VM ioctl; instead the CS ioctl has a new chunk

v5:
- properly handle the mem == NULL case from the move_notify callback
- fix the vm cleanup path

v6:
- fix update of page table to only happen on valid mem placement

v7:
- add tlb flush for each vm context
- add flags to define mapping properties (readable, writeable, snooped)
- make the ring id implicit from ib->fence->ring; it is up to each asic
  callback to do ring-specific scheduling in its vm ib scheduling function

v8:
- add query for ib limit and kernel reserved virtual space
- rename vm->size to max_pfn (maximum number of pages)
- update the gem_va ioctl to also allow an unmap operation
- bump the kernel version to allow userspace to query for vm support

v9:
- rebuild page tables only on bind, and incrementally depending on the BOs
  referenced by the CS that have been moved
- allow the virtual address space to grow
- use the sa allocator for vram page tables
- return invalid when querying the vm limit on non-Cayman GPUs
- dump vm fault registers on lockup

v10: agd5f:
- move the vm schedule_ib callback to a standalone function, remove the
  callback, and use the existing ib_execute callback for VM IBs

v11:
- rebase on top of latest Linus

v12: agd5f:
- remove spurious backslash
- set IB vm_id to 0 in radeon_ib_get()

v13: agd5f:
- fix handling of RADEON_CHUNK_ID_FLAGS

v14:
- fix va destruction
- fix suspend/resume
- forbid a bo from having several different VAs in the same vm

v15:
- rebase

v16:
- clean up leftovers of vm init/fini

v17: agd5f:
- cs checker

v18: agd5f:
- rework the CS ioctl to better support multiple rings and VM. Rather than
  adding a new chunk id for VM, just re-use the IB chunk id and add a new
  flag for VM mode. Also define additional dwords for the flags chunk id to
  define which ring we want to use (gfx, compute, uvd, etc.) and the
  priority.

v19:
- fix cs fini in the weird case of no ib
- semi-working flush fix for ni
- rebase on top of sa allocator changes

v20: agd5f:
- further CS ioctl cleanups from Christian's comments

v21: agd5f:
- integrate CS checker improvements

v22: agd5f:
- final cleanups for release; only allow VM CS on Cayman

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
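For orientation, here is a hypothetical userspace sketch (not part of the kernel
file below) of the flow the message describes: probe for VM support via
RADEON_INFO_VA_START, then map a BO into the per-open virtual address space
with DRM_RADEON_GEM_VA. It assumes the drm_radeon_info / drm_radeon_gem_va
layouts and the RADEON_VA_* / RADEON_VM_PAGE_* flags this change adds to
radeon_drm.h, plus libdrm's drmCommandWriteRead(); error handling is minimal.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

static int radeon_map_bo_va(int fd, uint32_t bo_handle, uint64_t va)
{
	struct drm_radeon_info info;
	struct drm_radeon_gem_va args;
	uint32_t va_start = 0;
	int r;

	/* A failing RADEON_INFO_VA_START doubles as "no VM on this asic"
	 * (pre-Cayman parts return -EINVAL). */
	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_VA_START;
	info.value = (uintptr_t)&va_start;
	r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
	if (r)
		return r;
	if (va < va_start)	/* the first 16M is reserved by the kernel */
		return -1;

	memset(&args, 0, sizeof(args));
	args.handle = bo_handle;
	args.operation = RADEON_VA_MAP;
	args.vm_id = 0;		/* the VM is implicit, one per open of /dev/drm */
	args.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE;
	args.offset = va;
	r = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &args, sizeof(args));
	if (r == 0 && args.operation != RADEON_VA_RESULT_OK)
		r = -1;		/* the kernel reports the result in args.operation */
	return r;
}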
501 lines
15 KiB
C
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm_sarea.h"
#include "radeon.h"
#include "radeon_drm.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* radeon_device_init should report only fatal errors
	 * like memory allocation, iomapping, or memory manager
	 * initialization failure; it must properly initialize the
	 * GPU MC controller and permit VRAM allocation.
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods */
	acpi_status = radeon_acpi_init(rdev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	/* Again, modeset_init should fail only on fatal error;
	 * otherwise it should provide enough functionality
	 * for shadowfb to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
out:
	if (r)
		radeon_driver_unload_kms(dev);
	return r;
}

static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	mutex_lock(&dev->struct_mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Userspace get information ioctl
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value_ptr;
	uint32_t value;
	struct drm_crtc *crtc;
	int i, found;

	info = data;
	value_ptr = (uint32_t *)((unsigned long)info->value);
	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
		return -EFAULT;

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		value = dev->pci_device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			value = false;
		else
			value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_IB_VM_MAX_SIZE;
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}
	return 0;
}


/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
	return 0;
}

void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		int r;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		r = radeon_vm_init(rdev, &fpriv->vm);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}

void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
}

/*
 * VBlank related functions.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	rdev->irq.crtc_vblank_int[crtc] = true;

	return radeon_irq_set(rdev);
}

void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	rdev->irq.crtc_vblank_int[crtc] = false;

	radeon_irq_set(rdev);
}

int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc);
}

/*
 * IOCTL.
 */
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	/* Not valid in KMS. */
	return -EINVAL;
}

#define KMS_INVALID_IOCTL(name)						\
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{									\
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
	return -EINVAL;							\
}

/*
 * All these ioctls are invalid in the KMS world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)


struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
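A side note on the RADEON_INFO_WANT_HYPERZ / WANT_CMASK handling above: the
value word is both input and output, so userspace requests ownership with 1,
releases it with 0, and reads back whether it is now the owner. A hypothetical
caller, under the same radeon_drm.h and libdrm assumptions as the sketch near
the top of this page:

/* Hypothetical: try to acquire hyper-z for this fd; returns 1 if this fd
 * now owns it, 0 otherwise (pass want = 0 to release ownership). */
static int radeon_want_hyperz(int fd, uint32_t want)
{
	struct drm_radeon_info info;
	uint32_t value = want;	/* in: 1 = request, 0 = release */

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_WANT_HYPERZ;
	info.value = (uintptr_t)&value;
	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
		return 0;
	return value;		/* out: 1 iff this fd owns hyper-z */
}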