Merge branch 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (22 commits)
  drm: add initial r500 drm support
  radeon: setup the ring buffer fetcher to be less aggressive.
  drm: fixup some of the ioctl function exit paths
  drm: the drm really should call pci_set_master..
  i915: Add chipset id for Intel Integrated Graphics Device
  drm: cleanup DRM_DEBUG() parameters
  drm/i915: add support for E7221 chipset
  drm: don't cast a pointer to pointer of list_head
  mga_dma: return 'err' not just zero from mga_do_cleanup_dma()
  drm: add _DRM_DRIVER flag, and re-order unload.
  drm: enable udev node creation
  drm: Make DRM_IOCTL_GET_CLIENT return EINVAL when it can't find client #idx.
  drm: move drm_mem_init to proper place in startup sequence
  drm: call driver load function after initialising AGP
  drm: Fix ioc32 compat layer
  drm: fd.o bug #11895: Only add the AGP base to map offset if the caller didn't.
  i915: add suspend/resume support
  drm: update DRM sysfs support
  drm: Initialize the AGP structure's base address at init rather than enable.
  drm: move two function extern into the correct block
  ...
Linus Torvalds 2008-02-07 09:07:16 -08:00
commit d31d295409
61 changed files with 2279 additions and 758 deletions

View File

@ -38,7 +38,7 @@ config DRM_RADEON
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
run the Radeon in plain VGA mode.
If M is selected, the module will be called radeon.
config DRM_I810
@ -71,9 +71,9 @@ config DRM_I915
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
module will be called i915. AGP support is required for this driver
to work. This driver is used by the Intel driver in X.org 6.8 and
XFree86 4.4 and above. If unsure, build this and i830 as modules and
XFree86 4.4 and above. If unsure, build this and i830 as modules and
the X server will load the correct one.
endchoice
config DRM_MGA
@ -88,7 +88,7 @@ config DRM_SIS
tristate "SiS video cards"
depends on DRM && AGP
help
Choose this option if you have a SiS 630 or compatible video
Choose this option if you have a SiS 630 or compatible video
chipset. If M is selected the module will be called sis. AGP
support is required for this driver to work.
@ -105,4 +105,3 @@ config DRM_SAVAGE
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.

View File

@ -38,5 +38,3 @@ obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_SIS) += sis.o
obj-$(CONFIG_DRM_SAVAGE)+= savage.o
obj-$(CONFIG_DRM_VIA) +=via.o

View File

@ -41,4 +41,3 @@ For specific information about kernel-level support, see:
A Security Analysis of the Direct Rendering Infrastructure
http://dri.sourceforge.net/doc/security_low_level.html

View File

@ -41,7 +41,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
struct page *page;
int i;
DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
DRM_DEBUG("%d order\n", order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
order);
@ -54,7 +54,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
for (i = 0; i < order; i++, page++)
SetPageReserved(page);
DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
DRM_DEBUG("returning 0x%08lx\n", address);
return (void *)address;
}
@ -63,7 +63,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)
struct page *page;
int i;
int num_pages = 1 << order;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
page = virt_to_page((unsigned long)address);

View File

@ -202,7 +202,8 @@ enum drm_map_flags {
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /**< Removable mapping */
_DRM_REMOVABLE = 0x40, /**< Removable mapping */
_DRM_DRIVER = 0x80 /**< Managed by driver */
};
struct drm_ctx_priv_map {

View File

@ -292,7 +292,6 @@ struct drm_magic_entry {
struct list_head head;
struct drm_hash_item hash_item;
struct drm_file *priv;
struct drm_magic_entry *next;
};
struct drm_vma_entry {
@ -388,8 +387,8 @@ struct drm_file {
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
void *driver_priv;
struct file *filp;
void *driver_priv;
};
/** Wait queue */
@ -401,11 +400,9 @@ struct drm_queue {
wait_queue_head_t read_queue; /**< Processes waiting on block_read */
atomic_t block_write; /**< Queue blocked for writes */
wait_queue_head_t write_queue; /**< Processes waiting on block_write */
#if 1
atomic_t total_queued; /**< Total queued statistic */
atomic_t total_flushed; /**< Total flushes statistic */
atomic_t total_locks; /**< Total locks statistics */
#endif
enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
struct drm_waitlist waitlist; /**< Pending buffers */
wait_queue_head_t flush_queue; /**< Processes waiting until flush */
@ -416,7 +413,8 @@ struct drm_queue {
*/
struct drm_lock_data {
struct drm_hw_lock *hw_lock; /**< Hardware lock */
struct drm_file *file_priv; /**< File descr of lock holder (0=kernel) */
/** Private of lock holder's file (NULL=kernel) */
struct drm_file *file_priv;
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
spinlock_t spinlock;
@ -491,6 +489,27 @@ struct drm_sigdata {
struct drm_hw_lock *lock;
};
/*
* Generic memory manager structs
*/
struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
void *private;
};
struct drm_mm {
struct list_head fl_entry;
struct list_head ml_entry;
};
/**
* Mappings list
*/
@ -498,7 +517,7 @@ struct drm_map_list {
struct list_head head; /**< list head */
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
unsigned int user_token;
uint64_t user_token;
};
typedef struct drm_map drm_local_map_t;
@ -536,24 +555,6 @@ struct drm_ati_pcigart_info {
int table_size;
};
/*
* Generic memory manager structs
*/
struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
void *private;
};
struct drm_mm {
struct list_head fl_entry;
struct list_head ml_entry;
};
/**
* DRM driver structure. This structure represent the common code for
* a family of cards. There will one drm_device for each card present
@ -567,6 +568,8 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
int (*unload) (struct drm_device *);
int (*suspend) (struct drm_device *);
int (*resume) (struct drm_device *);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
void (*dma_ready) (struct drm_device *);
int (*dma_quiescent) (struct drm_device *);
@ -642,6 +645,7 @@ struct drm_head {
* may contain multiple heads.
*/
struct drm_device {
struct device dev; /**< Linux device */
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
char *devname; /**< For /proc/interrupts */
@ -750,7 +754,6 @@ struct drm_device {
struct pci_controller *hose;
#endif
struct drm_sg_mem *sg; /**< Scatter gather memory */
unsigned long *ctx_bitmap; /**< context bitmap */
void *dev_private; /**< device private data */
struct drm_sigdata sigdata; /**< For block_all_signals */
sigset_t sigmask;
@ -847,6 +850,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
@ -1061,11 +1066,11 @@ extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
/* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(struct class *cs);
extern struct class_device *drm_sysfs_device_add(struct class *cs,
struct drm_head *head);
extern void drm_sysfs_device_remove(struct class_device *class_dev);
extern void drm_sysfs_destroy(void);
extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
extern void drm_sysfs_device_remove(struct drm_device *dev);
/*
* Basic memory manager support (drm_mm.c)
@ -1073,7 +1078,7 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);
extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
unsigned long size,
unsigned alignment);
void drm_mm_put_block(struct drm_mm_node * cur);
extern void drm_mm_put_block(struct drm_mm_node * cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
@ -1144,8 +1149,5 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area);
/*@}*/
extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
#endif /* __KERNEL__ */
#endif
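For orientation, the two new hooks added to struct drm_driver above (suspend/resume) are what the i915 suspend/resume changes later in this merge plug into. A minimal sketch of how a driver might wire them up, using hypothetical foo_* names rather than any real driver in this series:

static int foo_suspend(struct drm_device *dev)
{
	/* save chip state (ring, registers, palettes, ...) before power-down */
	return 0;
}

static int foo_resume(struct drm_device *dev)
{
	/* restore whatever foo_suspend() saved */
	return 0;
}

static struct drm_driver foo_driver = {
	/* .load, .unload, .ioctls, ... as before */
	.suspend = foo_suspend,	/* new optional hook */
	.resume = foo_resume,	/* new optional hook */
};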

View File

@ -166,7 +166,6 @@ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
dev->agp->mode = mode.mode;
agp_enable(dev->agp->bridge, mode.mode);
dev->agp->base = dev->agp->agp_info.aper_base;
dev->agp->enabled = 1;
return 0;
}
@ -417,7 +416,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
head->base = head->agp_info.aper_base;
return head;
}

View File

@ -184,7 +184,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
return -ENOMEM;
}
}
break;
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
@ -229,11 +229,17 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
#ifdef __alpha__
map->offset += dev->hose->mem_space->start;
#endif
/* Note: dev->agp->base may actually be 0 when the DRM
* is not in control of AGP space. But if user space is
* it should already have added the AGP base itself.
/* In some cases (i810 driver), user space may have already
* added the AGP base itself, because dev->agp->base previously
* only got set during AGP enable. So, only add the base
* address if the map's offset isn't already within the
* aperture.
*/
map->offset += dev->agp->base;
if (map->offset < dev->agp->base ||
map->offset > dev->agp->base +
dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
map->offset += dev->agp->base;
}
map->mtrr = dev->agp->agp_mtrr; /* for getmap */
/* This assumes the DRM is in total control of AGP space.
@ -429,6 +435,7 @@ int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
return ret;
}
EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
@ -814,9 +821,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
page_count = 0;
while (entry->buf_count < count) {
dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@ -1592,5 +1599,3 @@ int drm_order(unsigned long size)
return order;
}
EXPORT_SYMBOL(drm_order);

View File

@ -159,7 +159,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request->handle =
request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}

View File

@ -200,8 +200,10 @@ int drm_lastclose(struct drm_device * dev)
}
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
if (!(r_list->map->flags & _DRM_DRIVER)) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@ -255,8 +257,6 @@ int drm_init(struct drm_driver *driver)
DRM_DEBUG("\n");
drm_mem_init();
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
@ -293,10 +293,6 @@ static void drm_cleanup(struct drm_device * dev)
drm_lastclose(dev);
drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
dev->agp && dev->agp->agp_mtrr >= 0) {
int retval;
@ -314,6 +310,9 @@ static void drm_cleanup(struct drm_device * dev)
if (dev->driver->unload)
dev->driver->unload(dev);
drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
drm_put_head(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
@ -383,22 +382,24 @@ static int __init drm_core_init(void)
goto err_p3;
}
drm_mem_init();
DRM_INFO("Initialized %s %d.%d.%d %s\n",
CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
return 0;
err_p3:
drm_sysfs_destroy(drm_class);
err_p2:
err_p3:
drm_sysfs_destroy();
err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
err_p1:
err_p1:
return ret;
}
static void __exit drm_core_exit(void)
{
remove_proc_entry("dri", NULL);
drm_sysfs_destroy(drm_class);
drm_sysfs_destroy();
unregister_chrdev(DRM_MAJOR, "drm");
@ -494,23 +495,25 @@ int drm_ioctl(struct inode *inode, struct file *filp,
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (!kdata)
return -ENOMEM;
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
}
}
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg,
_IOC_SIZE(cmd)) != 0) {
retcode = -EACCES;
retcode = -EFAULT;
goto err_i1;
}
}
retcode = func(dev, kdata, file_priv);
if (cmd & IOC_OUT) {
if ((retcode == 0) && (cmd & IOC_OUT)) {
if (copy_to_user((void __user *)arg, kdata,
_IOC_SIZE(cmd)) != 0)
retcode = -EACCES;
retcode = -EFAULT;
}
}
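The drm_lastclose() change above skips any map flagged _DRM_DRIVER, so driver-owned mappings now survive the last close and are only removed at unload. As a hedged illustration (mirroring what the i915 load hook later in this merge does), a hypothetical driver would create such a long-lived register map at load time roughly like this:

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	drm_local_map_t *mmio_map;
	unsigned long base = drm_get_resource_start(dev, 0);
	unsigned long size = drm_get_resource_len(dev, 0);

	/* _DRM_DRIVER keeps this map out of the lastclose teardown */
	return drm_addmap(dev, base, size, _DRM_REGISTERS,
			  _DRM_KERNEL | _DRM_DRIVER, &mmio_map);
}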

View File

@ -80,7 +80,7 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
}
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
@ -129,7 +129,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
}
/*
* Just insert an item and return any "bits" bit key that hasn't been
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
@ -200,4 +200,3 @@ void drm_ht_remove(struct drm_open_hash *ht)
ht->table = NULL;
}
}

View File

@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);
#endif

View File

@ -1051,8 +1051,12 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
drm_ioctl_compat_t *fn;
int ret;
/* Assume that ioctls without an explicit compat routine will just
* work. This may not always be a good assumption, but it's better
* than always failing.
*/
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
return -ENOTTY;
return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
fn = drm_compat_ioctls[nr];

View File

@ -234,26 +234,23 @@ int drm_getclient(struct drm_device *dev, void *data,
idx = client->idx;
mutex_lock(&dev->struct_mutex);
if (list_empty(&dev->filelist)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
i = 0;
list_for_each_entry(pt, &dev->filelist, lhead) {
if (i++ >= idx)
break;
}
if (i++ >= idx) {
client->auth = pt->authenticated;
client->pid = pt->pid;
client->uid = pt->uid;
client->magic = pt->magic;
client->iocs = pt->ioctl_count;
mutex_unlock(&dev->struct_mutex);
client->auth = pt->authenticated;
client->pid = pt->pid;
client->uid = pt->uid;
client->magic = pt->magic;
client->iocs = pt->ioctl_count;
return 0;
}
}
mutex_unlock(&dev->struct_mutex);
return 0;
return -EINVAL;
}
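With drm_getclient() now returning -EINVAL once the requested index runs past the end of the client list (instead of reporting success with whatever lay beyond it), user space can enumerate clients by index until the ioctl fails. A hedged userspace sketch of that pattern, assuming an already-open DRM file descriptor fd and the usual <sys/ioctl.h>, <string.h>, <stdio.h> and drm.h headers:

struct drm_client client;
int idx;

for (idx = 0; ; idx++) {
	memset(&client, 0, sizeof(client));
	client.idx = idx;
	if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client) < 0)
		break;	/* no client #idx: kernel now returns -EINVAL */
	printf("client %d: pid %lu, authenticated %d\n",
	       idx, client.pid, client.auth);
}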
/**

View File

@ -107,7 +107,7 @@ static int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
init_waitqueue_head(&dev->vbl_queue);
@ -164,7 +164,7 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
DRM_DEBUG("irq=%d\n", dev->irq);
dev->driver->irq_uninstall(dev);

View File

@ -179,4 +179,3 @@ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_core_ioremapfree);

View File

@ -293,4 +293,3 @@ void drm_mm_takedown(struct drm_mm * mm)
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
}

View File

@ -69,9 +69,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
copy_to_user(arg1, arg2, arg3)
/* Macros for copyfrom user, but checking readability only once */
#define DRM_VERIFYAREA_READ( uaddr, size ) \
#define DRM_VERIFYAREA_READ( uaddr, size ) \
(access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_from_user(arg1, arg2, arg3)
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_to_user(arg1, arg2, arg3)

View File

@ -139,6 +139,101 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0, 0, 0}
@ -311,5 +406,5 @@
{0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}

View File

@ -236,11 +236,11 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
type = "??";
else
type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags,
r_list->user_token);
(unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
} else {

View File

@ -45,7 +45,7 @@
#endif
/** Maximum number of drawables in the SAREA */
#define SAREA_MAX_DRAWABLES 256
#define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000

View File

@ -67,7 +67,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@ -81,7 +81,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@ -122,8 +122,8 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
entry->handle = ScatterHandle((unsigned long)entry->virtual);
DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);
DRM_DEBUG("handle = %08lx\n", entry->handle);
DRM_DEBUG("virtual = %p\n", entry->virtual);
for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
@ -210,7 +210,7 @@ int drm_sg_free(struct drm_device *dev, void *data,
if (!entry || entry->handle != request->handle)
return -EINVAL;
DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
DRM_DEBUG("virtual = %p\n", entry->virtual);
drm_sg_cleanup(entry);

View File

@ -98,10 +98,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
dev->driver = driver;
if (dev->driver->load)
if ((retcode = dev->driver->load(dev, ent->driver_data)))
goto error_out_unreg;
if (drm_core_has_AGP(dev)) {
if (drm_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
@ -120,6 +116,10 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
}
}
if (dev->driver->load)
if ((retcode = dev->driver->load(dev, ent->driver_data)))
goto error_out_unreg;
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@ -168,11 +168,10 @@ static int drm_get_head(struct drm_device * dev, struct drm_head * head)
goto err_g1;
}
head->dev_class = drm_sysfs_device_add(drm_class, head);
if (IS_ERR(head->dev_class)) {
ret = drm_sysfs_device_add(dev, head);
if (ret) {
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
ret = PTR_ERR(head->dev_class);
goto err_g2;
}
*heads = head;
@ -218,13 +217,14 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (ret)
goto err_g1;
pci_set_master(pdev);
if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
if ((ret = drm_get_head(dev, &dev->primary)))
goto err_g2;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary.minor);
@ -283,7 +283,7 @@ int drm_put_head(struct drm_head * head)
DRM_DEBUG("release secondary minor %d\n", minor);
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
drm_sysfs_device_remove(head->dev_class);
drm_sysfs_device_remove(head->dev);
*head = (struct drm_head) {.dev = NULL};

View File

@ -19,6 +19,45 @@
#include "drm_core.h"
#include "drmP.h"
#define to_drm_device(d) container_of(d, struct drm_device, dev)
/**
* drm_sysfs_suspend - DRM class suspend hook
* @dev: Linux device to suspend
* @state: power state to enter
*
* Just figures out what the actual struct drm_device associated with
* @dev is and calls its suspend hook, if present.
*/
static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
{
struct drm_device *drm_dev = to_drm_device(dev);
printk(KERN_ERR "%s\n", __FUNCTION__);
if (drm_dev->driver->suspend)
return drm_dev->driver->suspend(drm_dev);
return 0;
}
/**
* drm_sysfs_resume - DRM class resume hook
* @dev: Linux device to resume
*
* Just figures out what the actual struct drm_device associated with
* @dev is and calls its resume hook, if present.
*/
static int drm_sysfs_resume(struct device *dev)
{
struct drm_device *drm_dev = to_drm_device(dev);
if (drm_dev->driver->resume)
return drm_dev->driver->resume(drm_dev);
return 0;
}
/* Display the version of drm_core. This doesn't work right in current design */
static ssize_t version_show(struct class *dev, char *buf)
{
@ -33,7 +72,7 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
* @owner: pointer to the module that is to "own" this struct drm_sysfs_class
* @name: pointer to a string for the name of this class.
*
* This is used to create a struct drm_sysfs_class pointer that can then be used
* This is used to create DRM class pointer that can then be used
* in calls to drm_sysfs_device_add().
*
* Note, the pointer created here is to be destroyed when finished by making a
@ -50,6 +89,9 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
goto err_out;
}
class->suspend = drm_sysfs_suspend;
class->resume = drm_sysfs_resume;
err = class_create_file(class, &class_attr_version);
if (err)
goto err_out_class;
@ -63,94 +105,100 @@ err_out:
}
/**
* drm_sysfs_destroy - destroys a struct drm_sysfs_class structure
* @cs: pointer to the struct drm_sysfs_class that is to be destroyed
* drm_sysfs_destroy - destroys DRM class
*
* Note, the pointer to be destroyed must have been created with a call to
* drm_sysfs_create().
* Destroy the DRM device class.
*/
void drm_sysfs_destroy(struct class *class)
void drm_sysfs_destroy(void)
{
if ((class == NULL) || (IS_ERR(class)))
if ((drm_class == NULL) || (IS_ERR(drm_class)))
return;
class_remove_file(class, &class_attr_version);
class_destroy(class);
class_remove_file(drm_class, &class_attr_version);
class_destroy(drm_class);
}
static ssize_t show_dri(struct class_device *class_device, char *buf)
static ssize_t show_dri(struct device *device, struct device_attribute *attr,
char *buf)
{
struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev;
struct drm_device *dev = to_drm_device(device);
if (dev->driver->dri_library_name)
return dev->driver->dri_library_name(dev, buf);
return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
}
static struct class_device_attribute class_device_attrs[] = {
static struct device_attribute device_attrs[] = {
__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
};
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @cs: pointer to the struct class that this device should be registered to.
* @dev: the dev_t for the device to be added.
* @device: a pointer to a struct device that is assiociated with this class device.
* @fmt: string for the class device's name
* drm_sysfs_device_release - do nothing
* @dev: Linux device
*
* A struct class_device will be created in sysfs, registered to the specified
* class. A "dev" file will be created, showing the dev_t for the device. The
* pointer to the struct class_device will be returned from the call. Any further
* sysfs files that might be required can be created using this pointer.
* Note: the struct class passed to this function must have previously been
* created with a call to drm_sysfs_create().
* Normally, this would free the DRM device associated with @dev, along
* with cleaning up any other stuff. But we do that in the DRM core, so
* this function can just return and hope that the core does its job.
*/
struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head)
static void drm_sysfs_device_release(struct device *dev)
{
struct class_device *class_dev;
int i, j, err;
return;
}
class_dev = class_device_create(cs, NULL,
MKDEV(DRM_MAJOR, head->minor),
&(head->dev->pdev)->dev,
"card%d", head->minor);
if (IS_ERR(class_dev)) {
err = PTR_ERR(class_dev);
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
* @head: DRM head in question
*
* Add a DRM device to the DRM's device model class. We use @dev's PCI device
* as the parent for the Linux device, and make sure it has a file containing
* the driver we're using (for userspace compatibility).
*/
int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
{
int err;
int i, j;
dev->dev.parent = &dev->pdev->dev;
dev->dev.class = drm_class;
dev->dev.release = drm_sysfs_device_release;
dev->dev.devt = head->device;
snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);
err = device_register(&dev->dev);
if (err) {
DRM_ERROR("device add failed: %d\n", err);
goto err_out;
}
class_set_devdata(class_dev, head);
for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
err = class_device_create_file(class_dev,
&class_device_attrs[i]);
for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
err = device_create_file(&dev->dev, &device_attrs[i]);
if (err)
goto err_out_files;
}
return class_dev;
return 0;
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
class_device_remove_file(class_dev,
&class_device_attrs[i]);
class_device_unregister(class_dev);
device_remove_file(&dev->dev, &device_attrs[i]);
device_unregister(&dev->dev);
err_out:
return ERR_PTR(err);
return err;
}
/**
* drm_sysfs_device_remove - removes a class device that was created with drm_sysfs_device_add()
* @dev: the dev_t of the device that was previously registered.
* drm_sysfs_device_remove - remove DRM device
* @dev: DRM device to remove
*
* This call unregisters and cleans up a class device that was created with a
* call to drm_sysfs_device_add()
*/
void drm_sysfs_device_remove(struct class_device *class_dev)
void drm_sysfs_device_remove(struct drm_device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
class_device_remove_file(class_dev, &class_device_attrs[i]);
class_device_unregister(class_dev);
for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
device_remove_file(&dev->dev, &device_attrs[i]);
device_unregister(&dev->dev);
}

View File

@ -180,7 +180,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
return NOPAGE_SIGBUS;
get_page(page);
DRM_DEBUG("shm_nopage 0x%lx\n", address);
DRM_DEBUG("0x%lx\n", address);
return page;
}
@ -294,7 +294,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
get_page(page);
DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
return page;
}

View File

@ -40,7 +40,7 @@
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
#define I810_BUF_HARDWARE 0
#define I810_BUF_HARDWARE 0
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
@ -570,7 +570,7 @@ static void i810EmitState(struct drm_device * dev)
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
DRM_DEBUG("%x\n", dirty);
if (dirty & I810_UPLOAD_BUFFERS) {
i810EmitDestVerified(dev, sarea_priv->BufferState);
@ -802,8 +802,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
int pitch = dev_priv->pitch;
RING_LOCALS;
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
__FUNCTION__,
DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
@ -848,8 +847,6 @@ static void i810_dma_quiescent(struct drm_device * dev)
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
/* printk("%s\n", __FUNCTION__); */
i810_kernel_lost_context(dev);
BEGIN_LP_RING(4);
@ -869,8 +866,6 @@ static int i810_flush_queue(struct drm_device * dev)
int i, ret = 0;
RING_LOCALS;
/* printk("%s\n", __FUNCTION__); */
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
@ -949,7 +944,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
if (vertex->idx < 0 || vertex->idx > dma->buf_count)
@ -987,7 +982,7 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
static int i810_swap_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEBUG("i810_swap_bufs\n");
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@ -1068,11 +1063,10 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
sarea_priv->dirty = 0x7f;
DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used);
DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
dev_priv->counter++;
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
DRM_DEBUG("i810_dma_dispatch_mc\n");
DRM_DEBUG("start : %lx\n", start);
DRM_DEBUG("used : %d\n", used);
DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
@ -1179,7 +1173,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
@ -1189,7 +1183,7 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
if (dev_priv->current_page != 0)
i810_dma_dispatch_flip(dev);
@ -1202,7 +1196,7 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
{
drm_i810_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);

View File

@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
@ -134,7 +134,7 @@ extern int i810_max_ioctl;
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)
#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
#define I810_READ16(reg) I810_DEREF16(reg)
#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
@ -145,7 +145,7 @@ extern int i810_max_ioctl;
#define BEGIN_LP_RING(n) do { \
if (I810_VERBOSE) \
DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \
DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
if (dev_priv->ring.space < n*4) \
i810_wait_ring(dev, n*4); \
dev_priv->ring.space -= n*4; \
@ -155,19 +155,19 @@ extern int i810_max_ioctl;
} while (0)
#define ADVANCE_LP_RING() do { \
if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
dev_priv->ring.tail = outring; \
if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
dev_priv->ring.tail = outring; \
I810_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
#define OUT_RING(n) do { \
#define OUT_RING(n) do { \
if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outring += 4; \
outring &= ringmask; \
} while (0)
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@ -184,28 +184,28 @@ extern int i810_max_ioctl;
#define I810REG_HWSTAM 0x02098
#define I810REG_INT_IDENTITY_R 0x020a4
#define I810REG_INT_MASK_R 0x020a8
#define I810REG_INT_MASK_R 0x020a8
#define I810REG_INT_ENABLE_R 0x020a0
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define TAIL_ADDR 0x000FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0x00FFFFF8
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x000FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0x00FFFFF8
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x000FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)

View File

@ -42,7 +42,7 @@
#define I830_BUF_FREE 2
#define I830_BUF_CLIENT 1
#define I830_BUF_HARDWARE 0
#define I830_BUF_HARDWARE 0
#define I830_BUF_UNMAPPED 0
#define I830_BUF_MAPPED 1

View File

@ -12,9 +12,9 @@
#define _I830_DEFINES_
#define I830_DMA_BUF_ORDER 12
#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
#define I830_DMA_BUF_NR 256
#define I830_NR_SAREA_CLIPRECTS 8
#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
#define I830_DMA_BUF_NR 256
#define I830_NR_SAREA_CLIPRECTS 8
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
@ -58,7 +58,7 @@
#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
#define I830_UPLOAD_STIPPLE 0x8000000
#define I830_UPLOAD_STIPPLE 0x8000000
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,

View File

@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
@ -156,8 +156,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
printk("BEGIN_LP_RING(%d) in %s\n", \
n, __FUNCTION__); \
printk("BEGIN_LP_RING(%d)\n", (n)); \
if (dev_priv->ring.space < n*4) \
i830_wait_ring(dev, n*4, __FUNCTION__); \
outcount = 0; \
@ -183,7 +182,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@ -203,30 +202,30 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
#define I830REG_HWSTAM 0x02098
#define I830REG_INT_IDENTITY_R 0x020a4
#define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_ENABLE_R 0x020a0
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0x0xFFFFF000
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x001FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0x0xFFFFF000
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x001FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
@ -279,9 +278,9 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_WAIT_FOR_EVENT ((0x3<<23))

View File

@ -144,7 +144,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_wait_t *irqwait = data;
drm_i830_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);

View File

@ -31,17 +31,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
dev->pci_device == 0x2982 || \
dev->pci_device == 0x2992 || \
dev->pci_device == 0x29A2 || \
dev->pci_device == 0x2A02 || \
dev->pci_device == 0x2A12)
#define IS_G33(dev) (dev->pci_device == 0x29b2 || \
dev->pci_device == 0x29c2 || \
dev->pci_device == 0x29d2)
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
* the head pointer changes, so that EBUSY only happens if the ring
@ -90,6 +79,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
@ -97,52 +87,42 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq)
drm_irq_uninstall(dev);
if (dev->dev_private) {
drm_i915_private_t *dev_priv =
(drm_i915_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start) {
drm_core_ioremapfree(&dev_priv->ring.map, dev);
dev_priv->ring.virtual_start = 0;
dev_priv->ring.map.handle = 0;
dev_priv->ring.map.size = 0;
}
if (dev_priv->ring.virtual_start) {
drm_core_ioremapfree(&dev_priv->ring.map, dev);
}
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x2080, 0x1ffff000);
}
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
dev->dev_private = NULL;
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x2080, 0x1ffff000);
}
return 0;
}
static int i915_initialize(struct drm_device * dev,
drm_i915_private_t * dev_priv,
drm_i915_init_t * init)
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
memset(dev_priv, 0, sizeof(drm_i915_private_t));
drm_i915_private_t *dev_priv = dev->dev_private;
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
@ -165,7 +145,6 @@ static int i915_initialize(struct drm_device * dev,
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@ -197,7 +176,6 @@ static int i915_initialize(struct drm_device * dev,
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
@ -209,7 +187,6 @@ static int i915_initialize(struct drm_device * dev,
I915_WRITE(0x02080, dev_priv->dma_status_page);
}
DRM_DEBUG("Enabled hardware status page\n");
dev->dev_private = (void *)dev_priv;
return 0;
}
@ -254,17 +231,12 @@ static int i915_dma_resume(struct drm_device * dev)
static int i915_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv;
drm_i915_init_t *init = data;
int retcode = 0;
switch (init->func) {
case I915_INIT_DMA:
dev_priv = drm_alloc(sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
if (dev_priv == NULL)
return -ENOMEM;
retcode = i915_initialize(dev, dev_priv, init);
retcode = i915_initialize(dev, init);
break;
case I915_CLEANUP_DMA:
retcode = i915_dma_cleanup(dev);
@ -351,7 +323,7 @@ static int validate_cmd(int cmd)
{
int ret = do_validate_cmd(cmd);
/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
return ret;
}
@ -685,7 +657,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -719,7 +691,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
drm_i915_setparam_t *param = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -749,7 +721,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_i915_hws_addr_t *hws = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -757,7 +729,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws->addr;
dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
dev_priv->hws_map.type = 0;
dev_priv->hws_map.flags = 0;
@ -765,7 +737,6 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_core_ioremap(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
dev_priv->status_gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
@ -784,6 +755,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long base, size;
int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@ -791,24 +766,51 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_i915_private_t));
dev->dev_private = (void *)dev_priv;
/* Add register map (needed for suspend/resume) */
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
_DRM_KERNEL | _DRM_DRIVER,
&dev_priv->mmio_map);
return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->mmio_map)
drm_rmmap(dev, dev_priv->mmio_map);
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
return 0;
}
void i915_driver_lastclose(struct drm_device * dev)
{
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->agp_heap)
i915_mem_takedown(&(dev_priv->agp_heap));
}
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
drm_i915_private_t *dev_priv = dev->dev_private;
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
struct drm_ioctl_desc i915_ioctls[] = {

View File

@ -38,6 +38,465 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
enum pipe {
PIPE_A = 0,
PIPE_B,
};
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pipe == PIPE_A)
return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
else
return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
{
outb(reg, index_port);
return inb(data_port);
}
static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
{
inb(st01);
outb(palette_enable | reg, VGA_AR_INDEX);
return inb(VGA_AR_DATA_READ);
}
static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
{
inb(st01);
outb(palette_enable | reg, VGA_AR_INDEX);
outb(val, VGA_AR_DATA_WRITE);
}
static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
{
outb(reg, index_port);
outb(val, data_port);
}
static void i915_save_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
dev_priv->saveDACMASK = inb(VGA_DACMASK);
/* DACCRX automatically increments during read */
outb(0, VGA_DACRX);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
/* MSR bits */
dev_priv->saveMSR = inb(VGA_MSR_READ);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* CRT controller regs */
i915_write_indexed(cr_index, cr_data, 0x11,
i915_read_indexed(cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i < 0x24; i++)
dev_priv->saveCR[i] =
i915_read_indexed(cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
dev_priv->saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
inb(st01);
dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
for (i = 0; i < 20; i++)
dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
inb(st01);
outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
dev_priv->saveGR[i] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
dev_priv->saveGR[0x10] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
dev_priv->saveGR[0x11] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
dev_priv->saveGR[0x18] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
dev_priv->saveSR[i] =
i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
}
static void i915_restore_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* MSR bits */
outb(dev_priv->saveMSR, VGA_MSR_WRITE);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
dev_priv->saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
for (i = 0; i < 0x24; i++)
i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
dev_priv->saveGR[i]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
dev_priv->saveGR[0x10]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
dev_priv->saveGR[0x11]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
dev_priv->saveGR[0x18]);
/* Attribute controller registers */
for (i = 0; i < 20; i++)
i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
inb(st01); /* switch back to index mode */
outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
/* VGA color palette registers */
outb(dev_priv->saveDACMASK, VGA_DACMASK);
/* The DAC write index automatically increments during writes */
outb(0, VGA_DACWX);
/* Write 3 bytes of color data to each index */
for (i = 0; i < 256 * 3; i++)
outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
}
static int i915_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (!dev || !dev_priv) {
printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
pci_save_state(dev->pdev);
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
dev_priv->saveFPA0 = I915_READ(FPA0);
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
if (IS_I965G(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPABASE = I915_READ(DSPABASE);
if (IS_I965G(dev)) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
dev_priv->saveFPB0 = I915_READ(FPB0);
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
if (IS_I965G(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
/* CRT state */
dev_priv->saveADPA = I915_READ(ADPA);
/* LVDS state */
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (IS_I965G(dev))
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
if (!IS_I830(dev) && !IS_845G(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
/* FIXME: save TV & SDVO state */
/* FBC state */
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
/* VGA state */
dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
i915_save_vga(dev);
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
return 0;
}
static int i915_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev))
return -1;
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
udelay(150);
}
I915_WRITE(FPA0, dev_priv->saveFPA0);
I915_WRITE(FPA1, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
udelay(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) &&
(dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS))
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(DSPABASE, I915_READ(DSPABASE));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
udelay(150);
}
I915_WRITE(FPB0, dev_priv->saveFPB0);
I915_WRITE(FPB1, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
udelay(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) &&
(dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS))
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
/* FIXME: restore TV & SDVO state */
/* FBC info */
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
/* VGA state */
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
udelay(150);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
i915_restore_vga(dev);
return 0;
}
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
@ -47,8 +506,11 @@ static struct drm_driver driver = {
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
DRIVER_IRQ_VBL2,
.load = i915_driver_load,
.unload = i915_driver_unload,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
.vblank_wait = i915_driver_vblank_wait,
.vblank_wait2 = i915_driver_vblank_wait2,
@ -77,7 +539,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,

View File

@ -114,6 +114,85 @@ typedef struct drm_i915_private {
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
unsigned int swaps_pending;
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
u32 savePIPEBSRC;
u32 saveFPA0;
u32 saveFPA1;
u32 saveDPLL_A;
u32 saveDPLL_A_MD;
u32 saveHTOTAL_A;
u32 saveHBLANK_A;
u32 saveHSYNC_A;
u32 saveVTOTAL_A;
u32 saveVBLANK_A;
u32 saveVSYNC_A;
u32 saveBCLRPAT_A;
u32 saveDSPASTRIDE;
u32 saveDSPASIZE;
u32 saveDSPAPOS;
u32 saveDSPABASE;
u32 saveDSPASURF;
u32 saveDSPATILEOFF;
u32 savePFIT_PGM_RATIOS;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveFPB0;
u32 saveFPB1;
u32 saveDPLL_B;
u32 saveDPLL_B_MD;
u32 saveHTOTAL_B;
u32 saveHBLANK_B;
u32 saveHSYNC_B;
u32 saveVTOTAL_B;
u32 saveVBLANK_B;
u32 saveVSYNC_B;
u32 saveBCLRPAT_B;
u32 saveDSPBSTRIDE;
u32 saveDSPBSIZE;
u32 saveDSPBPOS;
u32 saveDSPBBASE;
u32 saveDSPBSURF;
u32 saveDSPBTILEOFF;
u32 saveVCLK_DIVISOR_VGA0;
u32 saveVCLK_DIVISOR_VGA1;
u32 saveVCLK_POST_DIV;
u32 saveVGACNTRL;
u32 saveADPA;
u32 saveLVDS;
u32 saveLVDSPP_ON;
u32 saveLVDSPP_OFF;
u32 saveDVOA;
u32 saveDVOB;
u32 saveDVOC;
u32 savePP_ON;
u32 savePP_OFF;
u32 savePP_CONTROL;
u32 savePP_CYCLE;
u32 savePFIT_CONTROL;
u32 save_palette_a[256];
u32 save_palette_b[256];
u32 saveFBC_CFB_BASE;
u32 saveFBC_LL_BASE;
u32 saveFBC_CONTROL;
u32 saveFBC_CONTROL2;
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF2[3];
u8 saveMSR;
u8 saveSR[8];
u8 saveGR[24];
u8 saveAR_INDEX;
u8 saveAR[20];
u8 saveDACMASK;
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[36];
} drm_i915_private_t;
extern struct drm_ioctl_desc i915_ioctls[];
@ -122,6 +201,7 @@ extern int i915_max_ioctl;
/* i915_dma.c */
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
@ -163,7 +243,7 @@ extern void i915_mem_release(struct drm_device * dev,
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_VERBOSE 0
@ -173,9 +253,8 @@ extern void i915_mem_release(struct drm_device * dev,
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
(n), __FUNCTION__); \
if (dev_priv->ring.space < (n)*4) \
DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
@ -200,7 +279,51 @@ extern void i915_mem_release(struct drm_device * dev,
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
/* Extended config space */
#define LBB 0xf4
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
#define VGA_ST01_CGA 0x3da
#define VGA_MSR_WRITE 0x3c2
#define VGA_MSR_READ 0x3cc
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)
#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA 0x3c5
#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
#define VGA_AR_DATA_WRITE 0x3c0
#define VGA_AR_DATA_READ 0x3c1
#define VGA_GR_INDEX 0x3ce
#define VGA_GR_DATA 0x3cf
/* GR05 */
#define VGA_GR_MEM_READ_MODE_SHIFT 3
#define VGA_GR_MEM_READ_MODE_PLANE 1
/* GR06 */
#define VGA_GR_MEM_MODE_MASK 0xc
#define VGA_GR_MEM_MODE_SHIFT 2
#define VGA_GR_MEM_A0000_AFFFF 0
#define VGA_GR_MEM_A0000_BFFFF 1
#define VGA_GR_MEM_B0000_B7FFF 2
#define VGA_GR_MEM_B0000_BFFFF 3
#define VGA_DACMASK 0x3c6
#define VGA_DACRX 0x3c7
#define VGA_DACWX 0x3c8
#define VGA_DACDATA 0x3c9
#define VGA_CR_INDEX_MDA 0x3b4
#define VGA_CR_DATA_MDA 0x3b5
#define VGA_CR_INDEX_CGA 0x3d4
#define VGA_CR_DATA_CGA 0x3d5
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@ -215,9 +338,47 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)
/* Framebuffer compression */
#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
#define FBC_LL_BASE 0x03204 /* 4k page aligned */
#define FBC_CONTROL 0x03208
#define FBC_CTL_EN (1<<31)
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_COMMAND 0x0320c
#define FBC_CMD_COMPRESS (1<<0)
#define FBC_STATUS 0x03210
#define FBC_STAT_COMPRESSING (1<<31)
#define FBC_STAT_COMPRESSED (1<<30)
#define FBC_STAT_MODIFIED (1<<29)
#define FBC_STAT_CURRENT_LINE (1<<0)
#define FBC_CONTROL2 0x03214
#define FBC_CTL_FENCE_DBL (0<<4)
#define FBC_CTL_IDLE_IMM (0<<2)
#define FBC_CTL_IDLE_FULL (1<<2)
#define FBC_CTL_IDLE_LINE (2<<2)
#define FBC_CTL_IDLE_DEBUG (3<<2)
#define FBC_CTL_CPU_FENCE (1<<1)
#define FBC_CTL_PLANEA (0<<0)
#define FBC_CTL_PLANEB (1<<0)
#define FBC_FENCE_OFF 0x0321b
#define FBC_LL_SIZE (1536)
#define FBC_LL_PAD (32)
/* Interrupt bits:
*/
#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)
#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
#define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4
#define I915REG_INT_MASK_R 0x020a8
#define I915REG_INT_ENABLE_R 0x020a0
#define I915REG_PIPEASTAT 0x70024
@ -229,7 +390,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
#define SR01_SCREEN_OFF (1<<5)
#define PPCR 0x61204
#define PPCR_ON (1<<0)
@ -249,31 +410,129 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define ADPA_DPMS_OFF (3<<10)
#define NOPID 0x2094
#define LP_RING 0x2030
#define HP_RING 0x2040
/* The binner has its own ring buffer:
*/
#define HWB_RING 0x2400
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0xFFFFF000
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x001FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
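/*
 * Illustrative sketch, not part of this patch: how the LP_RING register
 * fields above compose when (re)arming the ring.  "i915_ring_setup_sketch",
 * ring_start and ring_size are hypothetical names; it assumes dev_priv and
 * the I915_WRITE macro from this header are in scope, ring_start is page
 * aligned and ring_size is a multiple of 4k.
 */
static inline void i915_ring_setup_sketch(drm_i915_private_t *dev_priv,
					  u32 ring_start, u32 ring_size)
{
	/* Quiesce the ring before reprogramming it */
	I915_WRITE(LP_RING + RING_LEN, 0);
	I915_WRITE(LP_RING + RING_HEAD, 0);
	I915_WRITE(LP_RING + RING_TAIL, 0);

	/* Page-aligned base goes in the START_ADDR field */
	I915_WRITE(LP_RING + RING_START, ring_start & START_ADDR);

	/* Length is (size minus one page) in RING_NR_PAGES, then mark valid */
	I915_WRITE(LP_RING + RING_LEN,
		   ((ring_size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT | RING_VALID);
}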
/* Instruction parser error reg:
*/
#define IPEIR 0x2088
/* Scratch pad debug 0 reg:
*/
#define SCPD0 0x209c
/* Error status reg:
*/
#define ESR 0x20b8
/* Secondary DMA fetch address debug reg:
*/
#define DMA_FADD_S 0x20d4
/* Cache mode 0 reg.
* - Manipulating render cache behaviour is central
* to the concept of zone rendering; tuning this reg can help avoid
* unnecessary render cache reads and even writes (for z/stencil)
* at beginning and end of scene.
*
* - To change a bit, write to this reg with a mask bit set and the
* bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
*/
#define Cache_Mode_0 0x2120
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_DEPTH_EVICT_DISABLE (1<<4)
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
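/*
 * Illustrative sketch, not part of this patch: the masked-write convention
 * described in the comment above, applied to one of the CM0_* bits.
 * "i915_cache_mode_0_sketch" is a hypothetical helper; it assumes dev_priv
 * and the I915_WRITE macro from this header are in scope.
 */
static inline void i915_cache_mode_0_sketch(drm_i915_private_t *dev_priv)
{
	/* Set CM0_IZ_OPT_DISABLE: mask bit in the high half, value bit low */
	I915_WRITE(Cache_Mode_0,
		   (CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) | CM0_IZ_OPT_DISABLE);

	/* Clear the same bit: mask bit set, value bit left at zero */
	I915_WRITE(Cache_Mode_0, CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT);
}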
/* Graphics flush control. A CPU write flushes the GWB of all writes.
* The data is discarded.
*/
#define GFX_FLSH_CNTL 0x2170
/* Binner control. Defines the location of the bin pointer list:
*/
#define BINCTL 0x2420
#define BC_MASK (1 << 9)
/* Binned scene info.
*/
#define BINSCENE 0x2428
#define BS_OP_LOAD (1 << 8)
#define BS_MASK (1 << 22)
/* Bin command parser debug reg:
*/
#define BCPD 0x2480
/* Bin memory control debug reg:
*/
#define BMCD 0x2484
/* Bin data cache debug reg:
*/
#define BDCD 0x2488
/* Binner pointer cache debug reg:
*/
#define BPCD 0x248c
/* Binner scratch pad debug reg:
*/
#define BINSKPD 0x24f0
/* HWB scratch pad debug reg:
*/
#define HWBSKPD 0x24f4
/* Binner memory pool reg:
*/
#define BMP_BUFFER 0x2430
#define BMP_PAGE_SIZE_4K (0 << 10)
#define BMP_BUFFER_SIZE_SHIFT 1
#define BMP_ENABLE (1 << 0)
/* Get/put memory from the binner memory pool:
*/
#define BMP_GET 0x2438
#define BMP_PUT 0x2440
#define BMP_OFFSET_SHIFT 5
/* 3D state packets:
*/
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
@ -290,17 +549,19 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@ -308,9 +569,538 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
/* Display regs */
#define DSPACNTR 0x70180
#define DSPBCNTR 0x71180
#define DISPPLANE_SEL_PIPE_MASK (1<<24)
/* Define the region of interest for the binner:
*/
#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5])
#define CMD_MI_FLUSH (0x04 << 23)
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_READ_FLUSH (1 << 0)
#define MI_EXE_FLUSH (1 << 1)
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define BREADCRUMB_BITS 31
#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
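/*
 * Illustrative sketch, not part of this patch: the breadcrumb is dword 5 of
 * the hardware status page and is advanced by the STORE_DWORD_IDX +
 * GFX_OP_USER_INTERRUPT sequence emitted in i915_emit_irq, so waiters simply
 * compare it against a target sequence number (see i915_wait_irq).
 * "i915_breadcrumb_passed_sketch" is a hypothetical helper name.
 */
static inline int i915_breadcrumb_passed_sketch(drm_i915_private_t *dev_priv,
						u32 target)
{
	return READ_BREADCRUMB(dev_priv) >= target;
}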
#define BLC_PWM_CTL 0x61254
#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250
/**
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
*
* The actual value is this field multiplied by two.
*/
#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
#define BLM_LEGACY_MODE (1 << 16)
/**
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
*
* This field must be no greater than the number of cycles in the complete
* backlight modulation cycle.
*/
#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
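/*
 * Illustrative sketch, not part of this patch: programming a 50% duty cycle
 * with the BLC_PWM_CTL layout documented above.  The frequency field holds
 * half the total cycle count, and the duty-cycle field must not exceed that
 * total.  "i915_backlight_half_sketch" is a hypothetical helper; dev_priv
 * and the I915_READ/I915_WRITE macros are assumed to be in scope.
 */
static inline void i915_backlight_half_sketch(drm_i915_private_t *dev_priv)
{
	u32 ctl = I915_READ(BLC_PWM_CTL);
	u32 total = ((ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
		     BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;

	ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
	ctl |= (total / 2) & BACKLIGHT_DUTY_CYCLE_MASK;
	I915_WRITE(BLC_PWM_CTL, ctl);
}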
#define I915_GCFGC 0xf0
#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
#define I915_DISPLAY_CLOCK_MASK (7 << 4)
#define I855_HPLLCC 0xc0
#define I855_CLOCK_CONTROL_MASK (3 << 0)
#define I855_CLOCK_133_200 (0 << 0)
#define I855_CLOCK_100_200 (1 << 0)
#define I855_CLOCK_100_133 (2 << 0)
#define I855_CLOCK_166_250 (3 << 0)
/* p317, 319
*/
#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
#define VCLK2_VCO_N 0x600a
#define VCLK2_VCO_DIV_SEL 0x6012
#define VCLK_DIVISOR_VGA0 0x6000
#define VCLK_DIVISOR_VGA1 0x6004
#define VCLK_POST_DIV 0x6010
/** Selects a post divisor of 4 instead of 2. */
# define VGA1_PD_P2_DIV_4 (1 << 15)
/** Overrides the p2 post divisor field */
# define VGA1_PD_P1_DIV_2 (1 << 13)
# define VGA1_PD_P1_SHIFT 8
/** P1 value is 2 greater than this field */
# define VGA1_PD_P1_MASK (0x1f << 8)
/** Selects a post divisor of 4 instead of 2. */
# define VGA0_PD_P2_DIV_4 (1 << 7)
/** Overrides the p2 post divisor field */
# define VGA0_PD_P1_DIV_2 (1 << 5)
# define VGA0_PD_P1_SHIFT 0
/** P1 value is 2 greater than this field */
# define VGA0_PD_P1_MASK (0x1f << 0)
/* I830 CRTC registers */
#define HTOTAL_A 0x60000
#define HBLANK_A 0x60004
#define HSYNC_A 0x60008
#define VTOTAL_A 0x6000c
#define VBLANK_A 0x60010
#define VSYNC_A 0x60014
#define PIPEASRC 0x6001c
#define BCLRPAT_A 0x60020
#define VSYNCSHIFT_A 0x60028
#define HTOTAL_B 0x61000
#define HBLANK_B 0x61004
#define HSYNC_B 0x61008
#define VTOTAL_B 0x6100c
#define VBLANK_B 0x61010
#define VSYNC_B 0x61014
#define PIPEBSRC 0x6101c
#define BCLRPAT_B 0x61020
#define VSYNCSHIFT_B 0x61028
#define PP_STATUS 0x61200
# define PP_ON (1 << 31)
/**
* Indicates that all dependencies of the panel are on:
*
* - PLL enabled
* - pipe enabled
* - LVDS/DVOB/DVOC on
*/
# define PP_READY (1 << 30)
# define PP_SEQUENCE_NONE (0 << 28)
# define PP_SEQUENCE_ON (1 << 28)
# define PP_SEQUENCE_OFF (2 << 28)
# define PP_SEQUENCE_MASK 0x30000000
#define PP_CONTROL 0x61204
# define POWER_TARGET_ON (1 << 0)
#define LVDSPP_ON 0x61208
#define LVDSPP_OFF 0x6120c
#define PP_CYCLE 0x61210
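/*
 * Illustrative sketch, not part of this patch: the panel power-on handshake
 * implied by the PP_* bits above -- request power through PP_CONTROL, then
 * poll PP_STATUS until the sequencer reports the panel (PLL, pipe and LVDS)
 * is up.  "i915_panel_on_sketch" is a hypothetical helper; a real driver
 * would bound the wait and sleep instead of spinning.
 */
static inline void i915_panel_on_sketch(drm_i915_private_t *dev_priv)
{
	I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | POWER_TARGET_ON);

	while ((I915_READ(PP_STATUS) & (PP_ON | PP_READY)) !=
	       (PP_ON | PP_READY))
		; /* busy-wait; a real driver would msleep() with a timeout */
}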
#define PFIT_CONTROL 0x61230
# define PFIT_ENABLE (1 << 31)
# define PFIT_PIPE_MASK (3 << 29)
# define PFIT_PIPE_SHIFT 29
# define VERT_INTERP_DISABLE (0 << 10)
# define VERT_INTERP_BILINEAR (1 << 10)
# define VERT_INTERP_MASK (3 << 10)
# define VERT_AUTO_SCALE (1 << 9)
# define HORIZ_INTERP_DISABLE (0 << 6)
# define HORIZ_INTERP_BILINEAR (1 << 6)
# define HORIZ_INTERP_MASK (3 << 6)
# define HORIZ_AUTO_SCALE (1 << 5)
# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
#define PFIT_PGM_RATIOS 0x61234
# define PFIT_VERT_SCALE_MASK 0xfff00000
# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
#define PFIT_AUTO_RATIOS 0x61238
#define DPLL_A 0x06014
#define DPLL_B 0x06018
# define DPLL_VCO_ENABLE (1 << 31)
# define DPLL_DVO_HIGH_SPEED (1 << 30)
# define DPLL_SYNCLOCK_ENABLE (1 << 29)
# define DPLL_VGA_MODE_DIS (1 << 28)
# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
# define DPLL_MODE_MASK (3 << 26)
# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
/**
* The i830 generation, in DAC/serial mode, defines p1 as two plus this
* bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
*/
# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/**
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
* this field (only one bit may be set).
*/
# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
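/*
 * Illustrative sketch, not part of this patch: decoding the i830-generation
 * P1 post divider from a DPLL value per the two comments above.  In LVDS
 * mode P1 is the (1-based) bit number set in the field; in DAC/serial mode
 * it is the field value plus two, or simply 2 when PLL_P1_DIVIDE_BY_TWO is
 * set.  "i830_dpll_p1_sketch" is a hypothetical helper; ffs() is the kernel
 * find-first-set helper.
 */
static inline int i830_dpll_p1_sketch(u32 dpll, int is_lvds)
{
	if (is_lvds)
		return ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
			   DPLL_FPA01_P1_POST_DIV_SHIFT);

	if (dpll & PLL_P1_DIVIDE_BY_TWO)
		return 2;

	return ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
		DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
}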
# define PLL_REF_INPUT_DREFCLK (0 << 13)
# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
# define PLL_REF_INPUT_MASK (3 << 13)
# define PLL_LOAD_PULSE_PHASE_SHIFT 9
/*
* Parallel to Serial Load Pulse phase selection.
* Selects the phase for the 10X DPLL clock for the PCIe
* digital display port. The range is 4 to 13; 10 or more
* is just a flip delay. The default is 6
*/
# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
/**
* SDVO multiplier for 945G/GM. Not used on 965.
*
* \sa DPLL_MD_UDI_MULTIPLIER_MASK
*/
# define SDVO_MULTIPLIER_MASK 0x000000ff
# define SDVO_MULTIPLIER_SHIFT_HIRES 4
# define SDVO_MULTIPLIER_SHIFT_VGA 0
/** @defgroup DPLL_MD
* @{
*/
/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
#define DPLL_A_MD 0x0601c
/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
#define DPLL_B_MD 0x06020
/**
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
* Value is pixels minus 1. Must be set to 1 pixel for SDVO.
*/
# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
# define DPLL_MD_UDI_DIVIDER_SHIFT 24
/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
/**
* SDVO/UDI pixel multiplier.
*
* SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
* clock rate is 10 times the DPLL clock. At low resolution/refresh rate
* modes, the bus rate would be below the limits, so SDVO allows for stuffing
* dummy bytes in the datastream at an increased clock rate, with both sides of
* the link knowing how many bytes are filler.
*
* So, for a mode with a dotclock of 65 MHz, we would want to double the clock
* rate to 130 MHz to get a bus rate of 1.3 GHz. The DPLL clock rate would be
* set to 130 MHz, and the SDVO multiplier set to 2x in this register and
* through an SDVO command.
*
* This register field has values of multiplication factor minus 1, with
* a maximum multiplier of 5 for SDVO.
*/
# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
* This is best set to the default value (3) or the CRT won't work. No,
* I don't entirely understand what this does...
*/
# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
/** @} */
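/*
 * Illustrative sketch, not part of this patch: the 65 MHz example from the
 * comment above.  A 65 MHz dot clock is below the SDVO bus-rate floor
 * (bus rate = 10x the DPLL clock), so the DPLL is run at 130 MHz and the
 * UDI multiplier field is programmed with the factor minus one.
 * "i965_sdvo_2x_sketch" is a hypothetical helper for the pipe A / G965 case;
 * dev_priv and the register macros are assumed to be in scope.
 */
static inline void i965_sdvo_2x_sketch(drm_i915_private_t *dev_priv)
{
	u32 md = I915_READ(DPLL_A_MD);

	md &= ~DPLL_MD_UDI_MULTIPLIER_MASK;
	md |= (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
	I915_WRITE(DPLL_A_MD, md);
}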
#define DPLL_TEST 0x606c
# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
# define DPLLB_TEST_N_BYPASS (1 << 19)
# define DPLLB_TEST_M_BYPASS (1 << 18)
# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
# define DPLLA_TEST_N_BYPASS (1 << 3)
# define DPLLA_TEST_M_BYPASS (1 << 2)
# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define ADPA 0x61100
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
#define ADPA_VSYNC_CNTL_ENABLE 0
#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
#define ADPA_HSYNC_CNTL_ENABLE 0
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
#define ADPA_VSYNC_ACTIVE_LOW 0
#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
#define ADPA_HSYNC_ACTIVE_LOW 0
#define FPA0 0x06040
#define FPA1 0x06044
#define FPB0 0x06048
#define FPB1 0x0604c
# define FP_N_DIV_MASK 0x003f0000
# define FP_N_DIV_SHIFT 16
# define FP_M1_DIV_MASK 0x00003f00
# define FP_M1_DIV_SHIFT 8
# define FP_M2_DIV_MASK 0x0000003f
# define FP_M2_DIV_SHIFT 0
#define PORT_HOTPLUG_EN 0x61110
# define SDVOB_HOTPLUG_INT_EN (1 << 26)
# define SDVOC_HOTPLUG_INT_EN (1 << 25)
# define TV_HOTPLUG_INT_EN (1 << 18)
# define CRT_HOTPLUG_INT_EN (1 << 9)
# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
#define PORT_HOTPLUG_STAT 0x61114
# define CRT_HOTPLUG_INT_STATUS (1 << 11)
# define TV_HOTPLUG_INT_STATUS (1 << 10)
# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
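/*
 * Illustrative sketch, not part of this patch: a forced CRT presence probe
 * using the hotplug bits above -- trigger detection, wait for the hardware
 * to clear the trigger, then read back the monitor status.
 * "i915_crt_present_sketch" is a hypothetical helper; a real implementation
 * would time-bound the wait.
 */
static inline int i915_crt_present_sketch(drm_i915_private_t *dev_priv)
{
	I915_WRITE(PORT_HOTPLUG_EN,
		   I915_READ(PORT_HOTPLUG_EN) | CRT_HOTPLUG_FORCE_DETECT);

	while (I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)
		; /* cleared by hardware when the probe completes */

	return (I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
	       CRT_HOTPLUG_MONITOR_NONE;
}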
#define SDVOB 0x61140
#define SDVOC 0x61160
#define SDVO_ENABLE (1 << 31)
#define SDVO_PIPE_B_SELECT (1 << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
/**
* 915G/GM SDVO pixel multiplier.
*
* Programmed value is multiplier - 1, up to 5x.
*
* \sa DPLL_MD_UDI_MULTIPLIER_MASK
*/
#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
#define SDVO_PORT_MULTIPLY_SHIFT 23
#define SDVO_PHASE_SELECT_MASK (15 << 19)
#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
#define SDVOC_GANG_MODE (1 << 16)
#define SDVO_BORDER_ENABLE (1 << 7)
#define SDVOB_PCIE_CONCURRENCY (1 << 3)
#define SDVO_DETECTED (1 << 2)
/* Bits to be preserved when writing */
#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
#define SDVOC_PRESERVE_MASK (1 << 17)
/** @defgroup LVDS
* @{
*/
/**
* This register controls the LVDS output enable, pipe selection, and data
* format selection.
*
* All of the clock/data pairs are force powered down by power sequencing.
*/
#define LVDS 0x61180
/**
* Enables the LVDS port. This bit must be set before DPLLs are enabled, as
* the DPLL semantics change when the LVDS is assigned to that pipe.
*/
# define LVDS_PORT_EN (1 << 31)
/** Selects pipe B for LVDS data. Must be set on pre-965. */
# define LVDS_PIPEB_SELECT (1 << 30)
/**
* Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
* pixel.
*/
# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
/**
* Controls the A3 data pair, which contains the additional LSBs for 24 bit
* mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
* on.
*/
# define LVDS_A3_POWER_MASK (3 << 6)
# define LVDS_A3_POWER_DOWN (0 << 6)
# define LVDS_A3_POWER_UP (3 << 6)
/**
* Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
* is set.
*/
# define LVDS_CLKB_POWER_MASK (3 << 4)
# define LVDS_CLKB_POWER_DOWN (0 << 4)
# define LVDS_CLKB_POWER_UP (3 << 4)
/**
* Controls the B0-B3 data pairs. This must be set to match the DPLL p2
* setting for whether we are in dual-channel mode. The B3 pair will
* additionally only be powered up when LVDS_A3_POWER_UP is set.
*/
# define LVDS_B0B3_POWER_MASK (3 << 2)
# define LVDS_B0B3_POWER_DOWN (0 << 2)
# define LVDS_B0B3_POWER_UP (3 << 2)
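/*
 * Illustrative sketch, not part of this patch: enabling LVDS on pipe B for
 * an 18 bpp single-channel panel with the bits documented above -- port
 * enable, pipe B select, and only the A0-A2/CLKA pairs powered up (A3 and
 * the B channel stay down).  Per the comment above, this must happen before
 * the DPLL is enabled.  "i915_lvds_pipeb_sketch" is a hypothetical helper.
 */
static inline void i915_lvds_pipeb_sketch(drm_i915_private_t *dev_priv)
{
	I915_WRITE(LVDS, LVDS_PORT_EN | LVDS_PIPEB_SELECT |
			 LVDS_A0A2_CLKA_POWER_UP);
}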
#define PIPEACONF 0x70008
#define PIPEACONF_ENABLE (1<<31)
#define PIPEACONF_DISABLE 0
#define PIPEACONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
#define PIPEACONF_SINGLE_WIDE 0
#define PIPEACONF_PIPE_UNLOCKED 0
#define PIPEACONF_PIPE_LOCKED (1<<25)
#define PIPEACONF_PALETTE 0
#define PIPEACONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPEBCONF 0x71008
#define PIPEBCONF_ENABLE (1<<31)
#define PIPEBCONF_DISABLE 0
#define PIPEBCONF_DOUBLE_WIDE (1<<30)
#define PIPEBCONF_DISABLE 0
#define PIPEBCONF_GAMMA (1<<24)
#define PIPEBCONF_PALETTE 0
#define PIPEBGCMAXRED 0x71010
#define PIPEBGCMAXGREEN 0x71014
#define PIPEBGCMAXBLUE 0x71018
#define PIPEBSTAT 0x71024
#define PIPEBFRAMEHIGH 0x71040
#define PIPEBFRAMEPIXEL 0x71044
#define DSPACNTR 0x70180
#define DSPBCNTR 0x71180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
#define DISPPLANE_8BPP (0x2<<26)
#define DISPPLANE_15_16BPP (0x4<<26)
#define DISPPLANE_16BPP (0x5<<26)
#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
#define DISPPLANE_32BPP (0x7<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_MASK (1<<24)
#define DISPPLANE_SEL_PIPE_A 0
#define DISPPLANE_SEL_PIPE_B (1<<24)
#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
#define DISPPLANE_SRC_KEY_DISABLE 0
#define DISPPLANE_LINE_DOUBLE (1<<20)
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
/* plane B only */
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
#define DSPABASE 0x70184
#define DSPASTRIDE 0x70188
#define DSPBBASE 0x71184
#define DSPBADDR DSPBBASE
#define DSPBSTRIDE 0x71188
#define DSPAKEYVAL 0x70194
#define DSPAKEYMASK 0x70198
#define DSPAPOS 0x7018C /* reserved */
#define DSPASIZE 0x70190
#define DSPBPOS 0x7118C
#define DSPBSIZE 0x71190
#define DSPASURF 0x7019C
#define DSPATILEOFF 0x701A4
#define DSPBSURF 0x7119C
#define DSPBTILEOFF 0x711A4
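/*
 * Illustrative sketch, not part of this patch: pointing display plane A at a
 * 32 bpp framebuffer on pipe A with the plane registers above.  The base
 * register is written last because that write latches the new plane state
 * (the resume path above rewrites DSPABASE for the same reason).
 * "i915_plane_a_sketch", fb_offset and stride are hypothetical names;
 * dev_priv and I915_WRITE are assumed to be in scope.
 */
static inline void i915_plane_a_sketch(drm_i915_private_t *dev_priv,
				       u32 fb_offset, u32 stride)
{
	I915_WRITE(DSPACNTR, DISPLAY_PLANE_ENABLE | DISPPLANE_32BPP_NO_ALPHA |
			     DISPPLANE_SEL_PIPE_A);
	I915_WRITE(DSPASTRIDE, stride);
	I915_WRITE(DSPABASE, fb_offset);
}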
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
/*
* Some BIOS scratch area registers. The 845 (and 830?) store the amount
* of video memory available to the BIOS in SWF1.
*/
#define SWF0 0x71410
/*
* 855 scratch registers.
*/
#define SWF10 0x70410
#define SWF30 0x72414
/*
* Overlay registers. These are overlay registers accessed via MMIO.
* Those loaded via the overlay register page are defined in i830_video.c.
*/
#define OVADD 0x30000
#define DOVSTA 0x30008
#define OC_BUF (0x3<<20)
#define OGAMC5 0x30010
#define OGAMC4 0x30014
#define OGAMC3 0x30018
#define OGAMC2 0x3001c
#define OGAMC1 0x30020
#define OGAMC0 0x30024
/*
* Palette registers
*/
#define PALETTE_A 0x0a000
#define PALETTE_B 0x0a800
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2)
#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
(dev)->pci_device == 0x2982 || \
(dev)->pci_device == 0x2992 || \
(dev)->pci_device == 0x29A2 || \
(dev)->pci_device == 0x2A02 || \
(dev)->pci_device == 0x2A12 || \
(dev)->pci_device == 0x2A42)
#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
(dev)->pci_device == 0x29D2)
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif

View File

@ -276,7 +276,7 @@ static int i915_emit_irq(struct drm_device * dev)
i915_kernel_lost_context(dev);
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
@ -291,7 +291,7 @@ static int i915_emit_irq(struct drm_device * dev)
OUT_RING(0);
OUT_RING(GFX_OP_USER_INTERRUPT);
ADVANCE_LP_RING();
return dev_priv->counter;
}
@ -300,7 +300,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = 0;
DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr)
@ -312,8 +312,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
READ_BREADCRUMB(dev_priv) >= irq_nr);
if (ret == -EBUSY) {
DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
__FUNCTION__,
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
@ -329,14 +328,14 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ
int ret = 0;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1<<23)));
*sequence = cur_vblank;
return ret;
@ -365,7 +364,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -388,7 +387,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
drm_i915_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -418,13 +417,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
drm_i915_vblank_pipe_t *pipe = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
DRM_ERROR("%s called with invalid pipe 0x%x\n",
__FUNCTION__, pipe->pipe);
DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
return -EINVAL;
}
@ -443,7 +441,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
u16 flag;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -555,7 +553,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

View File

@ -276,7 +276,7 @@ int i915_mem_alloc(struct drm_device *dev, void *data,
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -314,7 +314,7 @@ int i915_mem_free(struct drm_device *dev, void *data,
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -342,7 +342,7 @@ int i915_mem_init_heap(struct drm_device *dev, void *data,
struct mem_block **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -366,7 +366,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
struct mem_block **heap;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
DRM_ERROR( "called with no initialization\n" );
return -EINVAL;
}
@ -375,7 +375,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
DRM_ERROR("get_heap failed");
return -EFAULT;
}
if (!*heap) {
DRM_ERROR("heap not initialized?");
return -EFAULT;
@ -384,4 +384,3 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
i915_mem_takedown( heap );
return 0;
}

View File

@ -493,7 +493,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
dma_bs->agp_size);
return err;
}
dev_priv->agp_size = agp_size;
dev_priv->agp_handle = agp_req.handle;
@ -550,7 +550,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
{
struct drm_map_list *_entry;
unsigned long agp_token = 0;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == dev->agp_buffer_map)
agp_token = _entry->user_token;
@ -964,7 +964,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
free_req.handle = dev_priv->agp_handle;
drm_agp_free(dev, &free_req);
dev_priv->agp_textures = NULL;
dev_priv->agp_size = 0;
dev_priv->agp_handle = 0;
@ -998,7 +998,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
}
}
return 0;
return err;
}
int mga_dma_init(struct drm_device *dev, void *data,
@ -1050,7 +1050,7 @@ int mga_dma_flush(struct drm_device *dev, void *data,
#if MGA_DMA_DEBUG
int ret = mga_do_wait_for_idle(dev_priv);
if (ret < 0)
DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
DRM_INFO("-EBUSY\n");
return ret;
#else
return mga_do_wait_for_idle(dev_priv);

View File

@ -216,8 +216,8 @@ static inline u32 _MGA_READ(u32 * addr)
#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
#define DWGREG1 0x2c00
#define DWGREG1_END 0x2dff
@ -249,7 +249,7 @@ do { \
} else if ( dev_priv->prim.space < \
dev_priv->prim.high_mark ) { \
if ( MGA_DMA_DEBUG ) \
DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
} \
@ -260,7 +260,7 @@ do { \
if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
if ( MGA_DMA_DEBUG ) \
DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
mga_do_dma_wrap_end( dev_priv ); \
@ -280,8 +280,7 @@ do { \
#define BEGIN_DMA( n ) \
do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( "BEGIN_DMA( %d ) in %s\n", \
(n), __FUNCTION__ ); \
DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
DRM_INFO( " space=0x%x req=0x%Zx\n", \
dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
} \
@ -292,7 +291,7 @@ do { \
#define BEGIN_DMA_WRAP() \
do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \
DRM_INFO( "BEGIN_DMA()\n" ); \
DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
} \
prim = dev_priv->prim.start; \
@ -311,7 +310,7 @@ do { \
#define FLUSH_DMA() \
do { \
if ( 0 ) { \
DRM_INFO( "%s:\n", __FUNCTION__ ); \
DRM_INFO( "\n" ); \
DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
dev_priv->prim.tail, \
MGA_READ( MGA_PRIMADDRESS ) - \
@ -394,22 +393,22 @@ do { \
#define MGA_VINTCLR (1 << 4)
#define MGA_VINTEN (1 << 5)
#define MGA_ALPHACTRL 0x2c7c
#define MGA_AR0 0x1c60
#define MGA_AR1 0x1c64
#define MGA_AR2 0x1c68
#define MGA_AR3 0x1c6c
#define MGA_AR4 0x1c70
#define MGA_AR5 0x1c74
#define MGA_AR6 0x1c78
#define MGA_CXBNDRY 0x1c80
#define MGA_CXLEFT 0x1ca0
#define MGA_CXRIGHT 0x1ca4
#define MGA_DMAPAD 0x1c54
#define MGA_DSTORG 0x2cb8
#define MGA_DWGCTL 0x1c00
# define MGA_OPCOD_MASK (15 << 0)
# define MGA_OPCOD_TRAP (4 << 0)
# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
@ -455,27 +454,27 @@ do { \
# define MGA_CLIPDIS (1 << 31)
#define MGA_DWGSYNC 0x2c4c
#define MGA_FCOL 0x1c24
#define MGA_FIFOSTATUS 0x1e10
#define MGA_FOGCOL 0x1cf4
#define MGA_FXBNDRY 0x1c84
#define MGA_FXLEFT 0x1ca8
#define MGA_FXRIGHT 0x1cac
#define MGA_ICLEAR 0x1e18
# define MGA_SOFTRAPICLR (1 << 0)
# define MGA_VLINEICLR (1 << 5)
#define MGA_IEN 0x1e1c
# define MGA_SOFTRAPIEN (1 << 0)
# define MGA_VLINEIEN (1 << 5)
#define MGA_LEN 0x1c5c
#define MGA_MACCESS 0x1c04
#define MGA_PITCH 0x1c8c
#define MGA_PLNWT 0x1c1c
#define MGA_PRIMADDRESS 0x1e58
# define MGA_DMA_GENERAL (0 << 0)
# define MGA_DMA_BLIT (1 << 0)
# define MGA_DMA_VECTOR (2 << 0)
@ -487,43 +486,43 @@ do { \
# define MGA_PRIMPTREN0 (1 << 0)
# define MGA_PRIMPTREN1 (1 << 1)
#define MGA_RST 0x1e40
# define MGA_SOFTRESET (1 << 0)
# define MGA_SOFTEXTRST (1 << 1)
#define MGA_SECADDRESS 0x2c40
#define MGA_SECEND 0x2c44
#define MGA_SETUPADDRESS 0x2cd0
#define MGA_SETUPEND 0x2cd4
#define MGA_SGN 0x1c58
#define MGA_SOFTRAP 0x2c48
#define MGA_SRCORG 0x2cb4
# define MGA_SRMMAP_MASK (1 << 0)
# define MGA_SRCMAP_FB (0 << 0)
# define MGA_SRCMAP_SYSMEM (1 << 0)
# define MGA_SRCACC_MASK (1 << 1)
# define MGA_SRCACC_PCI (0 << 1)
# define MGA_SRCACC_AGP (1 << 1)
#define MGA_STATUS 0x1e14
# define MGA_SOFTRAPEN (1 << 0)
# define MGA_VSYNCPEN (1 << 4)
# define MGA_VLINEPEN (1 << 5)
# define MGA_DWGENGSTS (1 << 16)
# define MGA_ENDPRDMASTS (1 << 17)
#define MGA_STENCIL 0x2cc8
#define MGA_STENCILCTL 0x2ccc
#define MGA_TDUALSTAGE0 0x2cf8
#define MGA_TDUALSTAGE1 0x2cfc
#define MGA_TEXBORDERCOL 0x2c5c
#define MGA_TEXCTL 0x2c30
#define MGA_TEXCTL2 0x2c3c
# define MGA_DUALTEX (1 << 7)
# define MGA_G400_TC2_MAGIC (1 << 15)
# define MGA_MAP1_ENABLE (1 << 31)
#define MGA_TEXFILTER 0x2c58
#define MGA_TEXHEIGHT 0x2c2c
#define MGA_TEXORG 0x2c24
# define MGA_TEXORGMAP_MASK (1 << 0)
# define MGA_TEXORGMAP_FB (0 << 0)
# define MGA_TEXORGMAP_SYSMEM (1 << 0)
@ -534,45 +533,45 @@ do { \
#define MGA_TEXORG2 0x2ca8
#define MGA_TEXORG3 0x2cac
#define MGA_TEXORG4 0x2cb0
#define MGA_TEXTRANS 0x2c34
#define MGA_TEXTRANSHIGH 0x2c38
#define MGA_TEXWIDTH 0x2c28
#define MGA_WACCEPTSEQ 0x1dd4
#define MGA_WCODEADDR 0x1e6c
#define MGA_WFLAG 0x1dc4
#define MGA_WFLAG1 0x1de0
#define MGA_WFLAGNB 0x1e64
#define MGA_WFLAGNB1 0x1e08
#define MGA_WGETMSB 0x1dc8
#define MGA_WIADDR 0x1dc0
#define MGA_WIADDR2 0x1dd8
# define MGA_WMODE_SUSPEND (0 << 0)
# define MGA_WMODE_RESUME (1 << 0)
# define MGA_WMODE_JUMP (2 << 0)
# define MGA_WMODE_START (3 << 0)
# define MGA_WAGP_ENABLE (1 << 2)
#define MGA_WMISC 0x1e70
# define MGA_WUCODECACHE_ENABLE (1 << 0)
# define MGA_WMASTER_ENABLE (1 << 1)
# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
#define MGA_WVRTXSZ 0x1dcc
#define MGA_YBOT 0x1c9c
#define MGA_YDST 0x1c90
#define MGA_YDSTLEN 0x1c88
#define MGA_YDSTORG 0x1c94
#define MGA_YTOP 0x1c98
#define MGA_ZORG 0x1c0c
/* This finishes the current batch of commands
*/
#define MGA_EXEC 0x0100
/* AGP PLL encoding (for G200 only).
*/
#define MGA_AGP_PLL 0x1e4c
# define MGA_AGP2XPLL_DISABLE (0 << 0)
# define MGA_AGP2XPLL_ENABLE (1 << 0)

View File

@ -150,8 +150,8 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
DMA_LOCALS;
/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
BEGIN_DMA(6);
@ -190,8 +190,8 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
DMA_LOCALS;
/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
BEGIN_DMA(5);
@ -256,7 +256,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
unsigned int pipe = sarea_priv->warp_pipe;
DMA_LOCALS;
/* printk("mga_g400_emit_pipe %x\n", pipe); */
/* printk("mga_g400_emit_pipe %x\n", pipe); */
BEGIN_DMA(10);
@ -619,7 +619,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev)
FLUSH_DMA();
DRM_DEBUG("%s... done.\n", __FUNCTION__);
DRM_DEBUG("... done.\n");
}
static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
@ -631,7 +631,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu
u32 length = (u32) buf->used;
int i = 0;
DMA_LOCALS;
DRM_DEBUG("vertex: buf=%d used=%d\n", buf->idx, buf->used);
DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
if (buf->used) {
buf_priv->dispatched = 1;
@ -678,7 +678,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b
u32 address = (u32) buf->bus_address;
int i = 0;
DMA_LOCALS;
DRM_DEBUG("indices: buf=%d start=%d end=%d\n", buf->idx, start, end);
DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
if (start != end) {
buf_priv->dispatched = 1;
@ -955,7 +955,7 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi
#if 0
if (mga_do_wait_for_idle(dev_priv) < 0) {
if (MGA_DMA_DEBUG)
DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
DRM_INFO("-EBUSY\n");
return -EBUSY;
}
#endif
@ -1014,7 +1014,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -1046,7 +1046,7 @@ static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *fi
DMA_LOCALS;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -1075,7 +1075,7 @@ file_priv)
u32 *fence = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}

View File

@ -1,4 +1,4 @@
/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*/
/*
@ -651,7 +651,7 @@ int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
DRM_DEBUG("%s while CCE running\n", __FUNCTION__);
DRM_DEBUG("while CCE running\n");
return 0;
}
@ -710,7 +710,7 @@ int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_DEBUG("%s called before init done\n", __FUNCTION__);
DRM_DEBUG("called before init done\n");
return -EINVAL;
}

View File

@ -462,8 +462,7 @@ do { \
#define BEGIN_RING( n ) do { \
if ( R128_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
(n), __FUNCTION__ ); \
DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
@ -493,7 +492,7 @@ do { \
write * sizeof(u32) ); \
} \
if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & tail_mask), \
write, __LINE__); \

View File

@ -42,7 +42,7 @@ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
{
u32 aux_sc_cntl = 0x00000000;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
@ -85,7 +85,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(2);
@ -100,7 +100,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(13);
@ -126,7 +126,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(3);
@ -142,7 +142,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(5);
@ -161,7 +161,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(2);
@ -178,7 +178,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
int i;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
@ -204,7 +204,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
int i;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
@ -226,7 +226,7 @@ static void r128_emit_state(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);
DRM_DEBUG("dirty=0x%08x\n", dirty);
if (dirty & R128_UPLOAD_CORE) {
r128_emit_core(dev_priv);
@ -362,7 +362,7 @@ static void r128_cce_dispatch_clear(struct drm_device * dev,
unsigned int flags = clear->flags;
int i;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
if (dev_priv->page_flipping && dev_priv->current_page == 1) {
unsigned int tmp = flags;
@ -466,7 +466,7 @@ static void r128_cce_dispatch_swap(struct drm_device * dev)
struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
#if R128_PERFORMANCE_BOXES
/* Do some trivial performance monitoring...
@ -528,8 +528,7 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
__FUNCTION__,
DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
#if R128_PERFORMANCE_BOXES
@ -1156,7 +1155,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
int count, *x, *y;
int i, xbuf_size, ybuf_size;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
count = depth->n;
if (count > 4096 || count <= 0)
@ -1226,7 +1225,7 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(33);
@ -1309,7 +1308,7 @@ static int r128_do_cleanup_pageflip(struct drm_device * dev)
static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@ -1328,7 +1327,7 @@ static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *fi
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@ -1356,7 +1355,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -1412,7 +1411,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -1557,11 +1556,11 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
indirect->idx, indirect->start, indirect->end,
indirect->discard);
@ -1622,7 +1621,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}

View File

@ -77,23 +77,31 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
return -EFAULT;
}
box.x1 =
(box.x1 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y1 =
(box.y1 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.x2 =
(box.x2 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y2 =
(box.y2 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
box.x1 = (box.x1) &
R300_CLIPRECT_MASK;
box.y1 = (box.y1) &
R300_CLIPRECT_MASK;
box.x2 = (box.x2) &
R300_CLIPRECT_MASK;
box.y2 = (box.y2) &
R300_CLIPRECT_MASK;
} else {
box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
}
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
(box.y2 << R300_CLIPRECT_Y_SHIFT));
}
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
@ -133,9 +141,11 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
static u8 r300_reg_flags[0x10000 >> 2];
void r300_init_reg_flags(void)
void r300_init_reg_flags(struct drm_device *dev)
{
int i;
drm_radeon_private_t *dev_priv = dev->dev_private;
memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
@ -230,6 +240,9 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
ADD_RANGE(0x4074, 16);
}
}
static __inline__ int r300_check_range(unsigned reg, int count)
@ -486,7 +499,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
if (cmd[0] & 0x8000) {
u32 offset;
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
ret = !radeon_check_offset(dev_priv, offset);
@ -504,7 +517,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
return -EINVAL;
}
}
}
@ -723,54 +736,54 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
u32 *ref_age_base;
u32 i, buf_idx, h_pending;
RING_LOCALS;
if (cmdbuf->bufsz <
if (cmdbuf->bufsz <
(sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
return -EINVAL;
}
if (header.scratch.reg >= 5) {
return -EINVAL;
}
dev_priv->scratch_ages[header.scratch.reg]++;
ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
cmdbuf->buf += sizeof(u64);
cmdbuf->bufsz -= sizeof(u64);
for (i=0; i < header.scratch.n_bufs; i++) {
buf_idx = *(u32 *)cmdbuf->buf;
buf_idx *= 2; /* 8 bytes per buf */
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
return -EINVAL;
}
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
return -EINVAL;
}
if (h_pending == 0) {
return -EINVAL;
}
h_pending--;
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
return -EINVAL;
}
cmdbuf->buf += sizeof(buf_idx);
cmdbuf->bufsz -= sizeof(buf_idx);
}
BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
ADVANCE_RING();
return 0;
}
@ -919,7 +932,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
break;
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,

View File

@ -853,13 +853,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_TX_FORMAT_W8Z8Y8X8 0xC
# define R300_TX_FORMAT_W2Z10Y10X10 0xD
# define R300_TX_FORMAT_W16Z16Y16X16 0xE
# define R300_TX_FORMAT_DXT1 0xF
# define R300_TX_FORMAT_DXT3 0x10
# define R300_TX_FORMAT_DXT5 0x11
# define R300_TX_FORMAT_DXT1 0xF
# define R300_TX_FORMAT_DXT3 0x10
# define R300_TX_FORMAT_DXT5 0x11
# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
/* 0x16 - some 16 bit green format.. ?? */
# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
@ -867,19 +867,19 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* gap */
/* Floating point formats */
/* Note - hardware supports both 16 and 32 bit floating point */
# define R300_TX_FORMAT_FL_I16 0x18
# define R300_TX_FORMAT_FL_I16A16 0x19
# define R300_TX_FORMAT_FL_I16 0x18
# define R300_TX_FORMAT_FL_I16A16 0x19
# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
# define R300_TX_FORMAT_FL_I32 0x1B
# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_I32 0x1B
# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
/* alpha modes, convenience mostly */
/* if you have alpha, pick constant appropriate to the
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
# define R300_TX_FORMAT_ALPHA_1CH 0x000
# define R300_TX_FORMAT_ALPHA_2CH 0x200
# define R300_TX_FORMAT_ALPHA_4CH 0x600
# define R300_TX_FORMAT_ALPHA_NONE 0xA00
# define R300_TX_FORMAT_ALPHA_1CH 0x000
# define R300_TX_FORMAT_ALPHA_2CH 0x200
# define R300_TX_FORMAT_ALPHA_4CH 0x600
# define R300_TX_FORMAT_ALPHA_NONE 0xA00
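The comment above spells out how the alpha-mode constants pair with the channel count of a texel format. A minimal illustration, not taken from this diff, of what that pairing might look like for an 8-bit-per-channel RGBA texture (the variable name is purely illustrative):
/* Hedged example of combining the constants above: a four-component,
 * 8-bit-per-channel texture pairs the W8Z8Y8X8 layout with the
 * four-channel alpha mode. */
	txformat = R300_TX_FORMAT_W8Z8Y8X8 | R300_TX_FORMAT_ALPHA_4CH;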
/* Swizzling */
/* constants */
# define R300_TX_FORMAT_X 0
@ -1360,11 +1360,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_RB3D_Z_DISABLED_2 0x00000014
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_STENCIL_ENABLE 0x00000001
#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04

View File

@ -816,6 +816,46 @@ static const u32 R300_cp_microcode[][2] = {
{0000000000, 0000000000},
};
static u32 RADEON_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
ret = RADEON_READ(R520_MC_IND_DATA);
RADEON_WRITE(R520_MC_IND_INDEX, 0);
return ret;
}
u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
else
return RADEON_READ(RADEON_MC_FB_LOCATION);
}
static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
else
RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
}
static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
else
RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
}
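On RV515 and newer parts the memory-controller registers are reached indirectly through R520_MC_IND_INDEX/R520_MC_IND_DATA, which is why the FB and AGP location accessors above branch on the chip family. Below is a minimal sketch of how a caller might use these helpers to move the FB aperture; the helper name is hypothetical, and the "top 16 bits = end, low 16 bits = start" split of MC_FB_LOCATION is assumed to match the pre-R5xx layout relied on later in this file.
/* Hedged sketch: relocate the FB aperture with the new accessors.
 * Assumes MC_FB_LOCATION keeps the layout that radeon_do_init_cp
 * decodes below (start in the low half, end in the high half). */
static void radeon_example_move_fb(drm_radeon_private_t *dev_priv, u32 new_base)
{
	u32 fb_loc = radeon_read_fb_location(dev_priv);
	u32 fb_top = fb_loc & 0xffff0000;	/* preserve the existing end-of-FB field */

	radeon_write_fb_location(dev_priv, fb_top | (new_base >> 16));
}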
static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@ -824,7 +864,7 @@ static int RADEON_READ_PLL(struct drm_device * dev, int addr)
return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
}
static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
{
RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
return RADEON_READ(RADEON_PCIE_DATA);
@ -1074,41 +1114,43 @@ static int radeon_do_engine_reset(struct drm_device * dev)
radeon_do_pixcache_flush(dev_priv);
clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
RADEON_FORCEON_MCLKA |
RADEON_FORCEON_MCLKB |
RADEON_FORCEON_YCLKA |
RADEON_FORCEON_YCLKB |
RADEON_FORCEON_MC |
RADEON_FORCEON_AIC));
RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
RADEON_FORCEON_MCLKA |
RADEON_FORCEON_MCLKB |
RADEON_FORCEON_YCLKA |
RADEON_FORCEON_YCLKB |
RADEON_FORCEON_MC |
RADEON_FORCEON_AIC));
rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
RADEON_SOFT_RESET_CP |
RADEON_SOFT_RESET_HI |
RADEON_SOFT_RESET_SE |
RADEON_SOFT_RESET_RE |
RADEON_SOFT_RESET_PP |
RADEON_SOFT_RESET_E2 |
RADEON_SOFT_RESET_RB));
RADEON_READ(RADEON_RBBM_SOFT_RESET);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
~(RADEON_SOFT_RESET_CP |
RADEON_SOFT_RESET_HI |
RADEON_SOFT_RESET_SE |
RADEON_SOFT_RESET_RE |
RADEON_SOFT_RESET_PP |
RADEON_SOFT_RESET_E2 |
RADEON_SOFT_RESET_RB)));
RADEON_READ(RADEON_RBBM_SOFT_RESET);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
RADEON_SOFT_RESET_CP |
RADEON_SOFT_RESET_HI |
RADEON_SOFT_RESET_SE |
RADEON_SOFT_RESET_RE |
RADEON_SOFT_RESET_PP |
RADEON_SOFT_RESET_E2 |
RADEON_SOFT_RESET_RB));
RADEON_READ(RADEON_RBBM_SOFT_RESET);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
~(RADEON_SOFT_RESET_CP |
RADEON_SOFT_RESET_HI |
RADEON_SOFT_RESET_SE |
RADEON_SOFT_RESET_RE |
RADEON_SOFT_RESET_PP |
RADEON_SOFT_RESET_E2 |
RADEON_SOFT_RESET_RB)));
RADEON_READ(RADEON_RBBM_SOFT_RESET);
RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
}
/* Reset the CP ring */
radeon_do_cp_reset(dev_priv);
@ -1127,21 +1169,21 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
{
u32 ring_start, cur_read_ptr;
u32 tmp;
/* Initialize the memory controller. With new memory map, the fb location
* is not changed, it should have been properly initialized already. Part
* of the problem is that the code below is bogus, assuming the GART is
* always appended to the fb which is not necessarily the case
*/
if (!dev_priv->new_memmap)
RADEON_WRITE(RADEON_MC_FB_LOCATION,
radeon_write_fb_location(dev_priv,
((dev_priv->gart_vm_start - 1) & 0xffff0000)
| (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
@ -1190,9 +1232,15 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(RADEON_CP_RB_CNTL,
dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
RADEON_BUF_SWAP_32BIT |
(dev_priv->ring.fetch_size_l2ow << 18) |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
RADEON_WRITE(RADEON_CP_RB_CNTL,
(dev_priv->ring.fetch_size_l2ow << 18) |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
/* Start with assuming that writeback doesn't work */
@ -1299,7 +1347,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
dev_priv->gart_size = 32*1024*1024;
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
@ -1333,7 +1381,7 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
dev_priv->gart_vm_start +
dev_priv->gart_size - 1);
RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
RADEON_PCIE_TX_GART_EN);
@ -1358,7 +1406,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
return;
}
tmp = RADEON_READ(RADEON_AIC_CNTL);
tmp = RADEON_READ(RADEON_AIC_CNTL);
if (on) {
RADEON_WRITE(RADEON_AIC_CNTL,
@ -1376,7 +1424,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
/* Turn off AGP aperture -- is this required for PCI GART?
*/
RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
radeon_write_agp_location(dev_priv, 0xffffffc0);
RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
} else {
RADEON_WRITE(RADEON_AIC_CNTL,
@ -1581,10 +1629,9 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev->agp_buffer_map->handle);
}
dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION)
& 0xffff) << 16;
dev_priv->fb_size =
((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000)
dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
dev_priv->fb_size =
((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
- dev_priv->fb_location;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
@ -1630,7 +1677,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
((base + dev_priv->gart_size) & 0xfffffffful) < base)
base = dev_priv->fb_location
- dev_priv->gart_size;
}
}
dev_priv->gart_vm_start = base & 0xffc00000u;
if (dev_priv->gart_vm_start != base)
DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
@ -1663,6 +1710,11 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
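The three log2 fields computed here feed the RADEON_CP_RB_CNTL write earlier in this file, where they land at bits 0, 8 and 18 respectively. A worked example of the arithmetic, assuming drm_order() returns the smallest n with (1 << n) greater than or equal to its argument:
/* For a 1 MiB ring with the defaults chosen above:
 *   size_l2qw        = drm_order(1048576 / 8) = 17  -- ring size in quad-words (8 bytes)
 *   rptr_update_l2qw = drm_order(4096 / 8)    = 9   -- read-pointer writeback every 512 QW
 *   fetch_size_l2ow  = drm_order(32 / 16)     = 1   -- fetch 32 bytes (2 oct-words) at a time
 * so the register value becomes:
 *   (1 << 18) | (9 << 8) | 17 = 0x40911
 */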
@ -1830,7 +1882,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (init->func == RADEON_INIT_R300_CP)
r300_init_reg_flags();
r300_init_reg_flags(dev);
switch (init->func) {
case RADEON_INIT_CP:
@ -1852,12 +1904,12 @@ int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cp_running) {
DRM_DEBUG("%s while CP running\n", __FUNCTION__);
DRM_DEBUG("while CP running\n");
return 0;
}
if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
DRM_DEBUG("%s called with bogus CP mode (%d)\n",
__FUNCTION__, dev_priv->cp_mode);
DRM_DEBUG("called with bogus CP mode (%d)\n",
dev_priv->cp_mode);
return 0;
}
@ -1962,7 +2014,7 @@ int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_DEBUG("%s called before init done\n", __FUNCTION__);
DRM_DEBUG("called before init done\n");
return -EINVAL;
}
@ -2239,6 +2291,10 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
case CHIP_R350:
case CHIP_R420:
case CHIP_RV410:
case CHIP_RV515:
case CHIP_R520:
case CHIP_RV570:
case CHIP_R580:
dev_priv->flags |= RADEON_HAS_HIERZ;
break;
default:

View File

@ -223,10 +223,10 @@ typedef union {
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
# define R300_WAIT_2D 0x1
# define R300_WAIT_3D 0x2
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
# define R300_WAIT_2D 0x1
# define R300_WAIT_3D 0x2
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
#define R300_CMD_SCRATCH 8
typedef union {
@ -656,6 +656,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
typedef struct drm_radeon_getparam {
int param;
@ -722,7 +723,7 @@ typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
#endif
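RADEON_PARAM_FB_LOCATION, added above, lets user space ask the kernel where the framebuffer aperture currently sits instead of guessing from the BAR. A sketch of the query, assuming the usual getparam convention (a "value" pointer in drm_radeon_getparam_t and the DRM_IOCTL_RADEON_GETPARAM request) rather than anything shown in this hunk:
/* Hedged user-space sketch; the ioctl request macro and the struct's
 * "value" member follow the existing radeon getparam convention and
 * are assumed here, not taken from this diff. */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int radeon_query_fb_location(int fd, uint32_t *fb_loc)
{
	drm_radeon_getparam_t gp;
	int value = 0;

	gp.param = RADEON_PARAM_FB_LOCATION;
	gp.value = &value;		/* the kernel copies the register value here */

	if (ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp) != 0)
		return -errno;

	*fb_loc = (uint32_t)value;
	return 0;
}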

View File

@ -123,6 +123,12 @@ enum radeon_family {
CHIP_R420,
CHIP_RV410,
CHIP_RS400,
CHIP_RV515,
CHIP_R520,
CHIP_RV530,
CHIP_RV560,
CHIP_RV570,
CHIP_R580,
CHIP_LAST,
};
@ -166,6 +172,12 @@ typedef struct drm_radeon_ring_buffer {
int size;
int size_l2qw;
int rptr_update; /* Double Words */
int rptr_update_l2qw; /* log2 Quad Words */
int fetch_size; /* Double Words */
int fetch_size_l2ow; /* log2 Oct Words */
u32 tail;
u32 tail_mask;
int space;
@ -336,6 +348,7 @@ extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file
extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
@ -382,7 +395,7 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(void);
extern void r300_init_reg_flags(struct drm_device *dev);
extern int r300_do_cp_cmdbuf(struct drm_device * dev,
struct drm_file *file_priv,
@ -429,7 +442,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_PCIE_INDEX 0x0030
#define RADEON_PCIE_DATA 0x0034
#define RADEON_PCIE_TX_GART_CNTL 0x10
# define RADEON_PCIE_TX_GART_EN (1 << 0)
# define RADEON_PCIE_TX_GART_EN (1 << 0)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1)
@ -439,7 +452,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8)
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
#define RADEON_PCIE_TX_GART_BASE 0x13
#define RADEON_PCIE_TX_GART_BASE 0x13
#define RADEON_PCIE_TX_GART_START_LO 0x14
#define RADEON_PCIE_TX_GART_START_HI 0x15
#define RADEON_PCIE_TX_GART_END_LO 0x16
@ -454,6 +467,16 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_IGPGART_ENABLE 0x38
#define RADEON_IGPGART_UNK_39 0x39
#define R520_MC_IND_INDEX 0x70
#define R520_MC_IND_WR_EN (1<<24)
#define R520_MC_IND_DATA 0x74
#define RV515_MC_FB_LOCATION 0x01
#define RV515_MC_AGP_LOCATION 0x02
#define R520_MC_FB_LOCATION 0x04
#define R520_MC_AGP_LOCATION 0x05
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
@ -512,12 +535,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_GEN_INT_STATUS 0x0044
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
# define RADEON_SW_INT_FIRE (1 << 26)
#define RADEON_HOST_PATH_CNTL 0x0130
@ -615,9 +638,51 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_SOFT_RESET_E2 (1 << 5)
# define RADEON_SOFT_RESET_RB (1 << 6)
# define RADEON_SOFT_RESET_HDP (1 << 7)
/*
* 6:0 Available slots in the FIFO
* 8 Host Interface active
* 9 CP request active
* 10 FIFO request active
* 11 Host Interface retry active
* 12 CP retry active
* 13 FIFO retry active
* 14 FIFO pipeline busy
* 15 Event engine busy
* 16 CP command stream busy
* 17 2D engine busy
* 18 2D portion of render backend busy
* 20 3D setup engine busy
* 26 GA engine busy
* 27 CBA 2D engine busy
* 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or
* command stream queue not empty or Ring Buffer not empty
*/
#define RADEON_RBBM_STATUS 0x0e40
/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */
/* #define RADEON_RBBM_STATUS 0x1740 */
/* bits 6:0 are dword slots available in the cmd fifo */
# define RADEON_RBBM_FIFOCNT_MASK 0x007f
# define RADEON_RBBM_ACTIVE (1 << 31)
# define RADEON_HIRQ_ON_RBB (1 << 8)
# define RADEON_CPRQ_ON_RBB (1 << 9)
# define RADEON_CFRQ_ON_RBB (1 << 10)
# define RADEON_HIRQ_IN_RTBUF (1 << 11)
# define RADEON_CPRQ_IN_RTBUF (1 << 12)
# define RADEON_CFRQ_IN_RTBUF (1 << 13)
# define RADEON_PIPE_BUSY (1 << 14)
# define RADEON_ENG_EV_BUSY (1 << 15)
# define RADEON_CP_CMDSTRM_BUSY (1 << 16)
# define RADEON_E2_BUSY (1 << 17)
# define RADEON_RB2D_BUSY (1 << 18)
# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */
# define RADEON_VAP_BUSY (1 << 20)
# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */
# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */
# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */
# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */
# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */
# define RADEON_GA_BUSY (1 << 26)
# define RADEON_CBA2D_BUSY (1 << 27)
# define RADEON_RBBM_ACTIVE (1 << 31)
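The block comment above documents the RBBM_STATUS bits; the usual way they are consumed is a bounded poll for engine idle. A minimal sketch, assuming a plain busy-loop is acceptable (the real driver pairs this with its own delay and timeout helpers):
/* Hedged idle-poll sketch using the bits documented above; the
 * iteration count and the 64-slot FIFO threshold are illustrative. */
static int radeon_example_wait_for_idle(drm_radeon_private_t *dev_priv)
{
	int i;

	for (i = 0; i < 100000; i++) {
		u32 status = RADEON_READ(RADEON_RBBM_STATUS);

		/* idle once the engine-busy bit clears and the command
		 * FIFO reports plenty of free slots */
		if (!(status & RADEON_RBBM_ACTIVE) &&
		    (status & RADEON_RBBM_FIFOCNT_MASK) >= 64)
			return 0;
	}
	return -EBUSY;
}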
#define RADEON_RE_LINE_PATTERN 0x1cd0
#define RADEON_RE_MISC 0x26c4
#define RADEON_RE_TOP_LEFT 0x26c0
@ -1004,6 +1069,13 @@ do { \
RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \
} while (0)
#define RADEON_WRITE_MCIND( addr, val ) \
do { \
RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
RADEON_WRITE(R520_MC_IND_DATA, (val)); \
RADEON_WRITE(R520_MC_IND_INDEX, 0); \
} while (0)
#define CP_PACKET0( reg, n ) \
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET0_TABLE( reg, n ) \
@ -1114,8 +1186,7 @@ do { \
#define BEGIN_RING( n ) do { \
if ( RADEON_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
n, __FUNCTION__ ); \
DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
@ -1133,7 +1204,7 @@ do { \
write, dev_priv->ring.tail ); \
} \
if (((dev_priv->ring.tail + _nr) & mask) != write) { \
DRM_ERROR( \
DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & mask), \
write, __LINE__); \

View File

@ -154,7 +154,7 @@ static int radeon_driver_vblank_do_wait(struct drm_device * dev,
int ack = 0;
atomic_t *counter;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -205,7 +205,7 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -227,7 +227,7 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
drm_radeon_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}

View File

@ -224,7 +224,7 @@ int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_p
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -259,7 +259,7 @@ int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_pr
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -285,7 +285,7 @@ int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *fi
struct mem_block **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}

View File

@ -898,7 +898,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
int w = pbox[i].x2 - x;
int h = pbox[i].y2 - y;
DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
x, y, w, h, flags);
if (flags & RADEON_FRONT) {
@ -1368,7 +1368,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
int w = pbox[i].x2 - x;
int h = pbox[i].y2 - y;
DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h);
DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
BEGIN_RING(9);
@ -1422,8 +1422,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
DRM_DEBUG("%s: pfCurrentPage=%d\n",
__FUNCTION__,
DRM_DEBUG("pfCurrentPage=%d\n",
dev_priv->sarea_priv->pfCurrentPage);
/* Do some trivial performance monitoring...
@ -1562,7 +1561,7 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev,
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
if (start != end) {
int offset = (dev_priv->gart_buffers_offset
@ -1758,7 +1757,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
buf = radeon_freelist_get(dev);
}
if (!buf) {
DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
DRM_DEBUG("EAGAIN\n");
if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
return -EFAULT;
return -EAGAIN;
@ -2413,7 +2412,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
indirect->idx, indirect->start, indirect->end,
indirect->discard);
@ -2779,7 +2778,7 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
DRM_DEBUG("%x\n", flags);
switch (flags) {
case RADEON_WAIT_2D:
BEGIN_RING(2);
@ -3035,6 +3034,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
case RADEON_PARAM_VBLANK_CRTC:
value = radeon_vblank_crtc_get(dev);
break;
case RADEON_PARAM_FB_LOCATION:
value = radeon_read_fb_location(dev_priv);
break;
default:
DRM_DEBUG("Invalid parameter %d\n", param->param);
return -EINVAL;

View File

@ -512,7 +512,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
DMA_DRAW_PRIMITIVE(count, prim, skip);
if (vb_stride == vtx_size) {
DMA_COPY(&vtxbuf[vb_stride * start],
DMA_COPY(&vtxbuf[vb_stride * start],
vtx_size * count);
} else {
for (i = start; i < start + count; ++i) {
@ -742,7 +742,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
while (n != 0) {
/* Can emit up to 255 vertices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
/* Check indices */
for (i = 0; i < count; ++i) {
if (idx[i] > vb_size / (vb_stride * 4)) {
@ -933,7 +933,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
/* j was checked in savage_bci_cmdbuf */
ret = savage_dispatch_vb_idx(dev_priv,
&cmd_header, (const uint16_t *)cmdbuf,
(const uint32_t *)vtxbuf, vb_size,
(const uint32_t *)vtxbuf, vb_size,
vb_stride);
cmdbuf += j;
break;

View File

@ -115,7 +115,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
}
@ -205,7 +205,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@ -249,7 +249,7 @@ int sis_idle(struct drm_device *dev)
return 0;
}
}
/*
* Implement a device switch here if needed
*/

View File

@ -179,14 +179,12 @@ static int via_initialize(struct drm_device * dev,
}
if (dev_priv->ring.virtual_start != NULL) {
DRM_ERROR("%s called again without calling cleanup\n",
__FUNCTION__);
DRM_ERROR("called again without calling cleanup\n");
return -EFAULT;
}
if (!dev->agp || !dev->agp->base) {
DRM_ERROR("%s called with no agp memory available\n",
__FUNCTION__);
DRM_ERROR("called with no agp memory available\n");
return -EFAULT;
}
@ -267,8 +265,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("%s called without initializing AGP ring buffer.\n",
__FUNCTION__);
DRM_ERROR("called without initializing AGP ring buffer.\n");
return -EFAULT;
}
@ -337,8 +334,7 @@ static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *fi
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
cmdbuf->size);
DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_cmdbuffer(dev, cmdbuf);
if (ret) {
@ -379,8 +375,7 @@ static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
cmdbuf->size);
DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
if (ret) {
@ -648,14 +643,13 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
uint32_t tmp_size, count;
drm_via_private_t *dev_priv;
DRM_DEBUG("via cmdbuf_size\n");
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("%s called without initializing AGP ring buffer.\n",
__FUNCTION__);
DRM_ERROR("called without initializing AGP ring buffer.\n");
return -EFAULT;
}

View File

@ -1,5 +1,5 @@
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
*
*
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -16,22 +16,22 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Authors:
* Thomas Hellstrom.
* Partially based on code obtained from Digeo Inc.
*/
/*
* Unmaps the DMA mappings.
* FIXME: Is this a NoOp on x86? Also
* FIXME: What happens if this one is called and a pending blit has previously done
* the same DMA mappings?
* Unmaps the DMA mappings.
* FIXME: Is this a NoOp on x86? Also
* FIXME: What happens if this one is called and a pending blit has previously done
* the same DMA mappings?
*/
#include "drmP.h"
@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
dma_addr_t next = vsg->chain_start;
@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
}
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
static void
via_map_blit_for_device(struct pci_dev *pdev,
const drm_via_dmablit_t *xfer,
drm_via_sg_info_t *vsg,
drm_via_sg_info_t *vsg,
int mode)
{
unsigned cur_descriptor_page = 0;
@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
drm_via_descriptor_t *desc_ptr = NULL;
if (mode == 1)
if (mode == 1)
desc_ptr = vsg->desc_pages[cur_descriptor_page];
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
@ -118,24 +118,24 @@ via_map_blit_for_device(struct pci_dev *pdev,
line_len = xfer->line_length;
cur_fb = fb_addr;
cur_mem = mem_addr;
while (line_len > 0) {
remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
line_len -= remaining_len;
if (mode == 1) {
desc_ptr->mem_addr =
dma_map_page(&pdev->dev,
vsg->pages[VIA_PFN(cur_mem) -
desc_ptr->mem_addr =
dma_map_page(&pdev->dev,
vsg->pages[VIA_PFN(cur_mem) -
VIA_PFN(first_addr)],
VIA_PGOFF(cur_mem), remaining_len,
VIA_PGOFF(cur_mem), remaining_len,
vsg->direction);
desc_ptr->dev_addr = cur_fb;
desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
DMA_TO_DEVICE);
desc_ptr++;
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
@ -143,12 +143,12 @@ via_map_blit_for_device(struct pci_dev *pdev,
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
}
}
num_desc++;
cur_mem += remaining_len;
cur_fb += remaining_len;
}
mem_addr += xfer->mem_stride;
fb_addr += xfer->fb_stride;
}
@ -161,14 +161,14 @@ via_map_blit_for_device(struct pci_dev *pdev,
}
/*
* Function that frees up all resources for a blit. It is usable even if the
* Function that frees up all resources for a blit. It is usable even if the
* blit info has only been partially built as long as the status enum is consistent
* with the actual status of the used resources.
*/
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
struct page *page;
int i;
@ -185,7 +185,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
case dr_via_pages_locked:
for (i=0; i<vsg->num_pages; ++i) {
if ( NULL != (page = vsg->pages[i])) {
if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
page_cache_release(page);
}
@ -200,7 +200,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
vsg->bounce_buffer = NULL;
}
vsg->free_on_sequence = 0;
}
}
/*
* Fire a blit engine.
@ -213,7 +213,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_DMA_CSR_DE);
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
@ -233,9 +233,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int ret;
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
first_pfn + 1;
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
if (ret < 0)
if (ret < 0)
return ret;
vsg->state = dr_via_pages_locked;
return -EINVAL;
@ -264,21 +264,21 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
* quite large for some blits, and pages don't need to be contiguous.
*/
static int
static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
int i;
vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
return -ENOMEM;
vsg->state = dr_via_desc_pages_alloc;
for (i=0; i<vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] =
if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
}
@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
vsg->num_desc);
return 0;
}
static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
@ -300,7 +300,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
@ -311,7 +311,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
* the workqueue task takes care of processing associated with the old blit.
*/
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
@ -331,19 +331,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
done_transfer = blitq->is_active &&
done_transfer = blitq->is_active &&
(( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
DRM_WAKEUP(blitq->blit_queue + cur);
DRM_WAKEUP(blitq->blit_queue + cur);
cur++;
if (cur >= VIA_NUM_BLIT_SLOTS)
if (cur >= VIA_NUM_BLIT_SLOTS)
cur = 0;
blitq->cur = cur;
@ -355,7 +355,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->is_active = 0;
blitq->aborting = 0;
schedule_work(&blitq->wq);
schedule_work(&blitq->wq);
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
@ -367,7 +367,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->aborting = 1;
blitq->end = jiffies + DRM_HZ;
}
if (!blitq->is_active) {
if (blitq->num_outstanding) {
via_fire_dmablit(dev, blitq->blits[cur], engine);
@ -383,14 +383,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
}
via_dmablit_engine_off(dev, engine);
}
}
}
if (from_irq) {
spin_unlock(&blitq->blit_lock);
} else {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
}
}
@ -426,13 +426,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
return active;
}
/*
* Sync. Wait for at least three seconds for the blit to be performed.
*/
static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@ -441,12 +441,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
handle, engine, ret);
return ret;
}
@ -468,12 +468,12 @@ via_dmablit_timer(unsigned long data)
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
(unsigned long) jiffies);
via_dmablit_handler(dev, engine, 0);
if (!timer_pending(&blitq->poll_timer)) {
mod_timer(&blitq->poll_timer, jiffies + 1);
@ -497,7 +497,7 @@ via_dmablit_timer(unsigned long data)
*/
static void
static void
via_dmablit_workqueue(struct work_struct *work)
{
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
@ -505,38 +505,38 @@ via_dmablit_workqueue(struct work_struct *work)
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;
DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
spin_lock_irqsave(&blitq->blit_lock, irqsave);
while(blitq->serviced != blitq->cur) {
cur_released = blitq->serviced++;
DRM_DEBUG("Releasing blit slot %d\n", cur_released);
if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
blitq->serviced = 0;
cur_sg = blitq->blits[cur_released];
blitq->num_free++;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
DRM_WAKEUP(&blitq->busy_queue);
via_free_sg_info(dev->pdev, cur_sg);
kfree(cur_sg);
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
/*
* Init all blit engines. Currently we use two, but some hardware has 4.
@ -550,8 +550,8 @@ via_init_dmablit(struct drm_device *dev)
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq;
pci_set_master(dev->pdev);
pci_set_master(dev->pdev);
for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
@ -572,20 +572,20 @@ via_init_dmablit(struct drm_device *dev)
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
setup_timer(&blitq->poll_timer, via_dmablit_timer,
(unsigned long)blitq);
}
}
}
/*
* Build all info and do all mappings required for a blit.
*/
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int draw = xfer->to_fb;
int ret = 0;
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
vsg->bounce_buffer = NULL;
@ -599,7 +599,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
/*
* Below check is a driver limitation, not a hardware one. We
* don't want to lock unused pages, and don't want to incorporate the
* extra logic of avoiding them. Make sure there are no.
* extra logic of avoiding them. Make sure there are no.
* (Not a big limitation anyway.)
*/
@ -625,11 +625,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
return -EINVAL;
}
}
/*
/*
* we allow a negative fb stride to allow flipping of images in
* transfer.
* transfer.
*/
if (xfer->mem_stride < xfer->line_length ||
@ -653,11 +653,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
#else
if ((((unsigned long)xfer->mem_addr & 15) ||
((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) &&
((xfer->num_lines > 1) &&
((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
}
}
#endif
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
@ -673,17 +673,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
return 0;
}
/*
* Reserve one free slot in the blit queue. Will wait for one second for one
* to become available. Otherwise -EBUSY is returned.
*/
static int
static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
int ret=0;
@ -698,10 +698,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
if (ret) {
return (-EINTR == ret) ? -EAGAIN : ret;
}
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
blitq->num_free--;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
@ -712,7 +712,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
* Hand back a free slot if we changed our mind.
*/
static void
static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
unsigned long irqsave;
@ -728,8 +728,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
*/
static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
@ -760,15 +760,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->blits[blitq->head++] = vsg;
if (blitq->head >= VIA_NUM_BLIT_SLOTS)
if (blitq->head >= VIA_NUM_BLIT_SLOTS)
blitq->head = 0;
blitq->num_outstanding++;
xfer->sync.sync_handle = ++blitq->cur_blit_handle;
xfer->sync.sync_handle = ++blitq->cur_blit_handle;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
xfer->sync.engine = engine;
via_dmablit_handler(dev, engine, 0);
via_dmablit_handler(dev, engine, 0);
return 0;
}
@ -776,7 +776,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
/*
* Sync on a previously submitted blit. Note that the X server uses signals extensively, and
* that there is a very big probability that this IOCTL will be interrupted by a signal. In that
* case it returns with -EAGAIN for the signal to be delivered.
* case it returns with -EAGAIN for the signal to be delivered.
* The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
*/
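Since the comment above promises -EAGAIN whenever a signal interrupts the wait, callers are expected to loop. Below is a sketch of that retry loop from the user-space side; the DRM_IOCTL_VIA_BLIT_SYNC request name is assumed for illustration and should be taken from the installed via_drm.h.
/* Hedged user-space retry loop for the blit-sync ioctl described above. */
#include <errno.h>
#include <sys/ioctl.h>

static int via_blit_sync_retry(int fd, drm_via_blitsync_t *sync)
{
	int ret;

	do {
		ret = ioctl(fd, DRM_IOCTL_VIA_BLIT_SYNC, sync);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}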
@ -786,7 +786,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
drm_via_blitsync_t *sync = data;
int err;
if (sync->engine >= VIA_NUM_BLIT_ENGINES)
if (sync->engine >= VIA_NUM_BLIT_ENGINES)
return -EINVAL;
err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
@ -796,15 +796,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
return err;
}
/*
* Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
* while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
* while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
* be reissued. See the above IOCTL code.
*/
int
int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
drm_via_dmablit_t *xfer = data;

View File

@ -1,5 +1,5 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
*
*
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
@ -17,12 +17,12 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Authors:
* Thomas Hellstrom.
* Register info from Digeo Inc.
*/
@ -67,7 +67,7 @@ typedef struct _drm_via_blitq {
unsigned cur;
unsigned num_free;
unsigned num_outstanding;
unsigned long end;
unsigned long end;
int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
@ -77,46 +77,46 @@ typedef struct _drm_via_blitq {
struct work_struct wq;
struct timer_list poll_timer;
} drm_via_blitq_t;
/*
/*
* PCI DMA Registers
* Channels 2 & 3 don't seem to be implemented in hardware.
*/
#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
/* Define for DMA engine */
#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
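
A side note on the register map above: the four channels' MAR/DAR/BCR/DPR blocks are spaced 0x10 apart and the per-channel MR and CSR registers 4 apart, so the same offsets could be expressed parametrically. A minimal sketch in C, derived only from the values listed above and not part of the driver:

#define VIA_PCI_DMA_MAR(ch)	(0xE40 + 0x10 * (ch))	/* Memory Address Register, ch = 0..3 */
#define VIA_PCI_DMA_DAR(ch)	(0xE44 + 0x10 * (ch))	/* Device Address Register */
#define VIA_PCI_DMA_BCR(ch)	(0xE48 + 0x10 * (ch))	/* Byte Count Register */
#define VIA_PCI_DMA_DPR(ch)	(0xE4C + 0x10 * (ch))	/* Descriptor Pointer Register */
#define VIA_PCI_DMA_MR(ch)	(0xE80 + 0x04 * (ch))	/* Mode Register */
#define VIA_PCI_DMA_CSR(ch)	(0xE90 + 0x04 * (ch))	/* Command/Status Register */

(Per the comment above, channels 2 and 3 do not appear to be implemented in hardware, so only channels 0 and 1 are of practical interest.)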

View File

@ -35,7 +35,7 @@
#include "via_drmclient.h"
#endif
#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
@ -259,7 +259,7 @@ typedef struct drm_via_blitsync {
typedef struct drm_via_dmablit {
uint32_t num_lines;
uint32_t line_length;
uint32_t fb_addr;
uint32_t fb_stride;

View File

@ -71,7 +71,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,

View File

@ -169,9 +169,9 @@ int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
unsigned int cur_vblank;
int ret = 0;
DRM_DEBUG("viadrv_vblank_wait\n");
DRM_DEBUG("\n");
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@ -201,24 +201,23 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
maskarray_t *masks;
int real_irq;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (irq >= drm_via_irq_num) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irq);
DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
return -EINVAL;
}
real_irq = dev_priv->irq_map[irq];
if (real_irq < 0) {
DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
__FUNCTION__, irq);
DRM_ERROR("Video IRQ %d not available on this hardware.\n",
irq);
return -EINVAL;
}
@ -251,7 +250,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
drm_via_irq_t *cur_irq;
int i;
DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
DRM_DEBUG("dev_priv: %p\n", dev_priv);
if (dev_priv) {
cur_irq = dev_priv->via_irqs;
@ -298,7 +297,7 @@ void via_driver_irq_postinstall(struct drm_device * dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("via_driver_irq_postinstall\n");
DRM_DEBUG("\n");
if (dev_priv) {
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
@ -317,7 +316,7 @@ void via_driver_irq_uninstall(struct drm_device * dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("driver_irq_uninstall)\n");
DRM_DEBUG("\n");
if (dev_priv) {
/* Some more magic, oh for some data sheets ! */
@ -344,7 +343,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
if (irqwait->request.irq >= dev_priv->num_irqs) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
DRM_ERROR("Trying to wait on unknown irq %d\n",
irqwait->request.irq);
return -EINVAL;
}
@ -362,8 +361,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
if (irqwait->request.type & VIA_IRQ_SIGNAL) {
DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
__FUNCTION__);
DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
return -EINVAL;
}

View File

@ -29,7 +29,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
@ -79,7 +79,7 @@ int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_init_t *init = data;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
switch (init->func) {
case VIA_INIT_MAP:
@ -121,4 +121,3 @@ int via_driver_unload(struct drm_device *dev)
return 0;
}

View File

@ -53,7 +53,7 @@ int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@ -77,7 +77,7 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
@ -113,7 +113,7 @@ void via_lastclose(struct drm_device *dev)
dev_priv->vram_initialized = 0;
dev_priv->agp_initialized = 0;
mutex_unlock(&dev->struct_mutex);
}
}
int via_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)

View File

@ -33,7 +33,7 @@ void via_init_futex(drm_via_private_t * dev_priv)
{
unsigned int i;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
@ -73,7 +73,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
int ret = 0;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
if (fx->lock > VIA_NR_XVMC_LOCKS)
return -EFAULT;