Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (76 commits)
  drm/radeon/kms: enable ACPI powermanagement mode on radeon gpus.
  drm/radeon/kms: rs400/480 should set common registers.
  drm/radeon/kms: add sanity check to wptr.
  drm/radeon/kms/evergreen: get DP working
  drm/radeon/kms: add hw_i2c module option
  drm/radeon/kms: use new pre/post_xfer i2c bit algo hooks
  drm/radeon/kms: disable MSI on IGP chips
  drm/radeon/kms: display watermark updates (v2)
  drm/radeon/kms/dp: disable training pattern on the sink at the end of link training
  drm/radeon/kms: minor fixes for eDP with LCD* device tags (v2)
  drm/radeon/kms/dp: remove extraneous training complete call
  drm/radeon/kms/atom: minor fixes to transmitter setup
  drm/radeon/kms: Only restrict BO to visible VRAM size when pinning to VRAM.
  drm: fix build error when SYSRQ is disabled
  drm/radeon/kms: fix macbookpro connector quirk
  drm/radeon/r6xx/r7xx: further safe reg clean up
  drm/radeon: bump the UMS driver version for r6xx/r7xx const buffer support
  drm/radeon/kms: bump the version for r6xx/r7xx const buffer support
  drm/radeon/r6xx/r7xx: CS parser fixes
  drm/radeon/kms: fix some typos in r6xx/r7xx hpd setup
  ...

Fix up MSI-related conflicts in drivers/gpu/drm/radeon/radeon_irq_kms.c
This commit is contained in: commit 42be79e37e
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
    if (connector->status == connector_status_disconnected) {
        DRM_DEBUG_KMS("%s is disconnected\n",
                  drm_get_connector_name(connector));
        drm_mode_connector_update_edid_property(connector, NULL);
        goto prune;
    }

@@ -707,15 +707,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
    mode->vsync_end = mode->vsync_start + vsync_pulse_width;
    mode->vtotal = mode->vdisplay + vblank;

    /* perform the basic check for the detailed timing */
    if (mode->hsync_end > mode->htotal ||
        mode->vsync_end > mode->vtotal) {
        drm_mode_destroy(dev, mode);
        DRM_DEBUG_KMS("Incorrect detailed timing. "
                  "Sync is beyond the blank.\n");
        return NULL;
    }

    /* Some EDIDs have bogus h/vtotal values */
    if (mode->hsync_end > mode->htotal)
        mode->htotal = mode->hsync_end + 1;

@@ -283,6 +283,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
    .help_msg = "force-fb(V)",
    .action_msg = "Restore framebuffer console",
};
#else
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif

static void drm_fb_helper_on(struct fb_info *info)

@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
        spin_unlock(&dev->count_lock);
    }
out:
    mutex_lock(&dev->struct_mutex);
    if (minor->type == DRM_MINOR_LEGACY) {
        BUG_ON((dev->dev_mapping != NULL) &&
               (dev->dev_mapping != inode->i_mapping));
        if (dev->dev_mapping == NULL)
            dev->dev_mapping = inode->i_mapping;
    if (!retcode) {
        mutex_lock(&dev->struct_mutex);
        if (minor->type == DRM_MINOR_LEGACY) {
            if (dev->dev_mapping == NULL)
                dev->dev_mapping = inode->i_mapping;
            else if (dev->dev_mapping != inode->i_mapping)
                retcode = -ENODEV;
        }
        mutex_unlock(&dev->struct_mutex);
    }
    mutex_unlock(&dev->struct_mutex);

    return retcode;
}

@@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
    nouveau_dp.o nouveau_grctx.o \
    nv04_timer.o \
    nv04_mc.o nv40_mc.o nv50_mc.o \
    nv04_fb.o nv10_fb.o nv40_fb.o \
    nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
    nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
    nv04_graph.o nv10_graph.o nv20_graph.o \
    nv40_graph.o nv50_graph.o \

@@ -5210,6 +5210,21 @@ divine_connector_type(struct nvbios *bios, int index)
    return type;
}

static void
apply_dcb_connector_quirks(struct nvbios *bios, int idx)
{
    struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
    struct drm_device *dev = bios->dev;

    /* Gigabyte NX85T */
    if ((dev->pdev->device == 0x0421) &&
        (dev->pdev->subsystem_vendor == 0x1458) &&
        (dev->pdev->subsystem_device == 0x344c)) {
        if (cte->type == DCB_CONNECTOR_HDMI_1)
            cte->type = DCB_CONNECTOR_DVI_I;
    }
}

static void
parse_dcb_connector_table(struct nvbios *bios)
{
@@ -5238,13 +5253,14 @@ parse_dcb_connector_table(struct nvbios *bios)
    entry = conntab + conntab[1];
    cte = &ct->entry[0];
    for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
        cte->index = i;
        if (conntab[3] == 2)
            cte->entry = ROM16(entry[0]);
        else
            cte->entry = ROM32(entry[0]);

        cte->type = (cte->entry & 0x000000ff) >> 0;
        cte->index = (cte->entry & 0x00000f00) >> 8;
        cte->index2 = (cte->entry & 0x00000f00) >> 8;
        switch (cte->entry & 0x00033000) {
        case 0x00001000:
            cte->gpio_tag = 0x07;
@@ -5266,6 +5282,8 @@ parse_dcb_connector_table(struct nvbios *bios)
        if (cte->type == 0xff)
            continue;

        apply_dcb_connector_quirks(bios, i);

        NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
            i, cte->entry, cte->type, cte->index, cte->gpio_tag);

@@ -5287,10 +5305,16 @@ parse_dcb_connector_table(struct nvbios *bios)
            break;
        default:
            cte->type = divine_connector_type(bios, cte->index);
            NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
            NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
            break;
        }

        if (nouveau_override_conntype) {
            int type = divine_connector_type(bios, cte->index);
            if (type != cte->type)
                NV_WARN(dev, " -> type 0x%02x\n", cte->type);
        }

    }
}

@@ -72,9 +72,10 @@ enum dcb_connector_type {
};

struct dcb_connector_table_entry {
    uint8_t index;
    uint32_t entry;
    enum dcb_connector_type type;
    uint8_t index;
    uint8_t index2;
    uint8_t gpio_tag;
};

@@ -439,8 +439,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)

    switch (bo->mem.mem_type) {
    case TTM_PL_VRAM:
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
                     TTM_PL_FLAG_SYSTEM);
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
        break;
    default:
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);

@@ -302,7 +302,7 @@ nouveau_connector_detect(struct drm_connector *connector)

detect_analog:
    nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
    if (!nv_encoder)
    if (!nv_encoder && !nouveau_tv_disable)
        nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
    if (nv_encoder) {
        struct drm_encoder *encoder = to_drm_encoder(nv_encoder);

@@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
    nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

    chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

    DRM_MEMORYBARRIER();
    /* Flush writes. */
    nouveau_bo_rd32(pb, 0);

    nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
    chan->dma.ib_free--;
}

@@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);

MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
int nouveau_override_conntype = 0;
module_param_named(override_conntype, nouveau_override_conntype, int, 0400);

MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);

MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
         "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
         "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
    if (pm_state.event == PM_EVENT_PRETHAW)
        return 0;

    NV_INFO(dev, "Disabling fbcon acceleration...\n");
    fbdev_flags = dev_priv->fbdev_info->flags;
    dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;

    NV_INFO(dev, "Unpinning framebuffer(s)...\n");
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct nouveau_framebuffer *nouveau_fb;

@@ -681,6 +681,7 @@ extern int nouveau_uscript_tmds;
extern int nouveau_vram_pushbuf;
extern int nouveau_vram_notify;
extern int nouveau_fbpercrtc;
extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
@@ -688,6 +689,7 @@ extern int nouveau_ctxfw;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
extern int nouveau_override_conntype;

extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -926,6 +928,10 @@ extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
                      uint32_t, uint32_t);

/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);

/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);

@ -311,6 +311,31 @@ nouveau_print_bitfield_names_(uint32_t value,
|
||||
#define nouveau_print_bitfield_names(val, namelist) \
|
||||
nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
|
||||
|
||||
struct nouveau_enum_names {
|
||||
uint32_t value;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
static void
|
||||
nouveau_print_enum_names_(uint32_t value,
|
||||
const struct nouveau_enum_names *namelist,
|
||||
const int namelist_len)
|
||||
{
|
||||
/*
|
||||
* Caller must have already printed the KERN_* log level for us.
|
||||
* Also the caller is responsible for adding the newline.
|
||||
*/
|
||||
int i;
|
||||
for (i = 0; i < namelist_len; ++i) {
|
||||
if (value == namelist[i].value) {
|
||||
printk("%s", namelist[i].name);
|
||||
return;
|
||||
}
|
||||
}
|
||||
printk("unknown value 0x%08x", value);
|
||||
}
|
||||
#define nouveau_print_enum_names(val, namelist) \
|
||||
nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
|
||||
|
||||
static int
|
||||
nouveau_graph_chid_from_grctx(struct drm_device *dev)
|
||||
@ -427,14 +452,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
|
||||
|
||||
NV_INFO(dev, "%s - nSource:", id);
|
||||
nouveau_print_bitfield_names(nsource, nsource_names);
|
||||
printk(", nStatus:");
|
||||
if (dev_priv->card_type < NV_10)
|
||||
nouveau_print_bitfield_names(nstatus, nstatus_names);
|
||||
else
|
||||
nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
|
||||
printk("\n");
|
||||
if (dev_priv->card_type < NV_50) {
|
||||
NV_INFO(dev, "%s - nSource:", id);
|
||||
nouveau_print_bitfield_names(nsource, nsource_names);
|
||||
printk(", nStatus:");
|
||||
if (dev_priv->card_type < NV_10)
|
||||
nouveau_print_bitfield_names(nstatus, nstatus_names);
|
||||
else
|
||||
nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
|
||||
printk("\n");
|
||||
}
|
||||
|
||||
NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
|
||||
"Data 0x%08x:0x%08x\n",
|
||||
@ -577,28 +604,503 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
|
||||
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
uint32_t trap[6];
|
||||
int i, ch;
|
||||
uint32_t idx = nv_rd32(dev, 0x100c90);
|
||||
if (idx & 0x80000000) {
|
||||
idx &= 0xffffff;
|
||||
if (display) {
|
||||
for (i = 0; i < 6; i++) {
|
||||
nv_wr32(dev, 0x100c90, idx | i << 24);
|
||||
trap[i] = nv_rd32(dev, 0x100c94);
|
||||
}
|
||||
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
|
||||
struct nouveau_channel *chan = dev_priv->fifos[ch];
|
||||
|
||||
if (!chan || !chan->ramin)
|
||||
continue;
|
||||
|
||||
if (trap[1] == chan->ramin->instance >> 12)
|
||||
break;
|
||||
}
|
||||
NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
|
||||
name, (trap[5]&0x100?"read":"write"),
|
||||
trap[5]&0xff, trap[4]&0xffff,
|
||||
trap[3]&0xffff, trap[0], trap[2], ch);
|
||||
}
|
||||
nv_wr32(dev, 0x100c90, idx | 0x80000000);
|
||||
} else if (display) {
|
||||
NV_INFO(dev, "%s - no VM fault?\n", name);
|
||||
}
|
||||
}
|
||||
|
||||
static struct nouveau_enum_names nv50_mp_exec_error_names[] =
|
||||
{
|
||||
{ 3, "STACK_UNDERFLOW" },
|
||||
{ 4, "QUADON_ACTIVE" },
|
||||
{ 8, "TIMEOUT" },
|
||||
{ 0x10, "INVALID_OPCODE" },
|
||||
{ 0x40, "BREAKPOINT" },
|
||||
};
|
||||
|
||||
static void
|
||||
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
uint32_t units = nv_rd32(dev, 0x1540);
|
||||
uint32_t addr, mp10, status, pc, oplow, ophigh;
|
||||
int i;
|
||||
int mps = 0;
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (!(units & 1 << (i+24)))
|
||||
continue;
|
||||
if (dev_priv->chipset < 0xa0)
|
||||
addr = 0x408200 + (tpid << 12) + (i << 7);
|
||||
else
|
||||
addr = 0x408100 + (tpid << 11) + (i << 7);
|
||||
mp10 = nv_rd32(dev, addr + 0x10);
|
||||
status = nv_rd32(dev, addr + 0x14);
|
||||
if (!status)
|
||||
continue;
|
||||
if (display) {
|
||||
nv_rd32(dev, addr + 0x20);
|
||||
pc = nv_rd32(dev, addr + 0x24);
|
||||
oplow = nv_rd32(dev, addr + 0x70);
|
||||
ophigh= nv_rd32(dev, addr + 0x74);
|
||||
NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
|
||||
"TP %d MP %d: ", tpid, i);
|
||||
nouveau_print_enum_names(status,
|
||||
nv50_mp_exec_error_names);
|
||||
printk(" at %06x warp %d, opcode %08x %08x\n",
|
||||
pc&0xffffff, pc >> 24,
|
||||
oplow, ophigh);
|
||||
}
|
||||
nv_wr32(dev, addr + 0x10, mp10);
|
||||
nv_wr32(dev, addr + 0x14, 0);
|
||||
mps++;
|
||||
}
|
||||
if (!mps && display)
|
||||
NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
|
||||
"No MPs claiming errors?\n", tpid);
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
|
||||
uint32_t ustatus_new, int display, const char *name)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
int tps = 0;
|
||||
uint32_t units = nv_rd32(dev, 0x1540);
|
||||
int i, r;
|
||||
uint32_t ustatus_addr, ustatus;
|
||||
for (i = 0; i < 16; i++) {
|
||||
if (!(units & (1 << i)))
|
||||
continue;
|
||||
if (dev_priv->chipset < 0xa0)
|
||||
ustatus_addr = ustatus_old + (i << 12);
|
||||
else
|
||||
ustatus_addr = ustatus_new + (i << 11);
|
||||
ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
|
||||
if (!ustatus)
|
||||
continue;
|
||||
tps++;
|
||||
switch (type) {
|
||||
case 6: /* texture error... unknown for now */
|
||||
nv50_pfb_vm_trap(dev, display, name);
|
||||
if (display) {
|
||||
NV_ERROR(dev, "magic set %d:\n", i);
|
||||
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
}
|
||||
break;
|
||||
case 7: /* MP error */
|
||||
if (ustatus & 0x00010000) {
|
||||
nv50_pgraph_mp_trap(dev, i, display);
|
||||
ustatus &= ~0x00010000;
|
||||
}
|
||||
break;
|
||||
case 8: /* TPDMA error */
|
||||
{
|
||||
uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
|
||||
uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
|
||||
uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
|
||||
uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
|
||||
uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
|
||||
uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
|
||||
uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
|
||||
nv50_pfb_vm_trap(dev, display, name);
|
||||
/* 2d engine destination */
|
||||
if (ustatus & 0x00000010) {
|
||||
if (display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
|
||||
i, e14, e10);
|
||||
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
|
||||
i, e0c, e18, e1c, e20, e24);
|
||||
}
|
||||
ustatus &= ~0x00000010;
|
||||
}
|
||||
/* Render target */
|
||||
if (ustatus & 0x00000040) {
|
||||
if (display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
|
||||
i, e14, e10);
|
||||
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
|
||||
i, e0c, e18, e1c, e20, e24);
|
||||
}
|
||||
ustatus &= ~0x00000040;
|
||||
}
|
||||
/* CUDA memory: l[], g[] or stack. */
|
||||
if (ustatus & 0x00000080) {
|
||||
if (display) {
|
||||
if (e18 & 0x80000000) {
|
||||
/* g[] read fault? */
|
||||
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
|
||||
i, e14, e10 | ((e18 >> 24) & 0x1f));
|
||||
e18 &= ~0x1f000000;
|
||||
} else if (e18 & 0xc) {
|
||||
/* g[] write fault? */
|
||||
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
|
||||
i, e14, e10 | ((e18 >> 7) & 0x1f));
|
||||
e18 &= ~0x00000f80;
|
||||
} else {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
|
||||
i, e14, e10);
|
||||
}
|
||||
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
|
||||
i, e0c, e18, e1c, e20, e24);
|
||||
}
|
||||
ustatus &= ~0x00000080;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (ustatus) {
|
||||
if (display)
|
||||
NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
|
||||
}
|
||||
nv_wr32(dev, ustatus_addr, 0xc0000000);
|
||||
}
|
||||
|
||||
if (!tps && display)
|
||||
NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
|
||||
}
|
||||
|
||||
static void
|
||||
nv50_pgraph_trap_handler(struct drm_device *dev)
|
||||
{
|
||||
struct nouveau_pgraph_trap trap;
|
||||
uint32_t status = nv_rd32(dev, 0x400108);
|
||||
uint32_t ustatus;
|
||||
int display = nouveau_ratelimit();
|
||||
|
||||
|
||||
if (!status && display) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
|
||||
NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
|
||||
}
|
||||
|
||||
/* DISPATCH: Relays commands to other units and handles NOTIFY,
|
||||
* COND, QUERY. If you get a trap from it, the command is still stuck
|
||||
* in DISPATCH and you need to do something about it. */
|
||||
if (status & 0x001) {
|
||||
ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
|
||||
if (!ustatus && display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
|
||||
}
|
||||
|
||||
/* Known to be triggered by screwed up NOTIFY and COND... */
|
||||
if (ustatus & 0x00000001) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
|
||||
nv_wr32(dev, 0x400500, 0);
|
||||
if (nv_rd32(dev, 0x400808) & 0x80000000) {
|
||||
if (display) {
|
||||
if (nouveau_graph_trapped_channel(dev, &trap.channel))
|
||||
trap.channel = -1;
|
||||
trap.class = nv_rd32(dev, 0x400814);
|
||||
trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
|
||||
trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
|
||||
trap.data = nv_rd32(dev, 0x40080c);
|
||||
trap.data2 = nv_rd32(dev, 0x400810);
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
|
||||
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
|
||||
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
|
||||
}
|
||||
nv_wr32(dev, 0x400808, 0);
|
||||
} else if (display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
|
||||
}
|
||||
nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
|
||||
nv_wr32(dev, 0x400848, 0);
|
||||
ustatus &= ~0x00000001;
|
||||
}
|
||||
if (ustatus & 0x00000002) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
|
||||
nv_wr32(dev, 0x400500, 0);
|
||||
if (nv_rd32(dev, 0x40084c) & 0x80000000) {
|
||||
if (display) {
|
||||
if (nouveau_graph_trapped_channel(dev, &trap.channel))
|
||||
trap.channel = -1;
|
||||
trap.class = nv_rd32(dev, 0x400814);
|
||||
trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
|
||||
trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
|
||||
trap.data = nv_rd32(dev, 0x40085c);
|
||||
trap.data2 = 0;
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
|
||||
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
|
||||
}
|
||||
nv_wr32(dev, 0x40084c, 0);
|
||||
} else if (display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
|
||||
}
|
||||
ustatus &= ~0x00000002;
|
||||
}
|
||||
if (ustatus && display)
|
||||
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
|
||||
nv_wr32(dev, 0x400804, 0xc0000000);
|
||||
nv_wr32(dev, 0x400108, 0x001);
|
||||
status &= ~0x001;
|
||||
}
|
||||
|
||||
/* TRAPs other than dispatch use the "normal" trap regs. */
|
||||
if (status && display) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_TRAP", &trap);
|
||||
}
|
||||
|
||||
/* M2MF: Memory to memory copy engine. */
|
||||
if (status & 0x002) {
|
||||
ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
|
||||
if (!ustatus && display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
|
||||
}
|
||||
if (ustatus & 0x00000001) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
|
||||
ustatus &= ~0x00000001;
|
||||
}
|
||||
if (ustatus & 0x00000002) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
|
||||
ustatus &= ~0x00000002;
|
||||
}
|
||||
if (ustatus & 0x00000004) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
|
||||
ustatus &= ~0x00000004;
|
||||
}
|
||||
NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
|
||||
nv_rd32(dev, 0x406804),
|
||||
nv_rd32(dev, 0x406808),
|
||||
nv_rd32(dev, 0x40680c),
|
||||
nv_rd32(dev, 0x406810));
|
||||
if (ustatus && display)
|
||||
NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
|
||||
/* No sane way found yet -- just reset the bugger. */
|
||||
nv_wr32(dev, 0x400040, 2);
|
||||
nv_wr32(dev, 0x400040, 0);
|
||||
nv_wr32(dev, 0x406800, 0xc0000000);
|
||||
nv_wr32(dev, 0x400108, 0x002);
|
||||
status &= ~0x002;
|
||||
}
|
||||
|
||||
/* VFETCH: Fetches data from vertex buffers. */
|
||||
if (status & 0x004) {
|
||||
ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
|
||||
if (!ustatus && display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
|
||||
}
|
||||
if (ustatus & 0x00000001) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
|
||||
NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
|
||||
nv_rd32(dev, 0x400c00),
|
||||
nv_rd32(dev, 0x400c08),
|
||||
nv_rd32(dev, 0x400c0c),
|
||||
nv_rd32(dev, 0x400c10));
|
||||
ustatus &= ~0x00000001;
|
||||
}
|
||||
if (ustatus && display)
|
||||
NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
|
||||
nv_wr32(dev, 0x400c04, 0xc0000000);
|
||||
nv_wr32(dev, 0x400108, 0x004);
|
||||
status &= ~0x004;
|
||||
}
|
||||
|
||||
/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
|
||||
if (status & 0x008) {
|
||||
ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
|
||||
if (!ustatus && display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
|
||||
}
|
||||
if (ustatus & 0x00000001) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
|
||||
NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
|
||||
nv_rd32(dev, 0x401804),
|
||||
nv_rd32(dev, 0x401808),
|
||||
nv_rd32(dev, 0x40180c),
|
||||
nv_rd32(dev, 0x401810));
|
||||
ustatus &= ~0x00000001;
|
||||
}
|
||||
if (ustatus && display)
|
||||
NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
|
||||
/* No sane way found yet -- just reset the bugger. */
|
||||
nv_wr32(dev, 0x400040, 0x80);
|
||||
nv_wr32(dev, 0x400040, 0);
|
||||
nv_wr32(dev, 0x401800, 0xc0000000);
|
||||
nv_wr32(dev, 0x400108, 0x008);
|
||||
status &= ~0x008;
|
||||
}
|
||||
|
||||
/* CCACHE: Handles code and c[] caches and fills them. */
|
||||
if (status & 0x010) {
|
||||
ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
|
||||
if (!ustatus && display) {
|
||||
NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
|
||||
}
|
||||
if (ustatus & 0x00000001) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
|
||||
NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
|
||||
nv_rd32(dev, 0x405800),
|
||||
nv_rd32(dev, 0x405804),
|
||||
nv_rd32(dev, 0x405808),
|
||||
nv_rd32(dev, 0x40580c),
|
||||
nv_rd32(dev, 0x405810),
|
||||
nv_rd32(dev, 0x405814),
|
||||
nv_rd32(dev, 0x40581c));
|
||||
ustatus &= ~0x00000001;
|
||||
}
|
||||
if (ustatus && display)
|
||||
NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
|
||||
nv_wr32(dev, 0x405018, 0xc0000000);
|
||||
nv_wr32(dev, 0x400108, 0x010);
|
||||
status &= ~0x010;
|
||||
}
|
||||
|
||||
/* Unknown, not seen yet... 0x402000 is the only trap status reg
|
||||
* remaining, so try to handle it anyway. Perhaps related to that
|
||||
* unknown DMA slot on tesla? */
|
||||
if (status & 0x20) {
|
||||
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
|
||||
ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
|
||||
if (display)
|
||||
NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
|
||||
nv_wr32(dev, 0x402000, 0xc0000000);
|
||||
/* no status modifiction on purpose */
|
||||
}
|
||||
|
||||
/* TEXTURE: CUDA texturing units */
|
||||
if (status & 0x040) {
|
||||
nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
|
||||
"PGRAPH_TRAP_TEXTURE");
|
||||
nv_wr32(dev, 0x400108, 0x040);
|
||||
status &= ~0x040;
|
||||
}
|
||||
|
||||
/* MP: CUDA execution engines. */
|
||||
if (status & 0x080) {
|
||||
nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
|
||||
"PGRAPH_TRAP_MP");
|
||||
nv_wr32(dev, 0x400108, 0x080);
|
||||
status &= ~0x080;
|
||||
}
|
||||
|
||||
/* TPDMA: Handles TP-initiated uncached memory accesses:
|
||||
* l[], g[], stack, 2d surfaces, render targets. */
|
||||
if (status & 0x100) {
|
||||
nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
|
||||
"PGRAPH_TRAP_TPDMA");
|
||||
nv_wr32(dev, 0x400108, 0x100);
|
||||
status &= ~0x100;
|
||||
}
|
||||
|
||||
if (status) {
|
||||
if (display)
|
||||
NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
|
||||
status);
|
||||
nv_wr32(dev, 0x400108, status);
|
||||
}
|
||||
}
|
||||
|
||||
/* There must be a *lot* of these. Will take some time to gather them up. */
|
||||
static struct nouveau_enum_names nv50_data_error_names[] =
|
||||
{
|
||||
{ 4, "INVALID_VALUE" },
|
||||
{ 5, "INVALID_ENUM" },
|
||||
{ 8, "INVALID_OBJECT" },
|
||||
{ 0xc, "INVALID_BITFIELD" },
|
||||
{ 0x28, "MP_NO_REG_SPACE" },
|
||||
{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
|
||||
};
|
||||
|
||||
static void
|
||||
nv50_pgraph_irq_handler(struct drm_device *dev)
|
||||
{
|
||||
struct nouveau_pgraph_trap trap;
|
||||
int unhandled = 0;
|
||||
uint32_t status;
|
||||
|
||||
while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
|
||||
uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
|
||||
|
||||
/* NOTIFY: You've set a NOTIFY an a command and it's done. */
|
||||
if (status & 0x00000001) {
|
||||
nouveau_pgraph_intr_notify(dev, nsource);
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
if (nouveau_ratelimit())
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_NOTIFY", &trap);
|
||||
status &= ~0x00000001;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
|
||||
}
|
||||
|
||||
if (status & 0x00000010) {
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
|
||||
/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
|
||||
* when you write 0x200 to 0x50c0 method 0x31c. */
|
||||
if (status & 0x00000002) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
if (nouveau_ratelimit())
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_COMPUTE_QUERY", &trap);
|
||||
status &= ~0x00000002;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
|
||||
}
|
||||
|
||||
/* Unknown, never seen: 0x4 */
|
||||
|
||||
/* ILLEGAL_MTHD: You used a wrong method for this class. */
|
||||
if (status & 0x00000010) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
if (nouveau_pgraph_intr_swmthd(dev, &trap))
|
||||
unhandled = 1;
|
||||
if (unhandled && nouveau_ratelimit())
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_ILLEGAL_MTHD", &trap);
|
||||
status &= ~0x00000010;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
|
||||
}
|
||||
|
||||
/* ILLEGAL_CLASS: You used a wrong class. */
|
||||
if (status & 0x00000020) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
if (nouveau_ratelimit())
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_ILLEGAL_CLASS", &trap);
|
||||
status &= ~0x00000020;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
|
||||
}
|
||||
|
||||
/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
|
||||
if (status & 0x00000040) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
if (nouveau_ratelimit())
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_DOUBLE_NOTIFY", &trap);
|
||||
status &= ~0x00000040;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
|
||||
}
|
||||
|
||||
/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
|
||||
if (status & 0x00001000) {
|
||||
nv_wr32(dev, 0x400500, 0x00000000);
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR,
|
||||
@ -613,49 +1115,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
|
||||
status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
|
||||
}
|
||||
|
||||
if (status & 0x00100000) {
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_DATA_ERROR);
|
||||
/* BUFFER_NOTIFY: Your m2mf transfer finished */
|
||||
if (status & 0x00010000) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
if (nouveau_ratelimit())
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_BUFFER_NOTIFY", &trap);
|
||||
status &= ~0x00010000;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
|
||||
}
|
||||
|
||||
/* DATA_ERROR: Invalid value for this method, or invalid
|
||||
* state in current PGRAPH context for this operation */
|
||||
if (status & 0x00100000) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
if (nouveau_ratelimit()) {
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_DATA_ERROR", &trap);
|
||||
NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
|
||||
nouveau_print_enum_names(nv_rd32(dev, 0x400110),
|
||||
nv50_data_error_names);
|
||||
printk("\n");
|
||||
}
|
||||
status &= ~0x00100000;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
|
||||
}
|
||||
|
||||
/* TRAP: Something bad happened in the middle of command
|
||||
* execution. Has a billion types, subtypes, and even
|
||||
* subsubtypes. */
|
||||
if (status & 0x00200000) {
|
||||
int r;
|
||||
|
||||
nouveau_pgraph_intr_error(dev, nsource |
|
||||
NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
|
||||
|
||||
NV_ERROR(dev, "magic set 1:\n");
|
||||
for (r = 0x408900; r <= 0x408910; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x408900,
|
||||
nv_rd32(dev, 0x408904) | 0xc0000000);
|
||||
for (r = 0x408e08; r <= 0x408e24; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x408e08,
|
||||
nv_rd32(dev, 0x408e08) | 0xc0000000);
|
||||
|
||||
NV_ERROR(dev, "magic set 2:\n");
|
||||
for (r = 0x409900; r <= 0x409910; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x409900,
|
||||
nv_rd32(dev, 0x409904) | 0xc0000000);
|
||||
for (r = 0x409e08; r <= 0x409e24; r += 4)
|
||||
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
|
||||
nv_rd32(dev, r));
|
||||
nv_wr32(dev, 0x409e08,
|
||||
nv_rd32(dev, 0x409e08) | 0xc0000000);
|
||||
|
||||
nv50_pgraph_trap_handler(dev);
|
||||
status &= ~0x00200000;
|
||||
nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
|
||||
}
|
||||
|
||||
/* Unknown, never seen: 0x00400000 */
|
||||
|
||||
/* SINGLE_STEP: Happens on every method if you turned on
|
||||
* single stepping in 40008c */
|
||||
if (status & 0x01000000) {
|
||||
nouveau_graph_trap_info(dev, &trap);
|
||||
if (nouveau_ratelimit())
|
||||
nouveau_graph_dump_trap_info(dev,
|
||||
"PGRAPH_SINGLE_STEP", &trap);
|
||||
status &= ~0x01000000;
|
||||
nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
|
||||
}
|
||||
|
||||
/* 0x02000000 happens when you pause a ctxprog...
|
||||
* but the only way this can happen that I know is by
|
||||
* poking the relevant MMIO register, and we don't
|
||||
* do that. */
|
||||
|
||||
if (status) {
|
||||
NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
|
||||
status);
|
||||
@ -672,7 +1184,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
|
||||
}
|
||||
|
||||
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
|
||||
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
|
||||
if (nv_rd32(dev, 0x400824) & (1 << 31))
|
||||
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
|
||||
}
|
||||
|
||||
static void
|
||||
|
@@ -35,7 +35,6 @@
#include "nouveau_drm.h"
#include "nv50_display.h"

static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static void nouveau_stub_takedown(struct drm_device *dev) {}

static int nouveau_init_engine_ptrs(struct drm_device *dev)
@@ -277,8 +276,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
        engine->timer.init = nv04_timer_init;
        engine->timer.read = nv04_timer_read;
        engine->timer.takedown = nv04_timer_takedown;
        engine->fb.init = nouveau_stub_init;
        engine->fb.takedown = nouveau_stub_takedown;
        engine->fb.init = nv50_fb_init;
        engine->fb.takedown = nv50_fb_takedown;
        engine->graph.grclass = nv50_graph_grclass;
        engine->graph.init = nv50_graph_init;
        engine->graph.takedown = nv50_graph_takedown;

@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
    struct drm_framebuffer *fb = crtc->fb;

    /* Calculate our timings */
    int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
    int horizStart = (mode->crtc_hsync_start >> 3) - 1;
    int horizEnd = (mode->crtc_hsync_end >> 3) - 1;
    int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
    int horizStart = (mode->crtc_hsync_start >> 3) + 1;
    int horizEnd = (mode->crtc_hsync_end >> 3) + 1;
    int horizTotal = (mode->crtc_htotal >> 3) - 5;
    int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
    int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;

@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        return;
    }

    width = ALIGN(image->width, 32);
    dsize = (width * image->height) >> 5;
    width = ALIGN(image->width, 8);
    dsize = ALIGN(width * image->height, 32) >> 5;

    if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
        info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
          ((image->dx + image->width) & 0xffff));
    OUT_RING(chan, bg);
    OUT_RING(chan, fg);
    OUT_RING(chan, (image->height << 16) | image->width);
    OUT_RING(chan, (image->height << 16) | width);
    OUT_RING(chan, (image->height << 16) | image->width);
    OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));

    while (dsize) {

@@ -522,8 +522,8 @@ int nv50_display_create(struct drm_device *dev)
    }

    for (i = 0 ; i < dcb->connector.entries; i++) {
        if (i != 0 && dcb->connector.entry[i].index ==
            dcb->connector.entry[i - 1].index)
        if (i != 0 && dcb->connector.entry[i].index2 ==
            dcb->connector.entry[i - 1].index2)
            continue;
        nouveau_connector_create(dev, &dcb->connector.entry[i]);
    }

drivers/gpu/drm/nouveau/nv50_fb.c (new file, 32 lines)
@@ -0,0 +1,32 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

int
nv50_fb_init(struct drm_device *dev)
{
    /* This is needed to get meaningful information from 100c90
     * on traps. No idea what these values mean exactly. */
    struct drm_nouveau_private *dev_priv = dev->dev_private;

    switch (dev_priv->chipset) {
    case 0x50:
        nv_wr32(dev, 0x100c90, 0x0707ff);
        break;
    case 0xa5:
    case 0xa8:
        nv_wr32(dev, 0x100c90, 0x0d0fff);
        break;
    default:
        nv_wr32(dev, 0x100c90, 0x1d07ff);
        break;
    }

    return 0;
}

void
nv50_fb_takedown(struct drm_device *dev)
{
}

@@ -233,7 +233,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
    BEGIN_RING(chan, NvSub2D, 0x0808, 3);
    OUT_RING(chan, 0);
    OUT_RING(chan, 0);
    OUT_RING(chan, 0);
    OUT_RING(chan, 1);
    BEGIN_RING(chan, NvSub2D, 0x081c, 1);
    OUT_RING(chan, 1);
    BEGIN_RING(chan, NvSub2D, 0x0840, 4);

@ -56,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev)
|
||||
static void
|
||||
nv50_graph_init_regs__nv(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
uint32_t units = nv_rd32(dev, 0x1540);
|
||||
int i;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
|
||||
nv_wr32(dev, 0x400804, 0xc0000000);
|
||||
@ -65,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
|
||||
nv_wr32(dev, 0x405018, 0xc0000000);
|
||||
nv_wr32(dev, 0x402000, 0xc0000000);
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
if (units & 1 << i) {
|
||||
if (dev_priv->chipset < 0xa0) {
|
||||
nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
|
||||
nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
|
||||
nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
|
||||
} else {
|
||||
nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
|
||||
nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
|
||||
nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nv_wr32(dev, 0x400108, 0xffffffff);
|
||||
|
||||
nv_wr32(dev, 0x400824, 0x00004000);
|
||||
@ -229,10 +247,6 @@ nv50_graph_create_context(struct nouveau_channel *chan)
|
||||
nouveau_grctx_vals_load(dev, ctx);
|
||||
}
|
||||
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
|
||||
if ((dev_priv->chipset & 0xf0) == 0xa0)
|
||||
nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
|
||||
else
|
||||
nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
|
||||
dev_priv->engine.instmem.finish_access(dev);
|
||||
|
||||
return 0;
|
||||
|
@ -64,6 +64,9 @@
|
||||
#define CP_FLAG_ALWAYS ((2 * 32) + 13)
|
||||
#define CP_FLAG_ALWAYS_FALSE 0
|
||||
#define CP_FLAG_ALWAYS_TRUE 1
|
||||
#define CP_FLAG_INTR ((2 * 32) + 15)
|
||||
#define CP_FLAG_INTR_NOT_PENDING 0
|
||||
#define CP_FLAG_INTR_PENDING 1
|
||||
|
||||
#define CP_CTX 0x00100000
|
||||
#define CP_CTX_COUNT 0x000f0000
|
||||
@ -214,6 +217,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
|
||||
cp_name(ctx, cp_setup_save);
|
||||
cp_set (ctx, UNK1D, SET);
|
||||
cp_wait(ctx, STATUS, BUSY);
|
||||
cp_wait(ctx, INTR, PENDING);
|
||||
cp_bra (ctx, STATUS, BUSY, cp_setup_save);
|
||||
cp_set (ctx, UNK01, SET);
|
||||
cp_set (ctx, SWAP_DIRECTION, SAVE);
|
||||
|
||||
@ -269,7 +274,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
|
||||
int offset, base;
|
||||
uint32_t units = nv_rd32 (ctx->dev, 0x1540);
|
||||
|
||||
/* 0800 */
|
||||
/* 0800: DISPATCH */
|
||||
cp_ctx(ctx, 0x400808, 7);
|
||||
gr_def(ctx, 0x400814, 0x00000030);
|
||||
cp_ctx(ctx, 0x400834, 0x32);
|
||||
@ -300,7 +305,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
|
||||
gr_def(ctx, 0x400b20, 0x0001629d);
|
||||
}
|
||||
|
||||
/* 0C00 */
|
||||
/* 0C00: VFETCH */
|
||||
cp_ctx(ctx, 0x400c08, 0x2);
|
||||
gr_def(ctx, 0x400c08, 0x0000fe0c);
|
||||
|
||||
@ -326,7 +331,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
|
||||
cp_ctx(ctx, 0x401540, 0x5);
|
||||
gr_def(ctx, 0x401550, 0x00001018);
|
||||
|
||||
/* 1800 */
|
||||
/* 1800: STREAMOUT */
|
||||
cp_ctx(ctx, 0x401814, 0x1);
|
||||
gr_def(ctx, 0x401814, 0x000000ff);
|
||||
if (dev_priv->chipset == 0x50) {
|
||||
@ -641,7 +646,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
|
||||
if (dev_priv->chipset == 0x50)
|
||||
cp_ctx(ctx, 0x4063e0, 0x1);
|
||||
|
||||
/* 6800 */
|
||||
/* 6800: M2MF */
|
||||
if (dev_priv->chipset < 0x90) {
|
||||
cp_ctx(ctx, 0x406814, 0x2b);
|
||||
gr_def(ctx, 0x406818, 0x00000f80);
|
||||
|
@@ -50,7 +50,7 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h

radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
    radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
radeon-y += radeon_device.o radeon_kms.o \
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
    radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
    atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
    radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \

@ -52,15 +52,17 @@
|
||||
|
||||
typedef struct {
|
||||
struct atom_context *ctx;
|
||||
|
||||
uint32_t *ps, *ws;
|
||||
int ps_shift;
|
||||
uint16_t start;
|
||||
unsigned last_jump;
|
||||
unsigned long last_jump_jiffies;
|
||||
bool abort;
|
||||
} atom_exec_context;
|
||||
|
||||
int atom_debug = 0;
|
||||
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
|
||||
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
|
||||
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
|
||||
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
|
||||
|
||||
static uint32_t atom_arg_mask[8] =
|
||||
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
|
||||
@ -604,12 +606,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
|
||||
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
|
||||
{
|
||||
int idx = U8((*ptr)++);
|
||||
int r = 0;
|
||||
|
||||
if (idx < ATOM_TABLE_NAMES_CNT)
|
||||
SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
|
||||
else
|
||||
SDEBUG(" table: %d\n", idx);
|
||||
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
|
||||
atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
|
||||
r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
|
||||
if (r) {
|
||||
ctx->abort = true;
|
||||
}
|
||||
}
|
||||
|
||||
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
|
||||
@ -673,6 +680,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
|
||||
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
|
||||
{
|
||||
int execute = 0, target = U16(*ptr);
|
||||
unsigned long cjiffies;
|
||||
|
||||
(*ptr) += 2;
|
||||
switch (arg) {
|
||||
case ATOM_COND_ABOVE:
|
||||
@ -700,8 +709,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
|
||||
if (arg != ATOM_COND_ALWAYS)
|
||||
SDEBUG(" taken: %s\n", execute ? "yes" : "no");
|
||||
SDEBUG(" target: 0x%04X\n", target);
|
||||
if (execute)
|
||||
if (execute) {
|
||||
if (ctx->last_jump == (ctx->start + target)) {
|
||||
cjiffies = jiffies;
|
||||
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
|
||||
cjiffies -= ctx->last_jump_jiffies;
|
||||
if ((jiffies_to_msecs(cjiffies) > 1000)) {
|
||||
DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
|
||||
ctx->abort = true;
|
||||
}
|
||||
} else {
|
||||
/* jiffies wrap around we will just wait a little longer */
|
||||
ctx->last_jump_jiffies = jiffies;
|
||||
}
|
||||
} else {
|
||||
ctx->last_jump = ctx->start + target;
|
||||
ctx->last_jump_jiffies = jiffies;
|
||||
}
|
||||
*ptr = ctx->start + target;
|
||||
}
|
||||
}
|
||||
|
||||
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
|
||||
@ -1104,7 +1130,7 @@ static struct {
|
||||
atom_op_shr, ATOM_ARG_MC}, {
|
||||
atom_op_debug, 0},};
|
||||
|
||||
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
|
||||
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
|
||||
{
|
||||
int base = CU16(ctx->cmd_table + 4 + 2 * index);
|
||||
int len, ws, ps, ptr;
|
||||
@ -1112,7 +1138,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
|
||||
atom_exec_context ectx;
|
||||
|
||||
if (!base)
|
||||
return;
|
||||
return -EINVAL;
|
||||
|
||||
len = CU16(base + ATOM_CT_SIZE_PTR);
|
||||
ws = CU8(base + ATOM_CT_WS_PTR);
|
||||
@ -1125,6 +1151,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
|
||||
ectx.ps_shift = ps / 4;
|
||||
ectx.start = base;
|
||||
ectx.ps = params;
|
||||
ectx.abort = false;
|
||||
ectx.last_jump = 0;
|
||||
if (ws)
|
||||
ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
|
||||
else
|
||||
@ -1137,6 +1165,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
|
||||
SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
|
||||
else
|
||||
SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
|
||||
if (ectx.abort) {
|
||||
DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
|
||||
base, len, ws, ps, ptr - 1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (op < ATOM_OP_CNT && op > 0)
|
||||
opcode_table[op].func(&ectx, &ptr,
|
||||
@ -1152,10 +1185,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
|
||||
|
||||
if (ws)
|
||||
kfree(ectx.ws);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
|
||||
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
|
||||
{
|
||||
int r;
|
||||
|
||||
mutex_lock(&ctx->mutex);
|
||||
/* reset reg block */
|
||||
ctx->reg_block = 0;
|
||||
@ -1163,8 +1199,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
|
||||
ctx->fb_base = 0;
|
||||
/* reset io mode */
|
||||
ctx->io_mode = ATOM_IO_MM;
|
||||
atom_execute_table_locked(ctx, index, params);
|
||||
r = atom_execute_table_locked(ctx, index, params);
|
||||
mutex_unlock(&ctx->mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
|
||||
@ -1248,9 +1285,7 @@ int atom_asic_init(struct atom_context *ctx)
|
||||
|
||||
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
|
||||
return 1;
|
||||
atom_execute_table(ctx, ATOM_CMD_INIT, ps);
|
||||
|
||||
return 0;
|
||||
return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
|
||||
}
|
||||
|
||||
void atom_destroy(struct atom_context *ctx)
|
||||
@ -1260,12 +1295,16 @@ void atom_destroy(struct atom_context *ctx)
|
||||
kfree(ctx);
|
||||
}
|
||||
|
||||
void atom_parse_data_header(struct atom_context *ctx, int index,
|
||||
bool atom_parse_data_header(struct atom_context *ctx, int index,
|
||||
uint16_t * size, uint8_t * frev, uint8_t * crev,
|
||||
uint16_t * data_start)
|
||||
{
|
||||
int offset = index * 2 + 4;
|
||||
int idx = CU16(ctx->data_table + offset);
|
||||
u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
|
||||
|
||||
if (!mdt[index])
|
||||
return false;
|
||||
|
||||
if (size)
|
||||
*size = CU16(idx);
|
||||
@ -1274,38 +1313,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
|
||||
if (crev)
|
||||
*crev = CU8(idx + 3);
|
||||
*data_start = idx;
|
||||
return;
|
||||
return true;
|
||||
}
|
||||
|
||||
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
|
||||
bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
|
||||
uint8_t * crev)
|
||||
{
|
||||
int offset = index * 2 + 4;
|
||||
int idx = CU16(ctx->cmd_table + offset);
|
||||
u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
|
||||
|
||||
if (!mct[index])
|
||||
return false;
|
||||
|
||||
if (frev)
|
||||
*frev = CU8(idx + 2);
|
||||
if (crev)
|
||||
*crev = CU8(idx + 3);
|
||||
return;
|
||||
return true;
|
||||
}
|
||||
|
||||
int atom_allocate_fb_scratch(struct atom_context *ctx)
|
||||
{
|
||||
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
|
||||
uint16_t data_offset;
|
||||
int usage_bytes;
|
||||
int usage_bytes = 0;
|
||||
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
|
||||
|
||||
atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
|
||||
if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
|
||||
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
|
||||
|
||||
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
|
||||
DRM_DEBUG("atom firmware requested %08x %dkb\n",
|
||||
firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
|
||||
firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
|
||||
|
||||
DRM_DEBUG("atom firmware requested %08x %dkb\n",
|
||||
firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
|
||||
firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
|
||||
|
||||
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
|
||||
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
|
||||
}
|
||||
if (usage_bytes == 0)
|
||||
usage_bytes = 20 * 1024;
|
||||
/* allocate some scratch memory */
|
||||
|
@ -140,11 +140,13 @@ struct atom_context {
|
||||
extern int atom_debug;
|
||||
|
||||
struct atom_context *atom_parse(struct card_info *, void *);
|
||||
void atom_execute_table(struct atom_context *, int, uint32_t *);
|
||||
int atom_execute_table(struct atom_context *, int, uint32_t *);
|
||||
int atom_asic_init(struct atom_context *);
|
||||
void atom_destroy(struct atom_context *);
|
||||
void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
|
||||
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
|
||||
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
|
||||
uint8_t *frev, uint8_t *crev, uint16_t *data_start);
|
||||
bool atom_parse_cmd_header(struct atom_context *ctx, int index,
|
||||
uint8_t *frev, uint8_t *crev);
|
||||
int atom_allocate_fb_scratch(struct atom_context *ctx);
|
||||
#include "atom-types.h"
|
||||
#include "atombios.h"
|
||||
|
@ -353,12 +353,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
|
||||
static void atombios_disable_ss(struct drm_crtc *crtc)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
u32 ss_cntl;
|
||||
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
switch (radeon_crtc->pll_id) {
|
||||
case ATOM_PPLL1:
|
||||
ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
|
||||
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
|
||||
WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
|
||||
break;
|
||||
case ATOM_PPLL2:
|
||||
ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
|
||||
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
|
||||
WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
|
||||
break;
|
||||
case ATOM_DCPLL:
|
||||
case ATOM_PPLL_INVALID:
|
||||
return;
|
||||
}
|
||||
} else if (ASIC_IS_AVIVO(rdev)) {
|
||||
switch (radeon_crtc->pll_id) {
|
||||
case ATOM_PPLL1:
|
||||
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
|
||||
ss_cntl &= ~1;
|
||||
WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
|
||||
break;
|
||||
case ATOM_PPLL2:
|
||||
ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
|
||||
ss_cntl &= ~1;
|
||||
WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
|
||||
break;
|
||||
case ATOM_DCPLL:
|
||||
case ATOM_PPLL_INVALID:
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
union atom_enable_ss {
|
||||
ENABLE_LVDS_SS_PARAMETERS legacy;
|
||||
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
|
||||
};
|
||||
|
||||
static void atombios_set_ss(struct drm_crtc *crtc, int enable)
|
||||
static void atombios_enable_ss(struct drm_crtc *crtc)
|
||||
{
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
@ -387,9 +430,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
|
||||
step = dig->ss->step;
|
||||
delay = dig->ss->delay;
|
||||
range = dig->ss->range;
|
||||
} else if (enable)
|
||||
} else
|
||||
return;
|
||||
} else if (enable)
|
||||
} else
|
||||
return;
|
||||
break;
|
||||
}
|
||||
@ -406,13 +449,13 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
|
||||
args.v1.ucSpreadSpectrumDelay = delay;
|
||||
args.v1.ucSpreadSpectrumRange = range;
|
||||
args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
|
||||
args.v1.ucEnable = enable;
|
||||
args.v1.ucEnable = ATOM_ENABLE;
|
||||
} else {
|
||||
args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
|
||||
args.legacy.ucSpreadSpectrumType = type;
|
||||
args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
|
||||
args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
|
||||
args.legacy.ucEnable = enable;
|
||||
args.legacy.ucEnable = ATOM_ENABLE;
|
||||
}
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
}
|
||||
@ -478,11 +521,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
|
||||
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
|
||||
adjusted_clock = mode->clock * 2;
|
||||
/* LVDS PLL quirks */
|
||||
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
pll->algo = dig->pll_algo;
|
||||
}
|
||||
} else {
|
||||
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
|
||||
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
|
||||
@ -503,8 +541,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
int index;
|
||||
|
||||
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev);
|
||||
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev))
|
||||
return adjusted_clock;
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
@ -542,11 +581,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
|
||||
}
|
||||
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
/* may want to enable SS on DP/eDP eventually */
|
||||
args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_SS_ENABLE;
|
||||
if (mode->clock > 165000)
|
||||
/*args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_SS_ENABLE;*/
|
||||
if (encoder_mode == ATOM_ENCODER_MODE_DP)
|
||||
args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_DUAL_LINK;
|
||||
DISPPLL_CONFIG_COHERENT_MODE;
|
||||
else {
|
||||
if (mode->clock > 165000)
|
||||
args.v3.sInput.ucDispPllConfig |=
|
||||
DISPPLL_CONFIG_DUAL_LINK;
|
||||
}
|
||||
}
|
||||
atom_execute_table(rdev->mode_info.atom_context,
|
||||
index, (uint32_t *)&args);
|
||||
@ -592,8 +636,9 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev);
|
||||
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev))
|
||||
return;
|
||||
|
||||
switch (frev) {
|
||||
case 1:
|
||||
@ -667,8 +712,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
|
||||
&ref_div, &post_div);
|
||||
|
||||
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev);
|
||||
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
|
||||
&crev))
|
||||
return;
|
||||
|
||||
switch (frev) {
|
||||
case 1:
|
||||
@ -1083,15 +1129,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,

/* TODO color tiling */

/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);

atombios_set_ss(crtc, 0);
atombios_disable_ss(crtc);
/* always set DCPLL */
if (ASIC_IS_DCE4(rdev))
atombios_crtc_set_dcpll(crtc);
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_set_ss(crtc, 1);
atombios_enable_ss(crtc);

if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@ -1120,6 +1163,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,

static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);

atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
@ -745,14 +745,14 @@ void dp_link_train(struct drm_encoder *encoder,
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);

/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);

/* disable the training pattern on the source */
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
else
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);

radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
}
|
||||
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include <linux/platform_device.h>
|
||||
#include "drmP.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "rv770d.h"
|
||||
#include "atom.h"
|
||||
@ -436,7 +437,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
|
||||
|
||||
int evergreen_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
fixed20_12 a;
|
||||
u32 tmp;
|
||||
int chansize, numchan;
|
||||
|
||||
@ -481,12 +481,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
|
||||
rdev->mc.real_vram_size = rdev->mc.aper_size;
|
||||
}
|
||||
r600_vram_gtt_location(rdev, &rdev->mc);
|
||||
/* FIXME: we should enforce default clock in case GPU is not in
|
||||
* default setup
|
||||
*/
|
||||
a.full = rfixed_const(100);
|
||||
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
|
||||
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -746,6 +742,7 @@ int evergreen_init(struct radeon_device *rdev)
|
||||
|
||||
void evergreen_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
evergreen_suspend(rdev);
|
||||
#if 0
|
||||
r600_blit_fini(rdev);
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "r100d.h"
|
||||
#include "rs100d.h"
|
||||
#include "rv200d.h"
|
||||
@ -235,9 +236,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
|
||||
|
||||
void r100_pci_gart_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_gart_fini(rdev);
|
||||
r100_pci_gart_disable(rdev);
|
||||
radeon_gart_table_ram_free(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
}
|
||||
|
||||
int r100_irq_set(struct radeon_device *rdev)
|
||||
@ -312,10 +313,12 @@ int r100_irq_process(struct radeon_device *rdev)
|
||||
/* Vertical blank interrupts */
|
||||
if (status & RADEON_CRTC_VBLANK_STAT) {
|
||||
drm_handle_vblank(rdev->ddev, 0);
|
||||
rdev->pm.vblank_sync = true;
|
||||
wake_up(&rdev->irq.vblank_queue);
|
||||
}
|
||||
if (status & RADEON_CRTC2_VBLANK_STAT) {
|
||||
drm_handle_vblank(rdev->ddev, 1);
|
||||
rdev->pm.vblank_sync = true;
|
||||
wake_up(&rdev->irq.vblank_queue);
|
||||
}
|
||||
if (status & RADEON_FP_DETECT_STAT) {
|
||||
@ -741,6 +744,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
|
||||
udelay(10);
|
||||
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
|
||||
rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
|
||||
/* protect against crazy HW on resume */
|
||||
rdev->cp.wptr &= rdev->cp.ptr_mask;
|
||||
/* Set cp mode to bus mastering & enable cp*/
|
||||
WREG32(RADEON_CP_CSQ_MODE,
|
||||
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
|
||||
@ -1804,6 +1809,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
|
||||
{
|
||||
struct drm_device *dev = rdev->ddev;
|
||||
bool force_dac2 = false;
|
||||
u32 tmp;
|
||||
|
||||
/* set these so they don't interfere with anything */
|
||||
WREG32(RADEON_OV0_SCALE_CNTL, 0);
|
||||
@ -1875,6 +1881,12 @@ void r100_set_common_regs(struct radeon_device *rdev)
|
||||
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
|
||||
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
|
||||
}
|
||||
|
||||
/* switch PM block to ACPI mode */
|
||||
tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
|
||||
tmp &= ~RADEON_PM_MODE_SEL;
|
||||
WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2022,6 +2034,7 @@ void r100_mc_init(struct radeon_device *rdev)
|
||||
radeon_vram_location(rdev, &rdev->mc, base);
|
||||
if (!(rdev->flags & RADEON_IS_AGP))
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
}
|
||||
|
||||
|
||||
@ -2385,6 +2398,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
|
||||
uint32_t pixel_bytes1 = 0;
|
||||
uint32_t pixel_bytes2 = 0;
|
||||
|
||||
radeon_update_display_priority(rdev);
|
||||
|
||||
if (rdev->mode_info.crtcs[0]->base.enabled) {
|
||||
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
|
||||
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
|
||||
@ -2413,11 +2428,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
|
||||
/*
|
||||
* determine is there is enough bw for current mode
|
||||
*/
|
||||
mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
|
||||
temp_ff.full = rfixed_const(100);
|
||||
mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
|
||||
sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
|
||||
sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
|
||||
sclk_ff = rdev->pm.sclk;
|
||||
mclk_ff = rdev->pm.mclk;
|
||||
|
||||
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
|
||||
temp_ff.full = rfixed_const(temp);
|
||||
@ -3440,6 +3452,7 @@ int r100_suspend(struct radeon_device *rdev)
|
||||
|
||||
void r100_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r100_cp_fini(rdev);
|
||||
r100_wb_fini(rdev);
|
||||
r100_ib_fini(rdev);
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
|
||||
#include "r100d.h"
|
||||
#include "r200_reg_safe.h"
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include "drm.h"
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "r100_track.h"
|
||||
#include "r300d.h"
|
||||
@ -164,9 +165,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
|
||||
|
||||
void rv370_pcie_gart_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_gart_fini(rdev);
|
||||
rv370_pcie_gart_disable(rdev);
|
||||
radeon_gart_table_vram_free(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
}
|
||||
|
||||
void r300_fence_ring_emit(struct radeon_device *rdev,
|
||||
@ -481,6 +482,7 @@ void r300_mc_init(struct radeon_device *rdev)
|
||||
radeon_vram_location(rdev, &rdev->mc, base);
|
||||
if (!(rdev->flags & RADEON_IS_AGP))
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
}
|
||||
|
||||
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
|
||||
@ -1334,6 +1336,7 @@ int r300_suspend(struct radeon_device *rdev)
|
||||
|
||||
void r300_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r100_cp_fini(rdev);
|
||||
r100_wb_fini(rdev);
|
||||
r100_ib_fini(rdev);
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include "drmP.h"
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "atom.h"
|
||||
#include "r100d.h"
|
||||
#include "r420d.h"
|
||||
@ -266,6 +267,7 @@ int r420_suspend(struct radeon_device *rdev)
|
||||
|
||||
void r420_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r100_cp_fini(rdev);
|
||||
r100_wb_fini(rdev);
|
||||
r100_ib_fini(rdev);
|
||||
|
@ -27,6 +27,7 @@
|
||||
*/
|
||||
#include "drmP.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "atom.h"
|
||||
#include "r520d.h"
|
||||
|
||||
@ -121,19 +122,13 @@ static void r520_vram_get_type(struct radeon_device *rdev)
|
||||
|
||||
void r520_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
fixed20_12 a;
|
||||
|
||||
r520_vram_get_type(rdev);
|
||||
r100_vram_init_sizes(rdev);
|
||||
radeon_vram_location(rdev, &rdev->mc, 0);
|
||||
if (!(rdev->flags & RADEON_IS_AGP))
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
/* FIXME: we should enforce default clock in case GPU is not in
|
||||
* default setup
|
||||
*/
|
||||
a.full = rfixed_const(100);
|
||||
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
|
||||
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
}
|
||||
|
||||
void r520_mc_program(struct radeon_device *rdev)
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include "drmP.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "radeon_mode.h"
|
||||
#include "r600d.h"
|
||||
#include "atom.h"
|
||||
@ -491,9 +492,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
|
||||
|
||||
void r600_pcie_gart_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_gart_fini(rdev);
|
||||
r600_pcie_gart_disable(rdev);
|
||||
radeon_gart_table_vram_free(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
}
|
||||
|
||||
void r600_agp_enable(struct radeon_device *rdev)
|
||||
@ -675,7 +676,6 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
|
||||
|
||||
int r600_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
fixed20_12 a;
|
||||
u32 tmp;
|
||||
int chansize, numchan;
|
||||
|
||||
@ -719,14 +719,10 @@ int r600_mc_init(struct radeon_device *rdev)
|
||||
rdev->mc.real_vram_size = rdev->mc.aper_size;
|
||||
}
|
||||
r600_vram_gtt_location(rdev, &rdev->mc);
|
||||
/* FIXME: we should enforce default clock in case GPU is not in
|
||||
* default setup
|
||||
*/
|
||||
a.full = rfixed_const(100);
|
||||
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
|
||||
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP)
|
||||
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1132,6 +1128,7 @@ void r600_gpu_init(struct radeon_device *rdev)
|
||||
/* Setup pipes */
|
||||
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
|
||||
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
|
||||
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
|
||||
|
||||
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
|
||||
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
|
||||
@ -2119,6 +2116,7 @@ int r600_init(struct radeon_device *rdev)
|
||||
|
||||
void r600_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r600_audio_fini(rdev);
|
||||
r600_blit_fini(rdev);
|
||||
r600_cp_fini(rdev);
|
||||
@ -2398,19 +2396,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
|
||||
WREG32(DC_HPD4_INT_CONTROL, tmp);
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
|
||||
WREG32(DC_HPD5_INT_CONTROL, 0);
|
||||
WREG32(DC_HPD5_INT_CONTROL, tmp);
|
||||
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
|
||||
WREG32(DC_HPD6_INT_CONTROL, 0);
|
||||
WREG32(DC_HPD6_INT_CONTROL, tmp);
|
||||
}
|
||||
} else {
|
||||
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
|
||||
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
|
||||
tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
|
||||
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0);
|
||||
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
|
||||
tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
|
||||
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0);
|
||||
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
|
||||
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
|
||||
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0);
|
||||
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2765,6 +2763,7 @@ restart_ih:
|
||||
case 0: /* D1 vblank */
|
||||
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
|
||||
drm_handle_vblank(rdev->ddev, 0);
|
||||
rdev->pm.vblank_sync = true;
|
||||
wake_up(&rdev->irq.vblank_queue);
|
||||
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
|
||||
DRM_DEBUG("IH: D1 vblank\n");
|
||||
@ -2786,6 +2785,7 @@ restart_ih:
|
||||
case 0: /* D2 vblank */
|
||||
if (disp_int & LB_D2_VBLANK_INTERRUPT) {
|
||||
drm_handle_vblank(rdev->ddev, 1);
|
||||
rdev->pm.vblank_sync = true;
|
||||
wake_up(&rdev->irq.vblank_queue);
|
||||
disp_int &= ~LB_D2_VBLANK_INTERRUPT;
|
||||
DRM_DEBUG("IH: D2 vblank\n");
|
||||
@ -2834,14 +2834,14 @@ restart_ih:
|
||||
break;
|
||||
case 10:
|
||||
if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
|
||||
disp_int_cont &= ~DC_HPD5_INTERRUPT;
|
||||
disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
|
||||
queue_hotplug = true;
|
||||
DRM_DEBUG("IH: HPD5\n");
|
||||
}
|
||||
break;
|
||||
case 12:
|
||||
if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
|
||||
disp_int_cont &= ~DC_HPD6_INTERRUPT;
|
||||
disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
|
||||
queue_hotplug = true;
|
||||
DRM_DEBUG("IH: HPD6\n");
|
||||
}
|
||||
|
@ -181,41 +181,6 @@ int r600_audio_init(struct radeon_device *rdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* determin how the encoders and audio interface is wired together
|
||||
*/
|
||||
int r600_audio_tmds_index(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct drm_encoder *other;
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
return 0;
|
||||
|
||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
||||
/* special case check if an TMDS1 is present */
|
||||
list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
|
||||
if (to_radeon_encoder(other)->encoder_id ==
|
||||
ENCODER_OBJECT_ID_INTERNAL_TMDS1)
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
return 1;
|
||||
|
||||
default:
|
||||
DRM_ERROR("Unsupported encoder type 0x%02X\n",
|
||||
radeon_encoder->encoder_id);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* atach the audio codec to the clock source of the encoder
|
||||
*/
|
||||
@ -224,6 +189,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
int base_rate = 48000;
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
@ -231,32 +197,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
|
||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
||||
WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
|
||||
break;
|
||||
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
|
||||
break;
|
||||
|
||||
default:
|
||||
DRM_ERROR("Unsupported encoder type 0x%02X\n",
|
||||
radeon_encoder->encoder_id);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (r600_audio_tmds_index(encoder)) {
switch (dig->dig_encoder) {
case 0:
WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
WREG32(R600_AUDIO_PLL1_DIV, clock*100);
WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
WREG32(R600_AUDIO_CLK_SRCSEL, 0);
break;

case 1:
WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
WREG32(R600_AUDIO_PLL2_DIV, clock*100);
WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
break;
default:
dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
}
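For reference, the DIG-indexed branch above programs the audio PLL with a fixed scaling of its two inputs: the multiplier is base_rate * 50 and the divider is clock * 100, with SRCSEL picking PLL1 or PLL2 by DIG index. A standalone C sketch of just that arithmetic; the 48000 base rate is taken from the hunk, while treating clock as the pixel clock in kHz is an assumption:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int base_rate = 48000;	/* as in the function above */
	int clock = 74250;	/* assumed: pixel clock in kHz */
	int dig = 0;		/* 0 -> PLL1/SRCSEL 0, 1 -> PLL2/SRCSEL 1 */

	/* same scaling as the WREG32(R600_AUDIO_PLLx_MUL/DIV, ...) writes */
	uint32_t mul = (uint32_t)(base_rate * 50);	/* 2400000 */
	uint32_t div = (uint32_t)(clock * 100);		/* 7425000 */

	printf("DIG%d: PLL%d MUL=%u DIV=%u SRCSEL=%d\n",
	       dig, dig ? 2 : 1, mul, div, dig);
	return 0;
}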
|
||||
|
@ -1,7 +1,42 @@
|
||||
/*
|
||||
* Copyright 2009 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Alex Deucher <alexander.deucher@amd.com>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
/*
|
||||
* R6xx+ cards need to use the 3D engine to blit data which requires
|
||||
* quite a bit of hw state setup. Rather than pull the whole 3D driver
|
||||
* (which normally generates the 3D state) into the DRM, we opt to use
|
||||
* statically generated state tables. The regsiter state and shaders
|
||||
* were hand generated to support blitting functionality. See the 3D
|
||||
* driver or documentation for descriptions of the registers and
|
||||
* shader instructions.
|
||||
*/
|
||||
|
||||
const u32 r6xx_default_state[] =
|
||||
{
|
||||
0xc0002400,
|
||||
|
@ -1548,10 +1548,13 @@ static void r700_gfx_init(struct drm_device *dev,
|
||||
|
||||
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
|
||||
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
|
||||
RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
|
||||
|
||||
RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
|
||||
RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
|
||||
RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
|
||||
RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
|
||||
RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
|
||||
|
||||
num_qd_pipes =
|
||||
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
|
||||
|
@ -45,6 +45,7 @@ struct r600_cs_track {
|
||||
u32 nbanks;
|
||||
u32 npipes;
|
||||
/* value we track */
|
||||
u32 sq_config;
|
||||
u32 nsamples;
|
||||
u32 cb_color_base_last[8];
|
||||
struct radeon_bo *cb_color_bo[8];
|
||||
@ -141,6 +142,8 @@ static void r600_cs_track_init(struct r600_cs_track *track)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* assume DX9 mode */
|
||||
track->sq_config = DX9_CONSTS;
|
||||
for (i = 0; i < 8; i++) {
|
||||
track->cb_color_base_last[i] = 0;
|
||||
track->cb_color_size[i] = 0;
|
||||
@ -715,6 +718,9 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
|
||||
tmp =radeon_get_ib_value(p, idx);
|
||||
ib[idx] = 0;
|
||||
break;
|
||||
case SQ_CONFIG:
|
||||
track->sq_config = radeon_get_ib_value(p, idx);
|
||||
break;
|
||||
case R_028800_DB_DEPTH_CONTROL:
|
||||
track->db_depth_control = radeon_get_ib_value(p, idx);
|
||||
break;
|
||||
@ -869,6 +875,54 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
|
||||
case SQ_PGM_START_VS:
|
||||
case SQ_PGM_START_GS:
|
||||
case SQ_PGM_START_PS:
|
||||
case SQ_ALU_CONST_CACHE_GS_0:
|
||||
case SQ_ALU_CONST_CACHE_GS_1:
|
||||
case SQ_ALU_CONST_CACHE_GS_2:
|
||||
case SQ_ALU_CONST_CACHE_GS_3:
|
||||
case SQ_ALU_CONST_CACHE_GS_4:
|
||||
case SQ_ALU_CONST_CACHE_GS_5:
|
||||
case SQ_ALU_CONST_CACHE_GS_6:
|
||||
case SQ_ALU_CONST_CACHE_GS_7:
|
||||
case SQ_ALU_CONST_CACHE_GS_8:
|
||||
case SQ_ALU_CONST_CACHE_GS_9:
|
||||
case SQ_ALU_CONST_CACHE_GS_10:
|
||||
case SQ_ALU_CONST_CACHE_GS_11:
|
||||
case SQ_ALU_CONST_CACHE_GS_12:
|
||||
case SQ_ALU_CONST_CACHE_GS_13:
|
||||
case SQ_ALU_CONST_CACHE_GS_14:
|
||||
case SQ_ALU_CONST_CACHE_GS_15:
|
||||
case SQ_ALU_CONST_CACHE_PS_0:
|
||||
case SQ_ALU_CONST_CACHE_PS_1:
|
||||
case SQ_ALU_CONST_CACHE_PS_2:
|
||||
case SQ_ALU_CONST_CACHE_PS_3:
|
||||
case SQ_ALU_CONST_CACHE_PS_4:
|
||||
case SQ_ALU_CONST_CACHE_PS_5:
|
||||
case SQ_ALU_CONST_CACHE_PS_6:
|
||||
case SQ_ALU_CONST_CACHE_PS_7:
|
||||
case SQ_ALU_CONST_CACHE_PS_8:
|
||||
case SQ_ALU_CONST_CACHE_PS_9:
|
||||
case SQ_ALU_CONST_CACHE_PS_10:
|
||||
case SQ_ALU_CONST_CACHE_PS_11:
|
||||
case SQ_ALU_CONST_CACHE_PS_12:
|
||||
case SQ_ALU_CONST_CACHE_PS_13:
|
||||
case SQ_ALU_CONST_CACHE_PS_14:
|
||||
case SQ_ALU_CONST_CACHE_PS_15:
|
||||
case SQ_ALU_CONST_CACHE_VS_0:
|
||||
case SQ_ALU_CONST_CACHE_VS_1:
|
||||
case SQ_ALU_CONST_CACHE_VS_2:
|
||||
case SQ_ALU_CONST_CACHE_VS_3:
|
||||
case SQ_ALU_CONST_CACHE_VS_4:
|
||||
case SQ_ALU_CONST_CACHE_VS_5:
|
||||
case SQ_ALU_CONST_CACHE_VS_6:
|
||||
case SQ_ALU_CONST_CACHE_VS_7:
|
||||
case SQ_ALU_CONST_CACHE_VS_8:
|
||||
case SQ_ALU_CONST_CACHE_VS_9:
|
||||
case SQ_ALU_CONST_CACHE_VS_10:
|
||||
case SQ_ALU_CONST_CACHE_VS_11:
|
||||
case SQ_ALU_CONST_CACHE_VS_12:
|
||||
case SQ_ALU_CONST_CACHE_VS_13:
|
||||
case SQ_ALU_CONST_CACHE_VS_14:
|
||||
case SQ_ALU_CONST_CACHE_VS_15:
|
||||
r = r600_cs_packet_next_reloc(p, &reloc);
|
||||
if (r) {
|
||||
dev_warn(p->dev, "bad SET_CONTEXT_REG "
|
||||
@ -1226,13 +1280,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
|
||||
}
|
||||
break;
|
||||
case PACKET3_SET_ALU_CONST:
|
||||
start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
|
||||
end_reg = 4 * pkt->count + start_reg - 4;
|
||||
if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
|
||||
(start_reg >= PACKET3_SET_ALU_CONST_END) ||
|
||||
(end_reg >= PACKET3_SET_ALU_CONST_END)) {
|
||||
DRM_ERROR("bad SET_ALU_CONST\n");
|
||||
return -EINVAL;
|
||||
if (track->sq_config & DX9_CONSTS) {
|
||||
start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
|
||||
end_reg = 4 * pkt->count + start_reg - 4;
|
||||
if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
|
||||
(start_reg >= PACKET3_SET_ALU_CONST_END) ||
|
||||
(end_reg >= PACKET3_SET_ALU_CONST_END)) {
|
||||
DRM_ERROR("bad SET_ALU_CONST\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case PACKET3_SET_BOOL_CONST:
|
||||
|
@ -42,13 +42,13 @@ enum r600_hdmi_color_format {
|
||||
*/
|
||||
enum r600_hdmi_iec_status_bits {
|
||||
AUDIO_STATUS_DIG_ENABLE = 0x01,
|
||||
AUDIO_STATUS_V = 0x02,
|
||||
AUDIO_STATUS_VCFG = 0x04,
|
||||
AUDIO_STATUS_V = 0x02,
|
||||
AUDIO_STATUS_VCFG = 0x04,
|
||||
AUDIO_STATUS_EMPHASIS = 0x08,
|
||||
AUDIO_STATUS_COPYRIGHT = 0x10,
|
||||
AUDIO_STATUS_NONAUDIO = 0x20,
|
||||
AUDIO_STATUS_PROFESSIONAL = 0x40,
|
||||
AUDIO_STATUS_LEVEL = 0x80
|
||||
AUDIO_STATUS_LEVEL = 0x80
|
||||
};
|
||||
|
||||
struct {
@ -85,7 +85,7 @@ struct {
static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
*CTS = clock*N/(128*freq)*1000;
*CTS = clock * N / (128 * freq) * 1000;
DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
N, *CTS, freq);
}
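For reference, the reindented line above keeps the same integer arithmetic for the ACR CTS value. A standalone worked example of that computation; the 74250 kHz clock and N = 6144 for 48 kHz audio are assumed sample inputs, not values from this diff:

#include <stdint.h>
#include <stdio.h>

/* same integer math as r600_hdmi_calc_CTS() above */
static void calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
	if (*CTS == 0)
		*CTS = clock * N / (128 * freq) * 1000;
	printf("Using ACR timing N=%d CTS=%d for frequency %d\n",
	       N, *CTS, freq);
}

int main(void)
{
	int cts = 0;

	calc_cts(74250, &cts, 6144, 48000);	/* prints CTS=74000 */
	return 0;
}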
@ -131,11 +131,11 @@ static void r600_hdmi_infoframe_checksum(uint8_t packetType,
uint8_t length,
uint8_t *frame)
{
int i;
frame[0] = packetType + versionNumber + length;
for (i = 1; i <= length; i++)
frame[0] += frame[i];
frame[0] = 0x100 - frame[0];
int i;
frame[0] = packetType + versionNumber + length;
for (i = 1; i <= length; i++)
frame[0] += frame[i];
frame[0] = 0x100 - frame[0];
}
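For reference, the reindented helper above implements the usual infoframe checksum rule: header bytes plus payload plus the checksum byte sum to zero modulo 256. A self-contained C sketch with made-up header and payload bytes:

#include <stdint.h>
#include <stdio.h>

/* same logic as r600_hdmi_infoframe_checksum() above */
static void infoframe_checksum(uint8_t type, uint8_t version,
			       uint8_t length, uint8_t *frame)
{
	int i;

	frame[0] = type + version + length;
	for (i = 1; i <= length; i++)
		frame[0] += frame[i];
	frame[0] = 0x100 - frame[0];
}

int main(void)
{
	/* frame[0] is the checksum slot, frame[1..3] is a made-up payload */
	uint8_t frame[4] = { 0, 0x10, 0x20, 0x30 };
	uint8_t sum;
	int i;

	infoframe_checksum(0x82, 0x02, 3, frame);	/* example header bytes */

	sum = 0x82 + 0x02 + 3;			/* header bytes */
	for (i = 0; i <= 3; i++)
		sum += frame[i];		/* payload + checksum */
	printf("checksum=0x%02x, total mod 256=%u\n", frame[0], sum);	/* 0x19, 0 */
	return 0;
}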
|
||||
/*
|
||||
@ -417,90 +417,141 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
|
||||
WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
|
||||
}
|
||||
|
||||
/*
|
||||
* enable/disable the HDMI engine
|
||||
*/
|
||||
void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
|
||||
static int r600_hdmi_find_free_block(struct drm_device *dev)
|
||||
{
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct drm_encoder *encoder;
|
||||
struct radeon_encoder *radeon_encoder;
|
||||
bool free_blocks[3] = { true, true, true };
|
||||
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
radeon_encoder = to_radeon_encoder(encoder);
|
||||
switch (radeon_encoder->hdmi_offset) {
|
||||
case R600_HDMI_BLOCK1:
|
||||
free_blocks[0] = false;
|
||||
break;
|
||||
case R600_HDMI_BLOCK2:
|
||||
free_blocks[1] = false;
|
||||
break;
|
||||
case R600_HDMI_BLOCK3:
|
||||
free_blocks[2] = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) {
|
||||
return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
|
||||
} else if (rdev->family >= CHIP_R600) {
|
||||
if (free_blocks[0])
|
||||
return R600_HDMI_BLOCK1;
|
||||
else if (free_blocks[1])
|
||||
return R600_HDMI_BLOCK2;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void r600_hdmi_assign_block(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
|
||||
if (!offset)
|
||||
if (!dig) {
|
||||
dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
|
||||
return;
|
||||
}
|
||||
|
||||
DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
|
||||
|
||||
/* some version of atombios ignore the enable HDMI flag
|
||||
* so enabling/disabling HDMI was moved here for TMDS1+2 */
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
||||
WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
|
||||
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
|
||||
break;
|
||||
|
||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
||||
WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
|
||||
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
|
||||
break;
|
||||
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
/* This part is doubtfull in my opinion */
|
||||
WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
|
||||
break;
|
||||
|
||||
default:
|
||||
DRM_ERROR("unknown HDMI output type\n");
|
||||
break;
|
||||
if (ASIC_IS_DCE4(rdev)) {
|
||||
/* TODO */
|
||||
} else if (ASIC_IS_DCE3(rdev)) {
|
||||
radeon_encoder->hdmi_offset = dig->dig_encoder ?
|
||||
R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
|
||||
if (ASIC_IS_DCE32(rdev))
|
||||
radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
|
||||
R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
|
||||
} else if (rdev->family >= CHIP_R600) {
|
||||
radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* determin at which register offset the HDMI encoder is
|
||||
* enable the HDMI engine
|
||||
*/
|
||||
void r600_hdmi_init(struct drm_encoder *encoder)
|
||||
void r600_hdmi_enable(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
|
||||
break;
|
||||
|
||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
||||
switch (r600_audio_tmds_index(encoder)) {
|
||||
case 0:
|
||||
radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
|
||||
break;
|
||||
case 1:
|
||||
radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
|
||||
break;
|
||||
default:
|
||||
radeon_encoder->hdmi_offset = 0;
|
||||
break;
|
||||
if (!radeon_encoder->hdmi_offset) {
|
||||
r600_hdmi_assign_block(encoder);
|
||||
if (!radeon_encoder->hdmi_offset) {
|
||||
dev_warn(rdev->dev, "Could not find HDMI block for "
|
||||
"0x%x encoder\n", radeon_encoder->encoder_id);
|
||||
return;
|
||||
}
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
|
||||
break;
|
||||
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
radeon_encoder->hdmi_offset = R600_HDMI_DIG;
|
||||
break;
|
||||
|
||||
default:
|
||||
radeon_encoder->hdmi_offset = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
|
||||
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
|
||||
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
|
||||
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
|
||||
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
|
||||
int offset = radeon_encoder->hdmi_offset;
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
||||
WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
|
||||
WREG32(offset + R600_HDMI_ENABLE, 0x101);
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
||||
WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
|
||||
WREG32(offset + R600_HDMI_ENABLE, 0x105);
|
||||
break;
|
||||
default:
|
||||
dev_err(rdev->dev, "Unknown HDMI output type\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO: make this configureable */
|
||||
radeon_encoder->hdmi_audio_workaround = 0;
|
||||
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
|
||||
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
|
||||
}
|
||||
|
||||
/*
|
||||
* disable the HDMI engine
|
||||
*/
|
||||
void r600_hdmi_disable(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
|
||||
if (!radeon_encoder->hdmi_offset) {
|
||||
dev_err(rdev->dev, "Disabling not enabled HDMI\n");
|
||||
return;
|
||||
}
|
||||
|
||||
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
|
||||
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
|
||||
|
||||
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
|
||||
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
|
||||
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
|
||||
int offset = radeon_encoder->hdmi_offset;
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
||||
WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
|
||||
WREG32(offset + R600_HDMI_ENABLE, 0);
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
|
||||
WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
|
||||
WREG32(offset + R600_HDMI_ENABLE, 0);
|
||||
break;
|
||||
default:
|
||||
dev_err(rdev->dev, "Unknown HDMI output type\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
radeon_encoder->hdmi_offset = 0;
|
||||
radeon_encoder->hdmi_config_offset = 0;
|
||||
}
|
||||
|
@ -152,9 +152,9 @@
|
||||
#define R600_AUDIO_STATUS_BITS 0x73d8
|
||||
|
||||
/* HDMI base register addresses */
|
||||
#define R600_HDMI_TMDS1 0x7400
|
||||
#define R600_HDMI_TMDS2 0x7700
|
||||
#define R600_HDMI_DIG 0x7800
|
||||
#define R600_HDMI_BLOCK1 0x7400
|
||||
#define R600_HDMI_BLOCK2 0x7700
|
||||
#define R600_HDMI_BLOCK3 0x7800
|
||||
|
||||
/* HDMI registers */
|
||||
#define R600_HDMI_ENABLE 0x00
|
||||
@ -185,4 +185,8 @@
|
||||
#define R600_HDMI_AUDIO_DEBUG_2 0xe8
|
||||
#define R600_HDMI_AUDIO_DEBUG_3 0xec
|
||||
|
||||
/* HDMI additional config base register addresses */
|
||||
#define R600_HDMI_CONFIG1 0x7600
|
||||
#define R600_HDMI_CONFIG2 0x7a00
|
||||
|
||||
#endif
|
||||
|
@ -77,6 +77,55 @@
|
||||
#define CB_COLOR0_FRAG 0x280e0
|
||||
#define CB_COLOR0_MASK 0x28100
|
||||
|
||||
#define SQ_ALU_CONST_CACHE_PS_0 0x28940
|
||||
#define SQ_ALU_CONST_CACHE_PS_1 0x28944
|
||||
#define SQ_ALU_CONST_CACHE_PS_2 0x28948
|
||||
#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
|
||||
#define SQ_ALU_CONST_CACHE_PS_4 0x28950
|
||||
#define SQ_ALU_CONST_CACHE_PS_5 0x28954
|
||||
#define SQ_ALU_CONST_CACHE_PS_6 0x28958
|
||||
#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
|
||||
#define SQ_ALU_CONST_CACHE_PS_8 0x28960
|
||||
#define SQ_ALU_CONST_CACHE_PS_9 0x28964
|
||||
#define SQ_ALU_CONST_CACHE_PS_10 0x28968
|
||||
#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
|
||||
#define SQ_ALU_CONST_CACHE_PS_12 0x28970
|
||||
#define SQ_ALU_CONST_CACHE_PS_13 0x28974
|
||||
#define SQ_ALU_CONST_CACHE_PS_14 0x28978
|
||||
#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
|
||||
#define SQ_ALU_CONST_CACHE_VS_0 0x28980
|
||||
#define SQ_ALU_CONST_CACHE_VS_1 0x28984
|
||||
#define SQ_ALU_CONST_CACHE_VS_2 0x28988
|
||||
#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
|
||||
#define SQ_ALU_CONST_CACHE_VS_4 0x28990
|
||||
#define SQ_ALU_CONST_CACHE_VS_5 0x28994
|
||||
#define SQ_ALU_CONST_CACHE_VS_6 0x28998
|
||||
#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
|
||||
#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
|
||||
#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
|
||||
#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
|
||||
#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
|
||||
#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
|
||||
#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
|
||||
#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
|
||||
#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
|
||||
#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
|
||||
#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
|
||||
#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
|
||||
#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
|
||||
#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
|
||||
#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
|
||||
#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
|
||||
#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
|
||||
#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
|
||||
#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
|
||||
#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
|
||||
#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
|
||||
#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
|
||||
#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
|
||||
#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
|
||||
#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
|
||||
|
||||
#define CONFIG_MEMSIZE 0x5428
|
||||
#define CONFIG_CNTL 0x5424
|
||||
#define CP_STAT 0x8680
|
||||
|
@ -91,6 +91,8 @@ extern int radeon_tv;
|
||||
extern int radeon_new_pll;
|
||||
extern int radeon_dynpm;
|
||||
extern int radeon_audio;
|
||||
extern int radeon_disp_priority;
|
||||
extern int radeon_hw_i2c;
|
||||
|
||||
/*
|
||||
* Copy from radeon_drv.h so we don't have to include both and have conflicting
|
||||
@ -168,6 +170,7 @@ struct radeon_clock {
|
||||
* Power management
|
||||
*/
|
||||
int radeon_pm_init(struct radeon_device *rdev);
|
||||
void radeon_pm_fini(struct radeon_device *rdev);
|
||||
void radeon_pm_compute_clocks(struct radeon_device *rdev);
|
||||
void radeon_combios_get_power_modes(struct radeon_device *rdev);
|
||||
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
|
||||
@ -687,6 +690,7 @@ struct radeon_pm {
|
||||
bool downclocked;
|
||||
int active_crtcs;
|
||||
int req_vblank;
|
||||
bool vblank_sync;
|
||||
fixed20_12 max_bandwidth;
|
||||
fixed20_12 igp_sideport_mclk;
|
||||
fixed20_12 igp_system_mclk;
|
||||
@ -697,6 +701,7 @@ struct radeon_pm {
|
||||
fixed20_12 ht_bandwidth;
|
||||
fixed20_12 core_bandwidth;
|
||||
fixed20_12 sclk;
|
||||
fixed20_12 mclk;
|
||||
fixed20_12 needed_bandwidth;
|
||||
/* XXX: use a define for num power modes */
|
||||
struct radeon_power_state power_state[8];
|
||||
@ -707,6 +712,7 @@ struct radeon_pm {
|
||||
struct radeon_power_state *requested_power_state;
|
||||
struct radeon_pm_clock_info *requested_clock_mode;
|
||||
struct radeon_power_state *default_power_state;
|
||||
struct radeon_i2c_chan *i2c_bus;
|
||||
};
|
||||
|
||||
|
||||
@ -729,8 +735,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
|
||||
struct drm_info_list *files,
|
||||
unsigned nfiles);
|
||||
int radeon_debugfs_fence_init(struct radeon_device *rdev);
|
||||
int r100_debugfs_rbbm_init(struct radeon_device *rdev);
|
||||
int r100_debugfs_cp_init(struct radeon_device *rdev);
|
||||
|
||||
|
||||
/*
|
||||
@ -782,7 +786,7 @@ struct radeon_asic {
|
||||
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
|
||||
uint32_t tiling_flags, uint32_t pitch,
|
||||
uint32_t offset, uint32_t obj_size);
|
||||
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
|
||||
void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
|
||||
void (*bandwidth_update)(struct radeon_device *rdev);
|
||||
void (*hpd_init)(struct radeon_device *rdev);
|
||||
void (*hpd_fini)(struct radeon_device *rdev);
|
||||
@ -862,6 +866,12 @@ union radeon_asic_config {
|
||||
struct rv770_asic rv770;
|
||||
};
|
||||
|
||||
/*
|
||||
* asic initizalization from radeon_asic.c
|
||||
*/
|
||||
void radeon_agp_disable(struct radeon_device *rdev);
|
||||
int radeon_asic_init(struct radeon_device *rdev);
|
||||
|
||||
|
||||
/*
|
||||
* IOCTL.
|
||||
@ -1172,6 +1182,8 @@ extern void radeon_gart_restore(struct radeon_device *rdev);
|
||||
extern int radeon_modeset_init(struct radeon_device *rdev);
|
||||
extern void radeon_modeset_fini(struct radeon_device *rdev);
|
||||
extern bool radeon_card_posted(struct radeon_device *rdev);
|
||||
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
|
||||
extern void radeon_update_display_priority(struct radeon_device *rdev);
|
||||
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
|
||||
extern int radeon_clocks_init(struct radeon_device *rdev);
|
||||
extern void radeon_clocks_fini(struct radeon_device *rdev);
|
||||
@ -1188,51 +1200,6 @@ extern int radeon_resume_kms(struct drm_device *dev);
|
||||
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
|
||||
|
||||
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
|
||||
struct r100_mc_save {
|
||||
u32 GENMO_WT;
|
||||
u32 CRTC_EXT_CNTL;
|
||||
u32 CRTC_GEN_CNTL;
|
||||
u32 CRTC2_GEN_CNTL;
|
||||
u32 CUR_OFFSET;
|
||||
u32 CUR2_OFFSET;
|
||||
};
|
||||
extern void r100_cp_disable(struct radeon_device *rdev);
|
||||
extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
|
||||
extern void r100_cp_fini(struct radeon_device *rdev);
|
||||
extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
|
||||
extern int r100_pci_gart_init(struct radeon_device *rdev);
|
||||
extern void r100_pci_gart_fini(struct radeon_device *rdev);
|
||||
extern int r100_pci_gart_enable(struct radeon_device *rdev);
|
||||
extern void r100_pci_gart_disable(struct radeon_device *rdev);
|
||||
extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
|
||||
extern int r100_debugfs_mc_info_init(struct radeon_device *rdev);
|
||||
extern int r100_gui_wait_for_idle(struct radeon_device *rdev);
|
||||
extern void r100_ib_fini(struct radeon_device *rdev);
|
||||
extern int r100_ib_init(struct radeon_device *rdev);
|
||||
extern void r100_irq_disable(struct radeon_device *rdev);
|
||||
extern int r100_irq_set(struct radeon_device *rdev);
|
||||
extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
|
||||
extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
|
||||
extern void r100_vram_init_sizes(struct radeon_device *rdev);
|
||||
extern void r100_wb_disable(struct radeon_device *rdev);
|
||||
extern void r100_wb_fini(struct radeon_device *rdev);
|
||||
extern int r100_wb_init(struct radeon_device *rdev);
|
||||
extern void r100_hdp_reset(struct radeon_device *rdev);
|
||||
extern int r100_rb2d_reset(struct radeon_device *rdev);
|
||||
extern int r100_cp_reset(struct radeon_device *rdev);
|
||||
extern void r100_vga_render_disable(struct radeon_device *rdev);
|
||||
extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
|
||||
struct radeon_cs_packet *pkt,
|
||||
struct radeon_bo *robj);
|
||||
extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
|
||||
struct radeon_cs_packet *pkt,
|
||||
const unsigned *auth, unsigned n,
|
||||
radeon_packet0_check_t check);
|
||||
extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
|
||||
struct radeon_cs_packet *pkt,
|
||||
unsigned idx);
|
||||
extern void r100_enable_bm(struct radeon_device *rdev);
|
||||
extern void r100_set_common_regs(struct radeon_device *rdev);
|
||||
|
||||
/* rv200,rv250,rv280 */
|
||||
extern void r200_set_safe_registers(struct radeon_device *rdev);
|
||||
@ -1322,7 +1289,8 @@ extern int r600_audio_tmds_index(struct drm_encoder *encoder);
|
||||
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
|
||||
extern void r600_audio_fini(struct radeon_device *rdev);
|
||||
extern void r600_hdmi_init(struct drm_encoder *encoder);
|
||||
extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
|
||||
extern void r600_hdmi_enable(struct drm_encoder *encoder);
|
||||
extern void r600_hdmi_disable(struct drm_encoder *encoder);
|
||||
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
|
||||
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
|
||||
extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
|
||||
|
772
drivers/gpu/drm/radeon/radeon_asic.c
Normal file
@ -0,0 +1,772 @@
|
||||
/*
|
||||
* Copyright 2008 Advanced Micro Devices, Inc.
|
||||
* Copyright 2008 Red Hat Inc.
|
||||
* Copyright 2009 Jerome Glisse.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alex Deucher
|
||||
* Jerome Glisse
|
||||
*/
|
||||
|
||||
#include <linux/console.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/radeon_drm.h>
|
||||
#include <linux/vgaarb.h>
|
||||
#include <linux/vga_switcheroo.h>
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "atom.h"
|
||||
|
||||
/*
* Registers accessors functions.
*/
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
BUG_ON(1);
return 0;
}

static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
reg, v);
BUG_ON(1);
}

static void radeon_register_accessor_init(struct radeon_device *rdev)
{
rdev->mc_rreg = &radeon_invalid_rreg;
rdev->mc_wreg = &radeon_invalid_wreg;
rdev->pll_rreg = &radeon_invalid_rreg;
rdev->pll_wreg = &radeon_invalid_wreg;
rdev->pciep_rreg = &radeon_invalid_rreg;
rdev->pciep_wreg = &radeon_invalid_wreg;

/* Don't change order as we are overridding accessor. */
if (rdev->family < CHIP_RV515) {
rdev->pcie_reg_mask = 0xff;
} else {
rdev->pcie_reg_mask = 0x7ff;
}
/* FIXME: not sure here */
if (rdev->family <= CHIP_R580) {
rdev->pll_rreg = &r100_pll_rreg;
rdev->pll_wreg = &r100_pll_wreg;
}
if (rdev->family >= CHIP_R420) {
rdev->mc_rreg = &r420_mc_rreg;
rdev->mc_wreg = &r420_mc_wreg;
}
if (rdev->family >= CHIP_RV515) {
rdev->mc_rreg = &rv515_mc_rreg;
rdev->mc_wreg = &rv515_mc_wreg;
}
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
rdev->mc_rreg = &rs400_mc_rreg;
rdev->mc_wreg = &rs400_mc_wreg;
}
if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
rdev->mc_rreg = &rs690_mc_rreg;
rdev->mc_wreg = &rs690_mc_wreg;
}
if (rdev->family == CHIP_RS600) {
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
}
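For reference, radeon_register_accessor_init() above first installs "invalid" stubs for every accessor and only then overrides them per family, so a path that was never wired up fails loudly instead of quietly poking hardware. A generic, driver-independent C sketch of that defensive-default pattern; all names and values here are illustrative and not from the kernel:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct device_ops {
	uint32_t (*mmio_read)(uint32_t reg);
	void (*mmio_write)(uint32_t reg, uint32_t val);
};

/* loud defaults: any accessor that was never overridden aborts */
static uint32_t invalid_read(uint32_t reg)
{
	fprintf(stderr, "invalid read of reg 0x%04x\n", reg);
	abort();
}

static void invalid_write(uint32_t reg, uint32_t val)
{
	fprintf(stderr, "invalid write of 0x%08x to reg 0x%04x\n", val, reg);
	abort();
}

/* stand-ins for the per-generation accessors */
static uint32_t gen2_read(uint32_t reg) { return reg ^ 0xdeadbeef; }
static void gen2_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static void ops_init(struct device_ops *ops, int generation)
{
	/* install the loud defaults first, then override per generation */
	ops->mmio_read = invalid_read;
	ops->mmio_write = invalid_write;
	if (generation >= 2) {
		ops->mmio_read = gen2_read;
		ops->mmio_write = gen2_write;
	}
}

int main(void)
{
	struct device_ops ops;

	ops_init(&ops, 2);
	printf("read: 0x%08x\n", ops.mmio_read(0x10));
	return 0;
}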
|
||||
|
||||
|
||||
/* helper to disable agp */
|
||||
void radeon_agp_disable(struct radeon_device *rdev)
|
||||
{
|
||||
rdev->flags &= ~RADEON_IS_AGP;
|
||||
if (rdev->family >= CHIP_R600) {
|
||||
DRM_INFO("Forcing AGP to PCIE mode\n");
|
||||
rdev->flags |= RADEON_IS_PCIE;
|
||||
} else if (rdev->family >= CHIP_RV515 ||
|
||||
rdev->family == CHIP_RV380 ||
|
||||
rdev->family == CHIP_RV410 ||
|
||||
rdev->family == CHIP_R423) {
|
||||
DRM_INFO("Forcing AGP to PCIE mode\n");
|
||||
rdev->flags |= RADEON_IS_PCIE;
|
||||
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
|
||||
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
|
||||
} else {
|
||||
DRM_INFO("Forcing AGP to PCI mode\n");
|
||||
rdev->flags |= RADEON_IS_PCI;
|
||||
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
|
||||
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
|
||||
}
|
||||
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
|
||||
}
|
||||
|
||||
/*
|
||||
* ASIC
|
||||
*/
|
||||
static struct radeon_asic r100_asic = {
|
||||
.init = &r100_init,
|
||||
.fini = &r100_fini,
|
||||
.suspend = &r100_suspend,
|
||||
.resume = &r100_resume,
|
||||
.vga_set_state = &r100_vga_set_state,
|
||||
.gpu_reset = &r100_gpu_reset,
|
||||
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
|
||||
.gart_set_page = &r100_pci_gart_set_page,
|
||||
.cp_commit = &r100_cp_commit,
|
||||
.ring_start = &r100_ring_start,
|
||||
.ring_test = &r100_ring_test,
|
||||
.ring_ib_execute = &r100_ring_ib_execute,
|
||||
.irq_set = &r100_irq_set,
|
||||
.irq_process = &r100_irq_process,
|
||||
.get_vblank_counter = &r100_get_vblank_counter,
|
||||
.fence_ring_emit = &r100_fence_ring_emit,
|
||||
.cs_parse = &r100_cs_parse,
|
||||
.copy_blit = &r100_copy_blit,
|
||||
.copy_dma = NULL,
|
||||
.copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
.get_memory_clock = &radeon_legacy_get_memory_clock,
|
||||
.set_memory_clock = NULL,
|
||||
.get_pcie_lanes = NULL,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_legacy_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
.clear_surface_reg = r100_clear_surface_reg,
|
||||
.bandwidth_update = &r100_bandwidth_update,
|
||||
.hpd_init = &r100_hpd_init,
|
||||
.hpd_fini = &r100_hpd_fini,
|
||||
.hpd_sense = &r100_hpd_sense,
|
||||
.hpd_set_polarity = &r100_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
static struct radeon_asic r200_asic = {
|
||||
.init = &r100_init,
|
||||
.fini = &r100_fini,
|
||||
.suspend = &r100_suspend,
|
||||
.resume = &r100_resume,
|
||||
.vga_set_state = &r100_vga_set_state,
|
||||
.gpu_reset = &r100_gpu_reset,
|
||||
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
|
||||
.gart_set_page = &r100_pci_gart_set_page,
|
||||
.cp_commit = &r100_cp_commit,
|
||||
.ring_start = &r100_ring_start,
|
||||
.ring_test = &r100_ring_test,
|
||||
.ring_ib_execute = &r100_ring_ib_execute,
|
||||
.irq_set = &r100_irq_set,
|
||||
.irq_process = &r100_irq_process,
|
||||
.get_vblank_counter = &r100_get_vblank_counter,
|
||||
.fence_ring_emit = &r100_fence_ring_emit,
|
||||
.cs_parse = &r100_cs_parse,
|
||||
.copy_blit = &r100_copy_blit,
|
||||
.copy_dma = &r200_copy_dma,
|
||||
.copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
.get_memory_clock = &radeon_legacy_get_memory_clock,
|
||||
.set_memory_clock = NULL,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_legacy_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
.clear_surface_reg = r100_clear_surface_reg,
|
||||
.bandwidth_update = &r100_bandwidth_update,
|
||||
.hpd_init = &r100_hpd_init,
|
||||
.hpd_fini = &r100_hpd_fini,
|
||||
.hpd_sense = &r100_hpd_sense,
|
||||
.hpd_set_polarity = &r100_hpd_set_polarity,
|
||||
.ioctl_wait_idle = NULL,
|
||||
};
|
||||
|
||||
static struct radeon_asic r300_asic = {
|
||||
.init = &r300_init,
|
||||
.fini = &r300_fini,
|
||||
.suspend = &r300_suspend,
|
||||
.resume = &r300_resume,
|
||||
.vga_set_state = &r100_vga_set_state,
|
||||
.gpu_reset = &r300_gpu_reset,
|
||||
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
|
||||
.gart_set_page = &r100_pci_gart_set_page,
|
||||
.cp_commit = &r100_cp_commit,
|
||||
.ring_start = &r300_ring_start,
|
||||
.ring_test = &r100_ring_test,
|
||||
.ring_ib_execute = &r100_ring_ib_execute,
|
||||
.irq_set = &r100_irq_set,
|
||||
.irq_process = &r100_irq_process,
|
||||
.get_vblank_counter = &r100_get_vblank_counter,
|
||||
.fence_ring_emit = &r300_fence_ring_emit,
|
||||
.cs_parse = &r300_cs_parse,
|
||||
.copy_blit = &r100_copy_blit,
|
||||
.copy_dma = &r200_copy_dma,
|
||||
.copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
.get_memory_clock = &radeon_legacy_get_memory_clock,
|
||||
.set_memory_clock = NULL,
|
||||
.get_pcie_lanes = &rv370_get_pcie_lanes,
|
||||
.set_pcie_lanes = &rv370_set_pcie_lanes,
|
||||
.set_clock_gating = &radeon_legacy_set_clock_gating,
|
||||
.set_surface_reg = r100_set_surface_reg,
|
||||
.clear_surface_reg = r100_clear_surface_reg,
|
||||
.bandwidth_update = &r100_bandwidth_update,
|
||||
.hpd_init = &r100_hpd_init,
|
||||
.hpd_fini = &r100_hpd_fini,
|
||||
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

static struct radeon_asic r300_asic_pcie = {
.init = &r300_init,
.fini = &r300_fini,
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

static struct radeon_asic r420_asic = {
.init = &r420_init,
.fini = &r420_fini,
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

static struct radeon_asic rs400_asic = {
.init = &rs400_init,
.fini = &rs400_fini,
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

static struct radeon_asic rs600_asic = {
.init = &rs600_init,
.fini = &rs600_fini,
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs600_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

static struct radeon_asic rs690_asic = {
.init = &rs690_init,
.fini = &rs690_fini,
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r200_copy_dma,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

static struct radeon_asic rv515_asic = {
.init = &rv515_init,
.fini = &rv515_fini,
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

static struct radeon_asic r520_asic = {
.init = &r520_init,
.fini = &rv515_fini,
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

static struct radeon_asic r600_asic = {
.init = &r600_init,
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_reset = &r600_gpu_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
};

static struct radeon_asic rs780_asic = {
.init = &r600_init,
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_reset = &r600_gpu_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
};

static struct radeon_asic rv770_asic = {
.init = &rv770_init,
.fini = &rv770_fini,
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
.gpu_reset = &rv770_gpu_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
};

static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
.cp_commit = NULL,
.gpu_reset = &evergreen_gpu_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = NULL,
.ring_ib_execute = NULL,
.irq_set = NULL,
.irq_process = NULL,
.get_vblank_counter = NULL,
.fence_ring_emit = NULL,
.cs_parse = NULL,
.copy_blit = NULL,
.copy_dma = NULL,
.copy = NULL,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};

int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
case CHIP_RS100:
case CHIP_RV200:
case CHIP_RS200:
rdev->asic = &r100_asic;
break;
case CHIP_R200:
case CHIP_RV250:
case CHIP_RS300:
case CHIP_RV280:
rdev->asic = &r200_asic;
break;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV380:
if (rdev->flags & RADEON_IS_PCIE)
rdev->asic = &r300_asic_pcie;
else
rdev->asic = &r300_asic;
break;
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
rdev->asic = &r420_asic;
break;
case CHIP_RS400:
case CHIP_RS480:
rdev->asic = &rs400_asic;
break;
case CHIP_RS600:
rdev->asic = &rs600_asic;
break;
case CHIP_RS690:
case CHIP_RS740:
rdev->asic = &rs690_asic;
break;
case CHIP_RV515:
rdev->asic = &rv515_asic;
break;
case CHIP_R520:
case CHIP_RV530:
case CHIP_RV560:
case CHIP_RV570:
case CHIP_R580:
rdev->asic = &r520_asic;
break;
case CHIP_R600:
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RV670:
rdev->asic = &r600_asic;
break;
case CHIP_RS780:
case CHIP_RS880:
rdev->asic = &rs780_asic;
break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
rdev->asic = &rv770_asic;
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
rdev->asic = &evergreen_asic;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}

if (rdev->flags & RADEON_IS_IGP) {
rdev->asic->get_memory_clock = NULL;
rdev->asic->set_memory_clock = NULL;
}

/* set the number of crtcs */
if (rdev->flags & RADEON_SINGLE_CRTC)
rdev->num_crtc = 1;
else {
if (ASIC_IS_DCE4(rdev))
rdev->num_crtc = 6;
else
rdev->num_crtc = 2;
}

return 0;
}

/*
* Wrapper around modesetting bits. Move to radeon_clocks.c?
*/
int radeon_clocks_init(struct radeon_device *rdev)
{
int r;

r = radeon_static_clocks_init(rdev->ddev);
if (r) {
return r;
}
DRM_INFO("Clocks initialized !\n");
return 0;
}

void radeon_clocks_fini(struct radeon_device *rdev)
{
}
@ -45,10 +45,18 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
/*
* r100,rv100,rs100,rv200,rs200
*/
extern int r100_init(struct radeon_device *rdev);
extern void r100_fini(struct radeon_device *rdev);
extern int r100_suspend(struct radeon_device *rdev);
extern int r100_resume(struct radeon_device *rdev);
struct r100_mc_save {
u32 GENMO_WT;
u32 CRTC_EXT_CNTL;
u32 CRTC_GEN_CNTL;
u32 CRTC2_GEN_CNTL;
u32 CUR_OFFSET;
u32 CUR2_OFFSET;
};
int r100_init(struct radeon_device *rdev);
void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
@ -73,7 +81,7 @@ int r100_copy_blit(struct radeon_device *rdev,
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
@ -82,44 +90,42 @@ void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);

static struct radeon_asic r100_asic = {
.init = &r100_init,
.fini = &r100_fini,
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r100_gpu_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = NULL,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};
int r100_debugfs_rbbm_init(struct radeon_device *rdev);
int r100_debugfs_cp_init(struct radeon_device *rdev);
void r100_cp_disable(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
void r100_cp_fini(struct radeon_device *rdev);
int r100_pci_gart_init(struct radeon_device *rdev);
void r100_pci_gart_fini(struct radeon_device *rdev);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_init(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_vram_init_sizes(struct radeon_device *rdev);
void r100_wb_disable(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
void r100_hdp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
struct radeon_bo *robj);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
radeon_packet0_check_t check);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
void r100_enable_bm(struct radeon_device *rdev);
void r100_set_common_regs(struct radeon_device *rdev);

/*
* r200,rv250,rs300,rv280
@ -129,43 +135,6 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
static struct radeon_asic r200_asic = {
.init = &r100_init,
.fini = &r100_fini,
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r100_gpu_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};


/*
* r300,r350,rv350,rv380
@ -186,82 +155,6 @@ extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);

static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};


static struct radeon_asic r300_asic_pcie = {
.init = &r300_init,
.fini = &r300_fini,
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

/*
* r420,r423,rv410
*/
@ -269,44 +162,6 @@ extern int r420_init(struct radeon_device *rdev);
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
static struct radeon_asic r420_asic = {
.init = &r420_init,
.fini = &r420_fini,
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};


/*
* rs400,rs480
@ -319,44 +174,6 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
static struct radeon_asic rs400_asic = {
.init = &rs400_init,
.fini = &rs400_fini,
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};


/*
* rs600.
@ -379,45 +196,6 @@ bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);

static struct radeon_asic rs600_asic = {
.init = &rs600_init,
.fini = &rs600_fini,
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs600_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};


/*
* rs690,rs740
*/
@ -428,44 +206,6 @@ int rs690_suspend(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs690_asic = {
.init = &rs690_init,
.fini = &rs690_fini,
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r200_copy_dma,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};


/*
* rv515
@ -481,87 +221,12 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
static struct radeon_asic rv515_asic = {
.init = &rv515_init,
.fini = &rv515_fini,
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};


/*
* r520,rv530,rv560,rv570,r580
*/
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
static struct radeon_asic r520_asic = {
.init = &r520_init,
.fini = &rv515_fini,
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
};

/*
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
@ -591,7 +256,7 @@ int r600_gpu_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
@ -604,43 +269,6 @@ void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);

static struct radeon_asic r600_asic = {
.init = &r600_init,
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_reset = &r600_gpu_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
};

/*
* rv770,rv730,rv710,rv740
*/
@ -650,43 +278,6 @@ int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
int rv770_gpu_reset(struct radeon_device *rdev);

static struct radeon_asic rv770_asic = {
.init = &rv770_init,
.fini = &rv770_fini,
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
.gpu_reset = &rv770_gpu_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
};

/*
* evergreen
*/
@ -701,40 +292,4 @@ void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);

static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
.cp_commit = NULL,
.gpu_reset = &evergreen_gpu_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = NULL,
.ring_ib_execute = NULL,
.irq_set = NULL,
.irq_process = NULL,
.get_vblank_counter = NULL,
.fence_ring_emit = NULL,
.cs_parse = NULL,
.copy_blit = NULL,
.copy_dma = NULL,
.copy = NULL,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};

#endif
@ -75,46 +75,45 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;

atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);

i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
gpio = &i2c_info->asGPIO_Info[i];

if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);

for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
gpio = &i2c_info->asGPIO_Info[i];
if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;

if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;

if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;
i2c.i2c_id = gpio->sucI2cId.ucAccess;

if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;

i2c.i2c_id = gpio->sucI2cId.ucAccess;

i2c.valid = true;
break;
i2c.valid = true;
break;
}
}
}

@ -135,20 +134,21 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
gpio.valid = false;

atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);

gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_PIN_ASSIGNMENT);

num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);

for (i = 0; i < num_indices; i++) {
pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = pin->usGpioPin_AIndex * 4;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
for (i = 0; i < num_indices; i++) {
pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = pin->usGpioPin_AIndex * 4;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
}
}
}

@ -264,6 +264,8 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return false;
if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
*line_mux = 0x90;
}

/* ASUS HD 3600 XT board lists the DVI port as HDMI */
@ -395,9 +397,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
struct radeon_gpio_rec gpio;
struct radeon_hpd hpd;

atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);

if (data_offset == 0)
if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
return false;

if (crev < 2)
@ -449,37 +449,43 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
GetIndexIntoMasterTable(DATA,
IntegratedSystemInfo);

atom_parse_data_header(ctx, index, &size, &frev,
&crev, &igp_offset);
if (atom_parse_data_header(ctx, index, &size, &frev,
&crev, &igp_offset)) {

if (crev >= 2) {
igp_obj =
(ATOM_INTEGRATED_SYSTEM_INFO_V2
*) (ctx->bios + igp_offset);
if (crev >= 2) {
igp_obj =
(ATOM_INTEGRATED_SYSTEM_INFO_V2
*) (ctx->bios + igp_offset);

if (igp_obj) {
uint32_t slot_config, ct;
if (igp_obj) {
uint32_t slot_config, ct;

if (con_obj_num == 1)
slot_config =
igp_obj->
ulDDISlot1Config;
else
slot_config =
igp_obj->
ulDDISlot2Config;
if (con_obj_num == 1)
slot_config =
igp_obj->
ulDDISlot1Config;
else
slot_config =
igp_obj->
ulDDISlot2Config;

ct = (slot_config >> 16) & 0xff;
connector_type =
object_connector_convert
[ct];
connector_object_id = ct;
igp_lane_info =
slot_config & 0xffff;
ct = (slot_config >> 16) & 0xff;
connector_type =
object_connector_convert
[ct];
connector_object_id = ct;
igp_lane_info =
slot_config & 0xffff;
} else
continue;
} else
continue;
} else
continue;
} else {
igp_lane_info = 0;
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
}
} else {
igp_lane_info = 0;
connector_type =
@ -627,20 +633,23 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
uint8_t frev, crev;
ATOM_XTMDS_INFO *xtmds;

atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);

if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
if (connector_type == DRM_MODE_CONNECTOR_DVII)
return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
else
return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
} else {
if (connector_type == DRM_MODE_CONNECTOR_DVII)
return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
else
return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
}
if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
if (connector_type == DRM_MODE_CONNECTOR_DVII)
return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
else
return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
} else {
if (connector_type == DRM_MODE_CONNECTOR_DVII)
return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
else
return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
}
} else
return supported_devices_connector_object_id_convert
[connector_type];
} else {
return supported_devices_connector_object_id_convert
[connector_type];
@ -672,7 +681,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
int i, j, max_device;
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];

atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
return false;

supported_devices =
(union atom_supported_devices *)(ctx->bios + data_offset);
@ -865,14 +875,11 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
struct radeon_pll *mpll = &rdev->clock.mpll;
uint16_t data_offset;

atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);

firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);

if (firmware_info) {
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
/* pixel clocks */
p1pll->reference_freq =
le16_to_cpu(firmware_info->info.usReferenceClock);
@ -887,6 +894,20 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

if (crev >= 4) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)
p1pll->lcd_pll_out_min = p1pll->pll_out_min;
p1pll->lcd_pll_out_max =
le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_max == 0)
p1pll->lcd_pll_out_max = p1pll->pll_out_max;
} else {
p1pll->lcd_pll_out_min = p1pll->pll_out_min;
p1pll->lcd_pll_out_max = p1pll->pll_out_max;
}

if (p1pll->pll_out_min == 0) {
if (ASIC_IS_AVIVO(rdev))
p1pll->pll_out_min = 64800;
@ -992,13 +1013,10 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
u8 frev, crev;
u16 data_offset;

atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);

igp_info = (union igp_info *)(mode_info->atom_context->bios +
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
igp_info = (union igp_info *)(mode_info->atom_context->bios +
data_offset);

if (igp_info) {
switch (crev) {
case 1:
if (igp_info->info.ucMemoryType & 0xf0)
@ -1029,14 +1047,12 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
uint16_t maxfreq;
int i;

atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
tmds_info =
(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
data_offset);

tmds_info =
(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
data_offset);

if (tmds_info) {
maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
for (i = 0; i < 4; i++) {
tmds->tmds_pll[i].freq =
@ -1085,13 +1101,11 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
if (id > ATOM_MAX_SS_ENTRY)
return NULL;

atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
ss_info =
(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);

ss_info =
(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);

if (ss_info) {
ss =
kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);

@ -1114,30 +1128,6 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
return ss;
}

static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
struct radeon_encoder_atom_dig *lvds)
{

/* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1179) &&
(dev->pdev->subsystem_device == 0xff50)) {
if ((lvds->native_mode.hdisplay == 1280) &&
(lvds->native_mode.vdisplay == 800))
lvds->pll_algo = PLL_ALGO_LEGACY;
}

/* Dell Studio 15 laptop panel doesn't like new pll divider algo */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1028) &&
(dev->pdev->subsystem_device == 0x029f)) {
if ((lvds->native_mode.hdisplay == 1280) &&
(lvds->native_mode.vdisplay == 800))
lvds->pll_algo = PLL_ALGO_LEGACY;
}

}

union lvds_info {
struct _ATOM_LVDS_INFO info;
struct _ATOM_LVDS_INFO_V12 info_12;
@ -1156,13 +1146,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;

atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
&crev, &data_offset);

lvds_info =
(union lvds_info *)(mode_info->atom_context->bios + data_offset);

if (lvds_info) {
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
lvds_info =
(union lvds_info *)(mode_info->atom_context->bios + data_offset);
lvds =
kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);

@ -1220,9 +1207,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->pll_algo = PLL_ALGO_LEGACY;
}

/* LVDS quirks */
radeon_atom_apply_lvds_quirks(dev, lvds);

encoder->native_mode = lvds->native_mode;
}
return lvds;
@ -1241,11 +1225,11 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
uint8_t bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;

atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
dac_info = (struct _COMPASSIONATE_DATA *)
(mode_info->atom_context->bios + data_offset);

dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);

if (dac_info) {
p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);

if (!p_dac)
@ -1270,7 +1254,9 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
u8 frev, crev;
u16 data_offset, misc;

atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
&frev, &crev, &data_offset))
return false;

switch (crev) {
case 1:
@ -1362,47 +1348,50 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
struct _ATOM_ANALOG_TV_INFO *tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;

atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {

tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
tv_info = (struct _ATOM_ANALOG_TV_INFO *)
(mode_info->atom_context->bios + data_offset);

switch (tv_info->ucTV_BootUpDefaultStandard) {
case ATOM_TV_NTSC:
|
||||
tv_std = TV_STD_NTSC;
|
||||
DRM_INFO("Default TV standard: NTSC\n");
|
||||
break;
|
||||
case ATOM_TV_NTSCJ:
|
||||
tv_std = TV_STD_NTSC_J;
|
||||
DRM_INFO("Default TV standard: NTSC-J\n");
|
||||
break;
|
||||
case ATOM_TV_PAL:
|
||||
tv_std = TV_STD_PAL;
|
||||
DRM_INFO("Default TV standard: PAL\n");
|
||||
break;
|
||||
case ATOM_TV_PALM:
|
||||
tv_std = TV_STD_PAL_M;
|
||||
DRM_INFO("Default TV standard: PAL-M\n");
|
||||
break;
|
||||
case ATOM_TV_PALN:
|
||||
tv_std = TV_STD_PAL_N;
|
||||
DRM_INFO("Default TV standard: PAL-N\n");
|
||||
break;
|
||||
case ATOM_TV_PALCN:
|
||||
tv_std = TV_STD_PAL_CN;
|
||||
DRM_INFO("Default TV standard: PAL-CN\n");
|
||||
break;
|
||||
case ATOM_TV_PAL60:
|
||||
tv_std = TV_STD_PAL_60;
|
||||
DRM_INFO("Default TV standard: PAL-60\n");
|
||||
break;
|
||||
case ATOM_TV_SECAM:
|
||||
tv_std = TV_STD_SECAM;
|
||||
DRM_INFO("Default TV standard: SECAM\n");
|
||||
break;
|
||||
default:
|
||||
tv_std = TV_STD_NTSC;
|
||||
DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
|
||||
break;
|
||||
switch (tv_info->ucTV_BootUpDefaultStandard) {
|
||||
case ATOM_TV_NTSC:
|
||||
tv_std = TV_STD_NTSC;
|
||||
DRM_INFO("Default TV standard: NTSC\n");
|
||||
break;
|
||||
case ATOM_TV_NTSCJ:
|
||||
tv_std = TV_STD_NTSC_J;
|
||||
DRM_INFO("Default TV standard: NTSC-J\n");
|
||||
break;
|
||||
case ATOM_TV_PAL:
|
||||
tv_std = TV_STD_PAL;
|
||||
DRM_INFO("Default TV standard: PAL\n");
|
||||
break;
|
||||
case ATOM_TV_PALM:
|
||||
tv_std = TV_STD_PAL_M;
|
||||
DRM_INFO("Default TV standard: PAL-M\n");
|
||||
break;
|
||||
case ATOM_TV_PALN:
|
||||
tv_std = TV_STD_PAL_N;
|
||||
DRM_INFO("Default TV standard: PAL-N\n");
|
||||
break;
|
||||
case ATOM_TV_PALCN:
|
||||
tv_std = TV_STD_PAL_CN;
|
||||
DRM_INFO("Default TV standard: PAL-CN\n");
|
||||
break;
|
||||
case ATOM_TV_PAL60:
|
||||
tv_std = TV_STD_PAL_60;
|
||||
DRM_INFO("Default TV standard: PAL-60\n");
|
||||
break;
|
||||
case ATOM_TV_SECAM:
|
||||
tv_std = TV_STD_SECAM;
|
||||
DRM_INFO("Default TV standard: SECAM\n");
|
||||
break;
|
||||
default:
|
||||
tv_std = TV_STD_NTSC;
|
||||
DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
return tv_std;
|
||||
}
|
||||
@ -1420,11 +1409,12 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
|
||||
uint8_t bg, dac;
|
||||
struct radeon_encoder_tv_dac *tv_dac = NULL;
|
||||
|
||||
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
|
||||
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
|
||||
&frev, &crev, &data_offset)) {
|
||||
|
||||
dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
|
||||
dac_info = (struct _COMPASSIONATE_DATA *)
|
||||
(mode_info->atom_context->bios + data_offset);
|
||||
|
||||
if (dac_info) {
|
||||
tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
|
||||
|
||||
if (!tv_dac)
|
||||
@ -1447,6 +1437,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
|
||||
return tv_dac;
|
||||
}
|
||||
|
||||
static const char *thermal_controller_names[] = {
|
||||
"NONE",
|
||||
"LM63",
|
||||
"ADM1032",
|
||||
"ADM1030",
|
||||
"MUA6649",
|
||||
"LM64",
|
||||
"F75375",
|
||||
"ASC7512",
|
||||
};
|
||||
|
||||
static const char *pp_lib_thermal_controller_names[] = {
|
||||
"NONE",
|
||||
"LM63",
|
||||
"ADM1032",
|
||||
"ADM1030",
|
||||
"MUA6649",
|
||||
"LM64",
|
||||
"F75375",
|
||||
"RV6xx",
|
||||
"RV770",
|
||||
"ADT7473",
|
||||
};
|
||||
|
||||
union power_info {
|
||||
struct _ATOM_POWERPLAY_INFO info;
|
||||
struct _ATOM_POWERPLAY_INFO_V2 info_2;
|
||||
@ -1466,15 +1480,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
|
||||
struct _ATOM_PPLIB_STATE *power_state;
|
||||
int num_modes = 0, i, j;
|
||||
int state_index = 0, mode_index = 0;
|
||||
|
||||
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
|
||||
|
||||
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
|
||||
struct radeon_i2c_bus_rec i2c_bus;
|
||||
|
||||
rdev->pm.default_power_state = NULL;
|
||||
|
||||
if (power_info) {
|
||||
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
|
||||
&frev, &crev, &data_offset)) {
|
||||
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
|
||||
if (frev < 4) {
|
||||
/* add the i2c bus for thermal/fan chip */
|
||||
if (power_info->info.ucOverdriveThermalController > 0) {
|
||||
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
|
||||
thermal_controller_names[power_info->info.ucOverdriveThermalController],
|
||||
power_info->info.ucOverdriveControllerAddress >> 1);
|
||||
i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
|
||||
rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
|
||||
}
|
||||
num_modes = power_info->info.ucNumOfPowerModeEntries;
|
||||
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
|
||||
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
|
||||
@ -1684,6 +1705,24 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
|
||||
}
|
||||
}
|
||||
} else if (frev == 4) {
|
||||
/* add the i2c bus for thermal/fan chip */
|
||||
/* no support for internal controller yet */
|
||||
if (power_info->info_4.sThermalController.ucType > 0) {
|
||||
if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
|
||||
(power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
|
||||
DRM_INFO("Internal thermal controller %s fan control\n",
|
||||
(power_info->info_4.sThermalController.ucFanParameters &
|
||||
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
|
||||
} else {
|
||||
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
|
||||
pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
|
||||
power_info->info_4.sThermalController.ucI2cAddress >> 1,
|
||||
(power_info->info_4.sThermalController.ucFanParameters &
|
||||
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
|
||||
i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
|
||||
rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
|
||||
}
|
||||
}
|
||||
for (i = 0; i < power_info->info_4.ucNumStates; i++) {
|
||||
mode_index = 0;
|
||||
power_state = (struct _ATOM_PPLIB_STATE *)
|
||||
|
@ -531,10 +531,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
case CHIP_RS300:
switch (ddc_line) {
case RADEON_GPIO_DVI_DDC:
/* in theory this should be hw capable,
* but it doesn't seem to work
*/
i2c.hw_capable = false;
i2c.hw_capable = true;
break;
default:
i2c.hw_capable = false;
@ -633,6 +630,8 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
p1pll->reference_div = RBIOS16(pll_info + 0x10);
p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
p1pll->lcd_pll_out_min = p1pll->pll_out_min;
p1pll->lcd_pll_out_max = p1pll->pll_out_max;

if (rev > 9) {
p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
||||
|
@ -940,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
||||
|
@ -193,9 +193,11 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
radeon_bo_list_fence(&parser->validated, parser->ib->fence);
}
radeon_bo_list_unreserve(&parser->validated);
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj)
drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj)
drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
}
}
kfree(parser->track);
kfree(parser->relocs);
@ -243,7 +245,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
r = radeon_cs_parser_relocs(&parser);
if (r) {
DRM_ERROR("Failed to parse relocation !\n");
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
mutex_unlock(&rdev->cs_mutex);
return r;
||||
|
@ -33,7 +33,6 @@
|
||||
#include <linux/vga_switcheroo.h>
|
||||
#include "radeon_reg.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "atom.h"
|
||||
|
||||
/*
|
||||
@ -242,6 +241,36 @@ bool radeon_card_posted(struct radeon_device *rdev)
|
||||
|
||||
}
|
||||
|
||||
void radeon_update_bandwidth_info(struct radeon_device *rdev)
|
||||
{
|
||||
fixed20_12 a;
|
||||
u32 sclk, mclk;
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
sclk = radeon_get_engine_clock(rdev);
|
||||
mclk = rdev->clock.default_mclk;
|
||||
|
||||
a.full = rfixed_const(100);
|
||||
rdev->pm.sclk.full = rfixed_const(sclk);
|
||||
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
|
||||
rdev->pm.mclk.full = rfixed_const(mclk);
|
||||
rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
|
||||
|
||||
a.full = rfixed_const(16);
|
||||
/* core_bandwidth = sclk(Mhz) * 16 */
|
||||
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
|
||||
} else {
|
||||
sclk = radeon_get_engine_clock(rdev);
|
||||
mclk = radeon_get_memory_clock(rdev);
|
||||
|
||||
a.full = rfixed_const(100);
|
||||
rdev->pm.sclk.full = rfixed_const(sclk);
|
||||
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
|
||||
rdev->pm.mclk.full = rfixed_const(mclk);
|
||||
rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
|
||||
}
|
||||
}
|
||||
|
||||
bool radeon_boot_test_post_card(struct radeon_device *rdev)
|
||||
{
|
||||
if (radeon_card_posted(rdev))
|
||||
@ -288,181 +317,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Registers accessors functions.
|
||||
*/
|
||||
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
|
||||
{
|
||||
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
|
||||
BUG_ON(1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
|
||||
{
|
||||
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
|
||||
reg, v);
|
||||
BUG_ON(1);
|
||||
}
|
||||
|
||||
void radeon_register_accessor_init(struct radeon_device *rdev)
|
||||
{
|
||||
rdev->mc_rreg = &radeon_invalid_rreg;
|
||||
rdev->mc_wreg = &radeon_invalid_wreg;
|
||||
rdev->pll_rreg = &radeon_invalid_rreg;
|
||||
rdev->pll_wreg = &radeon_invalid_wreg;
|
||||
rdev->pciep_rreg = &radeon_invalid_rreg;
|
||||
rdev->pciep_wreg = &radeon_invalid_wreg;
|
||||
|
||||
/* Don't change order as we are overridding accessor. */
|
||||
if (rdev->family < CHIP_RV515) {
|
||||
rdev->pcie_reg_mask = 0xff;
|
||||
} else {
|
||||
rdev->pcie_reg_mask = 0x7ff;
|
||||
}
|
||||
/* FIXME: not sure here */
|
||||
if (rdev->family <= CHIP_R580) {
|
||||
rdev->pll_rreg = &r100_pll_rreg;
|
||||
rdev->pll_wreg = &r100_pll_wreg;
|
||||
}
|
||||
if (rdev->family >= CHIP_R420) {
|
||||
rdev->mc_rreg = &r420_mc_rreg;
|
||||
rdev->mc_wreg = &r420_mc_wreg;
|
||||
}
|
||||
if (rdev->family >= CHIP_RV515) {
|
||||
rdev->mc_rreg = &rv515_mc_rreg;
|
||||
rdev->mc_wreg = &rv515_mc_wreg;
|
||||
}
|
||||
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
|
||||
rdev->mc_rreg = &rs400_mc_rreg;
|
||||
rdev->mc_wreg = &rs400_mc_wreg;
|
||||
}
|
||||
if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
|
||||
rdev->mc_rreg = &rs690_mc_rreg;
|
||||
rdev->mc_wreg = &rs690_mc_wreg;
|
||||
}
|
||||
if (rdev->family == CHIP_RS600) {
|
||||
rdev->mc_rreg = &rs600_mc_rreg;
|
||||
rdev->mc_wreg = &rs600_mc_wreg;
|
||||
}
|
||||
if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
|
||||
rdev->pciep_rreg = &r600_pciep_rreg;
|
||||
rdev->pciep_wreg = &r600_pciep_wreg;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* ASIC
|
||||
*/
|
||||
int radeon_asic_init(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_register_accessor_init(rdev);
|
||||
switch (rdev->family) {
|
||||
case CHIP_R100:
|
||||
case CHIP_RV100:
|
||||
case CHIP_RS100:
|
||||
case CHIP_RV200:
|
||||
case CHIP_RS200:
|
||||
rdev->asic = &r100_asic;
|
||||
break;
|
||||
case CHIP_R200:
|
||||
case CHIP_RV250:
|
||||
case CHIP_RS300:
|
||||
case CHIP_RV280:
|
||||
rdev->asic = &r200_asic;
|
||||
break;
|
||||
case CHIP_R300:
|
||||
case CHIP_R350:
|
||||
case CHIP_RV350:
|
||||
case CHIP_RV380:
|
||||
if (rdev->flags & RADEON_IS_PCIE)
|
||||
rdev->asic = &r300_asic_pcie;
|
||||
else
|
||||
rdev->asic = &r300_asic;
|
||||
break;
|
||||
case CHIP_R420:
|
||||
case CHIP_R423:
|
||||
case CHIP_RV410:
|
||||
rdev->asic = &r420_asic;
|
||||
break;
|
||||
case CHIP_RS400:
|
||||
case CHIP_RS480:
|
||||
rdev->asic = &rs400_asic;
|
||||
break;
|
||||
case CHIP_RS600:
|
||||
rdev->asic = &rs600_asic;
|
||||
break;
|
||||
case CHIP_RS690:
|
||||
case CHIP_RS740:
|
||||
rdev->asic = &rs690_asic;
|
||||
break;
|
||||
case CHIP_RV515:
|
||||
rdev->asic = &rv515_asic;
|
||||
break;
|
||||
case CHIP_R520:
|
||||
case CHIP_RV530:
|
||||
case CHIP_RV560:
|
||||
case CHIP_RV570:
|
||||
case CHIP_R580:
|
||||
rdev->asic = &r520_asic;
|
||||
break;
|
||||
case CHIP_R600:
|
||||
case CHIP_RV610:
|
||||
case CHIP_RV630:
|
||||
case CHIP_RV620:
|
||||
case CHIP_RV635:
|
||||
case CHIP_RV670:
|
||||
case CHIP_RS780:
|
||||
case CHIP_RS880:
|
||||
rdev->asic = &r600_asic;
|
||||
break;
|
||||
case CHIP_RV770:
|
||||
case CHIP_RV730:
|
||||
case CHIP_RV710:
|
||||
case CHIP_RV740:
|
||||
rdev->asic = &rv770_asic;
|
||||
break;
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_REDWOOD:
|
||||
case CHIP_JUNIPER:
|
||||
case CHIP_CYPRESS:
|
||||
case CHIP_HEMLOCK:
|
||||
rdev->asic = &evergreen_asic;
|
||||
break;
|
||||
default:
|
||||
/* FIXME: not supported yet */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
rdev->asic->get_memory_clock = NULL;
|
||||
rdev->asic->set_memory_clock = NULL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Wrapper around modesetting bits.
|
||||
*/
|
||||
int radeon_clocks_init(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = radeon_static_clocks_init(rdev->ddev);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
DRM_INFO("Clocks initialized !\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_clocks_fini(struct radeon_device *rdev)
|
||||
{
|
||||
}
|
||||
|
||||
/* ATOM accessor methods */
|
||||
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
|
||||
{
|
||||
@ -567,29 +421,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
|
||||
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
|
||||
}
|
||||
|
||||
void radeon_agp_disable(struct radeon_device *rdev)
|
||||
{
|
||||
rdev->flags &= ~RADEON_IS_AGP;
|
||||
if (rdev->family >= CHIP_R600) {
|
||||
DRM_INFO("Forcing AGP to PCIE mode\n");
|
||||
rdev->flags |= RADEON_IS_PCIE;
|
||||
} else if (rdev->family >= CHIP_RV515 ||
|
||||
rdev->family == CHIP_RV380 ||
|
||||
rdev->family == CHIP_RV410 ||
|
||||
rdev->family == CHIP_R423) {
|
||||
DRM_INFO("Forcing AGP to PCIE mode\n");
|
||||
rdev->flags |= RADEON_IS_PCIE;
|
||||
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
|
||||
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
|
||||
} else {
|
||||
DRM_INFO("Forcing AGP to PCI mode\n");
|
||||
rdev->flags |= RADEON_IS_PCI;
|
||||
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
|
||||
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
|
||||
}
|
||||
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
|
||||
}
|
||||
|
||||
void radeon_check_arguments(struct radeon_device *rdev)
|
||||
{
|
||||
/* vramlimit must be a power of two */
|
||||
@ -731,6 +562,14 @@ int radeon_device_init(struct radeon_device *rdev,
|
||||
return r;
|
||||
radeon_check_arguments(rdev);
|
||||
|
||||
/* all of the newer IGP chips have an internal gart
|
||||
* However some rs4xx report as AGP, so remove that here.
|
||||
*/
|
||||
if ((rdev->family >= CHIP_RS400) &&
|
||||
(rdev->flags & RADEON_IS_IGP)) {
|
||||
rdev->flags &= ~RADEON_IS_AGP;
|
||||
}
|
||||
|
||||
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
|
||||
radeon_agp_disable(rdev);
|
||||
}
|
||||
|
@ -368,10 +368,9 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
|
||||
|
||||
if (rdev->bios) {
|
||||
if (rdev->is_atom_bios) {
|
||||
if (rdev->family >= CHIP_R600)
|
||||
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
|
||||
if (ret == false)
|
||||
ret = radeon_get_atom_connector_info_from_object_table(dev);
|
||||
else
|
||||
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
|
||||
} else {
|
||||
ret = radeon_get_legacy_connector_info_from_bios(dev);
|
||||
if (ret == false)
|
||||
@ -469,10 +468,19 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
|
||||
uint32_t best_error = 0xffffffff;
|
||||
uint32_t best_vco_diff = 1;
|
||||
uint32_t post_div;
|
||||
u32 pll_out_min, pll_out_max;
|
||||
|
||||
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
|
||||
freq = freq * 1000;
|
||||
|
||||
if (pll->flags & RADEON_PLL_IS_LCD) {
|
||||
pll_out_min = pll->lcd_pll_out_min;
|
||||
pll_out_max = pll->lcd_pll_out_max;
|
||||
} else {
|
||||
pll_out_min = pll->pll_out_min;
|
||||
pll_out_max = pll->pll_out_max;
|
||||
}
|
||||
|
||||
if (pll->flags & RADEON_PLL_USE_REF_DIV)
|
||||
min_ref_div = max_ref_div = pll->reference_div;
|
||||
else {
|
||||
@ -536,10 +544,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
|
||||
tmp = (uint64_t)pll->reference_freq * feedback_div;
|
||||
vco = radeon_div(tmp, ref_div);
|
||||
|
||||
if (vco < pll->pll_out_min) {
|
||||
if (vco < pll_out_min) {
|
||||
min_feed_div = feedback_div + 1;
|
||||
continue;
|
||||
} else if (vco > pll->pll_out_max) {
|
||||
} else if (vco > pll_out_max) {
|
||||
max_feed_div = feedback_div;
|
||||
continue;
|
||||
}
|
||||
@ -675,6 +683,15 @@ calc_fb_ref_div(struct radeon_pll *pll,
|
||||
{
|
||||
fixed20_12 ffreq, max_error, error, pll_out, a;
|
||||
u32 vco;
|
||||
u32 pll_out_min, pll_out_max;
|
||||
|
||||
if (pll->flags & RADEON_PLL_IS_LCD) {
|
||||
pll_out_min = pll->lcd_pll_out_min;
|
||||
pll_out_max = pll->lcd_pll_out_max;
|
||||
} else {
|
||||
pll_out_min = pll->pll_out_min;
|
||||
pll_out_max = pll->pll_out_max;
|
||||
}
|
||||
|
||||
ffreq.full = rfixed_const(freq);
|
||||
/* max_error = ffreq * 0.0025; */
|
||||
@ -686,7 +703,7 @@ calc_fb_ref_div(struct radeon_pll *pll,
|
||||
vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
|
||||
vco = vco / ((*ref_div) * 10);
|
||||
|
||||
if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
|
||||
if ((vco < pll_out_min) || (vco > pll_out_max))
|
||||
continue;
|
||||
|
||||
/* pll_out = vco / post_div; */
|
||||
@ -714,6 +731,15 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
|
||||
{
|
||||
u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
|
||||
u32 best_freq = 0, vco_frequency;
|
||||
u32 pll_out_min, pll_out_max;
|
||||
|
||||
if (pll->flags & RADEON_PLL_IS_LCD) {
|
||||
pll_out_min = pll->lcd_pll_out_min;
|
||||
pll_out_max = pll->lcd_pll_out_max;
|
||||
} else {
|
||||
pll_out_min = pll->pll_out_min;
|
||||
pll_out_max = pll->pll_out_max;
|
||||
}
|
||||
|
||||
/* freq = freq / 10; */
|
||||
do_div(freq, 10);
|
||||
@ -724,7 +750,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
|
||||
goto done;
|
||||
|
||||
vco_frequency = freq * post_div;
|
||||
if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
|
||||
if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
|
||||
goto done;
|
||||
|
||||
if (pll->flags & RADEON_PLL_USE_REF_DIV) {
|
||||
@ -749,7 +775,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
|
||||
continue;
|
||||
|
||||
vco_frequency = freq * post_div;
|
||||
if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
|
||||
if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
|
||||
continue;
|
||||
if (pll->flags & RADEON_PLL_USE_REF_DIV) {
|
||||
ref_div = pll->reference_div;
|
||||
@ -945,6 +971,23 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_update_display_priority(struct radeon_device *rdev)
|
||||
{
|
||||
/* adjustment options for the display watermarks */
|
||||
if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
|
||||
/* set display priority to high for r3xx, rv515 chips
|
||||
* this avoids flickering due to underflow to the
|
||||
* display controllers during heavy acceleration.
|
||||
*/
|
||||
if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
|
||||
rdev->disp_priority = 2;
|
||||
else
|
||||
rdev->disp_priority = 0;
|
||||
} else
|
||||
rdev->disp_priority = radeon_disp_priority;
|
||||
|
||||
}
|
||||
|
||||
int radeon_modeset_init(struct radeon_device *rdev)
|
||||
{
|
||||
int i;
|
||||
@ -976,15 +1019,6 @@ int radeon_modeset_init(struct radeon_device *rdev)
|
||||
radeon_combios_check_hardcoded_edid(rdev);
|
||||
}
|
||||
|
||||
if (rdev->flags & RADEON_SINGLE_CRTC)
|
||||
rdev->num_crtc = 1;
|
||||
else {
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
rdev->num_crtc = 6;
|
||||
else
|
||||
rdev->num_crtc = 2;
|
||||
}
|
||||
|
||||
/* allocate crtcs */
|
||||
for (i = 0; i < rdev->num_crtc; i++) {
|
||||
radeon_crtc_init(rdev->ddev, i);
|
||||
|
@ -42,9 +42,10 @@
* KMS wrapper.
* - 2.0.0 - initial interface
* - 2.1.0 - add square tiling interface
* - 2.2.0 - add r6xx/r7xx const buffer support
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 1
#define KMS_DRIVER_MINOR 2
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@ -91,6 +92,8 @@ int radeon_tv = 1;
int radeon_new_pll = -1;
int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;

MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@ -134,6 +137,12 @@ module_param_named(dynpm, radeon_dynpm, int, 0444);
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);

MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, radeon_disp_priority, int, 0444);

MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);

static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;

@ -107,9 +107,10 @@
* 1.30- Add support for occlusion queries
* 1.31- Add support for num Z pipes from GET_PARAM
* 1.32- fixes for rv740 setup
* 1.33- Add r6xx/r7xx const buffer support
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 32
#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0

enum radeon_cp_microcode_version {
||||
|
@ -302,7 +302,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
|
||||
}
|
||||
|
||||
if (ASIC_IS_DCE3(rdev) &&
|
||||
(radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
|
||||
(radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
radeon_dp_set_link_config(connector, mode);
|
||||
}
|
||||
@ -519,7 +519,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
|
||||
break;
|
||||
}
|
||||
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
|
||||
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
|
||||
return;
|
||||
|
||||
switch (frev) {
|
||||
case 1:
|
||||
@ -593,7 +594,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
|
||||
}
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
r600_hdmi_enable(encoder, hdmi_detected);
|
||||
}
|
||||
|
||||
int
|
||||
@ -708,7 +708,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
|
||||
struct radeon_connector_atom_dig *dig_connector =
|
||||
radeon_get_atom_connector_priv_from_encoder(encoder);
|
||||
union dig_encoder_control args;
|
||||
int index = 0, num = 0;
|
||||
int index = 0;
|
||||
uint8_t frev, crev;
|
||||
|
||||
if (!dig || !dig_connector)
|
||||
@ -724,9 +724,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
|
||||
else
|
||||
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
|
||||
}
|
||||
num = dig->dig_encoder + 1;
|
||||
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
|
||||
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
|
||||
return;
|
||||
|
||||
args.v1.ucAction = action;
|
||||
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
|
||||
@ -785,7 +785,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
struct drm_connector *connector;
|
||||
struct radeon_connector *radeon_connector;
|
||||
union dig_transmitter_control args;
|
||||
int index = 0, num = 0;
|
||||
int index = 0;
|
||||
uint8_t frev, crev;
|
||||
bool is_dp = false;
|
||||
int pll_id = 0;
|
||||
@ -814,7 +814,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
}
|
||||
}
|
||||
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
|
||||
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
|
||||
return;
|
||||
|
||||
args.v1.ucAction = action;
|
||||
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
|
||||
@ -860,15 +861,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
args.v3.acConfig.ucTransmitterSel = 0;
|
||||
num = 0;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
args.v3.acConfig.ucTransmitterSel = 1;
|
||||
num = 1;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
args.v3.acConfig.ucTransmitterSel = 2;
|
||||
num = 2;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -879,23 +877,19 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
args.v3.acConfig.fCoherentMode = 1;
|
||||
}
|
||||
} else if (ASIC_IS_DCE32(rdev)) {
|
||||
if (dig->dig_encoder == 1)
|
||||
args.v2.acConfig.ucEncoderSel = 1;
|
||||
args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
|
||||
if (dig_connector->linkb)
|
||||
args.v2.acConfig.ucLinkSel = 1;
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
args.v2.acConfig.ucTransmitterSel = 0;
|
||||
num = 0;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
args.v2.acConfig.ucTransmitterSel = 1;
|
||||
num = 1;
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
args.v2.acConfig.ucTransmitterSel = 2;
|
||||
num = 2;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -913,31 +907,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
else
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
if (radeon_encoder->pixel_clock > 165000) {
|
||||
if (dig_connector->igp_lane_info & 0x3)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
|
||||
else if (dig_connector->igp_lane_info & 0xc)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
|
||||
} else {
|
||||
if (dig_connector->igp_lane_info & 0x1)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
|
||||
else if (dig_connector->igp_lane_info & 0x2)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
|
||||
else if (dig_connector->igp_lane_info & 0x4)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
|
||||
else if (dig_connector->igp_lane_info & 0x8)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
|
||||
}
|
||||
if ((rdev->flags & RADEON_IS_IGP) &&
|
||||
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
|
||||
if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
|
||||
if (dig_connector->igp_lane_info & 0x1)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
|
||||
else if (dig_connector->igp_lane_info & 0x2)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
|
||||
else if (dig_connector->igp_lane_info & 0x4)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
|
||||
else if (dig_connector->igp_lane_info & 0x8)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
|
||||
} else {
|
||||
if (dig_connector->igp_lane_info & 0x3)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
|
||||
else if (dig_connector->igp_lane_info & 0xc)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (radeon_encoder->pixel_clock > 165000)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
|
||||
|
||||
if (dig_connector->linkb)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
|
||||
else
|
||||
@ -948,6 +936,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
|
||||
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
|
||||
if (dig->coherent_mode)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
|
||||
if (radeon_encoder->pixel_clock > 165000)
|
||||
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1054,16 +1044,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
|
||||
if (is_dig) {
|
||||
switch (mode) {
|
||||
case DRM_MODE_DPMS_ON:
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
|
||||
{
|
||||
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
|
||||
dp_link_train(encoder, connector);
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
|
||||
}
|
||||
if (!ASIC_IS_DCE4(rdev))
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
|
||||
break;
|
||||
case DRM_MODE_DPMS_STANDBY:
|
||||
case DRM_MODE_DPMS_SUSPEND:
|
||||
case DRM_MODE_DPMS_OFF:
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
|
||||
if (!ASIC_IS_DCE4(rdev))
|
||||
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
|
||||
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
|
||||
if (ASIC_IS_DCE4(rdev))
|
||||
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
|
||||
}
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
@ -1104,7 +1103,8 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
|
||||
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
|
||||
return;
|
||||
|
||||
switch (frev) {
|
||||
case 1:
|
||||
@ -1216,6 +1216,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
|
||||
}
|
||||
|
||||
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
|
||||
|
||||
/* update scratch regs with new routing */
|
||||
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1326,19 +1329,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
|
||||
|
||||
if (radeon_encoder->active_device &
|
||||
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
if (dig)
|
||||
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
|
||||
}
|
||||
radeon_encoder->pixel_clock = adjusted_mode->clock;
|
||||
|
||||
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
|
||||
atombios_set_encoder_crtc_source(encoder);
|
||||
|
||||
if (ASIC_IS_AVIVO(rdev)) {
|
||||
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
|
||||
atombios_yuv_setup(encoder, true);
|
||||
@ -1396,9 +1389,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
}
|
||||
atombios_apply_encoder_quirks(encoder, adjusted_mode);
|
||||
|
||||
/* XXX */
|
||||
if (!ASIC_IS_DCE4(rdev))
|
||||
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
|
||||
r600_hdmi_enable(encoder);
|
||||
r600_hdmi_setmode(encoder, adjusted_mode);
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
@ -1418,7 +1412,8 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
|
||||
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
|
||||
return false;
|
||||
|
||||
args.sDacload.ucMisc = 0;
|
||||
|
||||
@ -1492,8 +1487,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
|
||||
|
||||
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
|
||||
{
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
|
||||
if (radeon_encoder->active_device &
|
||||
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
if (dig)
|
||||
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
|
||||
}
|
||||
|
||||
radeon_atom_output_lock(encoder, true);
|
||||
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
|
||||
|
||||
/* this is needed for the pll/ss setup to work correctly in some cases */
|
||||
atombios_set_encoder_crtc_source(encoder);
|
||||
}
|
||||
|
||||
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
|
||||
@ -1509,6 +1516,8 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
|
||||
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
|
||||
|
||||
if (radeon_encoder_is_digital(encoder)) {
|
||||
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
|
||||
r600_hdmi_disable(encoder);
|
||||
dig = radeon_encoder->enc_priv;
|
||||
dig->dig_encoder = -1;
|
||||
}
|
||||
@ -1659,6 +1668,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
|
||||
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
|
||||
break;
|
||||
}
|
||||
|
||||
r600_hdmi_init(encoder);
|
||||
}
|
||||
|
@ -59,6 +59,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
|
||||
return false;
|
||||
}
|
||||
|
||||
/* bit banging i2c */
|
||||
|
||||
static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
|
||||
{
|
||||
@ -181,13 +182,30 @@ static void set_data(void *i2c_priv, int data)
|
||||
WREG32(rec->en_data_reg, val);
|
||||
}
|
||||
|
||||
static int pre_xfer(struct i2c_adapter *i2c_adap)
|
||||
{
|
||||
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
|
||||
|
||||
radeon_i2c_do_lock(i2c, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void post_xfer(struct i2c_adapter *i2c_adap)
|
||||
{
|
||||
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
|
||||
|
||||
radeon_i2c_do_lock(i2c, 0);
|
||||
}
|
||||
|
||||
/* hw i2c */
|
||||
|
||||
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_pll *spll = &rdev->clock.spll;
|
||||
u32 sclk = radeon_get_engine_clock(rdev);
|
||||
u32 prescale = 0;
|
||||
u32 n, m;
|
||||
u8 loop;
|
||||
u32 nm;
|
||||
u8 n, m, loop;
|
||||
int i2c_clock;
|
||||
|
||||
switch (rdev->family) {
|
||||
@ -203,13 +221,15 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
|
||||
case CHIP_R300:
|
||||
case CHIP_R350:
|
||||
case CHIP_RV350:
|
||||
n = (spll->reference_freq) / (4 * 6);
|
||||
i2c_clock = 60;
|
||||
nm = (sclk * 10) / (i2c_clock * 4);
|
||||
for (loop = 1; loop < 255; loop++) {
|
||||
if ((loop * (loop - 1)) > n)
|
||||
if ((nm / loop) < loop)
|
||||
break;
|
||||
}
|
||||
m = loop - 1;
|
||||
prescale = m | (loop << 8);
|
||||
n = loop - 1;
|
||||
m = loop - 2;
|
||||
prescale = m | (n << 8);
|
||||
break;
|
||||
case CHIP_RV380:
|
||||
case CHIP_RS400:
|
||||
@ -217,7 +237,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
|
||||
case CHIP_R420:
|
||||
case CHIP_R423:
|
||||
case CHIP_RV410:
|
||||
sclk = radeon_get_engine_clock(rdev);
|
||||
prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
|
||||
break;
|
||||
case CHIP_RS600:
|
||||
@ -232,7 +251,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
|
||||
case CHIP_RV570:
|
||||
case CHIP_R580:
|
||||
i2c_clock = 50;
|
||||
sclk = radeon_get_engine_clock(rdev);
|
||||
if (rdev->family == CHIP_R520)
|
||||
prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
|
||||
else
|
||||
@ -291,6 +309,7 @@ static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
prescale = radeon_get_i2c_prescale(rdev);
|
||||
|
||||
reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
|
||||
RADEON_I2C_DRIVE_EN |
|
||||
RADEON_I2C_START |
|
||||
RADEON_I2C_STOP |
|
||||
RADEON_I2C_GO);
|
||||
@ -757,26 +776,13 @@ done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
|
||||
int ret;
|
||||
|
||||
radeon_i2c_do_lock(i2c, 1);
|
||||
ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
|
||||
radeon_i2c_do_lock(i2c, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
|
||||
struct radeon_device *rdev = i2c->dev->dev_private;
|
||||
struct radeon_i2c_bus_rec *rec = &i2c->rec;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
switch (rdev->family) {
|
||||
case CHIP_R100:
|
||||
@ -797,16 +803,12 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
case CHIP_RV410:
|
||||
case CHIP_RS400:
|
||||
case CHIP_RS480:
|
||||
if (rec->hw_capable)
|
||||
ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
else
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_RS600:
|
||||
case CHIP_RS690:
|
||||
case CHIP_RS740:
|
||||
/* XXX fill in hw i2c implementation */
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_RV515:
|
||||
case CHIP_R520:
|
||||
@ -814,20 +816,16 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
case CHIP_RV560:
|
||||
case CHIP_RV570:
|
||||
case CHIP_R580:
|
||||
if (rec->hw_capable) {
|
||||
if (rec->mm_i2c)
|
||||
ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
else
|
||||
ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
} else
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
if (rec->mm_i2c)
|
||||
ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
else
|
||||
ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_R600:
|
||||
case CHIP_RV610:
|
||||
case CHIP_RV630:
|
||||
case CHIP_RV670:
|
||||
/* XXX fill in hw i2c implementation */
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_RV620:
|
||||
case CHIP_RV635:
|
||||
@ -838,7 +836,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
case CHIP_RV710:
|
||||
case CHIP_RV740:
|
||||
/* XXX fill in hw i2c implementation */
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_REDWOOD:
|
||||
@ -846,7 +843,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
case CHIP_CYPRESS:
|
||||
case CHIP_HEMLOCK:
|
||||
/* XXX fill in hw i2c implementation */
|
||||
ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("i2c: unhandled radeon chip\n");
|
||||
@ -857,20 +853,21 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32 radeon_i2c_func(struct i2c_adapter *adap)
|
||||
static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
|
||||
{
|
||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
|
||||
}
|
||||
|
||||
static const struct i2c_algorithm radeon_i2c_algo = {
|
||||
.master_xfer = radeon_i2c_xfer,
|
||||
.functionality = radeon_i2c_func,
|
||||
.master_xfer = radeon_hw_i2c_xfer,
|
||||
.functionality = radeon_hw_i2c_func,
|
||||
};
|
||||
|
||||
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
|
||||
struct radeon_i2c_bus_rec *rec,
|
||||
const char *name)
|
||||
{
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_i2c_chan *i2c;
|
||||
int ret;
|
||||
|
||||
@ -878,37 +875,43 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
|
||||
if (i2c == NULL)
|
||||
return NULL;
|
||||
|
||||
/* set the internal bit adapter */
|
||||
i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
|
||||
i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
|
||||
sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
|
||||
i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
|
||||
i2c->algo.radeon.bit_data.setsda = set_data;
|
||||
i2c->algo.radeon.bit_data.setscl = set_clock;
|
||||
i2c->algo.radeon.bit_data.getsda = get_data;
|
||||
i2c->algo.radeon.bit_data.getscl = get_clock;
|
||||
i2c->algo.radeon.bit_data.udelay = 20;
|
||||
/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
|
||||
* make this, 2 jiffies is a lot more reliable */
|
||||
i2c->algo.radeon.bit_data.timeout = 2;
|
||||
i2c->algo.radeon.bit_data.data = i2c;
|
||||
ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to register internal bit i2c %s\n", name);
|
||||
goto out_free;
|
||||
}
|
||||
/* set the radeon i2c adapter */
|
||||
i2c->dev = dev;
|
||||
i2c->rec = *rec;
|
||||
i2c->adapter.owner = THIS_MODULE;
|
||||
i2c->dev = dev;
|
||||
i2c_set_adapdata(&i2c->adapter, i2c);
|
||||
sprintf(i2c->adapter.name, "Radeon i2c %s", name);
|
||||
i2c->adapter.algo_data = &i2c->algo.radeon;
|
||||
i2c->adapter.algo = &radeon_i2c_algo;
|
||||
ret = i2c_add_adapter(&i2c->adapter);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to register i2c %s\n", name);
|
||||
goto out_free;
|
||||
if (rec->mm_i2c ||
|
||||
(rec->hw_capable &&
|
||||
radeon_hw_i2c &&
|
||||
((rdev->family <= CHIP_RS480) ||
|
||||
((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
|
||||
/* set the radeon hw i2c adapter */
|
||||
sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name);
|
||||
i2c->adapter.algo = &radeon_i2c_algo;
|
||||
ret = i2c_add_adapter(&i2c->adapter);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to register hw i2c %s\n", name);
|
||||
goto out_free;
|
||||
}
|
||||
} else {
|
||||
/* set the radeon bit adapter */
|
||||
sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name);
|
||||
i2c->adapter.algo_data = &i2c->algo.bit;
|
||||
i2c->algo.bit.pre_xfer = pre_xfer;
|
||||
i2c->algo.bit.post_xfer = post_xfer;
|
||||
i2c->algo.bit.setsda = set_data;
|
||||
i2c->algo.bit.setscl = set_clock;
|
||||
i2c->algo.bit.getsda = get_data;
|
||||
i2c->algo.bit.getscl = get_clock;
|
||||
i2c->algo.bit.udelay = 20;
|
||||
/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
|
||||
* make this, 2 jiffies is a lot more reliable */
|
||||
i2c->algo.bit.timeout = 2;
|
||||
i2c->algo.bit.data = i2c;
|
||||
ret = i2c_bit_add_bus(&i2c->adapter);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to register bit i2c %s\n", name);
|
||||
goto out_free;
|
||||
}
|
||||
}
|
||||
|
||||
return i2c;
|
||||
@ -953,16 +956,6 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
|
||||
{
|
||||
if (!i2c)
|
||||
return;
|
||||
i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
|
||||
i2c_del_adapter(&i2c->adapter);
|
||||
kfree(i2c);
|
||||
}
|
||||
|
||||
void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
|
||||
{
|
||||
if (!i2c)
|
||||
return;
|
||||
|
||||
i2c_del_adapter(&i2c->adapter);
|
||||
kfree(i2c);
|
||||
}
|
||||
|
@ -67,9 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)

/* Disable *all* interrupts */
rdev->irq.sw_int = false;
for (i = 0; i < 2; i++) {
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
}
for (i = 0; i < 6; i++)
rdev->irq.hpd[i] = false;
radeon_irq_set(rdev);
/* Clear bits */
radeon_irq_process(rdev);
@ -95,28 +96,29 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
}
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
for (i = 0; i < 2; i++) {
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++)
rdev->irq.hpd[i] = false;
}
radeon_irq_set(rdev);
}

int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
int num_crtc = 2;

if (rdev->flags & RADEON_SINGLE_CRTC)
num_crtc = 1;
spin_lock_init(&rdev->irq.sw_lock);
r = drm_vblank_init(rdev->ddev, num_crtc);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
}
/* enable msi */
rdev->msi_enabled = 0;
if (rdev->family >= CHIP_RV380) {
/* MSIs don't seem to work reliably on all IGP
* chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
(!(rdev->flags & RADEON_IS_IGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
||||
|
@ -603,6 +603,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC2_INTERLACE_EN
: 0));

/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
crtc2_gen_cntl |= RADEON_CRTC2_EN;

disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;

@ -630,6 +634,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC_INTERLACE_EN
: 0));

/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
crtc_gen_cntl |= RADEON_CRTC_EN;

crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
RADEON_CRTC_VSYNC_DIS |
||||
|
@ -57,6 +57,10 @@
|
||||
#define NTSC_TV_PLL_N_14 693
|
||||
#define NTSC_TV_PLL_P_14 7
|
||||
|
||||
#define PAL_TV_PLL_M_14 19
|
||||
#define PAL_TV_PLL_N_14 353
|
||||
#define PAL_TV_PLL_P_14 5
|
||||
|
||||
#define VERT_LEAD_IN_LINES 2
|
||||
#define FRAC_BITS 0xe
|
||||
#define FRAC_MASK 0x3fff
|
||||
@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
|
||||
630627, /* defRestart */
|
||||
347, /* crtcPLL_N */
|
||||
14, /* crtcPLL_M */
|
||||
8, /* crtcPLL_postDiv */
|
||||
8, /* crtcPLL_postDiv */
|
||||
1022, /* pixToTV */
|
||||
},
|
||||
{ /* PAL timing for 14 Mhz ref clk */
|
||||
800, /* horResolution */
|
||||
600, /* verResolution */
|
||||
TV_STD_PAL, /* standard */
|
||||
1131, /* horTotal */
|
||||
742, /* verTotal */
|
||||
813, /* horStart */
|
||||
840, /* horSyncStart */
|
||||
633, /* verSyncStart */
|
||||
708369, /* defRestart */
|
||||
211, /* crtcPLL_N */
|
||||
9, /* crtcPLL_M */
|
||||
8, /* crtcPLL_postDiv */
|
||||
759, /* pixToTV */
|
||||
},
|
||||
};
|
||||
|
||||
#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
|
||||
@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
|
||||
if (pll->reference_freq == 2700)
|
||||
const_ptr = &available_tv_modes[1];
|
||||
else
|
||||
const_ptr = &available_tv_modes[1]; /* FIX ME */
|
||||
const_ptr = &available_tv_modes[3];
|
||||
}
|
||||
return const_ptr;
|
||||
}
|
||||
@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
|
||||
n = PAL_TV_PLL_N_27;
|
||||
p = PAL_TV_PLL_P_27;
|
||||
} else {
|
||||
m = PAL_TV_PLL_M_27;
|
||||
n = PAL_TV_PLL_N_27;
|
||||
p = PAL_TV_PLL_P_27;
|
||||
m = PAL_TV_PLL_M_14;
|
||||
n = PAL_TV_PLL_N_14;
|
||||
p = PAL_TV_PLL_P_14;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -129,6 +129,7 @@ struct radeon_tmds_pll {
|
||||
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
|
||||
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
|
||||
#define RADEON_PLL_USE_POST_DIV (1 << 12)
|
||||
#define RADEON_PLL_IS_LCD (1 << 13)
|
||||
|
||||
/* pll algo */
|
||||
enum radeon_pll_algo {
|
||||
@ -149,6 +150,8 @@ struct radeon_pll {
|
||||
uint32_t pll_in_max;
|
||||
uint32_t pll_out_min;
|
||||
uint32_t pll_out_max;
|
||||
uint32_t lcd_pll_out_min;
|
||||
uint32_t lcd_pll_out_max;
|
||||
uint32_t best_vco;
|
||||
|
||||
/* divider limits */
|
||||
@ -170,17 +173,12 @@ struct radeon_pll {
|
||||
enum radeon_pll_algo algo;
|
||||
};
|
||||
|
||||
struct i2c_algo_radeon_data {
|
||||
struct i2c_adapter bit_adapter;
|
||||
struct i2c_algo_bit_data bit_data;
|
||||
};
|
||||
|
||||
struct radeon_i2c_chan {
|
||||
struct i2c_adapter adapter;
|
||||
struct drm_device *dev;
|
||||
union {
|
||||
struct i2c_algo_bit_data bit;
|
||||
struct i2c_algo_dp_aux_data dp;
|
||||
struct i2c_algo_radeon_data radeon;
|
||||
} algo;
|
||||
struct radeon_i2c_bus_rec rec;
|
||||
};
|
||||
@ -342,6 +340,7 @@ struct radeon_encoder {
|
||||
struct drm_display_mode native_mode;
|
||||
void *enc_priv;
|
||||
int hdmi_offset;
|
||||
int hdmi_config_offset;
|
||||
int hdmi_audio_workaround;
|
||||
int hdmi_buffer_status;
|
||||
};
|
||||
@ -431,7 +430,6 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
|
||||
struct radeon_i2c_bus_rec *rec,
|
||||
const char *name);
|
||||
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
|
||||
extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
|
||||
extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
|
||||
u8 slave_addr,
|
||||
u8 addr,
|
||||
|
@ -185,8 +185,10 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
/* force to pin into visible video ram */
bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
if (domain == RADEON_GEM_DOMAIN_VRAM) {
/* force to pin into visible video ram */
bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
||||
|
@ -28,6 +28,7 @@
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_idle_work_handler(struct work_struct *work);
@ -179,6 +180,16 @@ static void radeon_get_power_state(struct radeon_device *rdev,
		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
}

static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}

static void radeon_set_power_state(struct radeon_device *rdev)
{
	/* if *_clock_mode are the same, *_power_state are as well */
@ -189,11 +200,28 @@ static void radeon_set_power_state(struct radeon_device *rdev)
		 rdev->pm.requested_clock_mode->sclk,
		 rdev->pm.requested_clock_mode->mclk,
		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);

	/* set pcie lanes */
	/* TODO */

	/* set voltage */
	/* TODO */

	/* set engine clock */
	radeon_sync_with_vblank(rdev);
	radeon_pm_debug_check_in_vbl(rdev, false);
	radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
	radeon_pm_debug_check_in_vbl(rdev, true);

#if 0
	/* set memory clock */
	if (rdev->asic->set_memory_clock) {
		radeon_sync_with_vblank(rdev);
		radeon_pm_debug_check_in_vbl(rdev, false);
		radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
		radeon_pm_debug_check_in_vbl(rdev, true);
	}
#endif

	rdev->pm.current_power_state = rdev->pm.requested_power_state;
	rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
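A short aside on radeon_set_power_state() above: the new radeon_sync_with_vblank() helper waits, with a timeout, for a vertical blank on an active CRTC before the engine or memory clock is reprogrammed. A minimal sketch of the pattern, assuming only the helpers shown in the hunk:

/* Illustrative reclock pattern: sync to vblank, then switch the engine
 * clock; the wait gives up after RADEON_WAIT_VBLANK_TIMEOUT ms. */
static void reclock_engine(struct radeon_device *rdev, u32 new_sclk)
{
	radeon_sync_with_vblank(rdev);
	radeon_set_engine_clock(rdev, new_sclk);
}
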
@ -229,6 +257,12 @@ int radeon_pm_init(struct radeon_device *rdev)
	return 0;
}

void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.i2c_bus)
		radeon_i2c_destroy(rdev->pm.i2c_bus);
}

void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
@ -245,7 +279,8 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
	list_for_each_entry(connector,
		&ddev->mode_config.connector_list, head) {
		if (connector->encoder &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
		    connector->encoder->crtc &&
		    connector->dpms != DRM_MODE_DPMS_OFF) {
			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			++count;
@ -333,10 +368,7 @@ static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
		break;
	}

	/* check if we are in vblank */
	radeon_pm_debug_check_in_vbl(rdev, false);
	radeon_set_power_state(rdev);
	radeon_pm_debug_check_in_vbl(rdev, true);
	rdev->pm.planned_action = PM_ACTION_NONE;
}

@ -353,10 +385,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
		rdev->pm.req_vblank |= (1 << 1);
		drm_vblank_get(rdev->ddev, 1);
	}
	if (rdev->pm.active_crtcs)
		wait_event_interruptible_timeout(
			rdev->irq.vblank_queue, 0,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	radeon_pm_set_clocks_locked(rdev);
	if (rdev->pm.req_vblank & (1 << 0)) {
		rdev->pm.req_vblank &= ~(1 << 0);
		drm_vblank_put(rdev->ddev, 0);
@ -366,7 +395,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
		drm_vblank_put(rdev->ddev, 1);
	}

	radeon_pm_set_clocks_locked(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

@ -346,6 +346,7 @@
# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
# define RADEON_TVCLK_TURNOFF (1 << 31)
#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
# define RADEON_PM_MODE_SEL (1 << 13)
# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
#define RADEON_CLR_CMP_CLR_3D 0x1a24
#define RADEON_CLR_CMP_CLR_DST 0x15c8

@ -26,20 +26,16 @@ r600 0x9400
|
||||
0x00028408 VGT_INDX_OFFSET
|
||||
0x00028AA0 VGT_INSTANCE_STEP_RATE_0
|
||||
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
|
||||
0x000088C0 VGT_LAST_COPY_STATE
|
||||
0x00028400 VGT_MAX_VTX_INDX
|
||||
0x000088D8 VGT_MC_LAT_CNTL
|
||||
0x00028404 VGT_MIN_VTX_INDX
|
||||
0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
|
||||
0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
|
||||
0x00008970 VGT_NUM_INDICES
|
||||
0x00008974 VGT_NUM_INSTANCES
|
||||
0x00028A10 VGT_OUTPUT_PATH_CNTL
|
||||
0x00028C5C VGT_OUT_DEALLOC_CNTL
|
||||
0x00028A84 VGT_PRIMITIVEID_EN
|
||||
0x00008958 VGT_PRIMITIVE_TYPE
|
||||
0x00028AB4 VGT_REUSE_OFF
|
||||
0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
|
||||
0x00028AB8 VGT_VTX_CNT_EN
|
||||
0x000088B0 VGT_VTX_VECT_EJECT_REG
|
||||
0x00028810 PA_CL_CLIP_CNTL
|
||||
@ -280,7 +276,6 @@ r600 0x9400
|
||||
0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
|
||||
0x00028814 PA_SU_SC_MODE_CNTL
|
||||
0x00028C08 PA_SU_VTX_CNTL
|
||||
0x00008C00 SQ_CONFIG
|
||||
0x00008C04 SQ_GPR_RESOURCE_MGMT_1
|
||||
0x00008C08 SQ_GPR_RESOURCE_MGMT_2
|
||||
0x00008C10 SQ_STACK_RESOURCE_MGMT_1
|
||||
@ -320,18 +315,6 @@ r600 0x9400
|
||||
0x000283FC SQ_VTX_SEMANTIC_31
|
||||
0x000288E0 SQ_VTX_SEMANTIC_CLEAR
|
||||
0x0003CFF4 SQ_VTX_START_INST_LOC
|
||||
0x0003C000 SQ_TEX_SAMPLER_WORD0_0
|
||||
0x0003C004 SQ_TEX_SAMPLER_WORD1_0
|
||||
0x0003C008 SQ_TEX_SAMPLER_WORD2_0
|
||||
0x00030000 SQ_ALU_CONSTANT0_0
|
||||
0x00030004 SQ_ALU_CONSTANT1_0
|
||||
0x00030008 SQ_ALU_CONSTANT2_0
|
||||
0x0003000C SQ_ALU_CONSTANT3_0
|
||||
0x0003E380 SQ_BOOL_CONST_0
|
||||
0x0003E384 SQ_BOOL_CONST_1
|
||||
0x0003E388 SQ_BOOL_CONST_2
|
||||
0x0003E200 SQ_LOOP_CONST_0
|
||||
0x0003E200 SQ_LOOP_CONST_DX10_0
|
||||
0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
|
||||
0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
|
||||
0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
|
||||
@ -380,54 +363,6 @@ r600 0x9400
|
||||
0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
|
||||
0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
|
||||
0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
|
||||
0x000289C0 SQ_ALU_CONST_CACHE_GS_0
|
||||
0x000289C4 SQ_ALU_CONST_CACHE_GS_1
|
||||
0x000289C8 SQ_ALU_CONST_CACHE_GS_2
|
||||
0x000289CC SQ_ALU_CONST_CACHE_GS_3
|
||||
0x000289D0 SQ_ALU_CONST_CACHE_GS_4
|
||||
0x000289D4 SQ_ALU_CONST_CACHE_GS_5
|
||||
0x000289D8 SQ_ALU_CONST_CACHE_GS_6
|
||||
0x000289DC SQ_ALU_CONST_CACHE_GS_7
|
||||
0x000289E0 SQ_ALU_CONST_CACHE_GS_8
|
||||
0x000289E4 SQ_ALU_CONST_CACHE_GS_9
|
||||
0x000289E8 SQ_ALU_CONST_CACHE_GS_10
|
||||
0x000289EC SQ_ALU_CONST_CACHE_GS_11
|
||||
0x000289F0 SQ_ALU_CONST_CACHE_GS_12
|
||||
0x000289F4 SQ_ALU_CONST_CACHE_GS_13
|
||||
0x000289F8 SQ_ALU_CONST_CACHE_GS_14
|
||||
0x000289FC SQ_ALU_CONST_CACHE_GS_15
|
||||
0x00028940 SQ_ALU_CONST_CACHE_PS_0
|
||||
0x00028944 SQ_ALU_CONST_CACHE_PS_1
|
||||
0x00028948 SQ_ALU_CONST_CACHE_PS_2
|
||||
0x0002894C SQ_ALU_CONST_CACHE_PS_3
|
||||
0x00028950 SQ_ALU_CONST_CACHE_PS_4
|
||||
0x00028954 SQ_ALU_CONST_CACHE_PS_5
|
||||
0x00028958 SQ_ALU_CONST_CACHE_PS_6
|
||||
0x0002895C SQ_ALU_CONST_CACHE_PS_7
|
||||
0x00028960 SQ_ALU_CONST_CACHE_PS_8
|
||||
0x00028964 SQ_ALU_CONST_CACHE_PS_9
|
||||
0x00028968 SQ_ALU_CONST_CACHE_PS_10
|
||||
0x0002896C SQ_ALU_CONST_CACHE_PS_11
|
||||
0x00028970 SQ_ALU_CONST_CACHE_PS_12
|
||||
0x00028974 SQ_ALU_CONST_CACHE_PS_13
|
||||
0x00028978 SQ_ALU_CONST_CACHE_PS_14
|
||||
0x0002897C SQ_ALU_CONST_CACHE_PS_15
|
||||
0x00028980 SQ_ALU_CONST_CACHE_VS_0
|
||||
0x00028984 SQ_ALU_CONST_CACHE_VS_1
|
||||
0x00028988 SQ_ALU_CONST_CACHE_VS_2
|
||||
0x0002898C SQ_ALU_CONST_CACHE_VS_3
|
||||
0x00028990 SQ_ALU_CONST_CACHE_VS_4
|
||||
0x00028994 SQ_ALU_CONST_CACHE_VS_5
|
||||
0x00028998 SQ_ALU_CONST_CACHE_VS_6
|
||||
0x0002899C SQ_ALU_CONST_CACHE_VS_7
|
||||
0x000289A0 SQ_ALU_CONST_CACHE_VS_8
|
||||
0x000289A4 SQ_ALU_CONST_CACHE_VS_9
|
||||
0x000289A8 SQ_ALU_CONST_CACHE_VS_10
|
||||
0x000289AC SQ_ALU_CONST_CACHE_VS_11
|
||||
0x000289B0 SQ_ALU_CONST_CACHE_VS_12
|
||||
0x000289B4 SQ_ALU_CONST_CACHE_VS_13
|
||||
0x000289B8 SQ_ALU_CONST_CACHE_VS_14
|
||||
0x000289BC SQ_ALU_CONST_CACHE_VS_15
|
||||
0x000288D8 SQ_PGM_CF_OFFSET_ES
|
||||
0x000288DC SQ_PGM_CF_OFFSET_FS
|
||||
0x000288D4 SQ_PGM_CF_OFFSET_GS
|
||||
@ -494,12 +429,7 @@ r600 0x9400
|
||||
0x00028438 SX_ALPHA_REF
|
||||
0x00028410 SX_ALPHA_TEST_CONTROL
|
||||
0x00028350 SX_MISC
|
||||
0x0000A020 SMX_DC_CTL0
|
||||
0x0000A024 SMX_DC_CTL1
|
||||
0x0000A028 SMX_DC_CTL2
|
||||
0x00009608 TC_CNTL
|
||||
0x00009604 TC_INVALIDATE
|
||||
0x00009490 TD_CNTL
|
||||
0x00009400 TD_FILTER4
|
||||
0x00009404 TD_FILTER4_1
|
||||
0x00009408 TD_FILTER4_2
|
||||
@ -824,14 +754,9 @@ r600 0x9400
|
||||
0x00028428 CB_FOG_GREEN
|
||||
0x00028424 CB_FOG_RED
|
||||
0x00008040 WAIT_UNTIL
|
||||
0x00008950 CC_GC_SHADER_PIPE_CONFIG
|
||||
0x00008954 GC_USER_SHADER_PIPE_CONFIG
|
||||
0x00009714 VC_ENHANCE
|
||||
0x00009830 DB_DEBUG
|
||||
0x00009838 DB_WATERMARKS
|
||||
0x00028D28 DB_SRESULTS_COMPARE_STATE0
|
||||
0x00028D44 DB_ALPHA_TO_MASK
|
||||
0x00009504 TA_CNTL
|
||||
0x00009700 VC_CNTL
|
||||
0x00009718 VC_CONFIG
|
||||
0x0000A02C SMX_DC_MC_INTF_CTL
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/seq_file.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "rs400d.h"
|
||||
|
||||
/* This files gather functions specifics to : rs400,rs480 */
|
||||
@ -202,9 +203,9 @@ void rs400_gart_disable(struct radeon_device *rdev)
|
||||
|
||||
void rs400_gart_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_gart_fini(rdev);
|
||||
rs400_gart_disable(rdev);
|
||||
radeon_gart_table_ram_free(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
}
|
||||
|
||||
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
|
||||
@ -264,6 +265,7 @@ void rs400_mc_init(struct radeon_device *rdev)
|
||||
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
|
||||
radeon_vram_location(rdev, &rdev->mc, base);
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
}
|
||||
|
||||
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
|
||||
@ -388,6 +390,8 @@ static int rs400_startup(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
r100_set_common_regs(rdev);
|
||||
|
||||
rs400_mc_program(rdev);
|
||||
/* Resume clock */
|
||||
r300_clock_startup(rdev);
|
||||
@ -453,6 +457,7 @@ int rs400_suspend(struct radeon_device *rdev)
|
||||
|
||||
void rs400_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r100_cp_fini(rdev);
|
||||
r100_wb_fini(rdev);
|
||||
r100_ib_fini(rdev);
|
||||
|
@ -37,6 +37,7 @@
|
||||
*/
|
||||
#include "drmP.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "atom.h"
|
||||
#include "rs600d.h"
|
||||
|
||||
@ -267,9 +268,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
|
||||
|
||||
void rs600_gart_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_gart_fini(rdev);
|
||||
rs600_gart_disable(rdev);
|
||||
radeon_gart_table_vram_free(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
}
|
||||
|
||||
#define R600_PTE_VALID (1 << 0)
|
||||
@ -392,10 +393,12 @@ int rs600_irq_process(struct radeon_device *rdev)
|
||||
/* Vertical blank interrupts */
|
||||
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
|
||||
drm_handle_vblank(rdev->ddev, 0);
|
||||
rdev->pm.vblank_sync = true;
|
||||
wake_up(&rdev->irq.vblank_queue);
|
||||
}
|
||||
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
|
||||
drm_handle_vblank(rdev->ddev, 1);
|
||||
rdev->pm.vblank_sync = true;
|
||||
wake_up(&rdev->irq.vblank_queue);
|
||||
}
|
||||
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
|
||||
@ -472,13 +475,38 @@ void rs600_mc_init(struct radeon_device *rdev)
|
||||
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
|
||||
base = RREG32_MC(R_000004_MC_FB_LOCATION);
|
||||
base = G_000004_MC_FB_START(base) << 16;
|
||||
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
|
||||
radeon_vram_location(rdev, &rdev->mc, base);
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
}
|
||||
|
||||
void rs600_bandwidth_update(struct radeon_device *rdev)
|
||||
{
|
||||
/* FIXME: implement, should this be like rs690 ? */
|
||||
struct drm_display_mode *mode0 = NULL;
|
||||
struct drm_display_mode *mode1 = NULL;
|
||||
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
|
||||
/* FIXME: implement full support */
|
||||
|
||||
radeon_update_display_priority(rdev);
|
||||
|
||||
if (rdev->mode_info.crtcs[0]->base.enabled)
|
||||
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
|
||||
if (rdev->mode_info.crtcs[1]->base.enabled)
|
||||
mode1 = &rdev->mode_info.crtcs[1]->base.mode;
|
||||
|
||||
rs690_line_buffer_adjust(rdev, mode0, mode1);
|
||||
|
||||
if (rdev->disp_priority == 2) {
|
||||
d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
|
||||
d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
|
||||
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
|
||||
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
|
||||
@ -598,6 +626,7 @@ int rs600_suspend(struct radeon_device *rdev)
|
||||
|
||||
void rs600_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r100_cp_fini(rdev);
|
||||
r100_wb_fini(rdev);
|
||||
r100_ib_fini(rdev);
|
||||
|
@ -535,4 +535,57 @@
|
||||
#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
|
||||
#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
|
||||
|
||||
#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
|
||||
#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
|
||||
#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
|
||||
#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
|
||||
#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
|
||||
#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
|
||||
#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
|
||||
#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
|
||||
#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
|
||||
#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
|
||||
#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
|
||||
#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
|
||||
#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
|
||||
#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
|
||||
#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
|
||||
#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
|
||||
#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
|
||||
#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
|
||||
#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
|
||||
#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
|
||||
#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
|
||||
#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
|
||||
#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
|
||||
#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
|
||||
#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
|
||||
#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
|
||||
#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
|
||||
#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
|
||||
#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
|
||||
#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
|
||||
#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
|
||||
#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
|
||||
#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
|
||||
#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
|
||||
#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
|
||||
#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
|
||||
#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
|
||||
#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
|
||||
#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
|
||||
#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
|
||||
#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
|
||||
#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
|
||||
#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
|
||||
#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
|
||||
#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
|
||||
#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
|
||||
|
||||
#endif
|
||||
|
@ -27,6 +27,7 @@
|
||||
*/
|
||||
#include "drmP.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "atom.h"
|
||||
#include "rs690d.h"
|
||||
|
||||
@ -57,42 +58,57 @@ static void rs690_gpu_init(struct radeon_device *rdev)
|
||||
}
|
||||
}
|
||||
|
||||
union igp_info {
|
||||
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
|
||||
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
|
||||
};
|
||||
|
||||
void rs690_pm_info(struct radeon_device *rdev)
|
||||
{
|
||||
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
|
||||
struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
|
||||
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
|
||||
void *ptr;
|
||||
union igp_info *info;
|
||||
uint16_t data_offset;
|
||||
uint8_t frev, crev;
|
||||
fixed20_12 tmp;
|
||||
|
||||
atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
|
||||
&frev, &crev, &data_offset);
|
||||
ptr = rdev->mode_info.atom_context->bios + data_offset;
|
||||
info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
|
||||
info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
|
||||
/* Get various system informations from bios */
|
||||
switch (crev) {
|
||||
case 1:
|
||||
tmp.full = rfixed_const(100);
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
|
||||
rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
|
||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
|
||||
rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
|
||||
break;
|
||||
case 2:
|
||||
tmp.full = rfixed_const(100);
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
|
||||
rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
|
||||
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
|
||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
|
||||
rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
|
||||
rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
|
||||
break;
|
||||
default:
|
||||
if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
|
||||
&frev, &crev, &data_offset)) {
|
||||
info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
|
||||
|
||||
/* Get various system informations from bios */
|
||||
switch (crev) {
|
||||
case 1:
|
||||
tmp.full = rfixed_const(100);
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
|
||||
rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
|
||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
|
||||
rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
|
||||
break;
|
||||
case 2:
|
||||
tmp.full = rfixed_const(100);
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
|
||||
rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
|
||||
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
|
||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
|
||||
rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
|
||||
rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
|
||||
break;
|
||||
default:
|
||||
tmp.full = rfixed_const(100);
|
||||
/* We assume the slower possible clock ie worst case */
|
||||
/* DDR 333Mhz */
|
||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
|
||||
/* FIXME: system clock ? */
|
||||
rdev->pm.igp_system_mclk.full = rfixed_const(100);
|
||||
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
|
||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
|
||||
rdev->pm.igp_ht_link_width.full = rfixed_const(8);
|
||||
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
tmp.full = rfixed_const(100);
|
||||
/* We assume the slower possible clock ie worst case */
|
||||
/* DDR 333Mhz */
|
||||
@ -103,7 +119,6 @@ void rs690_pm_info(struct radeon_device *rdev)
|
||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
|
||||
rdev->pm.igp_ht_link_width.full = rfixed_const(8);
|
||||
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
|
||||
break;
|
||||
}
|
||||
/* Compute various bandwidth */
|
||||
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
|
||||
@ -131,7 +146,6 @@ void rs690_pm_info(struct radeon_device *rdev)
|
||||
|
||||
void rs690_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
fixed20_12 a;
|
||||
u64 base;
|
||||
|
||||
rs400_gart_adjust_size(rdev);
|
||||
@ -145,18 +159,10 @@ void rs690_mc_init(struct radeon_device *rdev)
|
||||
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
|
||||
base = G_000100_MC_FB_START(base) << 16;
|
||||
rs690_pm_info(rdev);
|
||||
/* FIXME: we should enforce default clock in case GPU is not in
|
||||
* default setup
|
||||
*/
|
||||
a.full = rfixed_const(100);
|
||||
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
|
||||
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
|
||||
a.full = rfixed_const(16);
|
||||
/* core_bandwidth = sclk(Mhz) * 16 */
|
||||
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
|
||||
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
|
||||
radeon_vram_location(rdev, &rdev->mc, base);
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
}
|
||||
|
||||
void rs690_line_buffer_adjust(struct radeon_device *rdev,
|
||||
@ -394,10 +400,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
||||
struct drm_display_mode *mode1 = NULL;
|
||||
struct rs690_watermark wm0;
|
||||
struct rs690_watermark wm1;
|
||||
u32 tmp;
|
||||
u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
|
||||
fixed20_12 priority_mark02, priority_mark12, fill_rate;
|
||||
fixed20_12 a, b;
|
||||
|
||||
radeon_update_display_priority(rdev);
|
||||
|
||||
if (rdev->mode_info.crtcs[0]->base.enabled)
|
||||
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
|
||||
if (rdev->mode_info.crtcs[1]->base.enabled)
|
||||
@ -407,7 +415,8 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
||||
* modes if the user specifies HIGH for displaypriority
|
||||
* option.
|
||||
*/
|
||||
if (rdev->disp_priority == 2) {
|
||||
if ((rdev->disp_priority == 2) &&
|
||||
((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
|
||||
tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
|
||||
tmp &= C_000104_MC_DISP0R_INIT_LAT;
|
||||
tmp &= C_000104_MC_DISP1R_INIT_LAT;
|
||||
@ -482,10 +491,16 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
||||
priority_mark12.full = 0;
|
||||
if (wm1.priority_mark_max.full > priority_mark12.full)
|
||||
priority_mark12.full = wm1.priority_mark_max.full;
|
||||
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
|
||||
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
|
||||
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
|
||||
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
|
||||
d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
|
||||
d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
|
||||
if (rdev->disp_priority == 2) {
|
||||
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||
}
|
||||
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
|
||||
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
|
||||
} else if (mode0) {
|
||||
if (rfixed_trunc(wm0.dbpp) > 64)
|
||||
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
|
||||
@ -512,8 +527,11 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
||||
priority_mark02.full = 0;
|
||||
if (wm0.priority_mark_max.full > priority_mark02.full)
|
||||
priority_mark02.full = wm0.priority_mark_max.full;
|
||||
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
|
||||
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
|
||||
d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
|
||||
if (rdev->disp_priority == 2)
|
||||
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
|
||||
S_006D48_D2MODE_PRIORITY_A_OFF(1));
|
||||
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
|
||||
@ -544,12 +562,15 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
||||
priority_mark12.full = 0;
|
||||
if (wm1.priority_mark_max.full > priority_mark12.full)
|
||||
priority_mark12.full = wm1.priority_mark_max.full;
|
||||
d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
|
||||
if (rdev->disp_priority == 2)
|
||||
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
|
||||
S_006548_D1MODE_PRIORITY_A_OFF(1));
|
||||
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
|
||||
S_00654C_D1MODE_PRIORITY_B_OFF(1));
|
||||
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
|
||||
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
|
||||
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
|
||||
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
|
||||
}
|
||||
}
|
||||
|
||||
@ -657,6 +678,7 @@ int rs690_suspend(struct radeon_device *rdev)
|
||||
|
||||
void rs690_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r100_cp_fini(rdev);
|
||||
r100_wb_fini(rdev);
|
||||
r100_ib_fini(rdev);
|
||||
|
@ -182,6 +182,9 @@
|
||||
#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
|
||||
#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
|
||||
#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
|
||||
#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
|
||||
#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
|
||||
#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
|
||||
#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
|
||||
#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
|
||||
#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include "drmP.h"
|
||||
#include "rv515d.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "atom.h"
|
||||
#include "rv515_reg_safe.h"
|
||||
|
||||
@ -279,19 +280,13 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
|
||||
|
||||
void rv515_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
fixed20_12 a;
|
||||
|
||||
rv515_vram_get_type(rdev);
|
||||
r100_vram_init_sizes(rdev);
|
||||
radeon_vram_location(rdev, &rdev->mc, 0);
|
||||
if (!(rdev->flags & RADEON_IS_AGP))
|
||||
radeon_gtt_location(rdev, &rdev->mc);
|
||||
/* FIXME: we should enforce default clock in case GPU is not in
|
||||
* default setup
|
||||
*/
|
||||
a.full = rfixed_const(100);
|
||||
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
|
||||
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
}
|
||||
|
||||
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
|
||||
@ -539,6 +534,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
|
||||
|
||||
void rv515_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r100_cp_fini(rdev);
|
||||
r100_wb_fini(rdev);
|
||||
r100_ib_fini(rdev);
|
||||
@ -1020,7 +1016,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
|
||||
struct drm_display_mode *mode1 = NULL;
|
||||
struct rv515_watermark wm0;
|
||||
struct rv515_watermark wm1;
|
||||
u32 tmp;
|
||||
u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
|
||||
fixed20_12 priority_mark02, priority_mark12, fill_rate;
|
||||
fixed20_12 a, b;
|
||||
|
||||
@ -1088,10 +1084,16 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
|
||||
priority_mark12.full = 0;
|
||||
if (wm1.priority_mark_max.full > priority_mark12.full)
|
||||
priority_mark12.full = wm1.priority_mark_max.full;
|
||||
WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
|
||||
WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
|
||||
WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
|
||||
WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
|
||||
d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
|
||||
d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
|
||||
if (rdev->disp_priority == 2) {
|
||||
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
|
||||
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
|
||||
}
|
||||
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
|
||||
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
|
||||
} else if (mode0) {
|
||||
if (rfixed_trunc(wm0.dbpp) > 64)
|
||||
a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
|
||||
@ -1118,8 +1120,11 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
|
||||
priority_mark02.full = 0;
|
||||
if (wm0.priority_mark_max.full > priority_mark02.full)
|
||||
priority_mark02.full = wm0.priority_mark_max.full;
|
||||
WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
|
||||
WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
|
||||
d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
|
||||
if (rdev->disp_priority == 2)
|
||||
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
|
||||
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
|
||||
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
|
||||
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
|
||||
} else {
|
||||
@ -1148,10 +1153,13 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
|
||||
priority_mark12.full = 0;
|
||||
if (wm1.priority_mark_max.full > priority_mark12.full)
|
||||
priority_mark12.full = wm1.priority_mark_max.full;
|
||||
d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
|
||||
if (rdev->disp_priority == 2)
|
||||
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
|
||||
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
|
||||
WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
|
||||
WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
|
||||
WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
|
||||
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
|
||||
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1161,6 +1169,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
|
||||
struct drm_display_mode *mode0 = NULL;
|
||||
struct drm_display_mode *mode1 = NULL;
|
||||
|
||||
radeon_update_display_priority(rdev);
|
||||
|
||||
if (rdev->mode_info.crtcs[0]->base.enabled)
|
||||
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
|
||||
if (rdev->mode_info.crtcs[1]->base.enabled)
|
||||
@ -1170,7 +1180,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
|
||||
* modes if the user specifies HIGH for displaypriority
|
||||
* option.
|
||||
*/
|
||||
if (rdev->disp_priority == 2) {
|
||||
if ((rdev->disp_priority == 2) &&
|
||||
(rdev->family == CHIP_RV515)) {
|
||||
tmp = RREG32_MC(MC_MISC_LAT_TIMER);
|
||||
tmp &= ~MC_DISP1R_INIT_LAT_MASK;
|
||||
tmp &= ~MC_DISP0R_INIT_LAT_MASK;
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <linux/platform_device.h>
|
||||
#include "drmP.h"
|
||||
#include "radeon.h"
|
||||
#include "radeon_asic.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "rv770d.h"
|
||||
#include "atom.h"
|
||||
@ -125,9 +126,9 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
|
||||
|
||||
void rv770_pcie_gart_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_gart_fini(rdev);
|
||||
rv770_pcie_gart_disable(rdev);
|
||||
radeon_gart_table_vram_free(rdev);
|
||||
radeon_gart_fini(rdev);
|
||||
}
|
||||
|
||||
|
||||
@ -647,10 +648,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
|
||||
|
||||
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
|
||||
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
|
||||
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
|
||||
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
|
||||
|
||||
WREG32(CGTS_SYS_TCC_DISABLE, 0);
|
||||
WREG32(CGTS_TCC_DISABLE, 0);
|
||||
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
|
||||
WREG32(CGTS_USER_TCC_DISABLE, 0);
|
||||
|
||||
num_qd_pipes =
|
||||
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
|
||||
@ -864,7 +868,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
|
||||
|
||||
int rv770_mc_init(struct radeon_device *rdev)
|
||||
{
|
||||
fixed20_12 a;
|
||||
u32 tmp;
|
||||
int chansize, numchan;
|
||||
|
||||
@ -908,12 +911,8 @@ int rv770_mc_init(struct radeon_device *rdev)
|
||||
rdev->mc.real_vram_size = rdev->mc.aper_size;
|
||||
}
|
||||
r600_vram_gtt_location(rdev, &rdev->mc);
|
||||
/* FIXME: we should enforce default clock in case GPU is not in
|
||||
* default setup
|
||||
*/
|
||||
a.full = rfixed_const(100);
|
||||
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
|
||||
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1013,6 +1012,13 @@ int rv770_resume(struct radeon_device *rdev)
|
||||
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = r600_audio_init(rdev);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "radeon: audio init failed\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
return r;
|
||||
|
||||
}
|
||||
@ -1021,6 +1027,7 @@ int rv770_suspend(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
r600_audio_fini(rdev);
|
||||
/* FIXME: we should wait for ring to be empty */
|
||||
r700_cp_stop(rdev);
|
||||
rdev->cp.ready = false;
|
||||
@ -1144,11 +1151,19 @@ int rv770_init(struct radeon_device *rdev)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r = r600_audio_init(rdev);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "radeon: audio init failed\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rv770_fini(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_pm_fini(rdev);
|
||||
r600_blit_fini(rdev);
|
||||
r600_cp_fini(rdev);
|
||||
r600_wb_fini(rdev);
|
||||
|
@ -1425,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)

	atomic_set(&glob->bo_count, 0);

	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;

@ -260,8 +260,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
@ -296,8 +296,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
@ -343,8 +343,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
@ -365,10 +365,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	init_waitqueue_head(&glob->queue);
	kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
	ret = kobject_add(&glob->kobj,
			  ttm_get_kobj(),
			  "memory_accounting");
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;

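For context on the TTM hunks above: the two-step kobject_init()/kobject_add() sequence is folded into kobject_init_and_add(), which still requires a kobject_put() if it fails. A minimal sketch of the same pattern with illustrative names:

/* Illustrative: one-call kobject registration; on failure the
 * reference taken by the init half must still be dropped. */
static int register_example_kobject(struct kobject *kobj,
				    struct kobj_type *ktype,
				    struct kobject *parent)
{
	int ret = kobject_init_and_add(kobj, ktype, parent, "example");

	if (unlikely(ret != 0))
		kobject_put(kobj);
	return ret;
}
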
@ -28,13 +28,13 @@
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
@ -43,32 +43,15 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}

@ -1,6 +1,6 @@
config DRM_VMWGFX
	tristate "DRM driver for VMware Virtual GPU"
	depends on DRM && PCI
	depends on DRM && PCI && FB
	select FB_DEFERRED_IO
	select FB_CFB_FILLRECT
	select FB_CFB_COPYAREA

@ -1545,39 +1545,7 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
}


static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kcalloc(nmemb, size, GFP_KERNEL);

	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
}

/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kmalloc(nmemb * size, GFP_KERNEL);

	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

static __inline void drm_free_large(void *ptr)
{
	if (!is_vmalloc_addr(ptr))
		return kfree(ptr);

	vfree(ptr);
}
#include "drm_mem_util.h"
/*@}*/

#endif /* __KERNEL__ */

65
include/drm/drm_mem_util.h
Normal file
@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Copyright © 2008 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Jesse Barnes <jbarnes@virtuousgeek.org>
|
||||
*
|
||||
*/
|
||||
#ifndef _DRM_MEM_UTIL_H_
#define _DRM_MEM_UTIL_H_

#include <linux/vmalloc.h>

static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kcalloc(nmemb, size, GFP_KERNEL);

	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
}

/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kmalloc(nmemb * size, GFP_KERNEL);

	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

static __inline void drm_free_large(void *ptr)
{
	if (!is_vmalloc_addr(ptr))
		return kfree(ptr);

	vfree(ptr);
}

#endif
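A brief usage note on the new include/drm/drm_mem_util.h above: callers such as ttm_tt can now allocate large, zeroed pointer arrays without open-coding the kmalloc/vmalloc fallback. A small sketch, assuming a caller that needs room for npages page pointers:

#include "drm_mem_util.h"

/* Illustrative: drm_calloc_large() picks kcalloc for small arrays and
 * falls back to a zeroed vmalloc; drm_free_large() frees either kind. */
static struct page **alloc_page_directory(unsigned long npages)
{
	return drm_calloc_large(npages, sizeof(struct page *));
}

static void free_page_directory(struct page **pages)
{
	drm_free_large(pages);
}
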
@ -410,6 +410,7 @@
	{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
	{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
	{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
	{0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
	{0, 0, 0}

#define r128_PCI_IDS \

@ -115,7 +115,6 @@ struct ttm_backend {
	struct ttm_backend_func *func;
};

#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
#define TTM_PAGE_FLAG_USER (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
#define TTM_PAGE_FLAG_WRITE (1 << 3)