Merge branch 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel into drm-fixes
* 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel:
  drm/i915: fix corruptions on i8xx due to relaxed fencing
  drm/i915: skip FDI & PCH enabling for DP_A
  agp/intel: Experiment with a 855GM GWB bit
  drm/i915: don't enable FDI & transcoder interrupts after all
  drm/i915: Ignore a hung GPU when flushing the framebuffer prior to a switch
commit fbf92bea68
drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
 #define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
+#include <linux/delay.h>
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
 	u32 __iomem *gtt; /* I915G */
 	bool clear_fake_agp; /* on first access via agp, fill with scratch */
 	int num_dcache_entries;
-	union {
-		void __iomem *i9xx_flush_page;
-		void *i8xx_flush_page;
-	};
+	void __iomem *i9xx_flush_page;
 	char *i81x_gtt_table;
-	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
 
 static void i830_cleanup(void)
 {
-	if (intel_private.i8xx_flush_page) {
-		kunmap(intel_private.i8xx_flush_page);
-		intel_private.i8xx_flush_page = NULL;
-	}
-
-	__free_page(intel_private.i8xx_page);
-	intel_private.i8xx_page = NULL;
 }
 
-static void intel_i830_setup_flush(void)
-{
-	/* return if we've already set the flush mechanism up */
-	if (intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
-	if (!intel_private.i8xx_page)
-		return;
-
-	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-	if (!intel_private.i8xx_flush_page)
-		i830_cleanup();
-}
-
 /* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
  */
 static void i830_chipset_flush(void)
 {
-	unsigned int *pg = intel_private.i8xx_flush_page;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 
-	memset(pg, 0, 1024);
+	/* Forcibly evict everything from the CPU write buffers.
+	 * clflush appears to be insufficient.
+	 */
+	wbinvd_on_all_cpus();
 
-	if (cpu_has_clflush)
-		clflush_cache_range(pg, 1024);
-	else if (wbinvd_on_all_cpus() != 0)
-		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+	/* Now we've only seen documents for this magic bit on 855GM,
+	 * we hope it exists for the other gen2 chipsets...
+	 *
+	 * Also works as advertised on my 845G.
+	 */
+	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+	       intel_private.registers+I830_HIC);
+
+	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+		if (time_after(jiffies, timeout))
+			break;
+
+		udelay(50);
+	}
 }
 
 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
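The replacement flush above is a kick-and-poll sequence: set bit 31 of I830_HIC, then spin until the hardware clears it again, giving up after about a second. Below is a minimal userspace sketch of that idiom; fake_hic, reg_read(), reg_write() and the poll budget are stand-ins for illustration, not the kernel's readl()/writel()/jiffies interfaces.

/* Illustrative only: models the kick-and-poll flush from i830_chipset_flush(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLUSH_BIT (1u << 31)

static uint32_t fake_hic;		/* stand-in for the memory-mapped I830_HIC register */
static unsigned int reads_left = 3;	/* pretend the hardware clears the bit after 3 polls */

static uint32_t reg_read(void)
{
	if (reads_left && --reads_left == 0)
		fake_hic &= ~FLUSH_BIT;
	return fake_hic;
}

static void reg_write(uint32_t val)
{
	fake_hic = val;
}

static bool flush_write_buffer(unsigned int max_polls)
{
	unsigned int polls = 0;

	reg_write(reg_read() | FLUSH_BIT);	/* kick the flush */

	while (reg_read() & FLUSH_BIT) {	/* wait for the bit to self-clear */
		if (++polls > max_polls)
			return false;		/* timed out; the kernel loop just breaks out */
	}
	return true;
}

int main(void)
{
	printf("flushed: %s\n", flush_write_buffer(20000) ? "yes" : "timed out");
	return 0;
}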
@@ -849,8 +837,6 @@ static int i830_setup(void)
 
 	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
 
-	intel_i830_setup_flush();
-
 	return 0;
 }
 
drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
-	int tile_width;
+	int tile_width, tile_height;
 
 	/* Linear is always fine */
 	if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		}
 	}
 
+	if (IS_GEN2(dev) ||
+	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+		tile_height = 32;
+	else
+		tile_height = 8;
+	/* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
+	 * number of tile rows. */
+	if (IS_GEN2(dev))
+		tile_height *= 2;
+
+	/* Size needs to be aligned to a full tile row */
+	if (size & (tile_height * stride - 1))
+		return false;
+
 	/* 965+ just needs multiples of tile width */
 	if (INTEL_INFO(dev)->gen >= 4) {
 		if (stride & (tile_width - 1))
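The size check added above is the heart of the first fix in this pull, 'drm/i915: fix corruptions on i8xx due to relaxed fencing': per the hunk, gen2 uses 32-row tiles and has two interleaved rows of tiles, so an object must span a whole number of 64-row tile rows. Below is a small standalone sketch with made-up numbers; the helper name and the sample stride are illustrative, and it assumes tile_height * stride is a power of two, which the driver already requires of pre-965 strides.

/* Illustrative only: the same alignment test as in i915_tiling_ok() above. */
#include <stdbool.h>
#include <stdio.h>

static bool size_covers_whole_tile_rows(unsigned int size, unsigned int stride,
					unsigned int tile_height)
{
	/* One tile row is tile_height * stride bytes; power-of-two assumed. */
	return (size & (tile_height * stride - 1)) == 0;
}

int main(void)
{
	unsigned int tile_height = 32 * 2;	/* gen2 X-tiling, doubled for interleaved rows */
	unsigned int stride = 2048;		/* bytes per scanline */

	/* 128 KiB is exactly one 64-row tile row at this stride: accepted. */
	printf("%d\n", size_covers_whole_tile_rows(128 * 1024, stride, tile_height));
	/* 96 KiB is only three quarters of a tile row: rejected. */
	printf("%d\n", size_covers_whole_tile_rows(96 * 1024, stride, tile_height));
	return 0;
}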
drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
 		if (encoder->hot_plug)
 			encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
-		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
-		I915_WRITE(FDI_RXA_IMR, 0);
-		I915_WRITE(FDI_RXB_IMR, 0);
+		hotplug_mask |= SDE_AUX_MASK;
 	}
 
 	dev_priv->pch_irq_mask = ~hotplug_mask;
drivers/gpu/drm/i915/intel_display.c
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
+			   atomic_read(&dev_priv->mm.wedged) ||
 			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
+		 *
+		 * This should only fail upon a hung GPU, in which case we
+		 * can safely continue.
 		 */
 		ret = i915_gem_object_flush_gpu(obj, false);
-		if (ret) {
-			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
+		(void) ret;
 	}
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 		   atomic_read(&obj->pending_flip) == 0);
 }
 
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	/*
+	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+	 * must be driven by its own crtc; no sharing is possible.
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		if (encoder->base.crtc != crtc)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				return false;
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	u32 reg, temp;
+	bool is_pch_port = false;
 
 	if (intel_crtc->active)
 		return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	ironlake_fdi_enable(crtc);
+	is_pch_port = intel_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		ironlake_fdi_enable(crtc);
+	else {
+		/* disable CPU FDI tx and PCH FDI rx */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+		POSTING_READ(reg);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(0x7 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+
+		/* Ironlake workaround, disable clock pointer after downing FDI */
+		if (HAS_PCH_IBX(dev))
+			I915_WRITE(FDI_RX_CHICKEN(pipe),
+				   I915_READ(FDI_RX_CHICKEN(pipe) &
+					     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+
+		/* still set train pattern 1 */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		if (HAS_PCH_CPT(dev)) {
+			temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+			temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		} else {
+			temp &= ~FDI_LINK_TRAIN_NONE;
+			temp |= FDI_LINK_TRAIN_PATTERN_1;
+		}
+		/* BPC in FDI rx is consistent with that in PIPECONF */
+		temp &= ~(0x07 << 16);
+		temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
+
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		intel_flush_display_plane(dev, plane);
 	}
 
+	/* Skip the PCH stuff if possible */
+	if (!is_pch_port)
+		goto done;
+
 	/* For PCH output, training FDI link */
 	if (IS_GEN6(dev))
 		gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	I915_WRITE(reg, temp | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
 	intel_crtc_update_cursor(crtc, true);