Mirror of https://github.com/torvalds/linux.git (synced 2024-11-17 01:22:07 +00:00)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "AST, i915, radeon and msm fixes, all over the place. All fixing build
  issues, regressions, oopses or failure to detect cards"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/ast: AST2000 cannot be detected correctly
  drm/ast: open key before detect chips
  drm/msm: don't crash if no msm.vram param
  drm/msm/hdmi: fix build break on non-CCF platforms
  drm/msm: Change nested function to static function
  drm/radeon/dpm: set the thermal type properly for special configs
  drm/radeon: reduce memory footprint for debugging
  drm/radeon: add connector quirk for fujitsu board
  drm/radeon: fix semaphore value init
  drm/radeon: only use me/pfp sync on evergreen+
  drm/i915: Wait for vblank before enabling the TV encoder
  drm/i915: Evict CS TLBs between batches
  drm/i915: Fix irq enable tracking in driver load
  drm/i915: Fix EIO/wedged handling in gem fault handler
  drm/i915: Prevent recursive deadlock on releasing a busy userptr
This commit is contained in:
commit 850ebc0c0c
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 	uint32_t data, jreg;
+	ast_open_key(ast);
 
 	if (dev->pdev->device == PCI_CHIP_AST1180) {
 		ast->chip = AST1100;
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev)
 		}
 		ast->vga2_clone = false;
 	} else {
-		ast->chip = 2000;
+		ast->chip = AST2000;
 		DRM_INFO("AST 2000 detected\n");
 	}
 }
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_power_domains_init_hw(dev_priv);
 
+	/*
+	 * We enable some interrupt sources in our postinstall hooks, so mark
+	 * interrupts as enabled _before_ actually enabling them to avoid
+	 * special cases in our ordering checks.
+	 */
+	dev_priv->pm._irqs_disabled = false;
+
 	ret = drm_irq_install(dev, dev->pdev->irq);
 	if (ret)
 		goto cleanup_gem_stolen;
 
-	dev_priv->pm._irqs_disabled = false;
-
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
@@ -184,6 +184,7 @@ enum hpd_pin {
 		if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
 	struct i915_gtt gtt; /* VM representing the global address space */
 
 	struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-	DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+	DECLARE_HASHTABLE(mm_structs, 7);
+	struct mutex mm_lock;
 
 	/* Kernel Modesetting */
 
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
 			unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-			struct mm_struct *mm;
-			struct i915_mmu_object *mn;
+			struct i915_mm_struct *mm;
+			struct i915_mmu_object *mmu_object;
 			struct work_struct *work;
 		} userptr;
 	};
@@ -1590,10 +1590,13 @@ unlock:
 out:
 	switch (ret) {
 	case -EIO:
-		/* If this -EIO is due to a gpu hang, give the reset code a
-		 * chance to clean up the mess. Otherwise return the proper
-		 * SIGBUS. */
-		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+		/*
+		 * We eat errors when the gpu is terminally wedged to avoid
+		 * userspace unduly crashing (gl has no provisions for mmaps to
+		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+		 * and so needs to be reported.
+		 */
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
 			ret = VM_FAULT_SIGBUS;
 			break;
 		}
@@ -32,6 +32,15 @@
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
 
+struct i915_mm_struct {
+	struct mm_struct *mm;
+	struct drm_device *dev;
+	struct i915_mmu_notifier *mn;
+	struct hlist_node node;
+	struct kref kref;
+	struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>
 
@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
 	struct mmu_notifier mn;
 	struct rb_root objects;
 	struct list_head linear;
-	struct drm_device *dev;
-	struct mm_struct *mm;
-	struct work_struct work;
-	unsigned long count;
 	unsigned long serial;
 	bool has_linear;
 };
 
 struct i915_mmu_object {
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	struct interval_tree_node it;
 	struct list_head link;
 	struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
 				      unsigned long start,
 				      unsigned long end)
 {
-	struct i915_mmu_object *mmu;
+	struct i915_mmu_object *mo;
 	unsigned long serial;
 
 restart:
 	serial = mn->serial;
-	list_for_each_entry(mmu, &mn->linear, link) {
+	list_for_each_entry(mo, &mn->linear, link) {
 		struct drm_i915_gem_object *obj;
 
-		if (mmu->it.last < start || mmu->it.start > end)
+		if (mo->it.last < start || mo->it.start > end)
 			continue;
 
-		obj = mmu->obj;
+		obj = mo->obj;
 		drm_gem_object_reference(&obj->base);
 		spin_unlock(&mn->lock);
 
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
-
-	/* Protected by dev->struct_mutex */
-	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-		if (mmu->mm == mm)
-			return mmu;
-
-	return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	int ret;
 
-	lockdep_assert_held(&dev->struct_mutex);
-
-	mmu = __i915_mmu_notifier_lookup(dev, mm);
-	if (mmu)
-		return mmu;
-
-	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
-	if (mmu == NULL)
+	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
+	if (mn == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&mmu->lock);
-	mmu->dev = dev;
-	mmu->mn.ops = &i915_gem_userptr_notifier;
-	mmu->mm = mm;
-	mmu->objects = RB_ROOT;
-	mmu->count = 0;
-	mmu->serial = 1;
-	INIT_LIST_HEAD(&mmu->linear);
-	mmu->has_linear = false;
+	spin_lock_init(&mn->lock);
+	mn->mn.ops = &i915_gem_userptr_notifier;
+	mn->objects = RB_ROOT;
+	mn->serial = 1;
+	INIT_LIST_HEAD(&mn->linear);
+	mn->has_linear = false;
 
 	 /* Protected by mmap_sem (write-lock) */
-	ret = __mmu_notifier_register(&mmu->mn, mm);
+	ret = __mmu_notifier_register(&mn->mn, mm);
 	if (ret) {
-		kfree(mmu);
+		kfree(mn);
 		return ERR_PTR(ret);
 	}
 
-	/* Protected by dev->struct_mutex */
-	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
-	return mmu;
-}
-
-static void
-__i915_mmu_notifier_destroy_worker(struct work_struct *work)
-{
-	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
-	mmu_notifier_unregister(&mmu->mn, mmu->mm);
-	kfree(mmu);
-}
-
-static void
-__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
-{
-	lockdep_assert_held(&mmu->dev->struct_mutex);
-
-	/* Protected by dev->struct_mutex */
-	hash_del(&mmu->node);
-
-	/* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex.
-	 * We enter the function holding struct_mutex, therefore we need
-	 * to drop our mutex prior to calling mmu_notifier_unregister in
-	 * order to prevent lock inversion (and system-wide deadlock)
-	 * between the mmap_sem and struct-mutex. Hence we defer the
-	 * unregistration to a workqueue where we hold no locks.
-	 */
-	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
-	schedule_work(&mmu->work);
-}
-
-static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
-{
-	if (++mmu->serial == 0)
-		mmu->serial = 1;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
-{
-	struct i915_mmu_object *mn;
-
-	list_for_each_entry(mn, &mmu->linear, link)
-		if (mn->is_linear)
-			return true;
-
-	return false;
+	return mn;
 }
 
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
-		      struct i915_mmu_object *mn)
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
 {
-	lockdep_assert_held(&mmu->dev->struct_mutex);
-
-	spin_lock(&mmu->lock);
-	list_del(&mn->link);
-	if (mn->is_linear)
-		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
-	else
-		interval_tree_remove(&mn->it, &mmu->objects);
-	__i915_mmu_notifier_update_serial(mmu);
-	spin_unlock(&mmu->lock);
-
-	/* Protected against _add() by dev->struct_mutex */
-	if (--mmu->count == 0)
-		__i915_mmu_notifier_destroy(mmu);
+	if (++mn->serial == 0)
+		mn->serial = 1;
 }
 
 static int
-i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
-		      struct i915_mmu_object *mn)
+i915_mmu_notifier_add(struct drm_device *dev,
+		      struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
 {
 	struct interval_tree_node *it;
 	int ret;
 
-	ret = i915_mutex_lock_interruptible(mmu->dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 	 * remove the objects from the interval tree) before we do
 	 * the check for overlapping objects.
 	 */
-	i915_gem_retire_requests(mmu->dev);
+	i915_gem_retire_requests(dev);
 
-	spin_lock(&mmu->lock);
-	it = interval_tree_iter_first(&mmu->objects,
-				      mn->it.start, mn->it.last);
+	spin_lock(&mn->lock);
+	it = interval_tree_iter_first(&mn->objects,
+				      mo->it.start, mo->it.last);
 	if (it) {
 		struct drm_i915_gem_object *obj;
 
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 
 		obj = container_of(it, struct i915_mmu_object, it)->obj;
 		if (!obj->userptr.workers)
-			mmu->has_linear = mn->is_linear = true;
+			mn->has_linear = mo->is_linear = true;
 		else
 			ret = -EAGAIN;
 	} else
-		interval_tree_insert(&mn->it, &mmu->objects);
+		interval_tree_insert(&mo->it, &mn->objects);
 
 	if (ret == 0) {
-		list_add(&mn->link, &mmu->linear);
-		__i915_mmu_notifier_update_serial(mmu);
+		list_add(&mo->link, &mn->linear);
+		__i915_mmu_notifier_update_serial(mn);
 	}
-	spin_unlock(&mmu->lock);
-	mutex_unlock(&mmu->dev->struct_mutex);
+	spin_unlock(&mn->lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
+{
+	struct i915_mmu_object *mo;
+
+	list_for_each_entry(mo, &mn->linear, link)
+		if (mo->is_linear)
+			return true;
+
+	return false;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
+		      struct i915_mmu_object *mo)
+{
+	spin_lock(&mn->lock);
+	list_del(&mo->link);
+	if (mo->is_linear)
+		mn->has_linear = i915_mmu_notifier_has_linear(mn);
+	else
+		interval_tree_remove(&mo->it, &mn->objects);
+	__i915_mmu_notifier_update_serial(mn);
+	spin_unlock(&mn->lock);
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
-	struct i915_mmu_object *mn;
+	struct i915_mmu_object *mo;
 
-	mn = obj->userptr.mn;
-	if (mn == NULL)
+	mo = obj->userptr.mmu_object;
+	if (mo == NULL)
 		return;
 
-	i915_mmu_notifier_del(mn->mmu, mn);
-	obj->userptr.mn = NULL;
+	i915_mmu_notifier_del(mo->mn, mo);
+	kfree(mo);
+
+	obj->userptr.mmu_object = NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_find(struct i915_mm_struct *mm)
+{
+	if (mm->mn == NULL) {
+		down_write(&mm->mm->mmap_sem);
+		mutex_lock(&to_i915(mm->dev)->mm_lock);
+		if (mm->mn == NULL)
+			mm->mn = i915_mmu_notifier_create(mm->mm);
+		mutex_unlock(&to_i915(mm->dev)->mm_lock);
+		up_write(&mm->mm->mmap_sem);
+	}
+	return mm->mn;
 }
 
 static int
 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 				    unsigned flags)
 {
-	struct i915_mmu_notifier *mmu;
-	struct i915_mmu_object *mn;
+	struct i915_mmu_notifier *mn;
+	struct i915_mmu_object *mo;
 	int ret;
 
 	if (flags & I915_USERPTR_UNSYNCHRONIZED)
 		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 
-	down_write(&obj->userptr.mm->mmap_sem);
-	ret = i915_mutex_lock_interruptible(obj->base.dev);
-	if (ret == 0) {
-		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
-		if (!IS_ERR(mmu))
-			mmu->count++; /* preemptive add to act as a refcount */
-		else
-			ret = PTR_ERR(mmu);
-		mutex_unlock(&obj->base.dev->struct_mutex);
-	}
-	up_write(&obj->userptr.mm->mmap_sem);
-	if (ret)
-		return ret;
+	if (WARN_ON(obj->userptr.mm == NULL))
+		return -EINVAL;
 
-	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
-	if (mn == NULL) {
-		ret = -ENOMEM;
-		goto destroy_mmu;
-	}
+	mn = i915_mmu_notifier_find(obj->userptr.mm);
+	if (IS_ERR(mn))
+		return PTR_ERR(mn);
 
-	mn->mmu = mmu;
-	mn->it.start = obj->userptr.ptr;
-	mn->it.last = mn->it.start + obj->base.size - 1;
-	mn->obj = obj;
+	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
+	if (mo == NULL)
+		return -ENOMEM;
 
-	ret = i915_mmu_notifier_add(mmu, mn);
-	if (ret)
-		goto free_mn;
+	mo->mn = mn;
+	mo->it.start = obj->userptr.ptr;
+	mo->it.last = mo->it.start + obj->base.size - 1;
+	mo->obj = obj;
 
-	obj->userptr.mn = mn;
+	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
+	if (ret) {
+		kfree(mo);
+		return ret;
+	}
+
+	obj->userptr.mmu_object = mo;
 	return 0;
+}
 
-free_mn:
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+	if (mn == NULL)
+		return;
+
+	mmu_notifier_unregister(&mn->mn, mm);
 	kfree(mn);
-destroy_mmu:
-	mutex_lock(&obj->base.dev->struct_mutex);
-	if (--mmu->count == 0)
-		__i915_mmu_notifier_destroy(mmu);
-	mutex_unlock(&obj->base.dev->struct_mutex);
-	return ret;
 }
 
 #else
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 
 	return 0;
 }
 
+static void
+i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+}
+
 #endif
 
+static struct i915_mm_struct *
+__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+{
+	struct i915_mm_struct *mm;
+
+	/* Protected by dev_priv->mm_lock */
+	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
+		if (mm->mm == real)
+			return mm;
+
+	return NULL;
+}
+
+static int
+i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_mm_struct *mm;
+	int ret = 0;
+
+	/* During release of the GEM object we hold the struct_mutex. This
+	 * precludes us from calling mmput() at that time as that may be
+	 * the last reference and so call exit_mmap(). exit_mmap() will
+	 * attempt to reap the vma, and if we were holding a GTT mmap
+	 * would then call drm_gem_vm_close() and attempt to reacquire
+	 * the struct mutex. So in order to avoid that recursion, we have
+	 * to defer releasing the mm reference until after we drop the
+	 * struct_mutex, i.e. we need to schedule a worker to do the clean
+	 * up.
+	 */
+	mutex_lock(&dev_priv->mm_lock);
+	mm = __i915_mm_struct_find(dev_priv, current->mm);
+	if (mm == NULL) {
+		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+		if (mm == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		kref_init(&mm->kref);
+		mm->dev = obj->base.dev;
+
+		mm->mm = current->mm;
+		atomic_inc(&current->mm->mm_count);
+
+		mm->mn = NULL;
+
+		/* Protected by dev_priv->mm_lock */
+		hash_add(dev_priv->mm_structs,
+			 &mm->node, (unsigned long)mm->mm);
+	} else
+		kref_get(&mm->kref);
+
+	obj->userptr.mm = mm;
+out:
+	mutex_unlock(&dev_priv->mm_lock);
+	return ret;
+}
+
+static void
+__i915_mm_struct_free__worker(struct work_struct *work)
+{
+	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+	i915_mmu_notifier_free(mm->mn, mm->mm);
+	mmdrop(mm->mm);
+	kfree(mm);
+}
+
+static void
+__i915_mm_struct_free(struct kref *kref)
+{
+	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
+
+	/* Protected by dev_priv->mm_lock */
+	hash_del(&mm->node);
+	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+
+	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
+	schedule_work(&mm->work);
+}
+
+static void
+i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
+{
+	if (obj->userptr.mm == NULL)
+		return;
+
+	kref_put_mutex(&obj->userptr.mm->kref,
+		       __i915_mm_struct_free,
+		       &to_i915(obj->base.dev)->mm_lock);
+	obj->userptr.mm = NULL;
+}
+
 struct get_pages_work {
 	struct work_struct work;
 	struct drm_i915_gem_object *obj;
 	struct task_struct *task;
 };
 
-
 #if IS_ENABLED(CONFIG_SWIOTLB)
 #define swiotlb_active()	swiotlb_nr_tbl()
 #else
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec == NULL)
 		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
 	if (pvec != NULL) {
-		struct mm_struct *mm = obj->userptr.mm;
+		struct mm_struct *mm = obj->userptr.mm->mm;
 
 		down_read(&mm->mmap_sem);
 		while (pinned < num_pages) {
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
 	pvec = NULL;
 	pinned = 0;
-	if (obj->userptr.mm == current->mm) {
+	if (obj->userptr.mm->mm == current->mm) {
 		pvec = kmalloc(num_pages*sizeof(struct page *),
 			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 		if (pvec == NULL) {
@@ -651,17 +708,13 @@ static void
 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
 {
 	i915_gem_userptr_release__mmu_notifier(obj);
-
-	if (obj->userptr.mm) {
-		mmput(obj->userptr.mm);
-		obj->userptr.mm = NULL;
-	}
+	i915_gem_userptr_release__mm_struct(obj);
 }
 
 static int
 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 {
-	if (obj->userptr.mn)
+	if (obj->userptr.mmu_object)
 		return 0;
 
 	return i915_gem_userptr_init__mmu_notifier(obj, 0);
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 		return -ENODEV;
 	}
 
-	/* Allocate the new object */
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
 		return -ENOMEM;
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	 * at binding. This means that we need to hook into the mmu_notifier
 	 * in order to detect if the mmu is destroyed.
 	 */
-	ret = -ENOMEM;
-	if ((obj->userptr.mm = get_task_mm(current)))
+	ret = i915_gem_userptr_init__mm_struct(obj);
+	if (ret == 0)
 		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
 	if (ret == 0)
 		ret = drm_gem_handle_create(file, &obj->base, &handle);
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 int
 i915_gem_init_userptr(struct drm_device *dev)
 {
-#if defined(CONFIG_MMU_NOTIFIER)
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	hash_init(dev_priv->mmu_notifiers);
-#endif
+	mutex_init(&dev_priv->mm_lock);
+	hash_init(dev_priv->mm_structs);
 	return 0;
 }
@@ -334,16 +334,20 @@
 #define   GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
-#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+
+#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
+#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
-#define   XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
-#define   XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define   BLT_WRITE_A			(2<<20)
+#define   BLT_WRITE_RGB			(1<<20)
+#define   BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
 #define   BLT_DEPTH_8			(0<<24)
 #define   BLT_DEPTH_16_565		(1<<24)
 #define   BLT_DEPTH_16_1555		(2<<24)
 #define   BLT_DEPTH_32			(3<<24)
-#define   BLT_ROP_GXCOPY		(0xcc<<16)
+#define   BLT_ROP_SRC_COPY		(0xcc<<16)
+#define   BLT_ROP_COLOR_COPY		(0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
 #define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 				u64 offset, u32 len,
 				unsigned flags)
 {
+	u32 cs_offset = ring->scratch.gtt_offset;
 	int ret;
 
-	if (flags & I915_DISPATCH_PINNED) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			return ret;
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
 
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-		intel_ring_emit(ring, offset + len - 8);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	} else {
-		u32 cs_offset = ring->scratch.gtt_offset;
+	/* Evict the invalid PTE TLBs */
+	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(ring, cs_offset);
+	intel_ring_emit(ring, 0xdeadbeef);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
+	if ((flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(ring, 9+3);
+		ret = intel_ring_begin(ring, 6 + 2);
 		if (ret)
 			return ret;
-		/* Blit the batch (which has now all relocs applied) to the stable batch
-		 * scratch bo area (so that the CS never stumbles over its tlb
-		 * invalidation bug) ... */
-		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
-				XY_SRC_COPY_BLT_WRITE_ALPHA |
-				XY_SRC_COPY_BLT_WRITE_RGB);
-		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-		intel_ring_emit(ring, MI_FLUSH);
+
+		/* Blit the batch (which has now all relocs applied) to the
+		 * stable batch scratch bo area (so that the CS never
+		 * stumbles over its tlb invalidation bug) ...
+		 */
+		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
+
+		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		/* ... and execute it. */
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-		intel_ring_emit(ring, cs_offset + len - 8);
-		intel_ring_advance(ring);
+		offset = cs_offset;
 	}
 
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_BATCH_BUFFER);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+	intel_ring_emit(ring, offset + len - 8);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
 	return 0;
 }
 
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
-		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate batch bo\n");
 			return -ENOMEM;
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
+	intel_wait_for_vblank(encoder->base.dev,
+			      to_intel_crtc(encoder->base.crtc)->pipe);
+
 	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
 
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev,
 	priv->hdmi_pdev = pdev;
 }
 
+#ifdef CONFIG_OF
+static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+{
+	int gpio = of_get_named_gpio(of_node, name, 0);
+	if (gpio < 0) {
+		char name2[32];
+		snprintf(name2, sizeof(name2), "%s-gpio", name);
+		gpio = of_get_named_gpio(of_node, name2, 0);
+		if (gpio < 0) {
+			dev_err(dev, "failed to get gpio: %s (%d)\n",
+				name, gpio);
+			gpio = -1;
+		}
+	}
+	return gpio;
+}
+#endif
+
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct hdmi_platform_config config = {};
 #ifdef CONFIG_OF
 	struct device_node *of_node = dev->of_node;
 
-	int get_gpio(const char *name)
-	{
-		int gpio = of_get_named_gpio(of_node, name, 0);
-		if (gpio < 0) {
-			char name2[32];
-			snprintf(name2, sizeof(name2), "%s-gpio", name);
-			gpio = of_get_named_gpio(of_node, name2, 0);
-			if (gpio < 0) {
-				dev_err(dev, "failed to get gpio: %s (%d)\n",
-						name, gpio);
-				gpio = -1;
-			}
-		}
-		return gpio;
-	}
-
 	if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
 		static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
 		static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	}
 
 	config.mmio_name = "core_physical";
-	config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
-	config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
-	config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
-	config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
-	config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
-	config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
+	config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
+	config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
+	config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
+	config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
+	config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
+	config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
 
 #else
 	static const char *hpd_clk_names[] = {
@@ -15,19 +15,25 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#ifdef CONFIG_COMMON_CLK
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#endif
+
 #include "hdmi.h"
 
 struct hdmi_phy_8960 {
 	struct hdmi_phy base;
 	struct hdmi *hdmi;
+#ifdef CONFIG_COMMON_CLK
 	struct clk_hw pll_hw;
 	struct clk *pll;
 	unsigned long pixclk;
+#endif
 };
 #define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
 
+#ifdef CONFIG_COMMON_CLK
 #define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
 
 /*
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = {
 	.parent_names = hdmi_pll_parents,
 	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
 };
-
+#endif
 
 /*
  * HDMI Phy:
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 {
 	struct hdmi_phy_8960 *phy_8960;
 	struct hdmi_phy *phy = NULL;
-	int ret, i;
+	int ret;
+#ifdef CONFIG_COMMON_CLK
+	int i;
 
 	/* sanity check: */
 	for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
 		if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
 			return ERR_PTR(-EINVAL);
+#endif
 
 	phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
 	if (!phy_8960) {
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 
 	phy_8960->hdmi = hdmi;
 
+#ifdef CONFIG_COMMON_CLK
 	phy_8960->pll_hw.init = &pll_init;
 	phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
 	if (IS_ERR(phy_8960->pll)) {
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
 		phy_8960->pll = NULL;
 		goto fail;
 	}
+#endif
 
 	return phy;
 
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600);
 #define reglog 0
 #endif
 
-static char *vram;
+static char *vram = "16m";
 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
 module_param(vram, charp, 0);
 
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 	u8 msg[DP_DPCD_SIZE];
 	int ret;
 
-	char dpcd_hex_dump[DP_DPCD_SIZE * 3];
-
 	ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
 			       DP_DPCD_SIZE);
 	if (ret > 0) {
 		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-		hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
-				   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
-		DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+		DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+			      dig_connector->dpcd);
 
 		radeon_dp_probe_oui(radeon_connector);
 
@@ -2769,8 +2769,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
-	/* PFP_SYNC_ME packet only exists on 7xx+ */
-	if (emit_wait && (rdev->family >= CHIP_RV770)) {
+	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
 		/* Prevent the PFP from running ahead of the semaphore wait */
 		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		radeon_ring_write(ring, 0x0);
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+	if ((dev->pdev->device == 0x9805) &&
+	    (dev->pdev->subsystem_vendor == 0x1734) &&
+	    (dev->pdev->subsystem_device == 0x11bd)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+			return false;
+	}
+
 	return true;
 }
 
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 			 (controller->ucFanParameters &
 			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 		rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
-	} else if ((controller->ucType ==
-		    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
-		   (controller->ucType ==
-		    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
-		   (controller->ucType ==
-		    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
-		DRM_INFO("Special thermal controller config\n");
+	} else if (controller->ucType ==
+		   ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+		DRM_INFO("External GPIO thermal controller %s fan control\n",
+			 (controller->ucFanParameters &
+			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+	} else if (controller->ucType ==
+		   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+		DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+			 (controller->ucFanParameters &
+			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+	} else if (controller->ucType ==
+		   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+		DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+			 (controller->ucFanParameters &
+			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
 	} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
 		DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
 			 pp_lib_thermal_controller_names[controller->ucType],
 			 controller->ucI2cAddress >> 1,
 			 (controller->ucFanParameters &
 			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
 		i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
 		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
 		if (rdev->pm.i2c_bus) {
@@ -34,7 +34,7 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
-	uint32_t *cpu_addr;
+	uint64_t *cpu_addr;
 	int i, r;
 
 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);