Merge tag 'drm-next-2020-12-18' of git://anongit.freedesktop.org/drm/drm

Pull more drm updates from Daniel Vetter:
 "UAPI Changes:

   - Only enable char/agp uapi when CONFIG_DRM_LEGACY is set

  Cross-subsystem Changes:

   - vma_set_file helper to make vma->vm_file changing less brittle,
     acked by Andrew

  Core Changes:

   - dma-buf heaps improvements

   - pass full atomic modeset state to driver callbacks

   - shmem helpers: cached bo by default

   - cleanups for fbdev, fb-helpers

   - better docs for drm modes and SCALING_FILTER uapi

   - ttm: fix dma32 page pool regression

  Driver Changes:

   - multi-hop regression fixes for amdgpu, radeon, nouveau

   - lots of small amdgpu hw enabling fixes (display, pm, ...)

   - fixes for imx, mcde, meson, some panels, virtio, qxl, i915, all
     fairly minor

   - some cleanups for legacy drm/fbdev drivers"

* tag 'drm-next-2020-12-18' of git://anongit.freedesktop.org/drm/drm: (117 commits)
  drm/qxl: don't allocate a dma_address array
  drm/nouveau: fix multihop when move doesn't work.
  drm/i915/tgl: Fix REVID macros for TGL to fetch correct stepping
  drm/i915: Fix mismatch between misplaced vma check and vma insert
  drm/i915/perf: also include Gen11 in OATAILPTR workaround
  Revert "drm/i915: re-order if/else ladder for hpd_irq_setup"
  drm/amdgpu/disply: fix documentation warnings in display manager
  drm/amdgpu: print mmhub client name for dimgrey_cavefish
  drm/amdgpu: set mode1 reset as default for dimgrey_cavefish
  drm/amd/display: Add get_dig_frontend implementation for DCEx
  drm/radeon: remove h from printk format specifier
  drm/amdgpu: remove h from printk format specifier
  drm/amdgpu: Fix spelling mistake "Heterogenous" -> "Heterogeneous"
  drm/amdgpu: fix regression in vbios reservation handling on headless
  drm/amdgpu/SRIOV: Extend VF reset request wait period
  drm/amdkfd: correct amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu log.
  drm/amd/display: Adding prototype for dccg21_update_dpp_dto()
  drm/amdgpu: print what method we are using for runtime pm
  drm/amdgpu: simplify logic in atpx resume handling
  drm/amdgpu: no need to call pci_ignore_hotplug for _PR3
  ...
Linus Torvalds 2020-12-18 12:38:28 -08:00
commit c59c7588fc
134 changed files with 2760 additions and 1196 deletions

View File

@@ -159,6 +159,8 @@ properties:
- innolux,g121x1-l03
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bge
# InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
- innolux,n125hce-gn1
# InnoLux 15.6" WXGA TFT LCD panel
- innolux,n156bge-l21
# Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel

View File

@@ -190,7 +190,7 @@ DMA Fence uABI/Sync File
Indefinite DMA Fences
~~~~~~~~~~~~~~~~~~~~~
At various times &dma_fence with an indefinite time until dma_fence_wait()
At various times struct dma_fence with an indefinite time until dma_fence_wait()
finishes have been proposed. Examples include:
* Future fences, used in HWC1 to signal when a buffer isn't used by the display

View File

@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
agpgart-y := backend.o frontend.o generic.o isoch.o
agpgart-y := backend.o generic.o isoch.o
ifeq ($(CONFIG_DRM_LEGACY),y)
agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
agpgart-y += frontend.o
endif
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o

View File

@@ -186,8 +186,13 @@ int agp_add_bridge(struct agp_bridge_data *bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge);
/* Frontend routines. */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int agp_frontend_initialize(void);
void agp_frontend_cleanup(void);
#else
static inline int agp_frontend_initialize(void) { return 0; }
static inline void agp_frontend_cleanup(void) {}
#endif
/* Generic routines. */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);

View File

@@ -1166,9 +1166,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
struct file *oldfile;
int ret;
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -1186,22 +1183,10 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* readjust the vma */
get_file(dmabuf->file);
oldfile = vma->vm_file;
vma->vm_file = dmabuf->file;
vma_set_file(vma, dmabuf->file);
vma->vm_pgoff = pgoff;
ret = dmabuf->ops->mmap(dmabuf, vma);
if (ret) {
/* restore old parameters on failure */
vma->vm_file = oldfile;
fput(dmabuf->file);
} else {
if (oldfile)
fput(oldfile);
}
return ret;
return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
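For reference, the vma_set_file() helper this hunk switches to lands in mm/util.c in the same series; as merged it boils down to roughly the sketch below (the explanatory comments are editorial, not from the patch). Taking the new reference before dropping the old one keeps the refcounting balanced on both the success and failure paths, which is what lets the old save/restore dance above be deleted.

#include <linux/mm.h>
#include <linux/fs.h>

/*
 * Swap in a new backing file for the vma: grab the reference on the new
 * file first, then drop the old one, so vma->vm_file never holds an
 * unreferenced pointer even if old and new are the same file.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);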

View File

@@ -200,7 +200,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
max = max(old->shared_count + num_fences,
old->shared_max * 2);
} else {
max = 4;
max = max(4ul, roundup_pow_of_two(num_fences));
}
new = dma_resv_list_alloc(max);
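A quick worked check of the new sizing: previously a fresh object always got a 4-entry list, so a first call reserving more than 4 fences could under-allocate; now the first allocation is max(4, roundup_pow_of_two(num_fences)). A hypothetical user-space model (roundup_pow_of_two() re-implemented locally):

#include <stdio.h>

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long rup2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 9; n++) {
		unsigned long slots = rup2(n) > 4 ? rup2(n) : 4;

		/* e.g. num_fences=5: old code gave 4 slots, new gives 8 */
		printf("num_fences=%u -> %lu slots\n", n, slots);
	}
	return 0;
}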

View File

@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += heap-helpers.o
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o

View File

@@ -2,76 +2,305 @@
/*
* DMABUF CMA heap exporter
*
* Copyright (C) 2012, 2019 Linaro Ltd.
* Copyright (C) 2012, 2019, 2020 Linaro Ltd.
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
*
* Also utilizing parts of Andrew Davis' SRAM heap:
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*/
#include <linux/cma.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include "heap-helpers.h"
struct cma_heap {
struct dma_heap *heap;
struct cma *cma;
};
static void cma_heap_free(struct heap_helper_buffer *buffer)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
unsigned long nr_pages = buffer->pagecount;
struct page *cma_pages = buffer->priv_virt;
struct cma_heap_buffer {
struct cma_heap *heap;
struct list_head attachments;
struct mutex lock;
unsigned long len;
struct page *cma_pages;
struct page **pages;
pgoff_t pagecount;
int vmap_cnt;
void *vaddr;
};
/* free page list */
kfree(buffer->pages);
/* release memory */
cma_release(cma_heap->cma, cma_pages, nr_pages);
struct dma_heap_attachment {
struct device *dev;
struct sg_table table;
struct list_head list;
bool mapped;
};
static int cma_heap_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
int ret;
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
buffer->pagecount, 0,
buffer->pagecount << PAGE_SHIFT,
GFP_KERNEL);
if (ret) {
kfree(a);
return ret;
}
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->mapped = false;
attachment->priv = a;
mutex_lock(&buffer->lock);
list_add(&a->list, &buffer->attachments);
mutex_unlock(&buffer->lock);
return 0;
}
static void cma_heap_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a = attachment->priv;
mutex_lock(&buffer->lock);
list_del(&a->list);
mutex_unlock(&buffer->lock);
sg_free_table(&a->table);
kfree(a);
}
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
struct sg_table *table = &a->table;
int ret;
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
if (ret)
return ERR_PTR(-ENOMEM);
a->mapped = true;
return table;
}
static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
a->mapped = false;
dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
if (buffer->vmap_cnt)
invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
}
mutex_unlock(&buffer->lock);
return 0;
}
static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
if (buffer->vmap_cnt)
flush_kernel_vmap_range(buffer->vaddr, buffer->len);
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
dma_sync_sgtable_for_device(a->dev, &a->table, direction);
}
mutex_unlock(&buffer->lock);
return 0;
}
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct cma_heap_buffer *buffer = vma->vm_private_data;
if (vmf->pgoff > buffer->pagecount)
return VM_FAULT_SIGBUS;
vmf->page = buffer->pages[vmf->pgoff];
get_page(vmf->page);
return 0;
}
static const struct vm_operations_struct dma_heap_vm_ops = {
.fault = cma_heap_vm_fault,
};
static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
vma->vm_ops = &dma_heap_vm_ops;
vma->vm_private_data = buffer;
return 0;
}
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
void *vaddr;
vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
if (!vaddr)
return ERR_PTR(-ENOMEM);
return vaddr;
}
static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
void *vaddr;
int ret = 0;
mutex_lock(&buffer->lock);
if (buffer->vmap_cnt) {
buffer->vmap_cnt++;
dma_buf_map_set_vaddr(map, buffer->vaddr);
goto out;
}
vaddr = cma_heap_do_vmap(buffer);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto out;
}
buffer->vaddr = vaddr;
buffer->vmap_cnt++;
dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
mutex_unlock(&buffer->lock);
return ret;
}
static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
if (!--buffer->vmap_cnt) {
vunmap(buffer->vaddr);
buffer->vaddr = NULL;
}
mutex_unlock(&buffer->lock);
dma_buf_map_clear(map);
}
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
struct cma_heap *cma_heap = buffer->heap;
if (buffer->vmap_cnt > 0) {
WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
vunmap(buffer->vaddr);
buffer->vaddr = NULL;
}
cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
kfree(buffer);
}
/* dmabuf heap CMA operations functions */
static const struct dma_buf_ops cma_heap_buf_ops = {
.attach = cma_heap_attach,
.detach = cma_heap_detach,
.map_dma_buf = cma_heap_map_dma_buf,
.unmap_dma_buf = cma_heap_unmap_dma_buf,
.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
.mmap = cma_heap_mmap,
.vmap = cma_heap_vmap,
.vunmap = cma_heap_vunmap,
.release = cma_heap_dma_buf_release,
};
static int cma_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
struct heap_helper_buffer *helper_buffer;
struct page *cma_pages;
struct cma_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
size_t size = PAGE_ALIGN(len);
unsigned long nr_pages = size >> PAGE_SHIFT;
pgoff_t pagecount = size >> PAGE_SHIFT;
unsigned long align = get_order(size);
struct page *cma_pages;
struct dma_buf *dmabuf;
int ret = -ENOMEM;
pgoff_t pg;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
buffer->len = size;
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
if (!helper_buffer)
return -ENOMEM;
init_heap_helper_buffer(helper_buffer, cma_heap_free);
helper_buffer->heap = heap;
helper_buffer->size = len;
cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
if (!cma_pages)
goto free_buf;
goto free_buffer;
/* Clear the cma pages */
if (PageHighMem(cma_pages)) {
unsigned long nr_clear_pages = nr_pages;
unsigned long nr_clear_pages = pagecount;
struct page *page = cma_pages;
while (nr_clear_pages > 0) {
@@ -85,7 +314,6 @@ static int cma_heap_allocate(struct dma_heap *heap,
*/
if (fatal_signal_pending(current))
goto free_cma;
page++;
nr_clear_pages--;
}
@@ -93,28 +321,30 @@ static int cma_heap_allocate(struct dma_heap *heap,
memset(page_address(cma_pages), 0, size);
}
helper_buffer->pagecount = nr_pages;
helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
sizeof(*helper_buffer->pages),
GFP_KERNEL);
if (!helper_buffer->pages) {
buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
if (!buffer->pages) {
ret = -ENOMEM;
goto free_cma;
}
for (pg = 0; pg < helper_buffer->pagecount; pg++)
helper_buffer->pages[pg] = &cma_pages[pg];
for (pg = 0; pg < pagecount; pg++)
buffer->pages[pg] = &cma_pages[pg];
buffer->cma_pages = cma_pages;
buffer->heap = cma_heap;
buffer->pagecount = pagecount;
/* create the dmabuf */
dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
exp_info.ops = &cma_heap_buf_ops;
exp_info.size = buffer->len;
exp_info.flags = fd_flags;
exp_info.priv = buffer;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
goto free_pages;
}
helper_buffer->dmabuf = dmabuf;
helper_buffer->priv_virt = cma_pages;
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
@@ -125,11 +355,12 @@ static int cma_heap_allocate(struct dma_heap *heap,
return ret;
free_pages:
kfree(helper_buffer->pages);
kfree(buffer->pages);
free_cma:
cma_release(cma_heap->cma, cma_pages, nr_pages);
free_buf:
kfree(helper_buffer);
cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
kfree(buffer);
return ret;
}
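The rewrite only changes how the heap implements the dma-buf ops; the user-facing side is still the dma-heap allocation ioctl handing back a dma-buf fd. A minimal user-space sketch, assuming the standard /dev/dma_heap/* naming (the CMA heap registers under its CMA region's name; the "system" heap is shown here, error handling trimmed and flags illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data = {
		.len = 4096,			/* one page */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* flags for the returned dma-buf fd */
	};
	int heap = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);

	if (heap < 0 || ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
		perror("dma-heap alloc");
		return 1;
	}
	printf("got dma-buf fd %u\n", data.fd);	/* mmap()able, shareable */
	close(data.fd);
	close(heap);
	return 0;
}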

View File

@@ -1,274 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <uapi/linux/dma-heap.h>
#include "heap-helpers.h"
void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
void (*free)(struct heap_helper_buffer *))
{
buffer->priv_virt = NULL;
mutex_init(&buffer->lock);
buffer->vmap_cnt = 0;
buffer->vaddr = NULL;
buffer->pagecount = 0;
buffer->pages = NULL;
INIT_LIST_HEAD(&buffer->attachments);
buffer->free = free;
}
struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
int fd_flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.ops = &heap_helper_ops;
exp_info.size = buffer->size;
exp_info.flags = fd_flags;
exp_info.priv = buffer;
return dma_buf_export(&exp_info);
}
static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
{
void *vaddr;
vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
if (!vaddr)
return ERR_PTR(-ENOMEM);
return vaddr;
}
static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
{
if (buffer->vmap_cnt > 0) {
WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
vunmap(buffer->vaddr);
}
buffer->free(buffer);
}
static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
{
void *vaddr;
if (buffer->vmap_cnt) {
buffer->vmap_cnt++;
return buffer->vaddr;
}
vaddr = dma_heap_map_kernel(buffer);
if (IS_ERR(vaddr))
return vaddr;
buffer->vaddr = vaddr;
buffer->vmap_cnt++;
return vaddr;
}
static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
{
if (!--buffer->vmap_cnt) {
vunmap(buffer->vaddr);
buffer->vaddr = NULL;
}
}
struct dma_heaps_attachment {
struct device *dev;
struct sg_table table;
struct list_head list;
};
static int dma_heap_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct dma_heaps_attachment *a;
struct heap_helper_buffer *buffer = dmabuf->priv;
int ret;
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
buffer->pagecount, 0,
buffer->pagecount << PAGE_SHIFT,
GFP_KERNEL);
if (ret) {
kfree(a);
return ret;
}
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
attachment->priv = a;
mutex_lock(&buffer->lock);
list_add(&a->list, &buffer->attachments);
mutex_unlock(&buffer->lock);
return 0;
}
static void dma_heap_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct dma_heaps_attachment *a = attachment->priv;
struct heap_helper_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
list_del(&a->list);
mutex_unlock(&buffer->lock);
sg_free_table(&a->table);
kfree(a);
}
static
struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_heaps_attachment *a = attachment->priv;
struct sg_table *table = &a->table;
int ret;
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
if (ret)
table = ERR_PTR(ret);
return table;
}
static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction)
{
dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct heap_helper_buffer *buffer = vma->vm_private_data;
if (vmf->pgoff > buffer->pagecount)
return VM_FAULT_SIGBUS;
vmf->page = buffer->pages[vmf->pgoff];
get_page(vmf->page);
return 0;
}
static const struct vm_operations_struct dma_heap_vm_ops = {
.fault = dma_heap_vm_fault,
};
static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
vma->vm_ops = &dma_heap_vm_ops;
vma->vm_private_data = buffer;
return 0;
}
static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
dma_heap_buffer_destroy(buffer);
}
static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
struct dma_heaps_attachment *a;
int ret = 0;
mutex_lock(&buffer->lock);
if (buffer->vmap_cnt)
invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
direction);
}
mutex_unlock(&buffer->lock);
return ret;
}
static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
struct dma_heaps_attachment *a;
mutex_lock(&buffer->lock);
if (buffer->vmap_cnt)
flush_kernel_vmap_range(buffer->vaddr, buffer->size);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
direction);
}
mutex_unlock(&buffer->lock);
return 0;
}
static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
void *vaddr;
mutex_lock(&buffer->lock);
vaddr = dma_heap_buffer_vmap_get(buffer);
mutex_unlock(&buffer->lock);
if (!vaddr)
return -ENOMEM;
dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
dma_heap_buffer_vmap_put(buffer);
mutex_unlock(&buffer->lock);
}
const struct dma_buf_ops heap_helper_ops = {
.map_dma_buf = dma_heap_map_dma_buf,
.unmap_dma_buf = dma_heap_unmap_dma_buf,
.mmap = dma_heap_mmap,
.release = dma_heap_dma_buf_release,
.attach = dma_heap_attach,
.detach = dma_heap_detach,
.begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
.end_cpu_access = dma_heap_dma_buf_end_cpu_access,
.vmap = dma_heap_dma_buf_vmap,
.vunmap = dma_heap_dma_buf_vunmap,
};

View File

@@ -1,53 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* DMABUF Heaps helper code
*
* Copyright (C) 2011 Google, Inc.
* Copyright (C) 2019 Linaro Ltd.
*/
#ifndef _HEAP_HELPERS_H
#define _HEAP_HELPERS_H
#include <linux/dma-heap.h>
#include <linux/list.h>
/**
* struct heap_helper_buffer - helper buffer metadata
* @heap: back pointer to the heap the buffer came from
* @dmabuf: backing dma-buf for this buffer
* @size: size of the buffer
* @priv_virt pointer to heap specific private value
* @lock mutext to protect the data in this structure
* @vmap_cnt count of vmap references on the buffer
* @vaddr vmap'ed virtual address
* @pagecount number of pages in the buffer
* @pages list of page pointers
* @attachments list of device attachments
*
* @free heap callback to free the buffer
*/
struct heap_helper_buffer {
struct dma_heap *heap;
struct dma_buf *dmabuf;
size_t size;
void *priv_virt;
struct mutex lock;
int vmap_cnt;
void *vaddr;
pgoff_t pagecount;
struct page **pages;
struct list_head attachments;
void (*free)(struct heap_helper_buffer *buffer);
};
void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
void (*free)(struct heap_helper_buffer *));
struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
int fd_flags);
extern const struct dma_buf_ops heap_helper_ops;
#endif /* _HEAP_HELPERS_H */

View File

@@ -3,7 +3,11 @@
* DMABUF System heap exporter
*
* Copyright (C) 2011 Google, Inc.
* Copyright (C) 2019 Linaro Ltd.
* Copyright (C) 2019, 2020 Linaro Ltd.
*
* Portions based off of Andrew Davis' SRAM heap:
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*/
#include <linux/dma-buf.h>
@@ -15,87 +19,404 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <asm/page.h>
#include <linux/vmalloc.h>
#include "heap-helpers.h"
static struct dma_heap *sys_heap;
struct dma_heap *sys_heap;
struct system_heap_buffer {
struct dma_heap *heap;
struct list_head attachments;
struct mutex lock;
unsigned long len;
struct sg_table sg_table;
int vmap_cnt;
void *vaddr;
};
static void system_heap_free(struct heap_helper_buffer *buffer)
struct dma_heap_attachment {
struct device *dev;
struct sg_table *table;
struct list_head list;
bool mapped;
};
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
| __GFP_NORETRY) & ~__GFP_RECLAIM) \
| __GFP_COMP)
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
/*
* The selection of the orders used for allocation (1MB, 64K, 4K) is designed
* to match with the sizes often found in IOMMUs. Using order 4 pages instead
* of order 0 pages can significantly improve the performance of many IOMMUs
* by reducing TLB pressure and time spent updating page tables.
*/
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
static struct sg_table *dup_sg_table(struct sg_table *table)
{
pgoff_t pg;
struct sg_table *new_table;
int ret, i;
struct scatterlist *sg, *new_sg;
for (pg = 0; pg < buffer->pagecount; pg++)
__free_page(buffer->pages[pg]);
kfree(buffer->pages);
new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
if (!new_table)
return ERR_PTR(-ENOMEM);
ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
if (ret) {
kfree(new_table);
return ERR_PTR(-ENOMEM);
}
new_sg = new_table->sgl;
for_each_sgtable_sg(table, sg, i) {
sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
new_sg = sg_next(new_sg);
}
return new_table;
}
static int system_heap_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
struct sg_table *table;
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
table = dup_sg_table(&buffer->sg_table);
if (IS_ERR(table)) {
kfree(a);
return -ENOMEM;
}
a->table = table;
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->mapped = false;
attachment->priv = a;
mutex_lock(&buffer->lock);
list_add(&a->list, &buffer->attachments);
mutex_unlock(&buffer->lock);
return 0;
}
static void system_heap_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a = attachment->priv;
mutex_lock(&buffer->lock);
list_del(&a->list);
mutex_unlock(&buffer->lock);
sg_free_table(a->table);
kfree(a->table);
kfree(a);
}
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
struct sg_table *table = a->table;
int ret;
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
if (ret)
return ERR_PTR(ret);
a->mapped = true;
return table;
}
static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
a->mapped = false;
dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
mutex_lock(&buffer->lock);
if (buffer->vmap_cnt)
invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
}
mutex_unlock(&buffer->lock);
return 0;
}
static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
mutex_lock(&buffer->lock);
if (buffer->vmap_cnt)
flush_kernel_vmap_range(buffer->vaddr, buffer->len);
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
dma_sync_sgtable_for_device(a->dev, a->table, direction);
}
mutex_unlock(&buffer->lock);
return 0;
}
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct sg_table *table = &buffer->sg_table;
unsigned long addr = vma->vm_start;
struct sg_page_iter piter;
int ret;
for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
struct page *page = sg_page_iter_page(&piter);
ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
vma->vm_page_prot);
if (ret)
return ret;
addr += PAGE_SIZE;
if (addr >= vma->vm_end)
return 0;
}
return 0;
}
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
struct sg_table *table = &buffer->sg_table;
int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
struct page **pages = vmalloc(sizeof(struct page *) * npages);
struct page **tmp = pages;
struct sg_page_iter piter;
void *vaddr;
if (!pages)
return ERR_PTR(-ENOMEM);
for_each_sgtable_page(table, &piter, 0) {
WARN_ON(tmp - pages >= npages);
*tmp++ = sg_page_iter_page(&piter);
}
vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
vfree(pages);
if (!vaddr)
return ERR_PTR(-ENOMEM);
return vaddr;
}
static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct system_heap_buffer *buffer = dmabuf->priv;
void *vaddr;
int ret = 0;
mutex_lock(&buffer->lock);
if (buffer->vmap_cnt) {
buffer->vmap_cnt++;
dma_buf_map_set_vaddr(map, buffer->vaddr);
goto out;
}
vaddr = system_heap_do_vmap(buffer);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto out;
}
buffer->vaddr = vaddr;
buffer->vmap_cnt++;
dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
mutex_unlock(&buffer->lock);
return ret;
}
static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct system_heap_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
if (!--buffer->vmap_cnt) {
vunmap(buffer->vaddr);
buffer->vaddr = NULL;
}
mutex_unlock(&buffer->lock);
dma_buf_map_clear(map);
}
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct sg_table *table;
struct scatterlist *sg;
int i;
table = &buffer->sg_table;
for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);
__free_pages(page, compound_order(page));
}
sg_free_table(table);
kfree(buffer);
}
static const struct dma_buf_ops system_heap_buf_ops = {
.attach = system_heap_attach,
.detach = system_heap_detach,
.map_dma_buf = system_heap_map_dma_buf,
.unmap_dma_buf = system_heap_unmap_dma_buf,
.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
.end_cpu_access = system_heap_dma_buf_end_cpu_access,
.mmap = system_heap_mmap,
.vmap = system_heap_vmap,
.vunmap = system_heap_vunmap,
.release = system_heap_dma_buf_release,
};
static struct page *alloc_largest_available(unsigned long size,
unsigned int max_order)
{
struct page *page;
int i;
for (i = 0; i < NUM_ORDERS; i++) {
if (size < (PAGE_SIZE << orders[i]))
continue;
if (max_order < orders[i])
continue;
page = alloc_pages(order_flags[i], orders[i]);
if (!page)
continue;
return page;
}
return NULL;
}
static int system_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
{
struct heap_helper_buffer *helper_buffer;
struct system_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
unsigned long size_remaining = len;
unsigned int max_order = orders[0];
struct dma_buf *dmabuf;
int ret = -ENOMEM;
pgoff_t pg;
struct sg_table *table;
struct scatterlist *sg;
struct list_head pages;
struct page *page, *tmp_page;
int i, ret = -ENOMEM;
helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
if (!helper_buffer)
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
init_heap_helper_buffer(helper_buffer, system_heap_free);
helper_buffer->heap = heap;
helper_buffer->size = len;
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
buffer->heap = heap;
buffer->len = len;
helper_buffer->pagecount = len / PAGE_SIZE;
helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
sizeof(*helper_buffer->pages),
GFP_KERNEL);
if (!helper_buffer->pages) {
ret = -ENOMEM;
goto err0;
}
for (pg = 0; pg < helper_buffer->pagecount; pg++) {
INIT_LIST_HEAD(&pages);
i = 0;
while (size_remaining > 0) {
/*
* Avoid trying to allocate memory if the process
* has been killed by by SIGKILL
* has been killed by SIGKILL
*/
if (fatal_signal_pending(current))
goto err1;
goto free_buffer;
helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!helper_buffer->pages[pg])
goto err1;
page = alloc_largest_available(size_remaining, max_order);
if (!page)
goto free_buffer;
list_add_tail(&page->lru, &pages);
size_remaining -= page_size(page);
max_order = compound_order(page);
i++;
}
table = &buffer->sg_table;
if (sg_alloc_table(table, i, GFP_KERNEL))
goto free_buffer;
sg = table->sgl;
list_for_each_entry_safe(page, tmp_page, &pages, lru) {
sg_set_page(sg, page, page_size(page), 0);
sg = sg_next(sg);
list_del(&page->lru);
}
/* create the dmabuf */
dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
exp_info.ops = &system_heap_buf_ops;
exp_info.size = buffer->len;
exp_info.flags = fd_flags;
exp_info.priv = buffer;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
goto err1;
goto free_pages;
}
helper_buffer->dmabuf = dmabuf;
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
/* just return, as put will call release and that will free */
return ret;
}
return ret;
err1:
while (pg > 0)
__free_page(helper_buffer->pages[--pg]);
kfree(helper_buffer->pages);
err0:
kfree(helper_buffer);
free_pages:
for_each_sgtable_sg(table, sg, i) {
struct page *p = sg_page(sg);
__free_pages(p, compound_order(p));
}
sg_free_table(table);
free_buffer:
list_for_each_entry_safe(page, tmp_page, &pages, lru)
__free_pages(page, compound_order(page));
kfree(buffer);
return ret;
}
@@ -107,7 +428,6 @@ static const struct dma_heap_ops system_heap_ops = {
static int system_heap_create(void)
{
struct dma_heap_export_info exp_info;
int ret = 0;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
@@ -115,9 +435,9 @@ static int system_heap_create(void)
sys_heap = dma_heap_add(&exp_info);
if (IS_ERR(sys_heap))
ret = PTR_ERR(sys_heap);
return PTR_ERR(sys_heap);
return ret;
return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");
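For intuition on the allocation loop above: alloc_largest_available() walks orders[] greedily, and max_order only ratchets downward, so a failed or oversized high order is never retried for the rest of the buffer. A hypothetical user-space model of just that walk (sizes invented, alloc_pages() assumed to always succeed):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS (sizeof(orders) / sizeof(orders[0]))

int main(void)
{
	/* 1 MiB + 64 KiB + 16 KiB, page aligned as the heap expects */
	unsigned long size_remaining = (1UL << 20) + (64UL << 10) + (16UL << 10);
	unsigned int max_order = orders[0];

	while (size_remaining > 0) {
		unsigned int i;

		for (i = 0; i < NUM_ORDERS; i++) {
			if (size_remaining < (PAGE_SIZE << orders[i]))
				continue;
			if (max_order < orders[i])
				continue;
			break;	/* model alloc_pages() succeeding here */
		}
		/* prints one order-8, one order-4, then four order-0 chunks */
		printf("order-%u chunk (%lu KiB)\n", orders[i],
		       (PAGE_SIZE << orders[i]) >> 10);
		size_remaining -= PAGE_SIZE << orders[i];
		max_order = orders[i];
	}
	return 0;
}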

View File

@@ -1024,6 +1024,7 @@ struct amdgpu_device {
/* enable runtime pm on the device */
bool runpm;
bool in_runpm;
bool has_pr3;
bool pm_sysfs_en;
bool ucode_sysfs_en;
@@ -1230,6 +1231,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
@@ -1280,6 +1282,8 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
int amdgpu_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
/*
* functions used by amdgpu_encoder.c
@@ -1311,11 +1315,11 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_is_s0ix_supported(void);
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_is_s0ix_supported(void) { return false; }
static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
#endif
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,

View File

@@ -901,10 +901,12 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
*
* returns true if supported, false if not.
*/
bool amdgpu_acpi_is_s0ix_supported(void)
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
{
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
return true;
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
if (adev->flags & AMD_IS_APU)
return true;
}
return false;
}

View File

@@ -1213,7 +1213,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
pr_debug("Insufficient system memory\n");
pr_debug("Insufficient memory\n");
goto err_reserve_limit;
}

View File

@@ -212,7 +212,24 @@ static DEVICE_ATTR(serial_number, S_IRUGO,
amdgpu_device_get_serial_number, NULL);
/**
* amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
* amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
*
* @dev: drm_device pointer
*
* Returns true if the device is a dGPU with HG/PX power control,
* otherwise return false.
*/
bool amdgpu_device_supports_atpx(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
if (adev->flags & AMD_IS_PX)
return true;
return false;
}
/**
* amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
*
* @dev: drm_device pointer
*
@@ -223,7 +240,7 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
if (adev->flags & AMD_IS_PX)
if (adev->has_pr3)
return true;
return false;
}
@@ -1398,7 +1415,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2650,7 +2667,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
if (!amdgpu_acpi_is_s0ix_supported() || amdgpu_in_reset(adev)) {
if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
}
@@ -3177,7 +3194,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
bool boco = false;
bool atpx = false;
u32 max_MBps;
adev->shutdown = false;
@@ -3349,15 +3366,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
if (amdgpu_device_supports_boco(ddev))
boco = true;
if (amdgpu_device_supports_atpx(ddev))
atpx = true;
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
!pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, boco);
if (boco)
&amdgpu_switcheroo_ops, atpx);
if (atpx)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
if (amdgpu_emu_mode == 1) {
@@ -3540,7 +3557,7 @@ fence_driver_init:
failed:
amdgpu_vf_error_trans_all(adev);
if (boco)
if (atpx)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
failed_unmap:
@@ -3604,7 +3621,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_has_atpx_dgpu_power_cntl()) &&
!pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_unregister_client(adev->pdev);
if (amdgpu_device_supports_boco(adev_to_drm(adev)))
if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
@@ -3710,7 +3727,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_fence_driver_suspend(adev);
if (!amdgpu_acpi_is_s0ix_supported() || amdgpu_in_reset(adev))
if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
r = amdgpu_device_ip_suspend_phase2(adev);
else
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
@@ -3744,7 +3761,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (amdgpu_acpi_is_s0ix_supported())
if (amdgpu_acpi_is_s0ix_supported(adev))
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
/* post card */

View File

@@ -1340,7 +1340,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
adev->in_runpm = true;
if (amdgpu_device_supports_boco(drm_dev))
if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@@ -1348,13 +1348,11 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
if (ret)
return ret;
if (amdgpu_device_supports_boco(drm_dev)) {
if (amdgpu_device_supports_atpx(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
if (amdgpu_is_atpx_hybrid()) {
pci_ignore_hotplug(pdev);
} else {
if (!amdgpu_is_atpx_hybrid()) {
amdgpu_device_cache_pci_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
@@ -1378,28 +1376,31 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
if (!adev->runpm)
return -EINVAL;
if (amdgpu_device_supports_boco(drm_dev)) {
if (amdgpu_device_supports_atpx(drm_dev)) {
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
if (amdgpu_is_atpx_hybrid()) {
pci_set_master(pdev);
} else {
if (!amdgpu_is_atpx_hybrid()) {
pci_set_power_state(pdev, PCI_D0);
amdgpu_device_load_pci_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
pci_set_master(pdev);
}
pci_set_master(pdev);
} else if (amdgpu_device_supports_boco(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
pci_set_master(pdev);
} else if (amdgpu_device_supports_baco(drm_dev)) {
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
drm_kms_helper_poll_enable(drm_dev);
if (amdgpu_device_supports_boco(drm_dev))
if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
return 0;
@@ -1533,8 +1534,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),

View File

@@ -496,13 +496,14 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
break;
}
if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE))
if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
size = 0;
else
} else {
size = amdgpu_gmc_get_vbios_fb_size(adev);
if (adev->mman.keep_stolen_vga_memory)
size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
if (adev->mman.keep_stolen_vga_memory)
size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
}
/* set to 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))

View File

@@ -133,6 +133,7 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
struct drm_device *dev;
struct pci_dev *parent;
int r, acpi_status;
dev = adev_to_drm(adev);
@@ -144,6 +145,9 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
!pci_is_thunderbolt_attached(dev->pdev))
flags |= AMD_IS_PX;
parent = pci_upstream_bridge(adev->pdev);
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
/* amdgpu_device_init should report only fatal error
* like memory allocation failure or iomapping failure,
* or memory manager initialization failure, it must
@@ -156,9 +160,14 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
goto out;
}
if (amdgpu_device_supports_boco(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
if (amdgpu_device_supports_atpx(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
adev->runpm = true;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
} else if (amdgpu_device_supports_boco(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
adev->runpm = true;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else if (amdgpu_device_supports_baco(dev) &&
(amdgpu_runtime_pm != 0)) {
switch (adev->asic_type) {
@@ -180,6 +189,8 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
adev->runpm = true;
break;
}
if (adev->runpm)
dev_info(adev->dev, "Using BACO for runtime pm\n");
}
/* Call ACPI methods: require modeset init
@@ -192,7 +203,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (adev->runpm) {
/* only need to skip on ATPX */
if (amdgpu_device_supports_boco(dev) &&
if (amdgpu_device_supports_atpx(dev) &&
!amdgpu_is_atpx_hybrid())
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);

View File

@@ -358,10 +358,11 @@ TRACE_EVENT(amdgpu_vm_update_ptes,
}
),
TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
" flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
" flags:0x%llx, incr:%llu, dst:\n%s%s", __entry->pid,
__entry->vm_ctx, __entry->start, __entry->end,
__entry->flags, __entry->incr, __print_array(
__get_dynamic_array(dst), __entry->nptes, 8))
__get_dynamic_array(dst), min(__entry->nptes, 32u), 8),
__entry->nptes > 32 ? "..." : "")
);
TRACE_EVENT(amdgpu_vm_set_ptes,

View File

@@ -240,7 +240,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
/*
@@ -267,7 +267,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
enc_major, enc_minor, dec_minor, family_id);
adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

View File

@@ -179,7 +179,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
version_major = (ucode_version >> 20) & 0xfff;
version_minor = (ucode_version >> 8) & 0xfff;
binary_id = ucode_version & 0xff;
DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
version_major, version_minor, binary_id);
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
(binary_id << 8));

View File

@@ -181,7 +181,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
enc_major = fw_check;
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
@@ -189,7 +189,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
}

View File

@@ -136,6 +136,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
break;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
break;
default:

View File

@@ -187,7 +187,16 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
int ret, i = 0;
while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
if (!ret)
break;
i++;
}
return ret;
}
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,

View File

@@ -25,8 +25,9 @@
#define __MXGPU_AI_H__
#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
#define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000
#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000
#define AI_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,

View File

@@ -200,7 +200,16 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
int ret, i = 0;
while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
if (!ret)
break;
i++;
}
return ret;
}
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,

View File

@@ -27,6 +27,7 @@
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000
#define NV_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,

View File

@@ -362,6 +362,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
return AMD_RESET_METHOD_MODE1;
default:
if (smu_baco_is_support(smu))

View File

@@ -153,6 +153,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
return 0;
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -807,6 +810,37 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
static int sdma_v5_2_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset;
u32 tmp;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
grbm_soft_reset = REG_SET_FIELD(0,
GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
1);
grbm_soft_reset <<= i;
tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
tmp |= grbm_soft_reset;
DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
udelay(50);
tmp &= ~grbm_soft_reset;
WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
udelay(50);
}
return 0;
}
/**
* sdma_v5_2_start - setup and start the async dma engines
*
@@ -838,6 +872,7 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
/* enable sdma ring preemption */
@@ -1366,13 +1401,6 @@ static int sdma_v5_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
static int sdma_v5_2_soft_reset(void *handle)
{
/* todo */
return 0;
}
static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;

View File

@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
#
# Heterogenous system architecture configuration
# Heterogeneous system architecture configuration
#
config HSA_AMD

View File

@@ -72,8 +72,8 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
int i;
int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;
int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
+ pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
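The fix applies the queue stride to the combined MEC+pipe index instead of to the pipe number alone. With made-up topology numbers, 4 pipes per MEC and 8 queues per pipe, mec=1/pipe=2 shows the difference:

#include <stdio.h>

int main(void)
{
	/* illustrative values, not read from any real topology */
	int num_pipe_per_mec = 4, num_queue_per_pipe = 8;
	int mec = 1, pipe = 2;

	int old_off = mec * num_pipe_per_mec + pipe * num_queue_per_pipe;
	int new_off = (mec * num_pipe_per_mec + pipe) * num_queue_per_pipe;

	/* old=20 points mid-bitmap; new=48 is the first queue bit of
	 * that pipe, i.e. (1*4 + 2) * 8 */
	printf("old=%d new=%d\n", old_off, new_off);
	return 0;
}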

View File

@@ -196,10 +196,6 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -2212,7 +2208,7 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.get_format_info = amd_get_format_info,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = amdgpu_dm_atomic_commit,
.atomic_commit = drm_atomic_helper_commit,
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -5124,9 +5120,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
#endif
uint32_t link_bandwidth_kbps;
#endif
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
@@ -5208,11 +5203,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
&dsc_caps);
#endif
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
@@ -5349,7 +5342,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
}
#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state,
struct drm_property *property,
uint64_t val)
@@ -5373,7 +5366,7 @@ int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
return 0;
}
int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
uint64_t *val)
@@ -8070,20 +8063,6 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
{
/*
* Add check here for SoC's that support hardware cursor plane, to
* unset legacy_cursor_update
*/
return drm_atomic_helper_commit(dev, state, nonblock);
/*TODO Handle EINTR, reenable IRQ*/
}
/**
* amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
* @state: The atomic state to commit

View File

@@ -337,10 +337,29 @@ struct amdgpu_display_manager {
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
#ifdef CONFIG_DEBUG_FS
/* set the crc calculation window*/
/**
* @crc_win_x_start_property:
*
* X start of the crc calculation window
*/
struct drm_property *crc_win_x_start_property;
/**
* @crc_win_y_start_property:
*
* Y start of the crc calculation window
*/
struct drm_property *crc_win_y_start_property;
/**
* @crc_win_x_end_property:
*
* X end of the crc calculation window
*/
struct drm_property *crc_win_x_end_property;
/**
* @crc_win_y_end_property:
*
* Y end of the crc calculation window
*/
struct drm_property *crc_win_y_end_property;
#endif
/**

View File

@@ -81,6 +81,14 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
return pipe_crc_sources;
}
static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
{
dm_crtc_state->crc_window.x_start = 0;
dm_crtc_state->crc_window.y_start = 0;
dm_crtc_state->crc_window.x_end = 0;
dm_crtc_state->crc_window.y_end = 0;
}
bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
{
bool ret = true;
@@ -141,7 +149,10 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
/* Enable CRTC CRC generation if necessary. */
if (dm_is_crc_source_crtc(source)) {
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!enable)
amdgpu_dm_set_crc_window_default(dm_crtc_state);
if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
crc_window = &tmp_window;

View File

@@ -24,6 +24,7 @@
*/
#include <linux/version.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_dp_helper.h>
@ -252,8 +253,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
struct drm_atomic_state *state)
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
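Editorial note: the atomic_best_encoder conversion above is part of the "pass full atomic modeset state to driver callbacks" core change, and any out-of-tree driver implementing the hook follows the same shape. A minimal sketch, assuming a driver-specific lookup; example_atomic_best_encoder() and example_pick_encoder() are hypothetical names, not from this series:

static struct drm_encoder *
example_atomic_best_encoder(struct drm_connector *connector,
			    struct drm_atomic_state *state)
{
	/* The connector state is no longer passed in directly; look it
	 * up from the full atomic state the core now provides. */
	struct drm_connector_state *conn_state =
		drm_atomic_get_new_connector_state(state, connector);

	if (!conn_state || !conn_state->crtc)
		return NULL;

	/* Driver-specific choice keyed on the CRTC, as in the MST
	 * helper above. */
	return example_pick_encoder(connector, conn_state->crtc);
}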


@ -746,24 +746,24 @@ static struct wm_table ddr4_wm_table_rn = {
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.sr_exit_time_us = 11.12,
.sr_enter_plus_exit_time_us = 12.48,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.sr_exit_time_us = 11.12,
.sr_enter_plus_exit_time_us = 12.48,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 10.12,
.sr_enter_plus_exit_time_us = 11.48,
.sr_exit_time_us = 11.12,
.sr_enter_plus_exit_time_us = 12.48,
.valid = true,
},
}


@ -2625,6 +2625,26 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
if (update_type != UPDATE_TYPE_FAST) {
// If changing VTG FP2: wait until back in vactive to program FP2
// Need to ensure that pipe unlock happens soon after to minimize race condition
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->top_pipe || pipe_ctx->stream != stream)
continue;
if (!pipe_ctx->update_flags.bits.global_sync)
continue;
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
}
}
if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, false);
else


@ -3267,9 +3267,6 @@ void core_link_enable_stream(
}
}
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#endif
/* turn off otg test pattern if enable */
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,


@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
#define DC_VER "3.2.115"
#define DC_VER "3.2.116"
#define MAX_SURFACES 3
#define MAX_PLANES 6


@ -119,7 +119,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy,
.get_max_link_cap = dce110_link_encoder_get_max_link_cap
.get_max_link_cap = dce110_link_encoder_get_max_link_cap,
.get_dig_frontend = dce110_get_dig_frontend,
};
static enum bp_result link_transmitter_control(
@ -235,6 +236,44 @@ static void set_link_training_complete(
}
unsigned int dce110_get_dig_frontend(struct link_encoder *enc)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
u32 value;
enum engine_id result;
REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
switch (value) {
case DCE110_DIG_FE_SOURCE_SELECT_DIGA:
result = ENGINE_ID_DIGA;
break;
case DCE110_DIG_FE_SOURCE_SELECT_DIGB:
result = ENGINE_ID_DIGB;
break;
case DCE110_DIG_FE_SOURCE_SELECT_DIGC:
result = ENGINE_ID_DIGC;
break;
case DCE110_DIG_FE_SOURCE_SELECT_DIGD:
result = ENGINE_ID_DIGD;
break;
case DCE110_DIG_FE_SOURCE_SELECT_DIGE:
result = ENGINE_ID_DIGE;
break;
case DCE110_DIG_FE_SOURCE_SELECT_DIGF:
result = ENGINE_ID_DIGF;
break;
case DCE110_DIG_FE_SOURCE_SELECT_DIGG:
result = ENGINE_ID_DIGG;
break;
default:
// invalid source select DIG
result = ENGINE_ID_UNKNOWN;
}
return result;
}
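Editorial note: the new hook slots into struct link_encoder_funcs next to get_max_link_cap, as the two ops-table hunks in this file show. A hedged sketch of how a caller might probe it; dig_frontend_in_use() is illustrative only, not part of this series:

static bool dig_frontend_in_use(struct link_encoder *enc,
				enum engine_id fe)
{
	/* Not every encoder implements the hook, so check first. */
	if (!enc->funcs->get_dig_frontend)
		return false;

	return enc->funcs->get_dig_frontend(enc) == fe;
}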
void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
struct link_encoder *enc,
uint32_t index)
@ -1665,7 +1704,8 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy,
.get_max_link_cap = dce110_link_encoder_get_max_link_cap
.get_max_link_cap = dce110_link_encoder_get_max_link_cap,
.get_dig_frontend = dce110_get_dig_frontend
};
void dce60_link_encoder_construct(


@ -295,6 +295,8 @@ void dce110_link_encoder_connect_dig_be_to_fe(
enum engine_id engine,
bool connect);
unsigned int dce110_get_dig_frontend(struct link_encoder *enc);
void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
struct link_encoder *enc,
uint32_t index);


@ -1268,7 +1268,7 @@ void dce120_timing_generator_construct(
tg110->min_h_front_porch = 0;
tg110->min_h_back_porch = 0;
tg110->min_h_sync_width = 8;
tg110->min_h_sync_width = 4;
tg110->min_v_sync_width = 1;
tg110->min_v_blank = 3;
}


@ -124,11 +124,11 @@ bool hubbub1_verify_allow_pstate_change_high(
* still not asserted, we are probably stuck and going to hang
*
* TODO: Figure out why it takes ~100us on linux
* pstate takes around ~100us on linux. Unknown currently as to
* why it takes that long on linux
* pstate takes around ~100us (up to 200us) on linux. Unknown currently
* as to why it takes that long on linux
*/
const unsigned int pstate_wait_timeout_us = 200;
const unsigned int pstate_wait_expected_timeout_us = 40;
const unsigned int pstate_wait_expected_timeout_us = 180;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */


@ -2736,7 +2736,7 @@ static void dcn10_program_all_pipe_in_tree(
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);


@ -272,7 +272,7 @@ void optc1_program_timing(
vupdate_offset,
vupdate_width);
optc->funcs->set_vtg_params(optc, dc_crtc_timing);
optc->funcs->set_vtg_params(optc, dc_crtc_timing, true);
/* TODO
* patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
@ -312,7 +312,7 @@ void optc1_program_timing(
}
void optc1_set_vtg_params(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2)
{
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end;
@ -348,9 +348,12 @@ void optc1_set_vtg_params(struct timing_generator *optc,
}
}
REG_UPDATE_2(CONTROL,
VTG0_FP2, v_fp2,
VTG0_VCOUNT_INIT, v_init);
if (program_fp2)
REG_UPDATE_2(CONTROL,
VTG0_FP2, v_fp2,
VTG0_VCOUNT_INIT, v_init);
else
REG_UPDATE(CONTROL, VTG0_VCOUNT_INIT, v_init);
}
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
@ -1540,7 +1543,7 @@ void dcn10_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 8;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}


@ -700,6 +700,6 @@ bool optc1_get_crc(struct timing_generator *optc,
bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
void optc1_set_vtg_params(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */


@ -81,7 +81,9 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_SEC_CNTL2, DP, id), \
SRI(DP_SEC_CNTL5, DP, id), \
SRI(DP_SEC_CNTL6, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
@ -126,7 +128,9 @@ struct dcn10_stream_enc_registers {
uint32_t DP_MSE_RATE_UPDATE;
uint32_t DP_PIXEL_FORMAT;
uint32_t DP_SEC_CNTL;
uint32_t DP_SEC_CNTL1;
uint32_t DP_SEC_CNTL2;
uint32_t DP_SEC_CNTL5;
uint32_t DP_SEC_CNTL6;
uint32_t DP_STEER_FIFO;
uint32_t DP_VID_M;
@ -411,6 +415,8 @@ struct dcn10_stream_enc_registers {
type DP_SEC_GSP3_ENABLE;\
type DP_SEC_GSP4_ENABLE;\
type DP_SEC_GSP5_ENABLE;\
type DP_SEC_GSP5_LINE_NUM;\
type DP_SEC_GSP5_LINE_REFERENCE;\
type DP_SEC_GSP6_ENABLE;\
type DP_SEC_GSP7_ENABLE;\
type DP_SEC_GSP7_PPS;\


@ -1595,7 +1595,7 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
@ -1695,14 +1695,6 @@ void dcn20_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* wait for outstanding pending changes before adding or removing planes */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
context->res_ctx.pipe_ctx[i].update_flags.bits.enable) {
dc->hwss.wait_for_pending_cleared(dc, context);
break;
}
}
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
@ -1856,7 +1848,7 @@ bool dcn20_update_bandwidth(
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
if (pipe_ctx->prev_odm_pipe == NULL)
hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
@ -2251,11 +2243,11 @@ void dcn20_get_mpctree_visual_confirm_color(
{
const struct tg_color pipe_colors[6] = {
{MAX_TG_COLOR_VALUE, 0, 0}, // red
{MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
{0, MAX_TG_COLOR_VALUE, 0}, // blue
{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, // orange
{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, // yellow
{0, MAX_TG_COLOR_VALUE, 0}, // green
{0, 0, MAX_TG_COLOR_VALUE}, // blue
{MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
{0, 0, MAX_TG_COLOR_VALUE}, // green
{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
};
struct pipe_ctx *top_pipe = pipe_ctx;
@ -2280,14 +2272,11 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
// input to MPCC is always RGB, by default leave black_color at 0
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
hws->funcs.get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
hws->funcs.get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
hws->funcs.get_surface_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
dcn20_get_mpctree_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
dcn20_get_mpctree_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
}
if (per_pixel_alpha)


@ -83,6 +83,8 @@
SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\
SE_SF(DIG0_DIG_FE_CNTL, DOLBY_VISION_EN, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\
SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh)
void dcn20_stream_encoder_construct(


@ -32,5 +32,6 @@ struct dccg *dccg21_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
#endif /* __DCN21_DCCG_H__ */


@ -51,7 +51,7 @@
(enc10->link_regs->index)
static bool dcn30_link_encoder_validate_output_with_stream(
bool dcn30_link_encoder_validate_output_with_stream(
struct link_encoder *enc,
const struct dc_stream_state *stream)
{


@ -78,4 +78,8 @@ void dcn30_link_encoder_construct(
void enc3_hw_init(struct link_encoder *enc);
bool dcn30_link_encoder_validate_output_with_stream(
struct link_encoder *enc,
const struct dc_stream_state *stream);
#endif /* __DC_LINK_ENCODER__DCN30_H__ */


@ -668,7 +668,7 @@ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
if (!is_hdmi_tmds)
if (!is_hdmi_tmds && !is_dp)
return;
if (is_hdmi_tmds)


@ -350,7 +350,7 @@ void dcn30_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 8;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}


@ -271,7 +271,7 @@ struct timing_generator_funcs {
struct dc_crtc_timing *hw_crtc_timing);
void (*set_vtg_params)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
void (*set_dsc_config)(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,


@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
#define DMUB_FW_VERSION_GIT_HASH 0x931573111
#define DMUB_FW_VERSION_GIT_HASH 0xa18e25995
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
#define DMUB_FW_VERSION_REVISION 45
#define DMUB_FW_VERSION_REVISION 46
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@ -514,12 +514,20 @@ enum dp_aux_request_action {
enum aux_return_code_type {
AUX_RET_SUCCESS = 0,
AUX_RET_ERROR_UNKNOWN,
AUX_RET_ERROR_INVALID_REPLY,
AUX_RET_ERROR_TIMEOUT,
AUX_RET_ERROR_NO_DATA,
AUX_RET_ERROR_HPD_DISCON,
AUX_RET_ERROR_ENGINE_ACQUIRE,
AUX_RET_ERROR_INVALID_OPERATION,
AUX_RET_ERROR_PROTOCOL_ERROR,
};
enum aux_channel_type {
AUX_CHANNEL_LEGACY_DDC,
AUX_CHANNEL_DPIA
};
/* DP AUX command */
struct aux_transaction_parameters {
uint8_t is_i2c_over_aux;
@ -532,9 +540,10 @@ struct aux_transaction_parameters {
struct dmub_cmd_dp_aux_control_data {
uint32_t handle;
uint8_t port_index;
uint8_t instance;
uint8_t sw_crc_enabled;
uint16_t timeout;
enum aux_channel_type type;
struct aux_transaction_parameters dpaux;
};
@ -558,7 +567,7 @@ struct aux_reply_data {
struct aux_reply_control_data {
uint32_t handle;
uint8_t phy_port_index;
uint8_t instance;
uint8_t result;
uint16_t pad;
};
@ -581,7 +590,7 @@ enum dp_hpd_status {
};
struct dp_hpd_data {
uint8_t phy_port_index;
uint8_t instance;
uint8_t hpd_type;
uint8_t hpd_status;
uint8_t pad;
@ -732,27 +741,30 @@ enum dmub_cmd_abm_type {
struct abm_config_table {
/* Parameters for crgb conversion */
uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; // 0B
uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 15B
uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 31B
uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 16B
uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 32B
/* Parameters for custom curve */
uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 47B
uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 79B
uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 48B
uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 78B
uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 111B
uint16_t min_abm_backlight; // 121B
uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 112B
uint16_t min_abm_backlight; // 122B
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 123B
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 143B
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 163B
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 183B
uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 203B
uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 207B
uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 211B
uint8_t min_knee[NUM_AGGR_LEVEL]; // 215B
uint8_t max_knee[NUM_AGGR_LEVEL]; // 219B
uint8_t iir_curve[NUM_AMBI_LEVEL]; // 223B
uint8_t pad3[3]; // 228B
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 124B
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 144B
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 164B
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 184B
uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 204B
uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 208B
uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 212B
uint8_t min_knee[NUM_AGGR_LEVEL]; // 216B
uint8_t max_knee[NUM_AGGR_LEVEL]; // 220B
uint8_t iir_curve[NUM_AMBI_LEVEL]; // 224B
uint8_t pad3[3]; // 229B
uint16_t blRampReduction[NUM_AGGR_LEVEL]; // 232B
uint16_t blRampStart[NUM_AGGR_LEVEL]; // 240B
};
struct dmub_cmd_abm_set_pipe_data {


@ -30,6 +30,14 @@
#include "opp.h"
#include "color_gamma.h"
/* When calculating LUT values the first region and at least one subsequent
* region are calculated with full precision. These defines are a demarcation
* of where the second region starts and ends.
* These are hardcoded values to avoid recalculating them in loops.
*/
#define PRECISE_LUT_REGION_START 224
#define PRECISE_LUT_REGION_END 239
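Editorial note: read together with the buffer_index check further down in this hunk, the gamma code now computes with full precision in two windows. A compact restatement of the predicate; the helper name is illustrative only:

static bool use_full_precision(unsigned int buffer_index)
{
	/* First 16 points, plus the second demarcated region. */
	return buffer_index < 16 ||
	       (buffer_index >= PRECISE_LUT_REGION_START &&
		buffer_index <= PRECISE_LUT_REGION_END);
}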
static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
// these are helpers for calculations to reduce stack usage
@ -346,7 +354,13 @@ static struct fixed31_32 translate_from_linear_space(
dc_fixpt_recip(args->gamma));
}
scratch_1 = dc_fixpt_add(one, args->a3);
if (cal_buffer->buffer_index < 16)
/* In the first region (first 16 points) and in the
* region delimited by START/END we calculate with
* full precision to avoid error accumulation.
*/
if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START &&
cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) ||
(cal_buffer->buffer_index < 16))
scratch_2 = dc_fixpt_pow(args->arg,
dc_fixpt_recip(args->gamma));
else
@ -397,9 +411,7 @@ static struct fixed31_32 translate_from_linear_space_long(
dc_fixpt_recip(args->gamma))),
args->a2);
else
return dc_fixpt_mul(
args->arg,
args->a1);
return dc_fixpt_mul(args->arg, args->a1);
}
static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf, struct calculate_buffer *cal_buffer)
@ -717,7 +729,6 @@ static struct fixed31_32 calculate_mapped_value(
BREAK_TO_DEBUGGER();
result = dc_fixpt_zero;
} else {
BREAK_TO_DEBUGGER();
result = dc_fixpt_one;
}
@ -976,6 +987,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
cal_buffer->buffer_index = 0; // see var definition for more info
rgb += 32; // first 32 points have problems with fixed point, too small
coord_x += 32;
for (i = 32; i <= hw_points_num; i++) {
if (!is_clipped) {
if (use_eetf) {


@ -499,6 +499,7 @@ enum atombios_firmware_capability
ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400,
ATOM_FIRMWARE_CAP_ENABLE_2ND_USB20PORT = 0x0008000,
};
enum atom_cooling_solution_id{


@ -227,6 +227,7 @@ struct smu_bios_boot_up_values
uint32_t content_revision;
uint32_t fclk;
uint32_t lclk;
uint32_t firmware_caps;
};
enum smu_table_id


@ -178,7 +178,7 @@
__SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW), \
__SMU_DUMMY_MAP(GET_UMC_FW_WA), \
__SMU_DUMMY_MAP(Mode1Reset), \
__SMU_DUMMY_MAP(Spare), \
__SMU_DUMMY_MAP(RlcPowerNotify), \
__SMU_DUMMY_MAP(SetHardMinIspiclkByFreq), \
__SMU_DUMMY_MAP(SetHardMinIspxclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMinSocclkByFreq), \
@ -209,6 +209,8 @@
__SMU_DUMMY_MAP(SetSoftMinCclk), \
__SMU_DUMMY_MAP(SetSoftMaxCclk), \
__SMU_DUMMY_MAP(SetGpoFeaturePMask), \
__SMU_DUMMY_MAP(DisallowGpo), \
__SMU_DUMMY_MAP(Enable2ndUSB20Port), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type


@ -134,6 +134,10 @@
#define PPSMC_MSG_SetGpoFeaturePMask 0x45
#define PPSMC_MSG_SetSMBUSInterrupt 0x46
#define PPSMC_Message_Count 0x47
#define PPSMC_MSG_DisallowGpo 0x56
#define PPSMC_MSG_Enable2ndUSB20Port 0x57
#define PPSMC_Message_Count 0x58
#endif


@ -41,7 +41,7 @@
#define PPSMC_MSG_PowerUpIspByTile 0x7
#define PPSMC_MSG_PowerDownVcn 0x8 // VCN is power gated by default
#define PPSMC_MSG_PowerUpVcn 0x9
#define PPSMC_MSG_spare 0xA
#define PPSMC_MSG_RlcPowerNotify 0xA
#define PPSMC_MSG_SetHardMinVcn 0xB // For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0xC //Sets SoftMin for GFXCLK. Arg is in MHz
#define PPSMC_MSG_ActiveProcessNotify 0xD


@ -847,12 +847,10 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
if (!amdgpu_sriov_vf(adev) || (adev->asic_type != CHIP_NAVI12)) {
ret = smu_init_microcode(smu);
if (ret) {
dev_err(adev->dev, "Failed to load smu firmware!\n");
return ret;
}
ret = smu_init_microcode(smu);
if (ret) {
dev_err(adev->dev, "Failed to load smu firmware!\n");
return ret;
}
ret = smu_smc_table_sw_init(smu);


@ -128,6 +128,8 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(SetGpoFeaturePMask, PPSMC_MSG_SetGpoFeaturePMask, 0),
MSG_MAP(DisallowGpo, PPSMC_MSG_DisallowGpo, 0),
MSG_MAP(Enable2ndUSB20Port, PPSMC_MSG_Enable2ndUSB20Port, 0),
};
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@ -302,6 +304,9 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
@ -377,7 +382,7 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@ -386,10 +391,10 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
@ -418,7 +423,8 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table= &smu->smu_table;
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
SmuMetrics_t *metrics =
&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
int ret = 0;
mutex_lock(&smu->metrics_lock);
@ -1065,12 +1071,18 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
return 0;
}
@ -1156,7 +1168,9 @@ static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *buf)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
uint32_t i, size = 0;
int16_t workload_type = 0;
static const char *profile_name[] = {
@ -1198,7 +1212,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
result = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
(void *)(&activity_monitor_external), false);
if (result) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return result;
@ -1211,43 +1225,43 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
" ",
0,
"GFXCLK",
activity_monitor.Gfx_FPS,
activity_monitor.Gfx_MinFreqStep,
activity_monitor.Gfx_MinActiveFreqType,
activity_monitor.Gfx_MinActiveFreq,
activity_monitor.Gfx_BoosterFreqType,
activity_monitor.Gfx_BoosterFreq,
activity_monitor.Gfx_PD_Data_limit_c,
activity_monitor.Gfx_PD_Data_error_coeff,
activity_monitor.Gfx_PD_Data_error_rate_coeff);
activity_monitor->Gfx_FPS,
activity_monitor->Gfx_MinFreqStep,
activity_monitor->Gfx_MinActiveFreqType,
activity_monitor->Gfx_MinActiveFreq,
activity_monitor->Gfx_BoosterFreqType,
activity_monitor->Gfx_BoosterFreq,
activity_monitor->Gfx_PD_Data_limit_c,
activity_monitor->Gfx_PD_Data_error_coeff,
activity_monitor->Gfx_PD_Data_error_rate_coeff);
size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
1,
"SOCCLK",
activity_monitor.Fclk_FPS,
activity_monitor.Fclk_MinFreqStep,
activity_monitor.Fclk_MinActiveFreqType,
activity_monitor.Fclk_MinActiveFreq,
activity_monitor.Fclk_BoosterFreqType,
activity_monitor.Fclk_BoosterFreq,
activity_monitor.Fclk_PD_Data_limit_c,
activity_monitor.Fclk_PD_Data_error_coeff,
activity_monitor.Fclk_PD_Data_error_rate_coeff);
activity_monitor->Fclk_FPS,
activity_monitor->Fclk_MinFreqStep,
activity_monitor->Fclk_MinActiveFreqType,
activity_monitor->Fclk_MinActiveFreq,
activity_monitor->Fclk_BoosterFreqType,
activity_monitor->Fclk_BoosterFreq,
activity_monitor->Fclk_PD_Data_limit_c,
activity_monitor->Fclk_PD_Data_error_coeff,
activity_monitor->Fclk_PD_Data_error_rate_coeff);
size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
"MEMLK",
activity_monitor.Mem_FPS,
activity_monitor.Mem_MinFreqStep,
activity_monitor.Mem_MinActiveFreqType,
activity_monitor.Mem_MinActiveFreq,
activity_monitor.Mem_BoosterFreqType,
activity_monitor.Mem_BoosterFreq,
activity_monitor.Mem_PD_Data_limit_c,
activity_monitor.Mem_PD_Data_error_coeff,
activity_monitor.Mem_PD_Data_error_rate_coeff);
activity_monitor->Mem_FPS,
activity_monitor->Mem_MinFreqStep,
activity_monitor->Mem_MinActiveFreqType,
activity_monitor->Mem_MinActiveFreq,
activity_monitor->Mem_BoosterFreqType,
activity_monitor->Mem_BoosterFreq,
activity_monitor->Mem_PD_Data_limit_c,
activity_monitor->Mem_PD_Data_error_coeff,
activity_monitor->Mem_PD_Data_error_rate_coeff);
}
return size;
@ -1255,7 +1269,10 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
smu->power_profile_mode = input[size];
@ -1269,7 +1286,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
(void *)(&activity_monitor_external), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
@ -1277,43 +1294,43 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
activity_monitor.Gfx_MinFreqStep = input[2];
activity_monitor.Gfx_MinActiveFreqType = input[3];
activity_monitor.Gfx_MinActiveFreq = input[4];
activity_monitor.Gfx_BoosterFreqType = input[5];
activity_monitor.Gfx_BoosterFreq = input[6];
activity_monitor.Gfx_PD_Data_limit_c = input[7];
activity_monitor.Gfx_PD_Data_error_coeff = input[8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
activity_monitor->Gfx_FPS = input[1];
activity_monitor->Gfx_MinFreqStep = input[2];
activity_monitor->Gfx_MinActiveFreqType = input[3];
activity_monitor->Gfx_MinActiveFreq = input[4];
activity_monitor->Gfx_BoosterFreqType = input[5];
activity_monitor->Gfx_BoosterFreq = input[6];
activity_monitor->Gfx_PD_Data_limit_c = input[7];
activity_monitor->Gfx_PD_Data_error_coeff = input[8];
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
break;
case 1: /* Socclk */
activity_monitor.Fclk_FPS = input[1];
activity_monitor.Fclk_MinFreqStep = input[2];
activity_monitor.Fclk_MinActiveFreqType = input[3];
activity_monitor.Fclk_MinActiveFreq = input[4];
activity_monitor.Fclk_BoosterFreqType = input[5];
activity_monitor.Fclk_BoosterFreq = input[6];
activity_monitor.Fclk_PD_Data_limit_c = input[7];
activity_monitor.Fclk_PD_Data_error_coeff = input[8];
activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
activity_monitor->Fclk_FPS = input[1];
activity_monitor->Fclk_MinFreqStep = input[2];
activity_monitor->Fclk_MinActiveFreqType = input[3];
activity_monitor->Fclk_MinActiveFreq = input[4];
activity_monitor->Fclk_BoosterFreqType = input[5];
activity_monitor->Fclk_BoosterFreq = input[6];
activity_monitor->Fclk_PD_Data_limit_c = input[7];
activity_monitor->Fclk_PD_Data_error_coeff = input[8];
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
break;
case 2: /* Memlk */
activity_monitor.Mem_FPS = input[1];
activity_monitor.Mem_MinFreqStep = input[2];
activity_monitor.Mem_MinActiveFreqType = input[3];
activity_monitor.Mem_MinActiveFreq = input[4];
activity_monitor.Mem_BoosterFreqType = input[5];
activity_monitor.Mem_BoosterFreq = input[6];
activity_monitor.Mem_PD_Data_limit_c = input[7];
activity_monitor.Mem_PD_Data_error_coeff = input[8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
activity_monitor->Mem_FPS = input[1];
activity_monitor->Mem_MinFreqStep = input[2];
activity_monitor->Mem_MinActiveFreqType = input[3];
activity_monitor->Mem_MinActiveFreq = input[4];
activity_monitor->Mem_BoosterFreqType = input[5];
activity_monitor->Mem_BoosterFreq = input[6];
activity_monitor->Mem_PD_Data_limit_c = input[7];
activity_monitor->Mem_PD_Data_error_coeff = input[8];
activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
break;
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
(void *)(&activity_monitor_external), true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
@ -2582,52 +2599,54 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_0 *gpu_metrics =
(struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
SmuMetrics_t metrics;
SmuMetricsExternal_t metrics_external;
SmuMetrics_t *metrics =
&(metrics_external.SmuMetrics);
int ret = 0;
ret = smu_cmn_get_metrics_table(smu,
&metrics,
&metrics_external,
true);
if (ret)
return ret;
smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
gpu_metrics->temperature_mem = metrics.TemperatureMem;
gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
gpu_metrics->temperature_edge = metrics->TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
gpu_metrics->temperature_mem = metrics->TemperatureMem;
gpu_metrics->temperature_vrgfx = metrics->TemperatureVrGfx;
gpu_metrics->temperature_vrsoc = metrics->TemperatureVrSoc;
gpu_metrics->temperature_vrmem = metrics->TemperatureVrMem0;
gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
gpu_metrics->average_mm_activity = metrics->VcnActivityPercentage;
gpu_metrics->average_socket_power = metrics.AverageSocketPower;
gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
if (metrics.AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
else
gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
gpu_metrics->average_vclk0_frequency = metrics.AverageVclk0Frequency;
gpu_metrics->average_dclk0_frequency = metrics.AverageDclk0Frequency;
gpu_metrics->average_vclk1_frequency = metrics.AverageVclk1Frequency;
gpu_metrics->average_dclk1_frequency = metrics.AverageDclk1Frequency;
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
gpu_metrics->average_uclk_frequency = metrics->AverageUclkFrequencyPostDs;
gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK_0];
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK_0];
gpu_metrics->current_vclk1 = metrics.CurrClock[PPCLK_VCLK_1];
gpu_metrics->current_dclk1 = metrics.CurrClock[PPCLK_DCLK_1];
gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
gpu_metrics->throttle_status = metrics->ThrottlerStatus;
gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
gpu_metrics->current_fan_speed = metrics->CurrFanSpeed;
gpu_metrics->pcie_link_width =
smu_v11_0_get_current_pcie_link_width(smu);
@ -2650,23 +2669,82 @@ static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
static int sienna_cichlid_gpo_control(struct smu_context *smu,
bool enablement)
{
uint32_t smu_version;
int ret = 0;
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
if (enablement)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetGpoFeaturePMask,
GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK,
NULL);
else
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetGpoFeaturePMask,
0,
NULL);
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret)
return ret;
if (enablement) {
if (smu_version < 0x003a2500) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetGpoFeaturePMask,
GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK,
NULL);
} else {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_DisallowGpo,
0,
NULL);
}
} else {
if (smu_version < 0x003a2500) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetGpoFeaturePMask,
0,
NULL);
} else {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_DisallowGpo,
1,
NULL);
}
}
}
return ret;
}
static int sienna_cichlid_notify_2nd_usb20_port(struct smu_context *smu)
{
uint32_t smu_version;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret)
return ret;
/*
* Message SMU_MSG_Enable2ndUSB20Port is supported by PMFW version
* 58.45 (0x003A2D00) and onwards.
*/
if (smu_version < 0x003A2D00)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_Enable2ndUSB20Port,
smu->smu_table.boot_values.firmware_caps & ATOM_FIRMWARE_CAP_ENABLE_2ND_USB20PORT ?
1 : 0,
NULL);
}
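Editorial note: both version gates added in this file (0x003a2500 for the GPO message switch above, 0x003A2D00 here) follow the same pattern; assuming the PMFW version packs major.minor.hotfix one byte each, these decode to 58.37 and 58.45. A hedged sketch of the recurring guard; pmfw_at_least() is a hypothetical helper, not from this series:

static bool pmfw_at_least(struct smu_context *smu, uint32_t min_version)
{
	uint32_t smu_version;

	if (smu_cmn_get_smc_version(smu, NULL, &smu_version))
		return false;	/* treat "unknown version" as too old */

	return smu_version >= min_version;
}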
static int sienna_cichlid_system_features_control(struct smu_context *smu,
bool en)
{
int ret = 0;
if (en) {
ret = sienna_cichlid_notify_2nd_usb20_port(smu);
if (ret)
return ret;
}
return smu_v11_0_system_features_control(smu, en);
}
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@ -2707,7 +2785,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.system_features_control = sienna_cichlid_system_features_control,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = NULL,
@ -2740,6 +2818,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.run_btc = sienna_cichlid_run_btc,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = sienna_cichlid_get_gpu_metrics,


@ -29,6 +29,10 @@ typedef enum {
POWER_SOURCE_COUNT,
} POWER_SOURCE_e;
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK 1825
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
#endif


@ -91,6 +91,11 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
const struct common_firmware_header *header;
struct amdgpu_firmware_info *ucode = NULL;
if (amdgpu_sriov_vf(adev) &&
((adev->asic_type == CHIP_NAVI12) ||
(adev->asic_type == CHIP_SIENNA_CICHLID)))
return 0;
switch (adev->asic_type) {
case CHIP_ARCTURUS:
chip_name = "arcturus";
@ -554,6 +559,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = 0;
smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
break;
case 3:
default:
@ -569,6 +575,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
}
smu->smu_table.boot_values.format_revision = header->format_revision;
@ -929,9 +936,13 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
if (power_src < 0)
return -EINVAL;
/*
* BIT 24-31: ControllerId (only PPT0 is supported for now)
* BIT 16-23: PowerSource
*/
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetPptLimit,
power_src << 16,
(0 << 24) | (power_src << 16),
power_limit);
if (ret)
dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
@ -941,6 +952,7 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int power_src;
int ret = 0;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
@ -948,6 +960,22 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return -EOPNOTSUPP;
}
power_src = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_PWR,
smu->adev->pm.ac_power ?
SMU_POWER_SOURCE_AC :
SMU_POWER_SOURCE_DC);
if (power_src < 0)
return -EINVAL;
/*
* BIT 24-31: ControllerId (only PPT0 is supported for now)
* BIT 16-23: PowerSource
* BIT 0-15: PowerLimit
*/
n &= 0xFFFF;
n |= 0 << 24;
n |= (power_src) << 16;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
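Editorial note: the same ControllerId/PowerSource/PowerLimit layout now documented in both hunks is used for GetPptLimit and SetPptLimit alike. A worked sketch of the packing; pack_ppt_limit() is illustrative only:

static uint32_t pack_ppt_limit(uint8_t controller_id, uint8_t power_src,
			       uint16_t limit)
{
	/* BIT 24-31: ControllerId, BIT 16-23: PowerSource,
	 * BIT 0-15: PowerLimit. E.g. controller 0 (PPT0),
	 * power_src 1, limit 0x00DC packs to 0x000100DC. */
	return ((uint32_t)controller_id << 24) |
	       ((uint32_t)power_src << 16) |
	       limit;
}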
@ -2064,6 +2092,22 @@ int smu_v11_0_deep_sleep_control(struct smu_context *smu,
}
}
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
if (ret) {
dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
return ret;
}
}
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
if (ret) {
dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
return ret;
}
}
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
if (ret) {


@ -64,7 +64,7 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
MSG_MAP(Spare, PPSMC_MSG_spare, 0),
MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0),
MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0),
MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0),
@ -722,6 +722,12 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
return 0;
}
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
}
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
@ -749,6 +755,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.print_clk_levels = vangogh_print_fine_grain_clk,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
};
void vangogh_set_ppt_funcs(struct smu_context *smu)


@ -32,4 +32,8 @@ extern void vangogh_set_ppt_funcs(struct smu_context *smu);
#define VANGOGH_UMD_PSTATE_SOCCLK 678
#define VANGOGH_UMD_PSTATE_FCLK 800
/* RLC Power Status */
#define RLC_STATUS_OFF 0
#define RLC_STATUS_NORMAL 1
#endif


@ -122,7 +122,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
continue;
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
new_encoder = funcs->atomic_best_encoder(connector,
state);
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
@ -345,8 +346,7 @@ update_connector_routing(struct drm_atomic_state *state,
funcs = connector->helper_private;
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector,
new_connector_state);
new_encoder = funcs->atomic_best_encoder(connector, state);
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
@ -1313,7 +1313,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
funcs->atomic_commit(connector, new_conn_state);
funcs->atomic_commit(connector, old_state);
}
}
}


@ -196,10 +196,10 @@
* exposed and assumed to be black).
*
* SCALING_FILTER:
*
* Indicates scaling filter to be used for plane scaler
*
* The value of this property can be one of the following:
*
* Default:
* Driver's default scaling filter
* Nearest Neighbor:


@ -77,6 +77,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
if ((entry->map->offset & 0xffffffff) ==
(map->offset & 0xffffffff))
return entry;
break;
default: /* Make gcc happy */
;
}


@ -314,9 +314,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map
struct dma_buf_map *map = &buffer->map;
int ret;
if (dma_buf_map_is_set(map))
goto out;
/*
* FIXME: The dependency on GEM here isn't required, we could
* convert the driver handle to a dma-buf instead and use the
@ -329,7 +326,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map
if (ret)
return ret;
out:
*map_copy = *map;
return 0;


@ -230,14 +230,14 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
*
* Setting MODE_ID to 0 will release reserved resources for the CRTC.
* SCALING_FILTER:
* Atomic property for setting the scaling filter for CRTC scaler
* Atomic property for setting the scaling filter for CRTC scaler
*
* The value of this property can be one of the following:
* Default:
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
* The value of this property can be one of the following:
*
* Default:
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
*/
/**


@ -371,9 +371,9 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
console_unlock();
}
static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip,
struct dma_buf_map *dst)
static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip,
struct dma_buf_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
unsigned int cpp = fb->format->cpp[0];
@ -391,40 +391,86 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
}
}
static void drm_fb_helper_dirty_work(struct work_struct *work)
static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
dirty_work);
struct drm_clip_rect *clip = &helper->dirty_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
struct dma_buf_map map;
struct drm_client_buffer *buffer = fb_helper->buffer;
struct dma_buf_map map, dst;
int ret;
spin_lock_irqsave(&helper->dirty_lock, flags);
/*
* We have to pin the client buffer to its current location while
* flushing the shadow buffer. In the general case, concurrent
* modesetting operations could try to move the buffer and would
* fail. The modeset has to be serialized by acquiring the reservation
* object of the underlying BO here.
*
* For fbdev emulation, we only have to protect against fbdev modeset
* operations. Nothing else will involve the client buffer's BO. So it
* is sufficient to acquire struct drm_fb_helper.lock here.
*/
mutex_lock(&fb_helper->lock);
ret = drm_client_buffer_vmap(buffer, &map);
if (ret)
goto out;
dst = map;
drm_fb_helper_damage_blit_real(fb_helper, clip, &dst);
drm_client_buffer_vunmap(buffer);
out:
mutex_unlock(&fb_helper->lock);
return ret;
}
static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
damage_work);
struct drm_device *dev = helper->dev;
struct drm_clip_rect *clip = &helper->damage_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
int ret;
spin_lock_irqsave(&helper->damage_lock, flags);
clip_copy = *clip;
clip->x1 = clip->y1 = ~0;
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->dirty_lock, flags);
spin_unlock_irqrestore(&helper->damage_lock, flags);
/* call dirty callback only when it has been really touched */
if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
/* Call damage handlers only if necessary */
if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2))
return;
/* Generic fbdev uses a shadow buffer */
if (helper->buffer) {
ret = drm_client_buffer_vmap(helper->buffer, &map);
if (ret)
return;
drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map);
}
if (helper->fb->funcs->dirty)
helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
&clip_copy, 1);
if (helper->buffer)
drm_client_buffer_vunmap(helper->buffer);
if (helper->buffer) {
ret = drm_fb_helper_damage_blit(helper, &clip_copy);
if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
goto err;
}
if (helper->fb->funcs->dirty) {
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
goto err;
}
return;
err:
/*
* Restore damage clip rectangle on errors. The next run
* of the damage worker will perform the update.
*/
spin_lock_irqsave(&helper->damage_lock, flags);
clip->x1 = min_t(u32, clip->x1, clip_copy.x1);
clip->y1 = min_t(u32, clip->y1, clip_copy.y1);
clip->x2 = max_t(u32, clip->x2, clip_copy.x2);
clip->y2 = max_t(u32, clip->y2, clip_copy.y2);
spin_unlock_irqrestore(&helper->damage_lock, flags);
}
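Editorial note: the inverted rectangle (x1 = y1 = ~0, x2 = y2 = 0) is the "no damage pending" sentinel used throughout this file: any min/max merge of real coordinates produces a valid rectangle, and the x1 < x2 && y1 < y2 test above filters out the untouched case. A minimal restatement; the helper names are illustrative only:

static void damage_clip_reset(struct drm_clip_rect *clip)
{
	clip->x1 = clip->y1 = ~0;	/* above any real coordinate */
	clip->x2 = clip->y2 = 0;	/* below any real coordinate */
}

static bool damage_clip_nonempty(const struct drm_clip_rect *clip)
{
	return clip->x1 < clip->x2 && clip->y1 < clip->y2;
}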
/**
@ -440,10 +486,10 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
{
INIT_LIST_HEAD(&helper->kernel_fb_list);
spin_lock_init(&helper->dirty_lock);
spin_lock_init(&helper->damage_lock);
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work);
helper->damage_clip.x1 = helper->damage_clip.y1 = ~0;
mutex_init(&helper->lock);
helper->funcs = funcs;
helper->dev = dev;
@ -579,7 +625,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
return;
cancel_work_sync(&fb_helper->resume_work);
cancel_work_sync(&fb_helper->dirty_work);
cancel_work_sync(&fb_helper->damage_work);
info = fb_helper->fbdev;
if (info) {
@ -614,30 +660,30 @@ static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
fb->funcs->dirty;
}
static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
u32 width, u32 height)
static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
u32 width, u32 height)
{
struct drm_fb_helper *helper = info->par;
struct drm_clip_rect *clip = &helper->dirty_clip;
struct drm_clip_rect *clip = &helper->damage_clip;
unsigned long flags;
if (!drm_fbdev_use_shadow_fb(helper))
return;
spin_lock_irqsave(&helper->dirty_lock, flags);
spin_lock_irqsave(&helper->damage_lock, flags);
clip->x1 = min_t(u32, clip->x1, x);
clip->y1 = min_t(u32, clip->y1, y);
clip->x2 = max_t(u32, clip->x2, x + width);
clip->y2 = max_t(u32, clip->y2, y + height);
spin_unlock_irqrestore(&helper->dirty_lock, flags);
spin_unlock_irqrestore(&helper->damage_lock, flags);
schedule_work(&helper->dirty_work);
schedule_work(&helper->damage_work);
}
/**
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
* @info: fb_info struct pointer
* @pagelist: list of dirty mmap framebuffer pages
* @pagelist: list of mmap framebuffer pages that have to be flushed
*
* This function is used as the &fb_deferred_io.deferred_io
* callback function for flushing the fbdev mmap writes.
@ -662,7 +708,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
y1 = min / info->fix.line_length;
y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
info->var.yres);
drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1);
drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1);
}
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
@ -699,8 +745,7 @@ ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
ret = fb_sys_write(info, buf, count, ppos);
if (ret > 0)
drm_fb_helper_dirty(info, 0, 0, info->var.xres,
info->var.yres);
drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres);
return ret;
}
@ -717,8 +762,7 @@ void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
sys_fillrect(info, rect);
drm_fb_helper_dirty(info, rect->dx, rect->dy,
rect->width, rect->height);
drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
@ -733,8 +777,7 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
sys_copyarea(info, area);
drm_fb_helper_dirty(info, area->dx, area->dy,
area->width, area->height);
drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
@ -749,8 +792,7 @@ void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image)
{
sys_imageblit(info, image);
drm_fb_helper_dirty(info, image->dx, image->dy,
image->width, image->height);
drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
@ -765,8 +807,7 @@ void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
cfb_fillrect(info, rect);
drm_fb_helper_dirty(info, rect->dx, rect->dy,
rect->width, rect->height);
drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
@ -781,8 +822,7 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
cfb_copyarea(info, area);
drm_fb_helper_dirty(info, area->dx, area->dy,
area->width, area->height);
drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
@ -797,8 +837,7 @@ void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
cfb_imageblit(info, image);
drm_fb_helper_dirty(info, image->dx, image->dy,
image->width, image->height);
drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
@ -1988,14 +2027,19 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
if (!fb_helper->dev)
return;
if (fbi && fbi->fbdefio) {
fb_deferred_io_cleanup(fbi);
shadow = fbi->screen_buffer;
if (fbi) {
if (fbi->fbdefio)
fb_deferred_io_cleanup(fbi);
if (drm_fbdev_use_shadow_fb(fb_helper))
shadow = fbi->screen_buffer;
}
drm_fb_helper_fini(fb_helper);
vfree(shadow);
if (shadow)
vfree(shadow);
else
drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
}
@ -2189,6 +2233,9 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
if (ret > 0)
*ppos += ret;
if (ret > 0)
drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual);
return ret ? ret : err;
}


@ -51,13 +51,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
if (!obj)
return ERR_PTR(-ENOMEM);
shmem = to_drm_gem_shmem_obj(obj);
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
if (private)
if (private) {
drm_gem_private_object_init(dev, obj, size);
else
shmem->map_wc = false; /* dma-buf mappings use always writecombine */
} else {
ret = drm_gem_object_init(dev, obj, size);
}
if (ret)
goto err_free;
@ -65,7 +69,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
if (ret)
goto err_release;
shmem = to_drm_gem_shmem_obj(obj);
mutex_init(&shmem->pages_lock);
mutex_init(&shmem->vmap_lock);
INIT_LIST_HEAD(&shmem->madv_list);
@ -284,7 +287,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct
if (ret)
goto err_zero_use;
if (!shmem->map_cached)
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
@ -476,33 +479,6 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
/**
* drm_gem_shmem_create_object_cached - Create a shmem buffer object with
* cached mappings
* @dev: DRM device
* @size: Size of the object to allocate
*
* By default, shmem buffer objects use writecombine mappings. This
* function implements struct drm_driver.gem_create_object for shmem
* buffer objects with cached mappings.
*
* Returns:
* A struct drm_gem_shmem_object * on success or NULL on failure.
*/
struct drm_gem_object *
drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
{
struct drm_gem_shmem_object *shmem;
shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
if (!shmem)
return NULL;
shmem->map_cached = true;
return &shmem->base;
}
EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
* @file: DRM file structure to create the dumb buffer for
@ -626,7 +602,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (!shmem->map_cached)
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
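
With cached mappings now the default for shmem objects, a driver that still needs write-combined buffers opts in from its gem_create_object hook. A minimal sketch of the pattern, with illustrative foo_ naming (the lima and mgag200 hunks further down are the real instances):

#include <drm/drm_gem_shmem_helper.h>

static struct drm_gem_object *foo_gem_create_object(struct drm_device *dev,
						    size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	/* This driver scans out of these buffers, keep them writecombined. */
	shmem->map_wc = true;

	return &shmem->base;
}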


@ -145,10 +145,8 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
fput(vma->vm_file);
get_file(etnaviv_obj->base.filp);
vma->vm_pgoff = 0;
vma->vm_file = etnaviv_obj->base.filp;
vma_set_file(vma, etnaviv_obj->base.filp);
vma->vm_page_prot = vm_page_prot;
}
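
Here and in the i915 and msm hunks below, the open-coded fput()/get_file() dance is replaced by the new helper. For reference, vma_set_file() as merged into mm is essentially:

void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}

It takes its own reference on the new file, which is why the i915 mmap hunk below still drops the anonymous file's initial creation reference with fput() after the call.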


@ -23,6 +23,7 @@
*
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
@ -719,11 +720,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *state)
struct drm_atomic_state *state)
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct intel_crtc *crtc = to_intel_crtc(state->crtc);
struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}


@ -114,8 +114,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
if (ret)
return ret;
fput(vma->vm_file);
vma->vm_file = get_file(obj->base.filp);
vma_set_file(vma, obj->base.filp);
return 0;
}


@ -382,7 +382,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
(vma->node.start + vma->node.size - 1) >> 32)
(vma->node.start + vma->node.size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&


@ -893,8 +893,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
* requires avoiding extraneous references to their filp, hence why
* we prefer to use an anonymous file for their mmaps.
*/
fput(vma->vm_file);
vma->vm_file = anon;
vma_set_file(vma, anon);
/* Drop the initial creation reference, the vma is now holding one. */
fput(anon);
switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:


@ -1579,9 +1579,9 @@ static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
return tgl_uy_revids;
return &tgl_uy_revids[INTEL_REVID(dev_priv)];
else
return tgl_revids;
return &tgl_revids[INTEL_REVID(dev_priv)];
}
#define IS_TGL_DISP_REVID(p, since, until) \
@ -1591,14 +1591,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_TGL_UY_GT_REVID(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
tgl_uy_revids->gt_stepping >= (since) && \
tgl_uy_revids->gt_stepping <= (until))
tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
#define IS_TGL_GT_REVID(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
tgl_revids->gt_stepping >= (since) && \
tgl_revids->gt_stepping <= (until))
tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1


@ -4242,18 +4242,21 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
if (HAS_PCH_DG1(dev_priv))
dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
else if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else if (HAS_GMCH(dev_priv) && I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
if (HAS_GMCH(dev_priv)) {
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
if (HAS_PCH_DG1(dev_priv))
dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
else if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
}
}
/**


@ -914,7 +914,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, oastatus_reg,
GEN8_OASTATUS_COUNTER_OVERFLOW |
GEN8_OASTATUS_REPORT_LOST,
IS_GEN_RANGE(uncore->i915, 8, 10) ?
IS_GEN_RANGE(uncore->i915, 8, 11) ?
(GEN8_OASTATUS_HEAD_POINTER_WRAP |
GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}


@ -7,6 +7,7 @@
#define __DCSS_PRV_H__
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <linux/io.h>
#include <video/videomode.h>
@ -165,6 +166,8 @@ void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
/* SCALER */
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base);
void dcss_scaler_exit(struct dcss_scaler *scl);
void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num,
enum drm_scaling_filter scaling_filter);
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,


@ -103,15 +103,15 @@ static bool dcss_plane_can_rotate(const struct drm_format_info *format,
bool mod_present, u64 modifier,
unsigned int rotation)
{
bool linear_format = !mod_present ||
(mod_present && modifier == DRM_FORMAT_MOD_LINEAR);
bool linear_format = !mod_present || modifier == DRM_FORMAT_MOD_LINEAR;
u32 supported_rotation = DRM_MODE_ROTATE_0;
if (!format->is_yuv && linear_format)
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
else if (!format->is_yuv &&
modifier == DRM_FORMAT_MOD_VIVANTE_TILED)
(modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED))
supported_rotation = DRM_MODE_ROTATE_MASK |
DRM_MODE_REFLECT_MASK;
else if (format->is_yuv && linear_format &&
@ -257,7 +257,8 @@ static bool dcss_plane_needs_setup(struct drm_plane_state *state,
state->src_h != old_state->src_h ||
fb->format->format != old_fb->format->format ||
fb->modifier != old_fb->modifier ||
state->rotation != old_state->rotation;
state->rotation != old_state->rotation ||
state->scaling_filter != old_state->scaling_filter;
}
static void dcss_plane_atomic_update(struct drm_plane *plane,
@ -272,6 +273,7 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
u32 src_w, src_h, dst_w, dst_h;
struct drm_rect src, dst;
bool enable = true;
bool is_rotation_90_or_270;
if (!fb || !state->crtc || !state->visible)
return;
@ -309,8 +311,16 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
dcss_plane_atomic_set_base(dcss_plane);
is_rotation_90_or_270 = state->rotation & (DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_270);
dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num,
state->scaling_filter);
dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
state->fb->format, src_w, src_h,
state->fb->format,
is_rotation_90_or_270 ? src_h : src_w,
is_rotation_90_or_270 ? src_w : src_h,
dst_w, dst_h,
drm_mode_vrefresh(&crtc_state->mode));
@ -388,6 +398,10 @@ struct dcss_plane *dcss_plane_init(struct drm_device *drm,
if (ret)
return ERR_PTR(ret);
drm_plane_create_scaling_filter_property(&dcss_plane->base,
BIT(DRM_SCALING_FILTER_DEFAULT) |
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
drm_plane_create_rotation_property(&dcss_plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |


@ -77,6 +77,8 @@ struct dcss_scaler_ch {
u32 c_vstart;
u32 c_hstart;
bool use_nn_interpolation;
};
struct dcss_scaler {
@ -243,6 +245,17 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
}
}
static void dcss_scaler_nearest_neighbor_filter(bool use_5_taps,
int coef[][PSC_NUM_TAPS])
{
int i, j;
for (i = 0; i < PSC_STORED_PHASES; i++)
for (j = 0; j < PSC_NUM_TAPS; j++)
coef[i][j] = j == PSC_NUM_TAPS >> 1 ?
(1 << PSC_COEFF_PRECISION) : 0;
}
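
The effect is an identity kernel: for each stored phase only the center tap carries weight. If, for illustration, PSC_NUM_TAPS is 7 and PSC_COEFF_PRECISION is 10 (assumed values), every row of coef becomes { 0, 0, 0, 1024, 0, 0, 0 }, passing a single source pixel through unfiltered, which is exactly nearest-neighbor sampling.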
/**
* dcss_scaler_filter_design() - Compute filter coefficients using
* Gaussian filter.
@ -253,7 +266,8 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
*/
static void dcss_scaler_filter_design(int src_length, int dst_length,
bool use_5_taps, bool phase0_identity,
int coef[][PSC_NUM_TAPS])
int coef[][PSC_NUM_TAPS],
bool nn_interpolation)
{
int fc_q;
@ -263,8 +277,11 @@ static void dcss_scaler_filter_design(int src_length, int dst_length,
else
fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES);
/* compute gaussian filter coefficients */
dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
if (nn_interpolation)
dcss_scaler_nearest_neighbor_filter(use_5_taps, coef);
else
/* compute gaussian filter coefficients */
dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
}
static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs)
@ -653,12 +670,14 @@ static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
/* horizontal luma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
src_xres == dst_xres, coef);
src_xres == dst_xres, coef,
ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical luma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
src_yres == dst_yres, coef);
src_yres == dst_yres, coef,
ch->use_nn_interpolation);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
@ -678,14 +697,14 @@ static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
/* horizontal chroma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
(src_xres == dst_xres) && (ch->c_hstart == 0),
coef);
coef, ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef);
/* vertical chroma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
(src_yres == dst_yres) && (ch->c_vstart == 0),
coef);
coef, ch->use_nn_interpolation);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
else
@ -700,12 +719,14 @@ static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch,
/* horizontal RGB */
dcss_scaler_filter_design(src_xres, dst_xres, false,
src_xres == dst_xres, coef);
src_xres == dst_xres, coef,
ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical RGB */
dcss_scaler_filter_design(src_yres, dst_yres, false,
src_yres == dst_yres, coef);
src_yres == dst_yres, coef,
ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
}
@ -751,6 +772,14 @@ static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch,
ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS;
}
void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num,
enum drm_scaling_filter scaling_filter)
{
struct dcss_scaler_ch *ch = &scl->ch[ch_num];
ch->use_nn_interpolation = scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR;
}
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,

View File

@ -225,7 +225,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz
mutex_init(&bo->lock);
INIT_LIST_HEAD(&bo->va);
bo->base.map_wc = true;
bo->base.base.funcs = &lima_gem_funcs;
return &bo->base.base;


@ -4,6 +4,7 @@ config DRM_MCDE
depends on CMA
depends on ARM || COMPILE_TEST
depends on OF
depends on COMMON_CLK
select MFD_SYSCON
select DRM_MIPI_DSI
select DRM_BRIDGE


@ -1,3 +1,3 @@
mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_display.o
mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_clk_div.o mcde_display.o
obj-$(CONFIG_DRM_MCDE) += mcde_drm.o


@ -0,0 +1,192 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include "mcde_drm.h"
#include "mcde_display_regs.h"
/* The MCDE internal clock dividers for FIFO A and B */
struct mcde_clk_div {
struct clk_hw hw;
struct mcde *mcde;
u32 cr;
u32 cr_div;
};
static int mcde_clk_div_enable(struct clk_hw *hw)
{
struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
struct mcde *mcde = cdiv->mcde;
u32 val;
spin_lock(&mcde->fifo_crx1_lock);
val = readl(mcde->regs + cdiv->cr);
/*
* Select the PLL72 (LCD) clock as parent
* FIXME: implement other parents.
*/
val &= ~MCDE_CRX1_CLKSEL_MASK;
val |= MCDE_CRX1_CLKSEL_CLKPLL72 << MCDE_CRX1_CLKSEL_SHIFT;
/* Internal clock */
val |= MCDE_CRA1_CLKTYPE_TVXCLKSEL1;
/* Clear then set the divider */
val &= ~(MCDE_CRX1_BCD | MCDE_CRX1_PCD_MASK);
val |= cdiv->cr_div;
writel(val, mcde->regs + cdiv->cr);
spin_unlock(&mcde->fifo_crx1_lock);
return 0;
}
static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
unsigned long *prate, bool set_parent)
{
int best_div = 1, div;
struct clk_hw *parent = clk_hw_get_parent(hw);
unsigned long best_prate = 0;
unsigned long best_diff = ~0ul;
int max_div = (1 << MCDE_CRX1_PCD_BITS) - 1;
for (div = 1; div < max_div; div++) {
unsigned long this_prate, div_rate, diff;
if (set_parent)
this_prate = clk_hw_round_rate(parent, rate * div);
else
this_prate = *prate;
div_rate = DIV_ROUND_UP_ULL(this_prate, div);
diff = abs(rate - div_rate);
if (diff < best_diff) {
best_div = div;
best_diff = diff;
best_prate = this_prate;
}
}
*prate = best_prate;
return best_div;
}
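
A worked example, assuming the selected PLL72/LCD parent is fixed at 72 MHz and cannot be re-rated: for a requested rate of 25 MHz, div 2 gives DIV_ROUND_UP(72 MHz, 2) = 36 MHz (11 MHz off) while div 3 gives 24 MHz (1 MHz off), so the search settles on div 3.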
static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int div = mcde_clk_div_choose_div(hw, rate, prate, true);
return DIV_ROUND_UP_ULL(*prate, div);
}
static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
struct mcde *mcde = cdiv->mcde;
u32 cr;
int div;
/*
* If the MCDE is not powered we can't access registers.
* It will come up with 0 in the divider register bits, which
* means "divide by 2".
*/
if (!regulator_is_enabled(mcde->epod))
return DIV_ROUND_UP_ULL(prate, 2);
cr = readl(mcde->regs + cdiv->cr);
if (cr & MCDE_CRX1_BCD)
return prate;
/* 0 in the PCD means "divide by 2", 1 means "divide by 3" etc */
div = cr & MCDE_CRX1_PCD_MASK;
div += 2;
return DIV_ROUND_UP_ULL(prate, div);
}
static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
int div = mcde_clk_div_choose_div(hw, rate, &prate, false);
u32 cr = 0;
/*
* We cache the CR bits for the divider in the state so that
* we can call this before we can even write to the hardware.
*/
if (div == 1) {
/* Bypass clock divider */
cr |= MCDE_CRX1_BCD;
} else {
div -= 2;
cr |= div & MCDE_CRX1_PCD_MASK;
}
cdiv->cr_div = cr;
return 0;
}
static const struct clk_ops mcde_clk_div_ops = {
.enable = mcde_clk_div_enable,
.recalc_rate = mcde_clk_div_recalc_rate,
.round_rate = mcde_clk_div_round_rate,
.set_rate = mcde_clk_div_set_rate,
};
int mcde_init_clock_divider(struct mcde *mcde)
{
struct device *dev = mcde->dev;
struct mcde_clk_div *fifoa;
struct mcde_clk_div *fifob;
const char *parent_name;
struct clk_init_data fifoa_init = {
.name = "fifoa",
.ops = &mcde_clk_div_ops,
.parent_names = &parent_name,
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
};
struct clk_init_data fifob_init = {
.name = "fifob",
.ops = &mcde_clk_div_ops,
.parent_names = &parent_name,
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
};
int ret;
spin_lock_init(&mcde->fifo_crx1_lock);
parent_name = __clk_get_name(mcde->lcd_clk);
/* Allocate 2 clocks */
fifoa = devm_kzalloc(dev, sizeof(*fifoa), GFP_KERNEL);
if (!fifoa)
return -ENOMEM;
fifob = devm_kzalloc(dev, sizeof(*fifob), GFP_KERNEL);
if (!fifob)
return -ENOMEM;
fifoa->mcde = mcde;
fifoa->cr = MCDE_CRA1;
fifoa->hw.init = &fifoa_init;
ret = devm_clk_hw_register(dev, &fifoa->hw);
if (ret) {
dev_err(dev, "error registering FIFO A clock divider\n");
return ret;
}
mcde->fifoa_clk = fifoa->hw.clk;
fifob->mcde = mcde;
fifob->cr = MCDE_CRB1;
fifob->hw.init = &fifob_init;
ret = devm_clk_hw_register(dev, &fifob->hw);
if (ret) {
dev_err(dev, "error registering FIFO B clock divider\n");
return ret;
}
mcde->fifob_clk = fifob->hw.clk;
return 0;
}


@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/regulator/consumer.h>
#include <linux/media-bus-format.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
@ -16,6 +17,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
@ -57,10 +59,15 @@ enum mcde_overlay {
MCDE_OVERLAY_5,
};
enum mcde_dsi_formatter {
enum mcde_formatter {
MCDE_DSI_FORMATTER_0 = 0,
MCDE_DSI_FORMATTER_1,
MCDE_DSI_FORMATTER_2,
MCDE_DSI_FORMATTER_3,
MCDE_DSI_FORMATTER_4,
MCDE_DSI_FORMATTER_5,
MCDE_DPI_FORMATTER_0,
MCDE_DPI_FORMATTER_1,
};
void mcde_display_irq(struct mcde *mcde)
@ -81,7 +88,7 @@ void mcde_display_irq(struct mcde *mcde)
*
* TODO: Currently only one DSI link is supported.
*/
if (mcde_dsi_irq(mcde->mdsi)) {
if (!mcde->dpi_output && mcde_dsi_irq(mcde->mdsi)) {
u32 val;
/*
@ -243,73 +250,70 @@ static int mcde_configure_extsrc(struct mcde *mcde, enum mcde_extsrc src,
val = 0 << MCDE_EXTSRCXCONF_BUF_ID_SHIFT;
val |= 1 << MCDE_EXTSRCXCONF_BUF_NB_SHIFT;
val |= 0 << MCDE_EXTSRCXCONF_PRI_OVLID_SHIFT;
/*
* MCDE has inverse semantics from DRM on RGB/BGR, which is why
* all the modes are inverted here.
*/
switch (format) {
case DRM_FORMAT_ARGB8888:
val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ABGR8888:
val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB8888:
val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR8888:
val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_RGB888:
val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_BGR888:
val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ARGB4444:
val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ABGR4444:
val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB4444:
val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR4444:
val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB1555:
val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR1555:
val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_RGB565:
val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_BGR565:
val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_YUV422:
val |= MCDE_EXTSRCXCONF_BPP_YCBCR422 <<
@ -556,6 +560,7 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
break;
case MCDE_VIDEO_FORMATTER_FLOW:
case MCDE_DPI_FORMATTER_FLOW:
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
@ -564,7 +569,7 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
default:
dev_err(mcde->dev, "unknown flow mode %d\n",
mcde->flow_mode);
break;
return;
}
writel(val, mcde->regs + sync);
@ -594,10 +599,35 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
mcde->regs + mux);
break;
}
/*
* If using DPI, configure the sync event.
* TODO: this is for LCD only; it does not cover TV out.
*/
if (mcde->dpi_output) {
u32 stripwidth;
stripwidth = 0xF000 / (mode->vdisplay * 4);
dev_info(mcde->dev, "stripwidth: %d\n", stripwidth);
val = MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO |
(mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_HWREQVCNT_SHIFT |
MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO |
(mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_SWINTVCNT_SHIFT;
switch (fifo) {
case MCDE_FIFO_A:
writel(val, mcde->regs + MCDE_SYNCHCONFA);
break;
case MCDE_FIFO_B:
writel(val, mcde->regs + MCDE_SYNCHCONFB);
break;
}
}
}
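
For illustration, with a hypothetical 640x480 mode: stripwidth = 0xF000 / (480 * 4) = 32, so HWREQVCNT and SWINTVCNT are both programmed to 640 - 1 - 32 = 607, meaning the hardware request and software interrupt events fire 607 pixels into each active line.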
static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
enum mcde_dsi_formatter fmt,
enum mcde_formatter fmt,
int fifo_wtrmrk)
{
u32 val;
@ -618,12 +648,49 @@ static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
}
val = fifo_wtrmrk << MCDE_CTRLX_FIFOWTRMRK_SHIFT;
/* We only support DSI formatting for now */
val |= MCDE_CTRLX_FORMTYPE_DSI <<
MCDE_CTRLX_FORMTYPE_SHIFT;
/* Select the formatter to use for this FIFO */
val |= fmt << MCDE_CTRLX_FORMID_SHIFT;
/*
* Select the formatter to use for this FIFO
*
* The register definitions imply that different IDs should be used
* by the DSI formatters depending on whether they are in VID or CMD
* mode, and the manual says they are dedicated but identical.
* The vendor code uses them as it sees fit.
*/
switch (fmt) {
case MCDE_DSI_FORMATTER_0:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI0VID << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_1:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI0CMD << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_2:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI1VID << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_3:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI1CMD << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_4:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI2VID << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_5:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI2CMD << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DPI_FORMATTER_0:
val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DPIA << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DPI_FORMATTER_1:
val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DPIB << MCDE_CTRLX_FORMID_SHIFT;
break;
}
writel(val, mcde->regs + ctrl);
/* Blend source with Alpha 0xff on FIFO */
@ -631,17 +698,54 @@ static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
0xff << MCDE_CRX0_ALPHABLEND_SHIFT;
writel(val, mcde->regs + cr0);
/* Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
spin_lock(&mcde->fifo_crx1_lock);
val = readl(mcde->regs + cr1);
/*
* Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video()
* FIXME: a different clock needs to be selected for TV out.
*/
if (mcde->dpi_output) {
struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge);
u32 bus_format;
/* Use the MCDE clock for this FIFO */
val = MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT;
/* Assume RGB888 24 bit if we have no further info */
if (!connector->display_info.num_bus_formats) {
dev_info(mcde->dev, "panel does not specify bus format, assume RGB888\n");
bus_format = MEDIA_BUS_FMT_RGB888_1X24;
} else {
bus_format = connector->display_info.bus_formats[0];
}
/* TODO: when adding DPI support add OUTBPP etc here */
/*
* Set up the CDWIN and OUTBPP for the LCD
*
* FIXME: fill this in if you know the correspondence between the MIPI
* DPI specification and the media bus formats.
*/
val &= ~MCDE_CRX1_CDWIN_MASK;
val &= ~MCDE_CRX1_OUTBPP_MASK;
switch (bus_format) {
case MEDIA_BUS_FMT_RGB888_1X24:
val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT;
val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT;
break;
default:
dev_err(mcde->dev, "unknown bus format, assume RGB888\n");
val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT;
val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT;
break;
}
} else {
/* Use the MCDE clock for DSI */
val &= ~MCDE_CRX1_CLKSEL_MASK;
val |= MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT;
}
writel(val, mcde->regs + cr1);
spin_unlock(&mcde->fifo_crx1_lock);
};
static void mcde_configure_dsi_formatter(struct mcde *mcde,
enum mcde_dsi_formatter fmt,
enum mcde_formatter fmt,
u32 formatter_frame,
int pkt_size)
{
@ -681,6 +785,9 @@ static void mcde_configure_dsi_formatter(struct mcde *mcde,
delay0 = MCDE_DSIVID2DELAY0;
delay1 = MCDE_DSIVID2DELAY1;
break;
default:
dev_err(mcde->dev, "tried to configure a non-DSI formatter as DSI\n");
return;
}
/*
@ -700,7 +807,9 @@ static void mcde_configure_dsi_formatter(struct mcde *mcde,
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
val |= MCDE_DSICONF0_PACKING_RGB666_PACKED <<
dev_err(mcde->dev,
"we cannot handle the packed RGB666 format\n");
val |= MCDE_DSICONF0_PACKING_RGB666 <<
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB565:
@ -860,73 +969,140 @@ static int mcde_dsi_get_pkt_div(int ppl, int fifo_size)
return 1;
}
static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *cstate,
struct drm_plane_state *plane_state)
static void mcde_setup_dpi(struct mcde *mcde, const struct drm_display_mode *mode,
int *fifo_wtrmrk_lvl)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_plane *plane = &pipe->plane;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = to_mcde(drm);
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
u32 format = fb->format->format;
u32 formatter_ppl = mode->hdisplay; /* pixels per line */
u32 formatter_lpf = mode->vdisplay; /* lines per frame */
int pkt_size, fifo_wtrmrk;
int cpp = fb->format->cpp[0];
int formatter_cpp;
struct drm_format_name_buf tmp;
u32 formatter_frame;
u32 pkt_div;
struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge);
u32 hsw, hfp, hbp;
u32 vsw, vfp, vbp;
u32 val;
int ret;
/* This powers up the entire MCDE block and the DSI hardware */
ret = regulator_enable(mcde->epod);
if (ret) {
dev_err(drm->dev, "can't re-enable EPOD regulator\n");
return;
}
/* FIXME: we only support LCD, implement TV out */
hsw = mode->hsync_end - mode->hsync_start;
hfp = mode->hsync_start - mode->hdisplay;
hbp = mode->htotal - mode->hsync_end;
vsw = mode->vsync_end - mode->vsync_start;
vfp = mode->vsync_start - mode->vdisplay;
vbp = mode->vtotal - mode->vsync_end;
dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
mode->hdisplay, mode->vdisplay,
drm_get_format_name(format, &tmp));
if (!mcde->mdsi) {
/* TODO: deal with this for non-DSI output */
dev_err(drm->dev, "no DSI master attached!\n");
return;
}
dev_info(mcde->dev, "output on DPI LCD from channel A\n");
/* Display actual values */
dev_info(mcde->dev, "HSW: %d, HFP: %d, HBP: %d, VSW: %d, VFP: %d, VBP: %d\n",
hsw, hfp, hbp, vsw, vfp, vbp);
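
Plugging in the classic 640x480 VESA timings as a sanity check (hdisplay 640, hsync_start 656, hsync_end 752, htotal 800; vdisplay 480, vsync_start 490, vsync_end 492, vtotal 525) gives hfp = 16, hsw = 96, hbp = 48 and vfp = 10, vsw = 2, vbp = 33.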
/*
* The pixel fetcher is 128 64-bit words deep = 1024 bytes.
* One overlay of 32bpp (4 cpp) assumed, fetch 160 pixels.
* 160 * 4 = 640 bytes.
*/
*fifo_wtrmrk_lvl = 640;
/* Set up the main control, watermark level at 7 */
val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
/* 24 bits DPI: connect LSB Ch B to D[0:7] */
val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
/* TV out: connect LSB Ch B to D[8:15] */
val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
/*
* This sets up the internal silicon muxing of the DPI
* lines. This is how the silicon connects out to the
* external pins; the pins then need to be further
* configured into "alternate functions" using pin control
* to actually get the signals out.
*
* FIXME: this is hardcoded to the only setting found in
* the wild. If we need to use different settings for
* different DPI displays, make this parameterizable from
* the device tree.
*/
/* 24 bits DPI: connect Ch A LSB to D[0:7] */
val |= 0 << MCDE_CONF0_OUTMUX0_SHIFT;
/* 24 bits DPI: connect Ch A MID to D[8:15] */
val |= 1 << MCDE_CONF0_OUTMUX1_SHIFT;
/* Don't care about this muxing */
val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
/* 24 bits DPI: connect MID Ch B to D[24:31] */
val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
/* 5: 24 bits DPI: connect MSB Ch B to D[32:39] */
val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
/* Syncmux bits zero: DPI channel A and B on output pins A and B resp */
/* Don't care about this muxing */
val |= 0 << MCDE_CONF0_OUTMUX3_SHIFT;
/* 24 bits DPI: connect Ch A MSB to D[32:39] */
val |= 2 << MCDE_CONF0_OUTMUX4_SHIFT;
/* Syncmux bits zero: DPI channel A */
writel(val, mcde->regs + MCDE_CONF0);
/* Clear any pending interrupts */
mcde_display_disable_irqs(mcde);
writel(0, mcde->regs + MCDE_IMSCERR);
writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
/* This hammers us into LCD mode */
writel(0, mcde->regs + MCDE_TVCRA);
dev_info(drm->dev, "output in %s mode, format %dbpp\n",
/* Front porch and sync width */
val = (vsw << MCDE_TVBL1_BEL1_SHIFT);
val |= (vfp << MCDE_TVBL1_BSL1_SHIFT);
writel(val, mcde->regs + MCDE_TVBL1A);
/* The vendor driver sets the same value into TVBL2A */
writel(val, mcde->regs + MCDE_TVBL2A);
/* Vertical back porch */
val = (vbp << MCDE_TVDVO_DVO1_SHIFT);
/* The vendor driver sets the same value into TVDVOA */
val |= (vbp << MCDE_TVDVO_DVO2_SHIFT);
writel(val, mcde->regs + MCDE_TVDVOA);
/* Horizontal back porch, as 0 = 1 cycle we need to subtract 1 */
writel((hbp - 1), mcde->regs + MCDE_TVTIM1A);
/* Horizontal sync width and horizontal front porch, 0 = 1 cycle */
val = ((hsw - 1) << MCDE_TVLBALW_LBW_SHIFT);
val |= ((hfp - 1) << MCDE_TVLBALW_ALW_SHIFT);
writel(val, mcde->regs + MCDE_TVLBALWA);
/* Blank some TV registers we don't use */
writel(0, mcde->regs + MCDE_TVISLA);
writel(0, mcde->regs + MCDE_TVBLUA);
/* Set up sync inversion etc */
val = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
val |= MCDE_LCDTIM1B_IHS;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
val |= MCDE_LCDTIM1B_IVS;
if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= MCDE_LCDTIM1B_IOE;
if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
val |= MCDE_LCDTIM1B_IPC;
writel(val, mcde->regs + MCDE_LCDTIM1A);
}
static void mcde_setup_dsi(struct mcde *mcde, const struct drm_display_mode *mode,
int cpp, int *fifo_wtrmrk_lvl, int *dsi_formatter_frame,
int *dsi_pkt_size)
{
u32 formatter_ppl = mode->hdisplay; /* pixels per line */
u32 formatter_lpf = mode->vdisplay; /* lines per frame */
int formatter_frame;
int formatter_cpp;
int fifo_wtrmrk;
u32 pkt_div;
int pkt_size;
u32 val;
dev_info(mcde->dev, "output in %s mode, format %dbpp\n",
(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ?
"VIDEO" : "CMD",
mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format));
formatter_cpp =
mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format) / 8;
dev_info(drm->dev, "overlay CPP %d bytes, DSI CPP %d bytes\n",
cpp,
formatter_cpp);
dev_info(mcde->dev, "Overlay CPP: %d bytes, DSI formatter CPP %d bytes\n",
cpp, formatter_cpp);
/* Set up the main control, watermark level at 7 */
val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
/*
* This is the internal silicon muxing of the DPI
* (parallel display) lines. Since we are not using
* this at all (we are using DSI) these are just
* dummy values from the vendor tree.
*/
val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
writel(val, mcde->regs + MCDE_CONF0);
/* Calculations from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
@ -948,9 +1124,9 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
/* The FIFO is 640 entries deep on this v3 hardware */
pkt_div = mcde_dsi_get_pkt_div(mode->hdisplay, 640);
}
dev_dbg(drm->dev, "FIFO watermark after flooring: %d bytes\n",
dev_dbg(mcde->dev, "FIFO watermark after flooring: %d bytes\n",
fifo_wtrmrk);
dev_dbg(drm->dev, "Packet divisor: %d bytes\n", pkt_div);
dev_dbg(mcde->dev, "Packet divisor: %d bytes\n", pkt_div);
/* NOTE: pkt_div is 1 for video mode */
pkt_size = (formatter_ppl * formatter_cpp) / pkt_div;
@ -958,16 +1134,64 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
if (!(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO))
pkt_size++;
dev_dbg(drm->dev, "DSI packet size: %d * %d bytes per line\n",
dev_dbg(mcde->dev, "DSI packet size: %d * %d bytes per line\n",
pkt_size, pkt_div);
dev_dbg(drm->dev, "Overlay frame size: %u bytes\n",
dev_dbg(mcde->dev, "Overlay frame size: %u bytes\n",
mode->hdisplay * mode->vdisplay * cpp);
mcde->stride = mode->hdisplay * cpp;
dev_dbg(drm->dev, "Overlay line stride: %u bytes\n",
mcde->stride);
/* NOTE: pkt_div is 1 for video mode */
formatter_frame = pkt_size * pkt_div * formatter_lpf;
dev_dbg(drm->dev, "Formatter frame size: %u bytes\n", formatter_frame);
dev_dbg(mcde->dev, "Formatter frame size: %u bytes\n", formatter_frame);
*fifo_wtrmrk_lvl = fifo_wtrmrk;
*dsi_pkt_size = pkt_size;
*dsi_formatter_frame = formatter_frame;
}
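
A quick numeric walk-through for a hypothetical 480x800 RGB888 panel in video mode: formatter_cpp = 24 / 8 = 3 and pkt_div = 1, so pkt_size = 480 * 3 = 1440 bytes per line and formatter_frame = 1440 * 1 * 800 = 1152000 bytes; in command mode pkt_size would gain one extra byte (the DCS command).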
static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *cstate,
struct drm_plane_state *plane_state)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_plane *plane = &pipe->plane;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = to_mcde(drm);
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
u32 format = fb->format->format;
int dsi_pkt_size;
int fifo_wtrmrk;
int cpp = fb->format->cpp[0];
struct drm_format_name_buf tmp;
u32 dsi_formatter_frame;
u32 val;
int ret;
/* This powers up the entire MCDE block and the DSI hardware */
ret = regulator_enable(mcde->epod);
if (ret) {
dev_err(drm->dev, "can't re-enable EPOD regulator\n");
return;
}
dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
mode->hdisplay, mode->vdisplay,
drm_get_format_name(format, &tmp));
/* Clear any pending interrupts */
mcde_display_disable_irqs(mcde);
writel(0, mcde->regs + MCDE_IMSCERR);
writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
if (mcde->dpi_output)
mcde_setup_dpi(mcde, mode, &fifo_wtrmrk);
else
mcde_setup_dsi(mcde, mode, cpp, &fifo_wtrmrk,
&dsi_formatter_frame, &dsi_pkt_size);
mcde->stride = mode->hdisplay * cpp;
dev_dbg(drm->dev, "Overlay line stride: %u bytes\n",
mcde->stride);
/* Drain the FIFO A + channel 0 pipe so we have a clean slate */
mcde_drain_pipe(mcde, MCDE_FIFO_A, MCDE_CHANNEL_0);
@ -995,29 +1219,47 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
*/
mcde_configure_channel(mcde, MCDE_CHANNEL_0, MCDE_FIFO_A, mode);
/* Configure FIFO A to use DSI formatter 0 */
mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
fifo_wtrmrk);
if (mcde->dpi_output) {
unsigned long lcd_freq;
/*
* This brings up the DSI bridge which is tightly connected
* to the MCDE DSI formatter.
*
* FIXME: if we want to use another formatter, such as DPI,
* we need to be more elaborate here and select the appropriate
* bridge.
*/
mcde_dsi_enable(mcde->bridge);
/* Configure FIFO A to use DPI formatter 0 */
mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DPI_FORMATTER_0,
fifo_wtrmrk);
/* Configure the DSI formatter 0 for the DSI panel output */
mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
formatter_frame, pkt_size);
/* Set up and enable the LCD clock */
lcd_freq = clk_round_rate(mcde->fifoa_clk, mode->clock * 1000);
ret = clk_set_rate(mcde->fifoa_clk, lcd_freq);
if (ret)
dev_err(mcde->dev, "failed to set LCD clock rate %lu Hz\n",
lcd_freq);
ret = clk_prepare_enable(mcde->fifoa_clk);
if (ret) {
dev_err(mcde->dev, "failed to enable FIFO A DPI clock\n");
return;
}
dev_info(mcde->dev, "LCD FIFO A clk rate %lu Hz\n",
clk_get_rate(mcde->fifoa_clk));
} else {
/* Configure FIFO A to use DSI formatter 0 */
mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
fifo_wtrmrk);
/*
* This brings up the DSI bridge which is tightly connected
* to the MCDE DSI formatter.
*/
mcde_dsi_enable(mcde->bridge);
/* Configure the DSI formatter 0 for the DSI panel output */
mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
dsi_formatter_frame, dsi_pkt_size);
}
switch (mcde->flow_mode) {
case MCDE_COMMAND_TE_FLOW:
case MCDE_COMMAND_BTA_TE_FLOW:
case MCDE_VIDEO_TE_FLOW:
/* We are using TE in some comination */
/* We are using TE in some combination */
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
val = MCDE_VSCRC_VSPOL;
else
@ -1069,8 +1311,12 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
/* Disable FIFO A flow */
mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
/* This disables the DSI bridge */
mcde_dsi_disable(mcde->bridge);
if (mcde->dpi_output) {
clk_disable_unprepare(mcde->fifoa_clk);
} else {
/* This disables the DSI bridge */
mcde_dsi_disable(mcde->bridge);
}
event = crtc->state->event;
if (event) {
@ -1261,6 +1507,10 @@ int mcde_display_init(struct drm_device *drm)
DRM_FORMAT_YUV422,
};
ret = mcde_init_clock_divider(mcde);
if (ret)
return ret;
ret = drm_simple_display_pipe_init(drm, &mcde->pipe,
&mcde_display_funcs,
formats, ARRAY_SIZE(formats),


@ -215,6 +215,80 @@
#define MCDE_OVLXCOMP_Z_SHIFT 27
#define MCDE_OVLXCOMP_Z_MASK 0x78000000
/* DPI/TV configuration registers, channel A and B */
#define MCDE_TVCRA 0x00000838
#define MCDE_TVCRB 0x00000A38
#define MCDE_TVCR_MOD_TV BIT(0) /* 0 = LCD mode */
#define MCDE_TVCR_INTEREN BIT(1)
#define MCDE_TVCR_IFIELD BIT(2)
#define MCDE_TVCR_TVMODE_SDTV_656P (0 << 3)
#define MCDE_TVCR_TVMODE_SDTV_656P_LE (3 << 3)
#define MCDE_TVCR_TVMODE_SDTV_656P_BE (4 << 3)
#define MCDE_TVCR_SDTVMODE_Y0CBY1CR (0 << 6)
#define MCDE_TVCR_SDTVMODE_CBY0CRY1 (1 << 6)
#define MCDE_TVCR_AVRGEN BIT(8)
#define MCDE_TVCR_CKINV BIT(9)
/* TV blanking control register 1, channel A and B */
#define MCDE_TVBL1A 0x0000083C
#define MCDE_TVBL1B 0x00000A3C
#define MCDE_TVBL1_BEL1_SHIFT 0 /* VFP vertical front porch 11 bits */
#define MCDE_TVBL1_BSL1_SHIFT 16 /* VSW vertical sync pulse width 11 bits */
/* Pixel processing TV start line, channel A and B */
#define MCDE_TVISLA 0x00000840
#define MCDE_TVISLB 0x00000A40
#define MCDE_TVISL_FSL1_SHIFT 0 /* Field 1 identification start line 11 bits */
#define MCDE_TVISL_FSL2_SHIFT 16 /* Field 2 identification start line 11 bits */
/* Pixel processing TV DVO offset */
#define MCDE_TVDVOA 0x00000844
#define MCDE_TVDVOB 0x00000A44
#define MCDE_TVDVO_DVO1_SHIFT 0 /* VBP vertical back porch 0 = 0 */
#define MCDE_TVDVO_DVO2_SHIFT 16
/*
* Pixel processing TV Timing 1
* HBP horizontal back porch 11 bits horizontal offset
* 0 = 1 pixel HBP, 255 = 256 pixels, so actual value - 1
*/
#define MCDE_TVTIM1A 0x0000084C
#define MCDE_TVTIM1B 0x00000A4C
/* Pixel processing TV LBALW */
/* 0 = 1 clock cycle, 255 = 256 clock cycles */
#define MCDE_TVLBALWA 0x00000850
#define MCDE_TVLBALWB 0x00000A50
#define MCDE_TVLBALW_LBW_SHIFT 0 /* HSW horizontal sync width, line blanking width 11 bits */
#define MCDE_TVLBALW_ALW_SHIFT 16 /* HFP horizontal front porch, active line width 11 bits */
/* TV blanking control register 1, channel A and B */
#define MCDE_TVBL2A 0x00000854
#define MCDE_TVBL2B 0x00000A54
#define MCDE_TVBL2_BEL2_SHIFT 0 /* Field 2 blanking end line 11 bits */
#define MCDE_TVBL2_BSL2_SHIFT 16 /* Field 2 blanking start line 11 bits */
/* Pixel processing TV background */
#define MCDE_TVBLUA 0x00000858
#define MCDE_TVBLUB 0x00000A58
#define MCDE_TVBLU_TVBLU_SHIFT 0 /* 8 bits luminance */
#define MCDE_TVBLU_TVBCB_SHIFT 8 /* 8 bits Cb chrominance */
#define MCDE_TVBLU_TVBCR_SHIFT 16 /* 8 bits Cr chrominance */
/* Pixel processing LCD timing 1 */
#define MCDE_LCDTIM1A 0x00000860
#define MCDE_LCDTIM1B 0x00000A60
/* inverted vertical sync pulse for HRTFT 0 = active low, 1 active high */
#define MCDE_LCDTIM1B_IVP BIT(19)
/* inverted vertical sync, 0 = active high (the normal), 1 = active low */
#define MCDE_LCDTIM1B_IVS BIT(20)
/* inverted horizontal sync, 0 = active high (the normal), 1 = active low */
#define MCDE_LCDTIM1B_IHS BIT(21)
/* inverted panel clock 0 = rising edge data out, 1 = falling edge data out */
#define MCDE_LCDTIM1B_IPC BIT(22)
/* invert output enable 0 = active high, 1 = active low */
#define MCDE_LCDTIM1B_IOE BIT(23)
#define MCDE_CRC 0x00000C00
#define MCDE_CRC_C1EN BIT(2)
#define MCDE_CRC_C2EN BIT(3)
@ -360,6 +434,7 @@
#define MCDE_CRB1 0x00000A04
#define MCDE_CRX1_PCD_SHIFT 0
#define MCDE_CRX1_PCD_MASK 0x000003FF
#define MCDE_CRX1_PCD_BITS 10
#define MCDE_CRX1_CLKSEL_SHIFT 10
#define MCDE_CRX1_CLKSEL_MASK 0x00001C00
#define MCDE_CRX1_CLKSEL_CLKPLL72 0
@ -421,8 +496,20 @@
#define MCDE_ROTACONF 0x0000087C
#define MCDE_ROTBCONF 0x00000A7C
/* Synchronization event configuration */
#define MCDE_SYNCHCONFA 0x00000880
#define MCDE_SYNCHCONFB 0x00000A80
#define MCDE_SYNCHCONF_HWREQVEVENT_SHIFT 0
#define MCDE_SYNCHCONF_HWREQVEVENT_VSYNC (0 << 0)
#define MCDE_SYNCHCONF_HWREQVEVENT_BACK_PORCH (1 << 0)
#define MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO (2 << 0)
#define MCDE_SYNCHCONF_HWREQVEVENT_FRONT_PORCH (3 << 0)
#define MCDE_SYNCHCONF_HWREQVCNT_SHIFT 2 /* 14 bits */
#define MCDE_SYNCHCONF_SWINTVEVENT_VSYNC (0 << 16)
#define MCDE_SYNCHCONF_SWINTVEVENT_BACK_PORCH (1 << 16)
#define MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO (2 << 16)
#define MCDE_SYNCHCONF_SWINTVEVENT_FRONT_PORCH (3 << 16)
#define MCDE_SYNCHCONF_SWINTVCNT_SHIFT 18 /* 14 bits */
/* Channel A+B control registers */
#define MCDE_CTRLA 0x00000884
@ -465,8 +552,8 @@
#define MCDE_DSICONF0_PACKING_MASK 0x00700000
#define MCDE_DSICONF0_PACKING_RGB565 0
#define MCDE_DSICONF0_PACKING_RGB666 1
#define MCDE_DSICONF0_PACKING_RGB666_PACKED 2
#define MCDE_DSICONF0_PACKING_RGB888 3
#define MCDE_DSICONF0_PACKING_RGB888 2
#define MCDE_DSICONF0_PACKING_BGR888 3
#define MCDE_DSICONF0_PACKING_HDTV 4
#define MCDE_DSIVID0FRAME 0x00000E04


@ -62,6 +62,8 @@ enum mcde_flow_mode {
MCDE_VIDEO_TE_FLOW,
/* Video mode with the formatter itself as sync source */
MCDE_VIDEO_FORMATTER_FLOW,
/* DPI video with the formatter itself as sync source */
MCDE_DPI_FORMATTER_FLOW,
};
struct mcde {
@ -72,6 +74,7 @@ struct mcde {
struct drm_connector *connector;
struct drm_simple_display_pipe pipe;
struct mipi_dsi_device *mdsi;
bool dpi_output;
s16 stride;
enum mcde_flow_mode flow_mode;
unsigned int flow_active;
@ -82,6 +85,11 @@ struct mcde {
struct clk *mcde_clk;
struct clk *lcd_clk;
struct clk *hdmi_clk;
/* Handles to the clock dividers for FIFO A and B */
struct clk *fifoa_clk;
struct clk *fifob_clk;
/* Locks the MCDE FIFO control register A and B */
spinlock_t fifo_crx1_lock;
struct regulator *epod;
struct regulator *vana;
@ -105,4 +113,6 @@ void mcde_display_irq(struct mcde *mcde);
void mcde_display_disable_irqs(struct mcde *mcde);
int mcde_display_init(struct drm_device *drm);
int mcde_init_clock_divider(struct mcde *mcde);
#endif /* _MCDE_DRM_H_ */


@ -22,13 +22,13 @@
* The hardware has four display pipes, and the layout is a little
* bit like this::
*
* Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
* External 0..5 0..3 A,B, 3 x DSI bridge
* Memory -> Overlay -> Channel -> FIFO -> 8 formatters -> DSI/DPI
* External 0..5 0..3 A,B, 6 x DSI bridge
* source 0..9 C0,C1 2 x DPI
*
* FIFOs A and B are for LCD and HDMI while FIFO C0/C1 are for
* panels with embedded buffer.
* 3 of the formatters are for DSI.
* 6 of the formatters are for DSI, 3 pairs for VID/CMD respectively.
* 2 of the formatters are for DPI.
*
* Behind the formatters are the DSI or DPI ports that route to
@ -130,9 +130,37 @@ static int mcde_modeset_init(struct drm_device *drm)
struct mcde *mcde = to_mcde(drm);
int ret;
/*
* If no other bridge was found, check if we have a DPI panel or
* any other bridge connected directly to the MCDE DPI output.
* If a DSI bridge is found, DSI will take precedence.
*
* TODO: more elaborate bridge selection if we have more than one
* thing attached to the system.
*/
if (!mcde->bridge) {
dev_err(drm->dev, "no display output bridge yet\n");
return -EPROBE_DEFER;
struct drm_panel *panel;
struct drm_bridge *bridge;
ret = drm_of_find_panel_or_bridge(drm->dev->of_node,
0, 0, &panel, &bridge);
if (ret) {
dev_err(drm->dev,
"Could not locate any output bridge or panel\n");
return ret;
}
if (panel) {
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
dev_err(drm->dev,
"Could not connect panel bridge\n");
return PTR_ERR(bridge);
}
}
mcde->dpi_output = true;
mcde->bridge = bridge;
mcde->flow_mode = MCDE_DPI_FORMATTER_FLOW;
}
mode_config = &drm->mode_config;
@ -156,13 +184,7 @@ static int mcde_modeset_init(struct drm_device *drm)
return ret;
}
/*
* Attach the DSI bridge
*
* TODO: when adding support for the DPI bridge or several DSI bridges,
* we selectively connect the bridge(s) here instead of this simple
* attachment.
*/
/* Attach the bridge. */
ret = drm_simple_display_pipe_attach_bridge(&mcde->pipe,
mcde->bridge);
if (ret) {


@ -145,8 +145,6 @@ struct meson_dw_hdmi {
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
struct clk *hdmi_pclk;
struct clk *venci_clk;
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
@ -946,6 +944,29 @@ static void meson_disable_regulator(void *data)
regulator_disable(data);
}
static void meson_disable_clk(void *data)
{
clk_disable_unprepare(data);
}
static int meson_enable_clk(struct device *dev, char *name)
{
struct clk *clk;
int ret;
clk = devm_clk_get(dev, name);
if (IS_ERR(clk)) {
dev_err(dev, "Unable to get %s pclk\n", name);
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (!ret)
ret = devm_add_action_or_reset(dev, meson_disable_clk, clk);
return ret;
}
static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@ -1026,19 +1047,17 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(meson_dw_hdmi->hdmitx))
return PTR_ERR(meson_dw_hdmi->hdmitx);
meson_dw_hdmi->hdmi_pclk = devm_clk_get(dev, "isfr");
if (IS_ERR(meson_dw_hdmi->hdmi_pclk)) {
dev_err(dev, "Unable to get HDMI pclk\n");
return PTR_ERR(meson_dw_hdmi->hdmi_pclk);
}
clk_prepare_enable(meson_dw_hdmi->hdmi_pclk);
ret = meson_enable_clk(dev, "isfr");
if (ret)
return ret;
meson_dw_hdmi->venci_clk = devm_clk_get(dev, "venci");
if (IS_ERR(meson_dw_hdmi->venci_clk)) {
dev_err(dev, "Unable to get venci clk\n");
return PTR_ERR(meson_dw_hdmi->venci_clk);
}
clk_prepare_enable(meson_dw_hdmi->venci_clk);
ret = meson_enable_clk(dev, "iahb");
if (ret)
return ret;
ret = meson_enable_clk(dev, "venci");
if (ret)
return ret;
dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi,
&meson_dw_hdmi_regmap_config);
@ -1071,6 +1090,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
encoder->possible_crtcs = BIT(0);
meson_dw_hdmi_init(meson_dw_hdmi);
DRM_DEBUG_DRIVER("encoder initialized\n");
/* Bridge / Connector */
@ -1095,8 +1116,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
meson_dw_hdmi_init(meson_dw_hdmi);
next_bridge = of_drm_find_bridge(pdev->dev.of_node);
if (next_bridge)
drm_bridge_attach(encoder, next_bridge,


@ -37,7 +37,6 @@ static const struct drm_driver mgag200_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
.gem_create_object = drm_gem_shmem_create_object_cached,
DRM_GEM_SHMEM_DRIVER_OPS,
};


@ -211,10 +211,8 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
fput(vma->vm_file);
get_file(obj->filp);
vma->vm_pgoff = 0;
vma->vm_file = obj->filp;
vma_set_file(vma, obj->filp);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}


@ -134,11 +134,8 @@ static int mxsfb_attach_bridge(struct mxsfb_drm_private *mxsfb)
return -ENODEV;
ret = drm_bridge_attach(&mxsfb->encoder, bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm->dev,
"failed to attach bridge: %d\n", ret);
return ret;
}
if (ret)
return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n");
mxsfb->bridge = bridge;
@ -212,7 +209,8 @@ static int mxsfb_load(struct drm_device *drm,
ret = mxsfb_attach_bridge(mxsfb);
if (ret) {
dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
if (ret != -EPROBE_DEFER)
dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
goto err_vblank;
}


@ -32,6 +32,7 @@
#include <linux/hdmi.h>
#include <linux/component.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
@ -1161,8 +1162,10 @@ nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
struct drm_atomic_state *state)
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_crtc *crtc = connector_state->crtc;


@ -942,16 +942,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
if ((old_reg->mem_type == TTM_PL_SYSTEM &&
new_reg->mem_type == TTM_PL_VRAM) ||
(old_reg->mem_type == TTM_PL_VRAM &&
new_reg->mem_type == TTM_PL_SYSTEM)) {
hop->fpfn = 0;
hop->lpfn = 0;
hop->mem_type = TTM_PL_TT;
hop->flags = 0;
return -EMULTIHOP;
}
if (new_reg->mem_type == TTM_PL_TT) {
ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
@ -995,14 +985,25 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
/* Hardware assisted copy. */
if (drm->ttm.move) {
if ((old_reg->mem_type == TTM_PL_SYSTEM &&
new_reg->mem_type == TTM_PL_VRAM) ||
(old_reg->mem_type == TTM_PL_VRAM &&
new_reg->mem_type == TTM_PL_SYSTEM)) {
hop->fpfn = 0;
hop->lpfn = 0;
hop->mem_type = TTM_PL_TT;
hop->flags = 0;
return -EMULTIHOP;
}
ret = nouveau_bo_move_m2mf(bo, evict, ctx,
new_reg);
if (!ret)
goto out;
}
} else
ret = -ENODEV;
/* Fallback to software copy. */
ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
if (ret) {
/* Fallback to software copy. */
ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
}
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {

Some files were not shown because too many files have changed in this diff.