diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index f9750b0b6708..27fffafe5b5c 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -159,6 +159,8 @@ properties: - innolux,g121x1-l03 # Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel - innolux,n116bge + # InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel + - innolux,n125hce-gn1 # InnoLux 15.6" WXGA TFT LCD panel - innolux,n156bge-l21 # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile index cb2497d157f6..90ed8c789e48 100644 --- a/drivers/char/agp/Makefile +++ b/drivers/char/agp/Makefile @@ -1,7 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 -agpgart-y := backend.o frontend.o generic.o isoch.o +agpgart-y := backend.o generic.o isoch.o +ifeq ($(CONFIG_DRM_LEGACY),y) agpgart-$(CONFIG_COMPAT) += compat_ioctl.o +agpgart-y += frontend.o +endif + obj-$(CONFIG_AGP) += agpgart.o obj-$(CONFIG_AGP_ALI) += ali-agp.o diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 4eb1c772ded7..bb09d64cd51e 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h @@ -186,8 +186,13 @@ int agp_add_bridge(struct agp_bridge_data *bridge); void agp_remove_bridge(struct agp_bridge_data *bridge); /* Frontend routines. */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) int agp_frontend_initialize(void); void agp_frontend_cleanup(void); +#else +static inline int agp_frontend_initialize(void) { return 0; } +static inline void agp_frontend_cleanup(void) {} +#endif /* Generic routines. */ void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 0eb80c1ecdab..e63684d4cd90 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1166,9 +1166,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, unsigned long pgoff) { - struct file *oldfile; - int ret; - if (WARN_ON(!dmabuf || !vma)) return -EINVAL; @@ -1186,22 +1183,10 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, return -EINVAL; /* readjust the vma */ - get_file(dmabuf->file); - oldfile = vma->vm_file; - vma->vm_file = dmabuf->file; + vma_set_file(vma, dmabuf->file); vma->vm_pgoff = pgoff; - ret = dmabuf->ops->mmap(dmabuf, vma); - if (ret) { - /* restore old parameters on failure */ - vma->vm_file = oldfile; - fput(dmabuf->file); - } else { - if (oldfile) - fput(oldfile); - } - return ret; - + return dmabuf->ops->mmap(dmabuf, vma); } EXPORT_SYMBOL_GPL(dma_buf_mmap); diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index bb5a42b10c29..6ddbeb5dfbf6 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -200,7 +200,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) max = max(old->shared_count + num_fences, old->shared_max * 2); } else { - max = 4; + max = max(4ul, roundup_pow_of_two(num_fences)); } new = dma_resv_list_alloc(max); diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index 6e54cdec3da0..974467791032 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += heap-helpers.o obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o diff 
--git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index e55384dc115b..5e7c3436310c 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -2,76 +2,305 @@ /* * DMABUF CMA heap exporter * - * Copyright (C) 2012, 2019 Linaro Ltd. + * Copyright (C) 2012, 2019, 2020 Linaro Ltd. * Author: for ST-Ericsson. + * + * Also utilizing parts of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis */ - #include -#include #include #include #include #include -#include #include +#include +#include #include -#include #include -#include +#include -#include "heap-helpers.h" struct cma_heap { struct dma_heap *heap; struct cma *cma; }; -static void cma_heap_free(struct heap_helper_buffer *buffer) -{ - struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap); - unsigned long nr_pages = buffer->pagecount; - struct page *cma_pages = buffer->priv_virt; +struct cma_heap_buffer { + struct cma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct page *cma_pages; + struct page **pages; + pgoff_t pagecount; + int vmap_cnt; + void *vaddr; +}; - /* free page list */ - kfree(buffer->pages); - /* release memory */ - cma_release(cma_heap->cma, cma_pages, nr_pages); +struct dma_heap_attachment { + struct device *dev; + struct sg_table table; + struct list_head list; + bool mapped; +}; + +static int cma_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = sg_alloc_table_from_pages(&a->table, buffer->pages, + buffer->pagecount, 0, + buffer->pagecount << PAGE_SHIFT, + GFP_KERNEL); + if (ret) { + kfree(a); + return ret; + } + + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void cma_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(&a->table); + kfree(a); +} + +static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = &a->table; + int ret; + + ret = dma_map_sgtable(attachment->dev, table, direction, 0); + if (ret) + return ERR_PTR(-ENOMEM); + a->mapped = true; + return table; +} + +static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, 0); +} + +static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + mutex_lock(&buffer->lock); + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); + } + 
mutex_unlock(&buffer->lock); + + return 0; +} + +static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + mutex_lock(&buffer->lock); + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, &a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct cma_heap_buffer *buffer = vma->vm_private_data; + + if (vmf->pgoff >= buffer->pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = buffer->pages[vmf->pgoff]; + get_page(vmf->page); + + return 0; +} + +static const struct vm_operations_struct dma_heap_vm_ops = { + .fault = cma_heap_vm_fault, +}; + +static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) + return -EINVAL; + + vma->vm_ops = &dma_heap_vm_ops; + vma->vm_private_data = buffer; + + return 0; +} + +static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) +{ + void *vaddr; + + vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + int ret = 0; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + dma_buf_map_set_vaddr(map, buffer->vaddr); + goto out; + } + + vaddr = cma_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto out; + } + buffer->vaddr = vaddr; + buffer->vmap_cnt++; + dma_buf_map_set_vaddr(map, buffer->vaddr); +out: + mutex_unlock(&buffer->lock); + + return ret; +} + +static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); + dma_buf_map_clear(map); +} + +static void cma_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct cma_heap_buffer *buffer = dmabuf->priv; + struct cma_heap *cma_heap = buffer->heap; + + if (buffer->vmap_cnt > 0) { + WARN(1, "%s: buffer still mapped in the kernel\n", __func__); + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + + cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); kfree(buffer); } -/* dmabuf heap CMA operations functions */ +static const struct dma_buf_ops cma_heap_buf_ops = { + .attach = cma_heap_attach, + .detach = cma_heap_detach, + .map_dma_buf = cma_heap_map_dma_buf, + .unmap_dma_buf = cma_heap_unmap_dma_buf, + .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access, + .end_cpu_access = cma_heap_dma_buf_end_cpu_access, + .mmap = cma_heap_mmap, + .vmap = cma_heap_vmap, + .vunmap = cma_heap_vunmap, + .release = cma_heap_dma_buf_release, +}; + static int cma_heap_allocate(struct dma_heap *heap, - unsigned long len, - unsigned long fd_flags, - unsigned long heap_flags) + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) { struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); - struct heap_helper_buffer *helper_buffer; - struct page *cma_pages; + struct cma_heap_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info); size_t size = PAGE_ALIGN(len); - unsigned long nr_pages = size >> PAGE_SHIFT; + pgoff_t pagecount = size >> PAGE_SHIFT; unsigned long align = get_order(size); + struct page *cma_pages; struct dma_buf *dmabuf; int ret = -ENOMEM; pgoff_t pg; + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->len = size; + if (align > CONFIG_CMA_ALIGNMENT) align = CONFIG_CMA_ALIGNMENT; - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) - return -ENOMEM; - - init_heap_helper_buffer(helper_buffer, cma_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; - - cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false); + cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false); if (!cma_pages) - goto free_buf; + goto free_buffer; + /* Clear the cma pages */ if (PageHighMem(cma_pages)) { - unsigned long nr_clear_pages = nr_pages; + unsigned long nr_clear_pages = pagecount; struct page *page = cma_pages; while (nr_clear_pages > 0) { @@ -85,7 +314,6 @@ static int cma_heap_allocate(struct dma_heap *heap, */ if (fatal_signal_pending(current)) goto free_cma; - page++; nr_clear_pages--; } @@ -93,28 +321,30 @@ static int cma_heap_allocate(struct dma_heap *heap, memset(page_address(cma_pages), 0, size); } - helper_buffer->pagecount = nr_pages; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { + buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL); + if (!buffer->pages) { ret = -ENOMEM; goto free_cma; } - for (pg = 0; pg < helper_buffer->pagecount; pg++) - helper_buffer->pages[pg] = &cma_pages[pg]; + for (pg = 0; pg < pagecount; pg++) + buffer->pages[pg] = &cma_pages[pg]; + + buffer->cma_pages = cma_pages; + buffer->heap = cma_heap; + buffer->pagecount = pagecount; /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.ops = &cma_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); goto free_pages; } - helper_buffer->dmabuf = dmabuf; - helper_buffer->priv_virt = cma_pages; - ret = dma_buf_fd(dmabuf, fd_flags); if (ret < 0) { dma_buf_put(dmabuf); @@ -125,11 +355,12 @@ static int cma_heap_allocate(struct dma_heap *heap, return ret; free_pages: - kfree(helper_buffer->pages); + kfree(buffer->pages); free_cma: - cma_release(cma_heap->cma, cma_pages, nr_pages); -free_buf: - kfree(helper_buffer); + cma_release(cma_heap->cma, cma_pages, pagecount); +free_buffer: + kfree(buffer); + return ret; } diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c deleted file mode 100644 index fcf4ce3e2cbb..000000000000 --- a/drivers/dma-buf/heaps/heap-helpers.c +++ /dev/null @@ -1,274 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "heap-helpers.h" - -void init_heap_helper_buffer(struct heap_helper_buffer *buffer, - void (*free)(struct heap_helper_buffer *)) -{ - buffer->priv_virt = NULL; - mutex_init(&buffer->lock); - buffer->vmap_cnt = 0; - buffer->vaddr = NULL; - buffer->pagecount = 0; - buffer->pages = NULL; - INIT_LIST_HEAD(&buffer->attachments); - buffer->free = free; -} - -struct dma_buf 
*heap_helper_export_dmabuf(struct heap_helper_buffer *buffer, - int fd_flags) -{ - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - - exp_info.ops = &heap_helper_ops; - exp_info.size = buffer->size; - exp_info.flags = fd_flags; - exp_info.priv = buffer; - - return dma_buf_export(&exp_info); -} - -static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer) -{ - void *vaddr; - - vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); - if (!vaddr) - return ERR_PTR(-ENOMEM); - - return vaddr; -} - -static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer) -{ - if (buffer->vmap_cnt > 0) { - WARN(1, "%s: buffer still mapped in the kernel\n", __func__); - vunmap(buffer->vaddr); - } - - buffer->free(buffer); -} - -static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer) -{ - void *vaddr; - - if (buffer->vmap_cnt) { - buffer->vmap_cnt++; - return buffer->vaddr; - } - vaddr = dma_heap_map_kernel(buffer); - if (IS_ERR(vaddr)) - return vaddr; - buffer->vaddr = vaddr; - buffer->vmap_cnt++; - return vaddr; -} - -static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer) -{ - if (!--buffer->vmap_cnt) { - vunmap(buffer->vaddr); - buffer->vaddr = NULL; - } -} - -struct dma_heaps_attachment { - struct device *dev; - struct sg_table table; - struct list_head list; -}; - -static int dma_heap_attach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attachment) -{ - struct dma_heaps_attachment *a; - struct heap_helper_buffer *buffer = dmabuf->priv; - int ret; - - a = kzalloc(sizeof(*a), GFP_KERNEL); - if (!a) - return -ENOMEM; - - ret = sg_alloc_table_from_pages(&a->table, buffer->pages, - buffer->pagecount, 0, - buffer->pagecount << PAGE_SHIFT, - GFP_KERNEL); - if (ret) { - kfree(a); - return ret; - } - - a->dev = attachment->dev; - INIT_LIST_HEAD(&a->list); - - attachment->priv = a; - - mutex_lock(&buffer->lock); - list_add(&a->list, &buffer->attachments); - mutex_unlock(&buffer->lock); - - return 0; -} - -static void dma_heap_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *attachment) -{ - struct dma_heaps_attachment *a = attachment->priv; - struct heap_helper_buffer *buffer = dmabuf->priv; - - mutex_lock(&buffer->lock); - list_del(&a->list); - mutex_unlock(&buffer->lock); - - sg_free_table(&a->table); - kfree(a); -} - -static -struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment, - enum dma_data_direction direction) -{ - struct dma_heaps_attachment *a = attachment->priv; - struct sg_table *table = &a->table; - int ret; - - ret = dma_map_sgtable(attachment->dev, table, direction, 0); - if (ret) - table = ERR_PTR(ret); - return table; -} - -static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, - struct sg_table *table, - enum dma_data_direction direction) -{ - dma_unmap_sgtable(attachment->dev, table, direction, 0); -} - -static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct heap_helper_buffer *buffer = vma->vm_private_data; - - if (vmf->pgoff > buffer->pagecount) - return VM_FAULT_SIGBUS; - - vmf->page = buffer->pages[vmf->pgoff]; - get_page(vmf->page); - - return 0; -} - -static const struct vm_operations_struct dma_heap_vm_ops = { - .fault = dma_heap_vm_fault, -}; - -static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - - if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) - return -EINVAL; - - vma->vm_ops = &dma_heap_vm_ops; - vma->vm_private_data = buffer; 
- - return 0; -} - -static void dma_heap_dma_buf_release(struct dma_buf *dmabuf) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - - dma_heap_buffer_destroy(buffer); -} - -static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - struct dma_heaps_attachment *a; - int ret = 0; - - mutex_lock(&buffer->lock); - - if (buffer->vmap_cnt) - invalidate_kernel_vmap_range(buffer->vaddr, buffer->size); - - list_for_each_entry(a, &buffer->attachments, list) { - dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents, - direction); - } - mutex_unlock(&buffer->lock); - - return ret; -} - -static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - struct dma_heaps_attachment *a; - - mutex_lock(&buffer->lock); - - if (buffer->vmap_cnt) - flush_kernel_vmap_range(buffer->vaddr, buffer->size); - - list_for_each_entry(a, &buffer->attachments, list) { - dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents, - direction); - } - mutex_unlock(&buffer->lock); - - return 0; -} - -static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - void *vaddr; - - mutex_lock(&buffer->lock); - vaddr = dma_heap_buffer_vmap_get(buffer); - mutex_unlock(&buffer->lock); - - if (!vaddr) - return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); - - return 0; -} - -static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) -{ - struct heap_helper_buffer *buffer = dmabuf->priv; - - mutex_lock(&buffer->lock); - dma_heap_buffer_vmap_put(buffer); - mutex_unlock(&buffer->lock); -} - -const struct dma_buf_ops heap_helper_ops = { - .map_dma_buf = dma_heap_map_dma_buf, - .unmap_dma_buf = dma_heap_unmap_dma_buf, - .mmap = dma_heap_mmap, - .release = dma_heap_dma_buf_release, - .attach = dma_heap_attach, - .detach = dma_heap_detach, - .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access, - .end_cpu_access = dma_heap_dma_buf_end_cpu_access, - .vmap = dma_heap_dma_buf_vmap, - .vunmap = dma_heap_dma_buf_vunmap, -}; diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h deleted file mode 100644 index 805d2df88024..000000000000 --- a/drivers/dma-buf/heaps/heap-helpers.h +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * DMABUF Heaps helper code - * - * Copyright (C) 2011 Google, Inc. - * Copyright (C) 2019 Linaro Ltd. 
- */ - -#ifndef _HEAP_HELPERS_H -#define _HEAP_HELPERS_H - -#include -#include - -/** - * struct heap_helper_buffer - helper buffer metadata - * @heap: back pointer to the heap the buffer came from - * @dmabuf: backing dma-buf for this buffer - * @size: size of the buffer - * @priv_virt pointer to heap specific private value - * @lock mutext to protect the data in this structure - * @vmap_cnt count of vmap references on the buffer - * @vaddr vmap'ed virtual address - * @pagecount number of pages in the buffer - * @pages list of page pointers - * @attachments list of device attachments - * - * @free heap callback to free the buffer - */ -struct heap_helper_buffer { - struct dma_heap *heap; - struct dma_buf *dmabuf; - size_t size; - - void *priv_virt; - struct mutex lock; - int vmap_cnt; - void *vaddr; - pgoff_t pagecount; - struct page **pages; - struct list_head attachments; - - void (*free)(struct heap_helper_buffer *buffer); -}; - -void init_heap_helper_buffer(struct heap_helper_buffer *buffer, - void (*free)(struct heap_helper_buffer *)); - -struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer, - int fd_flags); - -extern const struct dma_buf_ops heap_helper_ops; -#endif /* _HEAP_HELPERS_H */ diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 0bf688e3c023..17e0e9a68baf 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -3,7 +3,11 @@ * DMABUF System heap exporter * * Copyright (C) 2011 Google, Inc. - * Copyright (C) 2019 Linaro Ltd. + * Copyright (C) 2019, 2020 Linaro Ltd. + * + * Portions based off of Andrew Davis' SRAM heap: + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis */ #include @@ -15,87 +19,404 @@ #include #include #include -#include -#include +#include -#include "heap-helpers.h" +static struct dma_heap *sys_heap; -struct dma_heap *sys_heap; +struct system_heap_buffer { + struct dma_heap *heap; + struct list_head attachments; + struct mutex lock; + unsigned long len; + struct sg_table sg_table; + int vmap_cnt; + void *vaddr; +}; -static void system_heap_free(struct heap_helper_buffer *buffer) +struct dma_heap_attachment { + struct device *dev; + struct sg_table *table; + struct list_head list; + bool mapped; +}; + +#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \ + | __GFP_NORETRY) & ~__GFP_RECLAIM) \ + | __GFP_COMP) +#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP) +static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP}; +/* + * The selection of the orders used for allocation (1MB, 64K, 4K) is designed + * to match with the sizes often found in IOMMUs. Using order 4 pages instead + * of order 0 pages can significantly improve the performance of many IOMMUs + * by reducing TLB pressure and time spent updating page tables. 
+ */ +static const unsigned int orders[] = {8, 4, 0}; +#define NUM_ORDERS ARRAY_SIZE(orders) + +static struct sg_table *dup_sg_table(struct sg_table *table) { - pgoff_t pg; + struct sg_table *new_table; + int ret, i; + struct scatterlist *sg, *new_sg; - for (pg = 0; pg < buffer->pagecount; pg++) - __free_page(buffer->pages[pg]); - kfree(buffer->pages); + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); + if (!new_table) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); + if (ret) { + kfree(new_table); + return ERR_PTR(-ENOMEM); + } + + new_sg = new_table->sgl; + for_each_sgtable_sg(table, sg, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); + new_sg = sg_next(new_sg); + } + + return new_table; +} + +static int system_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + struct sg_table *table; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + table = dup_sg_table(&buffer->sg_table); + if (IS_ERR(table)) { + kfree(a); + return -ENOMEM; + } + + a->table = table; + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->list); + a->mapped = false; + + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void system_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&a->list); + mutex_unlock(&buffer->lock); + + sg_free_table(a->table); + kfree(a->table); + kfree(a); +} + +static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + struct sg_table *table = a->table; + int ret; + + ret = dma_map_sgtable(attachment->dev, table, direction, 0); + if (ret) + return ERR_PTR(ret); + + a->mapped = true; + return table; +} + +static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, 0); +} + +static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); + + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_cpu(a->dev, a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct dma_heap_attachment *a; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->len); + + list_for_each_entry(a, &buffer->attachments, list) { + if (!a->mapped) + continue; + dma_sync_sgtable_for_device(a->dev, a->table, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct system_heap_buffer *buffer = 
dmabuf->priv; + struct sg_table *table = &buffer->sg_table; + unsigned long addr = vma->vm_start; + struct sg_page_iter piter; + int ret; + + for_each_sgtable_page(table, &piter, vma->vm_pgoff) { + struct page *page = sg_page_iter_page(&piter); + + ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, + vma->vm_page_prot); + if (ret) + return ret; + addr += PAGE_SIZE; + if (addr >= vma->vm_end) + return 0; + } + return 0; +} + +static void *system_heap_do_vmap(struct system_heap_buffer *buffer) +{ + struct sg_table *table = &buffer->sg_table; + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + struct sg_page_iter piter; + void *vaddr; + + if (!pages) + return ERR_PTR(-ENOMEM); + + for_each_sgtable_page(table, &piter, 0) { + WARN_ON(tmp - pages >= npages); + *tmp++ = sg_page_iter_page(&piter); + } + + vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL); + vfree(pages); + + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + void *vaddr; + int ret = 0; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + dma_buf_map_set_vaddr(map, buffer->vaddr); + goto out; + } + + vaddr = system_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto out; + } + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; + dma_buf_map_set_vaddr(map, buffer->vaddr); +out: + mutex_unlock(&buffer->lock); + + return ret; +} + +static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); + dma_buf_map_clear(map); +} + +static void system_heap_dma_buf_release(struct dma_buf *dmabuf) +{ + struct system_heap_buffer *buffer = dmabuf->priv; + struct sg_table *table; + struct scatterlist *sg; + int i; + + table = &buffer->sg_table; + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + + __free_pages(page, compound_order(page)); + } + sg_free_table(table); kfree(buffer); } +static const struct dma_buf_ops system_heap_buf_ops = { + .attach = system_heap_attach, + .detach = system_heap_detach, + .map_dma_buf = system_heap_map_dma_buf, + .unmap_dma_buf = system_heap_unmap_dma_buf, + .begin_cpu_access = system_heap_dma_buf_begin_cpu_access, + .end_cpu_access = system_heap_dma_buf_end_cpu_access, + .mmap = system_heap_mmap, + .vmap = system_heap_vmap, + .vunmap = system_heap_vunmap, + .release = system_heap_dma_buf_release, +}; + +static struct page *alloc_largest_available(unsigned long size, + unsigned int max_order) +{ + struct page *page; + int i; + + for (i = 0; i < NUM_ORDERS; i++) { + if (size < (PAGE_SIZE << orders[i])) + continue; + if (max_order < orders[i]) + continue; + + page = alloc_pages(order_flags[i], orders[i]); + if (!page) + continue; + return page; + } + return NULL; +} + static int system_heap_allocate(struct dma_heap *heap, unsigned long len, unsigned long fd_flags, unsigned long heap_flags) { - struct heap_helper_buffer *helper_buffer; + struct system_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + unsigned long size_remaining = len; + unsigned int max_order = orders[0]; struct dma_buf *dmabuf; - int ret = -ENOMEM; - pgoff_t pg; + struct sg_table *table; + struct 
scatterlist *sg; + struct list_head pages; + struct page *page, *tmp_page; + int i, ret = -ENOMEM; - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); - if (!helper_buffer) + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) return -ENOMEM; - init_heap_helper_buffer(helper_buffer, system_heap_free); - helper_buffer->heap = heap; - helper_buffer->size = len; + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->heap = heap; + buffer->len = len; - helper_buffer->pagecount = len / PAGE_SIZE; - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, - sizeof(*helper_buffer->pages), - GFP_KERNEL); - if (!helper_buffer->pages) { - ret = -ENOMEM; - goto err0; - } - - for (pg = 0; pg < helper_buffer->pagecount; pg++) { + INIT_LIST_HEAD(&pages); + i = 0; + while (size_remaining > 0) { /* * Avoid trying to allocate memory if the process - * has been killed by by SIGKILL + * has been killed by SIGKILL */ if (fatal_signal_pending(current)) - goto err1; + goto free_buffer; - helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!helper_buffer->pages[pg]) - goto err1; + page = alloc_largest_available(size_remaining, max_order); + if (!page) + goto free_buffer; + + list_add_tail(&page->lru, &pages); + size_remaining -= page_size(page); + max_order = compound_order(page); + i++; + } + + table = &buffer->sg_table; + if (sg_alloc_table(table, i, GFP_KERNEL)) + goto free_buffer; + + sg = table->sgl; + list_for_each_entry_safe(page, tmp_page, &pages, lru) { + sg_set_page(sg, page, page_size(page), 0); + sg = sg_next(sg); + list_del(&page->lru); } /* create the dmabuf */ - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); + exp_info.ops = &system_heap_buf_ops; + exp_info.size = buffer->len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { ret = PTR_ERR(dmabuf); - goto err1; + goto free_pages; } - helper_buffer->dmabuf = dmabuf; - ret = dma_buf_fd(dmabuf, fd_flags); if (ret < 0) { dma_buf_put(dmabuf); /* just return, as put will call release and that will free */ return ret; } - return ret; -err1: - while (pg > 0) - __free_page(helper_buffer->pages[--pg]); - kfree(helper_buffer->pages); -err0: - kfree(helper_buffer); +free_pages: + for_each_sgtable_sg(table, sg, i) { + struct page *p = sg_page(sg); + + __free_pages(p, compound_order(p)); + } + sg_free_table(table); +free_buffer: + list_for_each_entry_safe(page, tmp_page, &pages, lru) + __free_pages(page, compound_order(page)); + kfree(buffer); return ret; } @@ -107,7 +428,6 @@ static const struct dma_heap_ops system_heap_ops = { static int system_heap_create(void) { struct dma_heap_export_info exp_info; - int ret = 0; exp_info.name = "system"; exp_info.ops = &system_heap_ops; @@ -115,9 +435,9 @@ static int system_heap_create(void) sys_heap = dma_heap_add(&exp_info); if (IS_ERR(sys_heap)) - ret = PTR_ERR(sys_heap); + return PTR_ERR(sys_heap); - return ret; + return 0; } module_init(system_heap_create); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6e2953233231..f9c81bc21ba4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1280,6 +1280,8 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +int amdgpu_info_ioctl(struct drm_device *dev, void *data, + struct 
drm_file *filp); /* * functions used by amdgpu_encoder.c diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index ebdab31f9de9..31506a1678c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1533,8 +1533,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); - const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b848f9e97613..4d8f19ab1014 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -551,25 +551,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; - if ((old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) || - (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM)) { - hop->fpfn = 0; - hop->lpfn = 0; - hop->mem_type = TTM_PL_TT; - hop->flags = 0; - return -EMULTIHOP; - } - if (new_mem->mem_type == TTM_PL_TT) { r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); if (r) return r; } - amdgpu_bo_move_notify(bo, evict, new_mem); - /* Can't move a pinned BO */ abo = ttm_to_amdgpu_bo(bo); if (WARN_ON_ONCE(abo->tbo.pin_count > 0)) @@ -579,24 +566,23 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ttm_bo_move_null(bo, new_mem); - return 0; + goto out; } if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT) { ttm_bo_move_null(bo, new_mem); - return 0; + goto out; } - if (old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) { r = ttm_bo_wait_ctx(bo, ctx); if (r) - goto fail; + return r; amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); - return 0; + goto out; } if (old_mem->mem_type == AMDGPU_PL_GDS || @@ -607,27 +593,37 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, new_mem->mem_type == AMDGPU_PL_OA) { /* Nothing to save here */ ttm_bo_move_null(bo, new_mem); - return 0; + goto out; } - if (!adev->mman.buffer_funcs_enabled) { + if (adev->mman.buffer_funcs_enabled) { + if (((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM))) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + + r = amdgpu_move_blit(bo, evict, new_mem, old_mem); + } else { r = -ENODEV; - goto memcpy; } - r = amdgpu_move_blit(bo, evict, new_mem, old_mem); if (r) { -memcpy: /* Check that all memory is CPU accessible */ if (!amdgpu_mem_visible(adev, old_mem) || !amdgpu_mem_visible(adev, new_mem)) { pr_err("Move buffer fallback to memcpy unavailable\n"); - goto fail; + return r; } r = ttm_bo_move_memcpy(bo, ctx, new_mem); if (r) - goto fail; + return r; } if (bo->type == ttm_bo_type_device && @@ -639,14 +635,11 @@ memcpy: abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; } +out: /* update statistics */ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); + amdgpu_bo_move_notify(bo, evict, new_mem); return 0; 
-fail: - swap(*new_mem, bo->mem); - amdgpu_bo_move_notify(bo, false, new_mem); - swap(*new_mem, bo->mem); - return r; } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 6f975c16779d..8ab0b9060d2b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -24,6 +24,7 @@ */ #include +#include #include #include #include @@ -252,8 +253,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) static struct drm_encoder * dm_mst_atomic_best_encoder(struct drm_connector *connector, - struct drm_connector_state *connector_state) + struct drm_atomic_state *state) { + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, + connector); struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ddd0e3239150..ba1507036f26 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -122,7 +122,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, continue; if (funcs->atomic_best_encoder) - new_encoder = funcs->atomic_best_encoder(connector, new_conn_state); + new_encoder = funcs->atomic_best_encoder(connector, + state); else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); else @@ -345,8 +346,7 @@ update_connector_routing(struct drm_atomic_state *state, funcs = connector->helper_private; if (funcs->atomic_best_encoder) - new_encoder = funcs->atomic_best_encoder(connector, - new_connector_state); + new_encoder = funcs->atomic_best_encoder(connector, state); else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); else @@ -1313,7 +1313,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev, if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) { WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); - funcs->atomic_commit(connector, new_conn_state); + funcs->atomic_commit(connector, old_state); } } } diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index ae2234aae93d..5c2141e9a9f4 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -196,10 +196,10 @@ * exposed and assumed to be black). 
* * SCALING_FILTER: - * * Indicates scaling filter to be used for plane scaler * * The value of this property can be one of the following: + * * Default: * Driver's default scaling filter * Nearest Neighbor: diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 7a01d0918861..aeb1327e3077 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -77,6 +77,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, if ((entry->map->offset & 0xffffffff) == (map->offset & 0xffffffff)) return entry; + break; default: /* Make gcc happy */ ; } diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index fe573acf1067..ce45e380f4a2 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -314,9 +314,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map struct dma_buf_map *map = &buffer->map; int ret; - if (dma_buf_map_is_set(map)) - goto out; - /* * FIXME: The dependency on GEM here isn't required, we could * convert the driver handle to a dma-buf instead and use the @@ -329,7 +326,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map if (ret) return ret; -out: *map_copy = *map; return 0; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index f927976eca50..74090fc3aa55 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -230,14 +230,14 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc) * * Setting MODE_ID to 0 will release reserved resources for the CRTC. * SCALING_FILTER: - * Atomic property for setting the scaling filter for CRTC scaler + * Atomic property for setting the scaling filter for CRTC scaler * - * The value of this property can be one of the following: - * Default: - * Driver's default scaling filter - * Nearest Neighbor: - * Nearest Neighbor scaling filter + * The value of this property can be one of the following: * + * Default: + * Driver's default scaling filter + * Nearest Neighbor: + * Nearest Neighbor scaling filter */ /** diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 25edf670867c..4b8119510687 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -371,9 +371,9 @@ static void drm_fb_helper_resume_worker(struct work_struct *work) console_unlock(); } -static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, - struct drm_clip_rect *clip, - struct dma_buf_map *dst) +static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip, + struct dma_buf_map *dst) { struct drm_framebuffer *fb = fb_helper->fb; unsigned int cpp = fb->format->cpp[0]; @@ -391,40 +391,86 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, } } -static void drm_fb_helper_dirty_work(struct work_struct *work) +static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip) { - struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, - dirty_work); - struct drm_clip_rect *clip = &helper->dirty_clip; - struct drm_clip_rect clip_copy; - unsigned long flags; - struct dma_buf_map map; + struct drm_client_buffer *buffer = fb_helper->buffer; + struct dma_buf_map map, dst; int ret; - spin_lock_irqsave(&helper->dirty_lock, flags); + /* + * We have to pin the client buffer to its current location while + * flushing the shadow buffer. 
In the general case, concurrent + * modesetting operations could try to move the buffer and would + * fail. The modeset has to be serialized by acquiring the reservation + * object of the underlying BO here. + * + * For fbdev emulation, we only have to protect against fbdev modeset + * operations. Nothing else will involve the client buffer's BO. So it + * is sufficient to acquire struct drm_fb_helper.lock here. + */ + mutex_lock(&fb_helper->lock); + + ret = drm_client_buffer_vmap(buffer, &map); + if (ret) + goto out; + + dst = map; + drm_fb_helper_damage_blit_real(fb_helper, clip, &dst); + + drm_client_buffer_vunmap(buffer); + +out: + mutex_unlock(&fb_helper->lock); + + return ret; +} + +static void drm_fb_helper_damage_work(struct work_struct *work) +{ + struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, + damage_work); + struct drm_device *dev = helper->dev; + struct drm_clip_rect *clip = &helper->damage_clip; + struct drm_clip_rect clip_copy; + unsigned long flags; + int ret; + + spin_lock_irqsave(&helper->damage_lock, flags); clip_copy = *clip; clip->x1 = clip->y1 = ~0; clip->x2 = clip->y2 = 0; - spin_unlock_irqrestore(&helper->dirty_lock, flags); + spin_unlock_irqrestore(&helper->damage_lock, flags); - /* call dirty callback only when it has been really touched */ - if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { + /* Call damage handlers only if necessary */ + if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)) + return; - /* Generic fbdev uses a shadow buffer */ - if (helper->buffer) { - ret = drm_client_buffer_vmap(helper->buffer, &map); - if (ret) - return; - drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map); - } - - if (helper->fb->funcs->dirty) - helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, - &clip_copy, 1); - - if (helper->buffer) - drm_client_buffer_vunmap(helper->buffer); + if (helper->buffer) { + ret = drm_fb_helper_damage_blit(helper, &clip_copy); + if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) + goto err; } + + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + goto err; + } + + return; + +err: + /* + * Restore damage clip rectangle on errors. The next run + * of the damage worker will perform the update. 
+ */ + spin_lock_irqsave(&helper->damage_lock, flags); + clip->x1 = min_t(u32, clip->x1, clip_copy.x1); + clip->y1 = min_t(u32, clip->y1, clip_copy.y1); + clip->x2 = max_t(u32, clip->x2, clip_copy.x2); + clip->y2 = max_t(u32, clip->y2, clip_copy.y2); + spin_unlock_irqrestore(&helper->damage_lock, flags); } /** @@ -440,10 +486,10 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs) { INIT_LIST_HEAD(&helper->kernel_fb_list); - spin_lock_init(&helper->dirty_lock); + spin_lock_init(&helper->damage_lock); INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker); - INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work); - helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0; + INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work); + helper->damage_clip.x1 = helper->damage_clip.y1 = ~0; mutex_init(&helper->lock); helper->funcs = funcs; helper->dev = dev; @@ -579,7 +625,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) return; cancel_work_sync(&fb_helper->resume_work); - cancel_work_sync(&fb_helper->dirty_work); + cancel_work_sync(&fb_helper->damage_work); info = fb_helper->fbdev; if (info) { @@ -614,30 +660,30 @@ static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) fb->funcs->dirty; } -static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, - u32 width, u32 height) +static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y, + u32 width, u32 height) { struct drm_fb_helper *helper = info->par; - struct drm_clip_rect *clip = &helper->dirty_clip; + struct drm_clip_rect *clip = &helper->damage_clip; unsigned long flags; if (!drm_fbdev_use_shadow_fb(helper)) return; - spin_lock_irqsave(&helper->dirty_lock, flags); + spin_lock_irqsave(&helper->damage_lock, flags); clip->x1 = min_t(u32, clip->x1, x); clip->y1 = min_t(u32, clip->y1, y); clip->x2 = max_t(u32, clip->x2, x + width); clip->y2 = max_t(u32, clip->y2, y + height); - spin_unlock_irqrestore(&helper->dirty_lock, flags); + spin_unlock_irqrestore(&helper->damage_lock, flags); - schedule_work(&helper->dirty_work); + schedule_work(&helper->damage_work); } /** * drm_fb_helper_deferred_io() - fbdev deferred_io callback function * @info: fb_info struct pointer - * @pagelist: list of dirty mmap framebuffer pages + * @pagelist: list of mmap framebuffer pages that have to be flushed * * This function is used as the &fb_deferred_io.deferred_io * callback function for flushing the fbdev mmap writes. 
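The damage-tracking scheme converted above follows a classic accumulate-and-swap pattern: writers only ever grow the pending clip rectangle under damage_lock, the worker atomically trades the rectangle for an empty one before flushing, and the error path merges the failed rectangle back so the next run retries it. Below is a minimal userspace model of that pattern, with a pthread mutex standing in for the spinlock (all names here are illustrative, not kernel API):

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

struct clip_rect { unsigned int x1, y1, x2, y2; };

/* An "empty" rectangle: the identity element for the min/max merge below. */
#define CLIP_EMPTY ((struct clip_rect){ UINT_MAX, UINT_MAX, 0, 0 })

static struct clip_rect damage_clip = { UINT_MAX, UINT_MAX, 0, 0 };
static pthread_mutex_t damage_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

/* Writer side, cf. drm_fb_helper_damage(): grow the pending rectangle. */
static void add_damage(unsigned int x, unsigned int y, unsigned int w, unsigned int h)
{
	pthread_mutex_lock(&damage_lock);
	damage_clip.x1 = min_u(damage_clip.x1, x);
	damage_clip.y1 = min_u(damage_clip.y1, y);
	damage_clip.x2 = max_u(damage_clip.x2, x + w);
	damage_clip.y2 = max_u(damage_clip.y2, y + h);
	pthread_mutex_unlock(&damage_lock);
}

/* Worker side, cf. drm_fb_helper_damage_work(): swap out, flush, merge back on error. */
static void damage_work(int flush_fails)
{
	struct clip_rect copy;

	pthread_mutex_lock(&damage_lock);
	copy = damage_clip;
	damage_clip = CLIP_EMPTY;
	pthread_mutex_unlock(&damage_lock);

	if (!(copy.x1 < copy.x2 && copy.y1 < copy.y2))
		return;	/* nothing accumulated since the last run */

	if (!flush_fails) {
		printf("flush (%u,%u)-(%u,%u)\n", copy.x1, copy.y1, copy.x2, copy.y2);
		return;
	}

	/* Flush failed: merge the rectangle back for the next run. */
	pthread_mutex_lock(&damage_lock);
	damage_clip.x1 = min_u(damage_clip.x1, copy.x1);
	damage_clip.y1 = min_u(damage_clip.y1, copy.y1);
	damage_clip.x2 = max_u(damage_clip.x2, copy.x2);
	damage_clip.y2 = max_u(damage_clip.y2, copy.y2);
	pthread_mutex_unlock(&damage_lock);
}

Starting from (UINT_MAX, UINT_MAX, 0, 0) — the kernel uses ~0 for x1/y1 — makes an empty rectangle the identity for the merge, which is why the worker and the error path can share the same min/max arithmetic and a simple x1 < x2 && y1 < y2 test distinguishes "nothing to flush".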
@@ -662,7 +708,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info, y1 = min / info->fix.line_length; y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length), info->var.yres); - drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1); + drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1); } } EXPORT_SYMBOL(drm_fb_helper_deferred_io); @@ -699,8 +745,7 @@ ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, ret = fb_sys_write(info, buf, count, ppos); if (ret > 0) - drm_fb_helper_dirty(info, 0, 0, info->var.xres, - info->var.yres); + drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres); return ret; } @@ -717,8 +762,7 @@ void drm_fb_helper_sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { sys_fillrect(info, rect); - drm_fb_helper_dirty(info, rect->dx, rect->dy, - rect->width, rect->height); + drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); } EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); @@ -733,8 +777,7 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info, const struct fb_copyarea *area) { sys_copyarea(info, area); - drm_fb_helper_dirty(info, area->dx, area->dy, - area->width, area->height); + drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); } EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); @@ -749,8 +792,7 @@ void drm_fb_helper_sys_imageblit(struct fb_info *info, const struct fb_image *image) { sys_imageblit(info, image); - drm_fb_helper_dirty(info, image->dx, image->dy, - image->width, image->height); + drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); } EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); @@ -765,8 +807,7 @@ void drm_fb_helper_cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { cfb_fillrect(info, rect); - drm_fb_helper_dirty(info, rect->dx, rect->dy, - rect->width, rect->height); + drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); @@ -781,8 +822,7 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { cfb_copyarea(info, area); - drm_fb_helper_dirty(info, area->dx, area->dy, - area->width, area->height); + drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); @@ -797,8 +837,7 @@ void drm_fb_helper_cfb_imageblit(struct fb_info *info, const struct fb_image *image) { cfb_imageblit(info, image); - drm_fb_helper_dirty(info, image->dx, image->dy, - image->width, image->height); + drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); @@ -1988,14 +2027,19 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) if (!fb_helper->dev) return; - if (fbi && fbi->fbdefio) { - fb_deferred_io_cleanup(fbi); - shadow = fbi->screen_buffer; + if (fbi) { + if (fbi->fbdefio) + fb_deferred_io_cleanup(fbi); + if (drm_fbdev_use_shadow_fb(fb_helper)) + shadow = fbi->screen_buffer; } drm_fb_helper_fini(fb_helper); - vfree(shadow); + if (shadow) + vfree(shadow); + else + drm_client_buffer_vunmap(fb_helper->buffer); drm_client_framebuffer_delete(fb_helper->buffer); } @@ -2189,6 +2233,9 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, if (ret > 0) *ppos += ret; + if (ret > 0) + drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual); + return ret ? 
ret : err; } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 499189c48f0b..9825c378dfa6 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -51,13 +51,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) if (!obj) return ERR_PTR(-ENOMEM); + shmem = to_drm_gem_shmem_obj(obj); + if (!obj->funcs) obj->funcs = &drm_gem_shmem_funcs; - if (private) + if (private) { drm_gem_private_object_init(dev, obj, size); - else + shmem->map_wc = false; /* dma-buf mappings always use writecombine */ + } else { ret = drm_gem_object_init(dev, obj, size); + } if (ret) goto err_free; @@ -65,7 +69,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) if (ret) goto err_release; - shmem = to_drm_gem_shmem_obj(obj); mutex_init(&shmem->pages_lock); mutex_init(&shmem->vmap_lock); INIT_LIST_HEAD(&shmem->madv_list); @@ -284,7 +287,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct if (ret) goto err_zero_use; - if (!shmem->map_cached) + if (shmem->map_wc) prot = pgprot_writecombine(prot); shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot); @@ -476,33 +479,6 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_shmem_purge); -/** - * drm_gem_shmem_create_object_cached - Create a shmem buffer object with - * cached mappings - * @dev: DRM device - * @size: Size of the object to allocate - * - * By default, shmem buffer objects use writecombine mappings. This - * function implements struct drm_driver.gem_create_object for shmem - * buffer objects with cached mappings. - * - * Returns: - * A struct drm_gem_shmem_object * on success or NULL negative on failure. 
- */ -struct drm_gem_object * -drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size) -{ - struct drm_gem_shmem_object *shmem; - - shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); - if (!shmem) - return NULL; - shmem->map_cached = true; - - return &shmem->base; -} -EXPORT_SYMBOL(drm_gem_shmem_create_object_cached); - /** * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object * @file: DRM file structure to create the dumb buffer for @@ -626,7 +602,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - if (!shmem->map_cached) + if (shmem->map_wc) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); vma->vm_ops = &drm_gem_shmem_vm_ops; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index bbd235473645..6d38c5c17f23 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -145,10 +145,8 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, * address_space (so unmap_mapping_range does what we want, * in particular in the case of mmap'd dmabufs) */ - fput(vma->vm_file); - get_file(etnaviv_obj->base.filp); vma->vm_pgoff = 0; - vma->vm_file = etnaviv_obj->base.filp; + vma_set_file(vma, etnaviv_obj->base.filp); vma->vm_page_prot = vm_page_prot; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 0c8684634fca..27f04aed8764 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -23,6 +23,7 @@ * */ +#include #include #include #include @@ -719,11 +720,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, } static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, - struct drm_connector_state *state) + struct drm_atomic_state *state) { + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, + connector); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - struct intel_crtc *crtc = to_intel_crtc(state->crtc); + struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc); return &intel_dp->mst_encoders[crtc->pipe]->base.base; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 0dd477e56573..04e9c04545ad 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -114,8 +114,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * if (ret) return ret; - fput(vma->vm_file); - vma->vm_file = get_file(obj->base.filp); + vma_set_file(vma, obj->base.filp); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 3d69e51f3e4d..ec28a6cde49b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -893,8 +893,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) * requires avoiding extraneous references to their filp, hence why * we prefer to use an anonymous file for their mmaps. */ - fput(vma->vm_file); - vma->vm_file = anon; + vma_set_file(vma, anon); + /* Drop the initial creation reference, the vma is now holding one. 
*/ + fput(anon); switch (mmo->mmap_type) { case I915_MMAP_TYPE_WC: diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h index c642ae17837f..1e582270c6ea 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-dev.h +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h @@ -7,6 +7,7 @@ #define __DCSS_PRV_H__ #include +#include #include #include
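A pattern repeated throughout this series (dma_buf_mmap(), etnaviv_gem_mmap_obj(), i915_gem_dmabuf_mmap(), i915_gem_mmap()) is replacing an open-coded get_file()/fput() pair with vma_set_file(). The property these call sites rely on is reference ordering: the helper takes its own reference on the new file before the reference held through vma->vm_file is dropped, so the swap is safe even if the old and new file are the same. A self-contained toy model of those semantics (the struct file stand-in and its refcounting are illustrative, not the mm implementation):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for struct file refcounting (illustrative only). */
struct file { int refcount; };

static struct file *file_new(void)
{
	struct file *f = calloc(1, sizeof(*f));

	f->refcount = 1;	/* creation reference */
	return f;
}

static struct file *get_file(struct file *f) { f->refcount++; return f; }

static void fput(struct file *f)
{
	if (--f->refcount == 0)
		free(f);
}

struct vm_area_struct { struct file *vm_file; };

/*
 * Model of vma_set_file(): reference the new file first, then install
 * it, then drop the old reference. The ordering makes replacing a file
 * with itself harmless.
 */
static void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	struct file *old = vma->vm_file;

	get_file(file);
	vma->vm_file = file;
	if (old)
		fput(old);
}

int main(void)
{
	struct file *orig = file_new();	/* reference owned by the vma */
	struct file *anon = file_new();	/* creation reference */
	struct vm_area_struct vma = { .vm_file = orig };

	vma_set_file(&vma, anon);	/* orig released, anon now at 2 refs */
	fput(anon);			/* drop the creation reference */
	printf("anon refs: %d\n", anon->refcount);	/* 1, held by the vma */
	fput(vma.vm_file);		/* teardown */
	return 0;
}

This is also why the i915_gem_mmap() hunk above gains an explicit fput(anon) after the conversion: vma_set_file() takes its own reference, so the creation reference must now be dropped by the caller rather than donated to the vma.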