Merge tag 'v5.7-rc4' into core

Linux 5.7-rc4
This commit is contained in: commit ec9b40cffd
@@ -182,12 +182,15 @@ fix_padding
 space-efficient. If this option is not present, large padding is
 used - that is for compatibility with older kernels.

+allow_discards
+	Allow block discard requests (a.k.a. TRIM) for the integrity device.
+	Discards are only allowed to devices using internal hash.
+
-The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
-be changed when reloading the target (load an inactive table and swap the
-tables with suspend and resume). The other arguments should not be changed
-when reloading the target because the layout of disk data depend on them
-and the reloaded target would be non-functional.
+The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
+allow_discards can be changed when reloading the target (load an inactive
+table and swap the tables with suspend and resume). The other arguments
+should not be changed when reloading the target because the layout of disk
+data depend on them and the reloaded target would be non-functional.


 The layout of the formatted block device:
@@ -22,9 +22,7 @@ properties:
     const: socionext,uniphier-xdmac

   reg:
-    items:
-      - description: XDMAC base register region (offset and length)
-      - description: XDMAC extension register region (offset and length)
+    maxItems: 1

   interrupts:
     maxItems: 1

@@ -49,12 +47,13 @@ required:
   - reg
   - interrupts
   - "#dma-cells"
+  - dma-channels

 examples:
   - |
     xdmac: dma-controller@5fc10000 {
         compatible = "socionext,uniphier-xdmac";
-        reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
+        reg = <0x5fc10000 0x5300>;
         interrupts = <0 188 4>;
         #dma-cells = <2>;
         dma-channels = <16>;
@@ -3657,7 +3657,7 @@ L: linux-btrfs@vger.kernel.org
 S: Maintained
 W: http://btrfs.wiki.kernel.org/
 Q: http://patchwork.kernel.org/project/linux-btrfs/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
 F: Documentation/filesystems/btrfs.rst
 F: fs/btrfs/
 F: include/linux/btrfs*
Makefile

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*
@@ -32,7 +32,7 @@ UBSAN_SANITIZE := n
 OBJECT_FILES_NON_STANDARD := y
 KCOV_INSTRUMENT := n

-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
+CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables

 ifneq ($(c-gettimeofday-y),)
 CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
@@ -60,7 +60,7 @@ config RISCV
	select ARCH_HAS_GIGANTIC_PAGE
	select ARCH_HAS_SET_DIRECT_MAP
	select ARCH_HAS_SET_MEMORY
-	select ARCH_HAS_STRICT_KERNEL_RWX
+	select ARCH_HAS_STRICT_KERNEL_RWX if MMU
	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
	select SPARSEMEM_STATIC if 32BIT
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -102,7 +102,7 @@ void sbi_shutdown(void)
 {
	sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
 }
-EXPORT_SYMBOL(sbi_set_timer);
+EXPORT_SYMBOL(sbi_shutdown);

 /**
  * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.

@@ -113,7 +113,7 @@ void sbi_clear_ipi(void)
 {
	sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
 }
-EXPORT_SYMBOL(sbi_shutdown);
+EXPORT_SYMBOL(sbi_clear_ipi);

 /**
  * sbi_set_timer_v01() - Program the timer for next timer event.

@@ -167,6 +167,11 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,

	return result;
 }
+
+static void sbi_set_power_off(void)
+{
+	pm_power_off = sbi_shutdown;
+}
 #else
 static void __sbi_set_timer_v01(uint64_t stime_value)
 {

@@ -191,6 +196,8 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,

	return 0;
 }
+
+static void sbi_set_power_off(void) {}
 #endif /* CONFIG_RISCV_SBI_V01 */

 static void __sbi_set_timer_v02(uint64_t stime_value)

@@ -540,16 +547,12 @@ static inline long sbi_get_firmware_version(void)
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
 }

-static void sbi_power_off(void)
-{
-	sbi_shutdown();
-}
-
 int __init sbi_init(void)
 {
	int ret;

-	pm_power_off = sbi_power_off;
+	sbi_set_power_off();
	ret = sbi_get_spec_version();
	if (ret > 0)
		sbi_spec_version = ret;
@@ -12,6 +12,8 @@
 #include <linux/stacktrace.h>
 #include <linux/ftrace.h>

+register unsigned long sp_in_global __asm__("sp");
+
 #ifdef CONFIG_FRAME_POINTER

 struct stackframe {

@@ -19,8 +21,6 @@ struct stackframe {
	unsigned long ra;
 };

-register unsigned long sp_in_global __asm__("sp");
-
 void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(unsigned long, void *), void *arg)
 {
@@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
	$(call if_changed,vdsold)

 # We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
+# table and layout of the linked DSO. With ld --just-symbols we can then
+# refer to these symbols in the kernel code rather than hand-coded addresses.

 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
	-Wl,--build-id -Wl,--hash-style=both
 $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
	$(call if_changed,vdsold)

-LDFLAGS_vdso-syms.o := -r -R
+LDFLAGS_vdso-syms.o := -r --just-symbols
 $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
	$(call if_changed,ld)
@@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
 {
	mm_segment_t old_fs;
	unsigned long asce, cr;
+	unsigned long flags;

	old_fs = current->thread.mm_segment;
	if (old_fs & 1)
		return old_fs;
+	/* protect against a concurrent page table upgrade */
+	local_irq_save(flags);
	current->thread.mm_segment |= 1;
	asce = S390_lowcore.kernel_asce;
	if (likely(old_fs == USER_DS)) {

@@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
		__ctl_load(asce, 7, 7);
		set_cpu_flag(CIF_ASCE_SECONDARY);
	}
+	local_irq_restore(flags);
	return old_fs;
 }
 EXPORT_SYMBOL(enable_sacf_uaccess);
@@ -70,8 +70,20 @@ static void __crst_table_upgrade(void *arg)
 {
	struct mm_struct *mm = arg;

-	if (current->active_mm == mm)
-		set_user_asce(mm);
+	/* we must change all active ASCEs to avoid the creation of new TLBs */
+	if (current->active_mm == mm) {
+		S390_lowcore.user_asce = mm->context.asce;
+		if (current->thread.mm_segment == USER_DS) {
+			__ctl_load(S390_lowcore.user_asce, 1, 1);
+			/* Mark user-ASCE present in CR1 */
+			clear_cpu_flag(CIF_ASCE_PRIMARY);
+		}
+		if (current->thread.mm_segment == USER_DS_SACF) {
+			__ctl_load(S390_lowcore.user_asce, 7, 7);
+			/* enable_sacf_uaccess does all or nothing */
+			WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
+		}
+	}
	__tlb_flush_local();
 }
@@ -73,7 +73,8 @@ static int hv_cpu_init(unsigned int cpu)
	struct page *pg;

	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
-	pg = alloc_page(GFP_KERNEL);
+	/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
+	pg = alloc_page(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(!pg))
		return -ENOMEM;
	*input_arg = page_address(pg);
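A hedged aside on the one-line fix above (editor's sketch, not part of the patch; the helper name is invented): GFP_KERNEL allocations may sleep, and sleeping with IRQs disabled is a bug, so any path reachable from syscore resume has to fall back to the non-sleeping GFP_ATOMIC pool.

	#include <linux/gfp.h>
	#include <linux/interrupt.h>

	static void *alloc_input_arg_page(void)
	{
		/* Pick the allocation class from the calling context. */
		gfp_t flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
		struct page *pg = alloc_page(flags);

		return pg ? page_address(pg) : NULL;	/* NULL when allocation fails */
	}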
@@ -254,6 +255,7 @@ static int __init hv_pci_init(void)
 static int hv_suspend(void)
 {
	union hv_x64_msr_hypercall_contents hypercall_msr;
+	int ret;

	/*
	 * Reset the hypercall page as it is going to be invalidated

@@ -270,12 +272,17 @@ static int hv_suspend(void)
	hypercall_msr.enable = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

-	return 0;
+	ret = hv_cpu_die(0);
+	return ret;
 }

 static void hv_resume(void)
 {
	union hv_x64_msr_hypercall_contents hypercall_msr;
+	int ret;
+
+	ret = hv_cpu_init(0);
+	WARN_ON(ret);

	/* Re-enable the hypercall page */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

@@ -288,6 +295,7 @@ static void hv_resume(void)
	hv_hypercall_pg_saved = NULL;
 }

+/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
 static struct syscore_ops hv_syscore_ops = {
	.suspend	= hv_suspend,
	.resume		= hv_resume,
@@ -35,6 +35,8 @@ typedef int (*hyperv_fill_flush_list_func)(
	rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
 #define hv_set_synint_state(int_num, val) \
	wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
+#define hv_recommend_using_aeoi() \
+	(!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))

 #define hv_get_crash_ctl(val) \
	rdmsrl(HV_X64_MSR_CRASH_CTL, val)
@@ -496,7 +496,7 @@ int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev)

	if (!disk_part_scan_enabled(disk))
		return 0;
-	if (bdev->bd_part_count || bdev->bd_openers > 1)
+	if (bdev->bd_part_count)
		return -EBUSY;
	res = invalidate_partition(disk, 0);
	if (res)
@@ -273,13 +273,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
 end:
	if (result) {
		dev_warn(&device->dev, "Failed to change power state to %s\n",
-			 acpi_power_state_string(state));
+			 acpi_power_state_string(target_state));
	} else {
		device->power.state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Device [%s] transitioned to %s\n",
				  device->pnp.bus_id,
-				  acpi_power_state_string(state)));
+				  acpi_power_state_string(target_state)));
	}

	return result;
@@ -1059,7 +1059,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,

	update_turbo_state();
	if (global.turbo_disabled) {
-		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		mutex_unlock(&intel_pstate_driver_lock);
		return -EPERM;
@@ -963,10 +963,12 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct aead_edesc *edesc;
	int ecode = 0;
+	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = rctx->edesc;
+	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

@@ -979,7 +981,7 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
-	if (!edesc->bklog)
+	if (!has_bklog)
		aead_request_complete(req, ecode);
	else
		crypto_finalize_aead_request(jrp->engine, req, ecode);

@@ -995,10 +997,12 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;
+	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = rctx->edesc;
+	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

@@ -1028,7 +1032,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
-	if (!edesc->bklog)
+	if (!has_bklog)
		skcipher_request_complete(req, ecode);
	else
		crypto_finalize_skcipher_request(jrp->engine, req, ecode);

@@ -1711,7 +1715,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,

	if (ivsize || mapped_dst_nents > 1)
		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
-				    mapped_dst_nents);
+				    mapped_dst_nents - 1 + !!ivsize);

	if (sec4_sg_bytes) {
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
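Every has_bklog hunk in this driver is the same use-after-free fix: the unmap/free path can release the extended descriptor before the final completion decision, so the flag is snapshotted first. A minimal sketch of the idiom with hypothetical names (edesc_demo, complete_*), not the driver's code:

	#include <linux/slab.h>

	void complete_directly(void);
	void complete_via_engine(void);

	struct edesc_demo {
		bool bklog;	/* request was backlogged via the crypto engine */
	};

	static void crypt_done_demo(struct edesc_demo *edesc)
	{
		bool has_bklog = edesc->bklog;	/* snapshot while edesc is alive */

		kfree(edesc);			/* descriptor is gone from here on */

		if (!has_bklog)
			complete_directly();	/* reads only the local copy */
		else
			complete_via_engine();
	}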
@@ -583,10 +583,12 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int ecode = 0;
+	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
+	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

@@ -603,7 +605,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
-	if (!edesc->bklog)
+	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);

@@ -632,10 +634,12 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
+	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
+	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

@@ -663,7 +667,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
-	if (!edesc->bklog)
+	if (!has_bklog)
		req->base.complete(&req->base, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
@@ -121,11 +121,13 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
+	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
+	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);

@@ -135,7 +137,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
-	if (!edesc->bklog)
+	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);

@@ -152,11 +154,13 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
+	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
+	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:

@@ -176,7 +180,7 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
-	if (!edesc->bklog)
+	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
@@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file,

		return ret;

-	case DMA_BUF_SET_NAME:
+	case DMA_BUF_SET_NAME_A:
+	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:

@@ -655,8 +656,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
- * @importer_ops	[in]	importer operations for the attachment
- * @importer_priv	[in]	importer private pointer for the attachment
+ * @importer_ops:	[in]	importer operations for the attachment
+ * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
@@ -241,7 +241,8 @@ config FSL_RAID

 config HISI_DMA
	tristate "HiSilicon DMA Engine support"
-	depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+	depends on ARM64 || COMPILE_TEST
+	depends on PCI_MSI
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
@@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev)
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
-	if (atomic_dec_and_test(chan_dev->idr_ref)) {
-		ida_free(&dma_ida, chan_dev->dev_id);
-		kfree(chan_dev->idr_ref);
-	}
	kfree(chan_dev);
 }

@@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
 }

 static int __dma_async_device_channel_register(struct dma_device *device,
-					       struct dma_chan *chan,
-					       int chan_id)
+					       struct dma_chan *chan)
 {
	int rc = 0;
-	int chancnt = device->chancnt;
-	atomic_t *idr_ref;
-	struct dma_chan *tchan;
-
-	tchan = list_first_entry_or_null(&device->channels,
-					 struct dma_chan, device_node);
-	if (!tchan)
-		return -ENODEV;
-
-	if (tchan->dev) {
-		idr_ref = tchan->dev->idr_ref;
-	} else {
-		idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-		if (!idr_ref)
-			return -ENOMEM;
-		atomic_set(idr_ref, 0);
-	}

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)

@@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
-	/*
-	 * When the chan_id is a negative value, we are dynamically adding
-	 * the channel. Otherwise we are static enumerating.
-	 */
-	chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+	mutex_lock(&device->chan_mutex);
+	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+	mutex_unlock(&device->chan_mutex);
+	if (chan->chan_id < 0) {
+		pr_err("%s: unable to alloc ida for chan: %d\n",
+		       __func__, chan->chan_id);
+		goto err_out;
+	}
+
	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
-	chan->dev->idr_ref = idr_ref;
	chan->dev->dev_id = device->dev_id;
-	atomic_inc(idr_ref);
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);

	rc = device_register(&chan->dev->device);
	if (rc)
-		goto err_out;
+		goto err_out_ida;
	chan->client_count = 0;
-	device->chancnt = chan->chan_id + 1;
+	device->chancnt++;

	return 0;

+err_out_ida:
+	mutex_lock(&device->chan_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	mutex_unlock(&device->chan_mutex);
 err_out:
	free_percpu(chan->local);
	kfree(chan->dev);
-	if (atomic_dec_return(idr_ref) == 0)
-		kfree(idr_ref);
	return rc;
 }

@@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
 {
	int rc;

-	rc = __dma_async_device_channel_register(device, chan, -1);
+	rc = __dma_async_device_channel_register(device, chan);
	if (rc < 0)
		return rc;

@@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
+	mutex_lock(&device->chan_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	mutex_unlock(&device->chan_mutex);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
 }

@@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
 */
 int dma_async_device_register(struct dma_device *device)
 {
-	int rc, i = 0;
+	int rc;
	struct dma_chan *chan;

	if (!device)

@@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
	if (rc != 0)
		return rc;

+	mutex_init(&device->chan_mutex);
+	ida_init(&device->chan_ida);
+
	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
-		rc = __dma_async_device_channel_register(device, chan, i++);
+		rc = __dma_async_device_channel_register(device, chan);
		if (rc < 0)
			goto err_out;
	}

@@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
+	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
 }
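The dmaengine rework above swaps a hand-rolled, refcounted ID counter for an IDA, which hands out the smallest free integer and recycles freed ones. A standalone sketch of the ida_alloc()/ida_free() pairing (example names are invented):

	#include <linux/idr.h>

	static DEFINE_IDA(demo_chan_ida);

	static int demo_chan_register(void)
	{
		int id = ida_alloc(&demo_chan_ida, GFP_KERNEL);	/* smallest free ID */

		if (id < 0)
			return id;
		/* ... name the channel "dma%dchan%d" with id ... */
		return id;
	}

	static void demo_chan_unregister(int id)
	{
		ida_free(&demo_chan_ida, id);	/* id becomes reusable at once */
	}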
@@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
-			if (!thread->done)
+			if (!thread->done && !thread->pending)
				return true;
		}
	}

@@ -662,8 +662,8 @@ static int dmatest_func(void *data)
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
-	while (!kthread_should_stop()
-	       && !(params->iterations && total_tests >= params->iterations)) {
+	while (!(kthread_should_stop() ||
+	       (params->iterations && total_tests >= params->iterations))) {
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t *dsts;
@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
				size);
	tdmac->desc_arr = NULL;
+	if (tdmac->status == DMA_ERROR)
+		tdmac->status = DMA_COMPLETE;

	return;
 }

@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
	if (!desc)
		goto err_out;

-	mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+	if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
+		goto err_out;

	while (buf < buf_len) {
		desc = &tdmac->desc_arr[i];
@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
	}

	pci_set_master(pdev);
+	pd->dma.dev = &pdev->dev;

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {

@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
		goto err_free_irq;
	}

-	pd->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&pd->dma.channels);
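The pch_dma reordering is an init-order fix worth spelling out: a shared IRQ line may fire the moment request_irq() returns, so every field the handler dereferences must be initialized beforehand. Hedged fragment (ctx, handler, and "demo" are placeholders):

	/* WRONG: the handler can run before dev is published */
	err = request_irq(irq, handler, IRQF_SHARED, "demo", ctx);
	ctx->dev = dev;

	/* RIGHT: publish handler-visible state, then arm the IRQ */
	ctx->dev = dev;
	err = request_irq(irq, handler, IRQF_SHARED, "demo", ctx);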
@@ -816,6 +816,13 @@ static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
 static void tegra_dma_synchronize(struct dma_chan *dc)
 {
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+	int err;
+
+	err = pm_runtime_get_sync(tdc->tdma->dev);
+	if (err < 0) {
+		dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+		return;
+	}

	/*
	 * CPU, which handles interrupt, could be busy in

@@ -825,6 +832,8 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
	wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));

	tasklet_kill(&tdc->tasklet);
+
+	pm_runtime_put(tdc->tdma->dev);
 }

 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
@@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
		soc_ep_map = &j721e_ep_map;
	} else {
		pr_err("PSIL: No compatible machine found for map\n");
+		mutex_unlock(&ep_map_mutex);
		return ERR_PTR(-ENOTSUPP);
	}
	pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
@@ -1230,16 +1230,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
		return ret;

	spin_lock_irqsave(&chan->lock, flags);
-
-	desc = list_last_entry(&chan->active_list,
-			       struct xilinx_dma_tx_descriptor, node);
-	/*
-	 * VDMA and simple mode do not support residue reporting, so the
-	 * residue field will always be 0.
-	 */
-	if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
-		residue = xilinx_dma_get_residue(chan, desc);
-
+	if (!list_empty(&chan->active_list)) {
+		desc = list_last_entry(&chan->active_list,
+				       struct xilinx_dma_tx_descriptor, node);
+		/*
+		 * VDMA and simple mode do not support residue reporting, so the
+		 * residue field will always be 0.
+		 */
+		if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+			residue = xilinx_dma_get_residue(chan, desc);
+	}
	spin_unlock_irqrestore(&chan->lock, flags);

	dma_set_residue(txstate, residue);
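The xilinx_dma guard exists because list_last_entry() on an empty list hands back a pointer computed from the list head itself, never a valid element. Generic sketch (types invented for illustration):

	#include <linux/list.h>

	struct xfer_demo {
		struct list_head node;
		unsigned int residue;
	};

	static unsigned int last_residue_demo(struct list_head *active)
	{
		struct xfer_demo *x;

		if (list_empty(active))	/* without this, x below would be garbage */
			return 0;

		x = list_last_entry(active, struct xfer_demo, node);
		return x->residue;
	}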
@@ -85,9 +85,10 @@
 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
 * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
 * - 3.36.0 - Allow reading more status registers on si/cik
+ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
 */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	36
+#define KMS_DRIVER_MINOR	37
 #define KMS_DRIVER_PATCHLEVEL	0

 int amdgpu_vram_limit = 0;
@@ -73,6 +73,22 @@
 #define SDMA_OP_AQL_COPY  0
 #define SDMA_OP_AQL_BARRIER_OR  0

+#define SDMA_GCR_RANGE_IS_PA		(1 << 18)
+#define SDMA_GCR_SEQ(x)			(((x) & 0x3) << 16)
+#define SDMA_GCR_GL2_WB			(1 << 15)
+#define SDMA_GCR_GL2_INV		(1 << 14)
+#define SDMA_GCR_GL2_DISCARD		(1 << 13)
+#define SDMA_GCR_GL2_RANGE(x)		(((x) & 0x3) << 11)
+#define SDMA_GCR_GL2_US			(1 << 10)
+#define SDMA_GCR_GL1_INV		(1 << 9)
+#define SDMA_GCR_GLV_INV		(1 << 8)
+#define SDMA_GCR_GLK_INV		(1 << 7)
+#define SDMA_GCR_GLK_WB			(1 << 6)
+#define SDMA_GCR_GLM_INV		(1 << 5)
+#define SDMA_GCR_GLM_WB			(1 << 4)
+#define SDMA_GCR_GL1_RANGE(x)		(((x) & 0x3) << 2)
+#define SDMA_GCR_GLI_INV(x)		(((x) & 0x3) << 0)
+
 /*define for op field*/
 #define SDMA_PKT_HEADER_op_offset 0
 #define SDMA_PKT_HEADER_op_mask   0x000000FF
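For orientation (editor's note; the exact hardware semantics are assumed from the names): the new SDMA_GCR_* values are single-bit flags and small shifted fields that are OR-ed into one GCR control word; the emit path in the next hunk combines four of them for an L2/metadata invalidate plus writeback.

	/* invalidate + write back L2 and the metadata cache */
	uint32_t gcr_ctl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
			   SDMA_GCR_GLM_INV | SDMA_GCR_GLM_WB;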
@@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);

+	/* Invalidate L2, because if we don't do it, we might get stale cache
+	 * lines from previous IBs.
+	 */
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
+				 SDMA_GCR_GL2_WB |
+				 SDMA_GCR_GLM_INV |
+				 SDMA_GCR_GLM_WB) << 16);
+	amdgpu_ring_write(ring, 0xffffff80);
+	amdgpu_ring_write(ring, 0xffff);
+
	/* An IB packet must end on a 8 DW boundary--the next dword
	 * must be on a 8-dword boundary. Our IB packet below is 6
	 * dwords long, thus add x number of NOPs, such that, in

@@ -1595,7 +1607,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
-	.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
+	.emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
	.emit_ib = sdma_v5_0_ring_emit_ib,
	.emit_fence = sdma_v5_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
@@ -3340,7 +3340,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
-			  struct dc_plane_address *address)
+			  struct dc_plane_address *address,
+			  bool force_disable_dcc)
 {
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;

@@ -3352,6 +3353,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

+	if (force_disable_dcc)
+		return 0;
+
	if (!offset)
		return 0;

@@ -3401,7 +3405,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
-			     struct dc_plane_address *address)
+			     struct dc_plane_address *address,
+			     bool force_disable_dcc)
 {
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

@@ -3507,7 +3512,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,

		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
-						tiling_flags, dcc, address);
+						tiling_flags, dcc, address,
+						force_disable_dcc);
		if (ret)
			return ret;
	}

@@ -3599,7 +3605,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
-			    struct dc_plane_address *address)
+			    struct dc_plane_address *address,
+			    bool force_disable_dcc)
 {
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =

@@ -3681,7 +3688,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
-					   &plane_info->dcc, address);
+					   &plane_info->dcc, address,
+					   force_disable_dcc);
	if (ret)
		return ret;

@@ -3704,6 +3712,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
	struct dc_plane_info plane_info;
	uint64_t tiling_flags;
	int ret;
+	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)

@@ -3718,9 +3727,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
	if (ret)
		return ret;

+	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
					  &plane_info,
-					  &dc_plane_state->address);
+					  &dc_plane_state->address,
+					  force_disable_dcc);
	if (ret)
		return ret;

@@ -5342,6 +5353,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
+	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

@@ -5400,11 +5412,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

+		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
-			&plane_state->address);
+			&plane_state->address,
+			force_disable_dcc);
	}

	return 0;

@@ -6676,7 +6690,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
-			&bundle->flip_addrs[planes_count].address);
+			&bundle->flip_addrs[planes_count].address,
+			false);
+
+		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+				 new_plane_state->plane->index,
+				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

@@ -8096,7 +8115,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
			ret = fill_dc_plane_info_and_addr(
				dm->adev, new_plane_state, tiling_flags,
				plane_info,
-				&flip_addr->address);
+				&flip_addr->address,
+				false);
			if (ret)
				goto cleanup;
@@ -2908,6 +2908,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
			sizeof(hpd_irq_dpcd_data),
			"Status: ");

+		for (i = 0; i < MAX_PIPES; i++) {
+			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+				link->dc->hwss.blank_stream(pipe_ctx);
+		}
+
		for (i = 0; i < MAX_PIPES; i++) {
			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)

@@ -2927,6 +2933,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
		if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
			dc_link_reallocate_mst_payload(link);

+		for (i = 0; i < MAX_PIPES; i++) {
+			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+				link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+		}
+
		status = false;
		if (out_link_loss)
			*out_link_loss = true;

@@ -4227,6 +4239,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 void dpcd_set_source_specific_data(struct dc_link *link)
 {
	const uint32_t post_oui_delay = 30; // 30ms
+	uint8_t dspc = 0;
+	enum dc_status ret = DC_ERROR_UNEXPECTED;
+
+	ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+				  sizeof(dspc));
+
+	if (ret != DC_OK) {
+		DC_LOG_ERROR("Error in DP aux read transaction,"
+			     " not writing source specific data\n");
+		return;
+	}
+
+	/* Return if OUI unsupported */
+	if (!(dspc & DP_OUI_SUPPORT))
+		return;

	if (!link->dc->vendor_signature.is_valid) {
		struct dpcd_amd_signature amd_signature;
@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
	return dc_stream_get_status_from_state(dc->current_state, stream);
 }

-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
-{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-	unsigned int vupdate_line;
-	unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
-	struct dc_stream_state *stream = pipe_ctx->stream;
-	unsigned int us_per_line;
-
-	if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
-			ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
-
-		vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
-		if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
-			return;
-
-		if (vpos >= vupdate_line)
-			return;
-
-		us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
-		lines_to_vupdate = vupdate_line - vpos;
-		us_to_vupdate = lines_to_vupdate * us_per_line;
-
-		/* 70 us is a conservative estimate of cursor update time*/
-		if (us_to_vupdate < 70)
-			udelay(us_to_vupdate);
-	}
-#endif
-}
-
 /**
 * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address

@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(

		if (!pipe_to_program) {
			pipe_to_program = pipe_ctx;
-
-			delay_cursor_until_vupdate(pipe_ctx, dc);
-			dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+			dc->hwss.cursor_lock(dc, pipe_to_program, true);
		}

		dc->hwss.set_cursor_attribute(pipe_ctx);

@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
	}

	if (pipe_to_program)
-		dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+		dc->hwss.cursor_lock(dc, pipe_to_program, false);

	return true;
 }

@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(

		if (!pipe_to_program) {
			pipe_to_program = pipe_ctx;
-
-			delay_cursor_until_vupdate(pipe_ctx, dc);
-			dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+			dc->hwss.cursor_lock(dc, pipe_to_program, true);
		}

		dc->hwss.set_cursor_position(pipe_ctx);
	}

	if (pipe_to_program)
-		dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+		dc->hwss.cursor_lock(dc, pipe_to_program, false);

	return true;
 }
@@ -2757,6 +2757,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
	.disable_plane = dce110_power_down_fe,
	.pipe_control_lock = dce_pipe_control_lock,
	.interdependent_update_lock = NULL,
+	.cursor_lock = dce_pipe_control_lock,
	.prepare_bandwidth = dce110_prepare_bandwidth,
	.optimize_bandwidth = dce110_optimize_bandwidth,
	.set_drr = set_drr,
@@ -1625,6 +1625,16 @@ void dcn10_pipe_control_lock(
		hws->funcs.verify_allow_pstate_change_high(dc);
 }

+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
+{
+	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
+	if (!pipe || pipe->top_pipe)
+		return;
+
+	dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+			pipe->stream_res.opp->inst, lock);
+}
+
 static bool wait_for_reset_trigger_to_occur(
	struct dc_context *dc_ctx,
	struct timing_generator *tg)
@@ -49,6 +49,7 @@ void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock);
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 void dcn10_blank_pixel_data(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
@@ -50,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
	.disable_audio_stream = dce110_disable_audio_stream,
	.disable_plane = dcn10_disable_plane,
	.pipe_control_lock = dcn10_pipe_control_lock,
+	.cursor_lock = dcn10_cursor_lock,
	.interdependent_update_lock = dcn10_lock_all_pipes,
	.prepare_bandwidth = dcn10_prepare_bandwidth,
	.optimize_bandwidth = dcn10_optimize_bandwidth,
@@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
	REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
	REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);

+	/* Configure VUPDATE lock set for this MPCC to map to the OPP */
+	REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
+
	/* update mpc tree mux setting */
	if (tree->opp_list == insert_above_mpcc) {
		/* insert the toppest mpcc */

@@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
		REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
		REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
		REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+		REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);

		/* mark this mpcc as not in use */
		mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);

@@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
		REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
		REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
		REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+		REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
	}
 }

@@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
		REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
		REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
		REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+		REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);

		mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
	}

@@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
	REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
	REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
	REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+	REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);

	mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);

@@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
			MPCC_BUSY, &s->busy);
 }

+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
+{
+	struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+	REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
+}
+
 static const struct mpc_funcs dcn10_mpc_funcs = {
	.read_mpcc_state = mpc1_read_mpcc_state,
	.insert_plane = mpc1_insert_plane,

@@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
	.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
	.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
	.update_blending = mpc1_update_blending,
+	.cursor_lock = mpc1_cursor_lock,
	.set_denorm = NULL,
	.set_denorm_clamp = NULL,
	.set_output_csc = NULL,
@@ -39,11 +39,12 @@
	SRII(MPCC_BG_G_Y, MPCC, inst),\
	SRII(MPCC_BG_R_CR, MPCC, inst),\
	SRII(MPCC_BG_B_CB, MPCC, inst),\
-	SRII(MPCC_BG_B_CB, MPCC, inst),\
-	SRII(MPCC_SM_CONTROL, MPCC, inst)
+	SRII(MPCC_SM_CONTROL, MPCC, inst),\
+	SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)

 #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
-	SRII(MUX, MPC_OUT, inst)
+	SRII(MUX, MPC_OUT, inst),\
+	VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)

@@ -55,7 +56,9 @@
	uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
	uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
	uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
-	uint32_t MUX[MAX_OPP];
+	uint32_t MUX[MAX_OPP]; \
+	uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
+	uint32_t CUR[MAX_OPP];

 #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
	SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\

@@ -78,7 +81,8 @@
	SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
	SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
	SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
-	SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+	SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
+	SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)

 #define MPC_REG_FIELD_LIST(type) \
	type MPCC_TOP_SEL;\

@@ -101,7 +105,9 @@
	type MPCC_SM_FIELD_ALT;\
	type MPCC_SM_FORCE_NEXT_FRAME_POL;\
	type MPCC_SM_FORCE_NEXT_TOP_POL;\
-	type MPC_OUT_MUX;
+	type MPC_OUT_MUX;\
+	type MPCC_UPDATE_LOCK_SEL;\
+	type CUR_VUPDATE_LOCK_SET;

 struct dcn_mpc_registers {
	MPC_COMMON_REG_VARIABLE_LIST

@@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
		int mpcc_inst,
		struct mpcc_state *s);

+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+
 #endif
@@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id {
	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## 0 ## _ ## block ## id
+
+/* set field/register/bitfield name */
+#define SFRB(field_name, reg_name, bitfield, post_fix)\
+	.field_name = reg_name ## __ ## bitfield ## post_fix
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
	NBIF_BASE__INST0_SEG ## seg

@@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = {
 };

 static const struct dcn_mpc_shift mpc_shift = {
-	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
+	SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
 };

 static const struct dcn_mpc_mask mpc_mask = {
-	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
+	SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
 };

 #define tg_regs(id)\
@@ -2294,7 +2294,8 @@ void dcn20_fpga_init_hw(struct dc *dc)

	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
-	REG_WRITE(REFCLK_CNTL, 0);
+	if (REG(REFCLK_CNTL))
+		REG_WRITE(REFCLK_CNTL, 0);
	//
@@ -52,6 +52,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
	.disable_plane = dcn20_disable_plane,
	.pipe_control_lock = dcn20_pipe_control_lock,
	.interdependent_update_lock = dcn10_lock_all_pipes,
+	.cursor_lock = dcn10_cursor_lock,
	.prepare_bandwidth = dcn20_prepare_bandwidth,
	.optimize_bandwidth = dcn20_optimize_bandwidth,
	.update_bandwidth = dcn20_update_bandwidth,
@@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
	.mpc_init = mpc1_mpc_init,
	.mpc_init_single_inst = mpc1_mpc_init_single_inst,
	.update_blending = mpc2_update_blending,
+	.cursor_lock = mpc1_cursor_lock,
	.get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
	.wait_for_idle = mpc2_assert_idle_mpcc,
	.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
@@ -179,7 +179,8 @@
	SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
	SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
-	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+	SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)

 /*
 * DCN2 MPC_OCSC debug status register:
@@ -508,6 +508,10 @@ enum dcn20_clk_src_array_id {
	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
	NBIO_BASE__INST0_SEG ## seg
@@ -53,6 +53,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
	.disable_plane = dcn20_disable_plane,
	.pipe_control_lock = dcn20_pipe_control_lock,
	.interdependent_update_lock = dcn10_lock_all_pipes,
+	.cursor_lock = dcn10_cursor_lock,
	.prepare_bandwidth = dcn20_prepare_bandwidth,
	.optimize_bandwidth = dcn20_optimize_bandwidth,
	.update_bandwidth = dcn20_update_bandwidth,
@@ -284,7 +284,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
	.dram_channel_width_bytes = 4,
	.fabric_datapath_to_dcn_data_return_bytes = 32,
	.dcn_downspread_percent = 0.5,
-	.downspread_percent = 0.5,
+	.downspread_percent = 0.38,
	.dram_page_open_time_ns = 50.0,
	.dram_rw_turnaround_time_ns = 17.5,
	.dram_return_buffer_per_channel_bytes = 8192,

@@ -340,6 +340,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
	NBIF0_BASE__INST0_SEG ## seg

@@ -1374,64 +1378,49 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 {
	struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
	struct clk_limit_table *clk_table = &bw_params->clk_table;
-	unsigned int i, j, k;
-	int closest_clk_lvl;
+	struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+	unsigned int i, j, closest_clk_lvl;

+	// Default clock levels are used for diags, which may lead to overclocking.
-	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) {
+	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
		dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
		dcn2_1_soc.num_chans = bw_params->num_channels;

-		/* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
-		dcn2_1_soc.clock_limits[0].state = 0;
-		dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
-		dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
-		dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
-		dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
-
-		/*
-		 * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
-		 * as indicator
-		 */
-
-		closest_clk_lvl = -1;
-		/* index currently being filled */
-		k = 1;
-		for (i = 1; i < clk_table->num_entries; i++) {
-			/* loop backwards, skip duplicate state*/
-			for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
+		ASSERT(clk_table->num_entries);
+		for (i = 0; i < clk_table->num_entries; i++) {
+			/* loop backwards*/
+			for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
				if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
					closest_clk_lvl = j;
					break;
				}
			}

-			/* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/
-			if (closest_clk_lvl != -1) {
-				dcn2_1_soc.clock_limits[k].state = i;
-				dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
-				dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
-				dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
-				dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+			clock_limits[i].state = i;
+			clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+			clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+			clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+			clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;

-				dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-				dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-				dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
-				dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
-				dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
-				dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
-				dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
-				k++;
-			}
+			clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+			clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+			clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+			clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+			clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+			clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+			clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
		}
+		for (i = 0; i < clk_table->num_entries; i++)
+			dcn2_1_soc.clock_limits[i] = clock_limits[i];
+		if (clk_table->num_entries) {
+			dcn2_1_soc.num_states = clk_table->num_entries;
+			/* duplicate last level */
+			dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+			dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
+		}
-		dcn2_1_soc.num_states = k;
	}

-	/* duplicate last level */
-	dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
-	dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
-
	dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 }
@@ -210,6 +210,22 @@ struct mpc_funcs {
		struct mpcc_blnd_cfg *blnd_cfg,
		int mpcc_id);

+	/*
+	 * Lock cursor updates for the specified OPP.
+	 * OPP defines the set of MPCC that are locked together for cursor.
+	 *
+	 * Parameters:
+	 * [in] mpc		- MPC context.
+	 * [in] opp_id	- The OPP to lock cursor updates on
+	 * [in] lock	- lock/unlock the OPP
+	 *
+	 * Return:  void
+	 */
+	void (*cursor_lock)(
+			struct mpc *mpc,
+			int opp_id,
+			bool lock);
+
	struct mpcc* (*get_mpcc_for_dpp)(
			struct mpc_tree *tree,
			int dpp_id);
@@ -86,6 +86,7 @@ struct hw_sequencer_funcs {
 			struct dc_state *context, bool lock);
 	void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
 			bool flip_immediate);
+	void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);

 	/* Timing Related */
 	void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
@@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
 	if (!hwmgr)
 		return -EINVAL;

-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
+	if (!(hwmgr->not_vf && amdgpu_dpm) ||
+	    !hwmgr->hwmgr_func->get_asic_baco_capability)
 		return 0;

 	mutex_lock(&hwmgr->smu_lock);
@@ -1452,8 +1453,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
 	if (!hwmgr)
 		return -EINVAL;

-	if (!(hwmgr->not_vf && amdgpu_dpm) ||
-	    !hwmgr->hwmgr_func->get_asic_baco_state)
+	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
 		return 0;

 	mutex_lock(&hwmgr->smu_lock);
@@ -1470,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
 	if (!hwmgr)
 		return -EINVAL;

-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
+	if (!(hwmgr->not_vf && amdgpu_dpm) ||
+	    !hwmgr->hwmgr_func->set_asic_baco_state)
 		return 0;

 	mutex_lock(&hwmgr->smu_lock);
@@ -3442,8 +3442,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 	drm_dp_queue_down_tx(mgr, txmsg);

 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
-	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
-		ret = -EIO;
+	if (ret > 0) {
+		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+			ret = -EIO;
+		else
+			ret = size;
+	}

 	kfree(txmsg);
 fail_put:
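This hunk makes the DPCD write path report how many bytes were transferred instead of collapsing every non-NAK reply to a bare positive value. A minimal userspace sketch of that return contract — every name below is invented for illustration and nothing is taken from the DRM code:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Write-style contract: return bytes written on success, -errno on error.
 * Losing the size (as the old code effectively did) breaks callers that
 * compare the return value against the requested length. */
static int sideband_write(char *dst, size_t dst_len, const char *src, size_t size)
{
	if (size > dst_len)
		return -EIO;		/* stands in for a NAK reply */
	memcpy(dst, src, size);
	return (int)size;		/* success: report bytes written */
}

int main(void)
{
	char buf[16];
	int ret = sideband_write(buf, sizeof(buf), "abc", 3);

	if (ret < 0)
		fprintf(stderr, "write failed: %d\n", ret);
	else
		printf("wrote %d bytes\n", ret);
	return 0;
}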
@@ -5111,7 +5111,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
 	struct drm_display_mode *mode;
 	unsigned pixel_clock = (timings->pixel_clock[0] |
 				(timings->pixel_clock[1] << 8) |
-				(timings->pixel_clock[2] << 16));
+				(timings->pixel_clock[2] << 16)) + 1;
 	unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
 	unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
 	unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
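The fix adds the same +1 bias to the pixel clock that the neighbouring hactive/hblank/hsync fields already apply: DisplayID stores these values minus one. A small self-contained sketch of the decoding rule (the helper name and sample bytes are illustrative only):

#include <stdio.h>
#include <stdint.h>

/* DisplayID detailed timings store the field as a 24-bit little-endian
 * value biased by -1, so the decoder must add 1 after reassembly. */
static unsigned decode_pixel_clock(const uint8_t b[3])
{
	return ((unsigned)b[0] | (unsigned)b[1] << 8 | (unsigned)b[2] << 16) + 1;
}

int main(void)
{
	const uint8_t raw[3] = { 0xff, 0x39, 0x00 };	/* 0x39ff = 14847 */

	/* (14847 + 1) * 10 kHz = 148.48 MHz, a common 1080p pixel clock */
	printf("%u x 10 kHz\n", decode_pixel_clock(raw));
	return 0;
}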
@@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
 			      int tiling_mode, unsigned int stride)
 {
 	struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
-	struct i915_vma *vma;
+	struct i915_vma *vma, *vn;
+	LIST_HEAD(unbind);
 	int ret = 0;

 	if (tiling_mode == I915_TILING_NONE)
 		return 0;

 	mutex_lock(&ggtt->vm.mutex);
+
+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
+		GEM_BUG_ON(vma->vm != &ggtt->vm);
+
 		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
 			continue;

-		ret = __i915_vma_unbind(vma);
-		if (ret)
-			break;
+		list_move(&vma->vm_link, &unbind);
 	}
+	spin_unlock(&obj->vma.lock);
+
+	list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
+		ret = __i915_vma_unbind(vma);
+		if (ret) {
+			/* Restore the remaining vma on an error */
+			list_splice(&unbind, &ggtt->vm.bound_list);
+			break;
+		}
+	}
+
 	mutex_unlock(&ggtt->vm.mutex);

 	return ret;
@@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	}
 	mutex_unlock(&obj->mm.lock);

+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
 		vma->fence_size =
 			i915_gem_fence_size(i915, vma->size, tiling, stride);
@@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 		if (vma->fence)
 			vma->fence->dirty = true;
 	}
+	spin_unlock(&obj->vma.lock);

 	obj->tiling_and_stride = tiling | stride;
 	i915_gem_object_unlock(obj);
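The reworked fence_prepare walks the vma list only under the short-lived obj->vma.lock, moving candidates onto a private list, and performs the expensive unbind afterwards. A compact userspace sketch of that "collect under the lock, process outside" shape — a plain mutex stands in for both kernel locks, and all names are made up:

#include <pthread.h>
#include <stdio.h>

struct node { int val; struct node *next; };

static struct node pool[4] = {
	{ 1, &pool[1] }, { 2, &pool[2] }, { 3, &pool[3] }, { 4, NULL }
};
static struct node *shared = &pool[0];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	struct node *grabbed = NULL, *n, *next;
	struct node **pp;

	/* Phase 1: under the lock, only unlink matching nodes (cheap). */
	pthread_mutex_lock(&lock);
	pp = &shared;
	while ((n = *pp)) {
		if (n->val % 2 == 0) {		/* "needs rework" predicate */
			*pp = n->next;
			n->next = grabbed;
			grabbed = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&lock);

	/* Phase 2: expensive work runs without holding the list lock. */
	for (n = grabbed; n; n = next) {
		next = n->next;
		printf("processing %d\n", n->val);
	}
	return 0;
}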
@@ -1477,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg)
 		unsigned int page_size = BIT(first);

 		obj = i915_gem_object_create_internal(dev_priv, page_size);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto out_vm;
+		}

 		vma = i915_vma_instance(obj, vm, NULL);
 		if (IS_ERR(vma)) {
@@ -1531,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg)
 	}

 	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_vm;
+	}

 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
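Both hunks replace an early return with a jump to the selftest's unwind label, so the vm reference taken earlier is dropped on failure. A minimal sketch of the goto-unwind idiom in plain C, with malloc standing in for the i915 resources (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
	int err = 0;
	char *vm, *obj;

	vm = malloc(32);
	if (!vm)
		return -1;	/* nothing acquired yet: plain return is fine */

	obj = malloc(64);
	if (!obj) {
		err = -1;
		goto out_vm;	/* a bare 'return' here would leak vm */
	}

	printf("both resources held\n");
	free(obj);
out_vm:
	free(vm);		/* unwind in reverse order of acquisition */
	return err;
}

int main(void)
{
	return do_work() ? 1 : 0;
}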
|
@ -521,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from,
|
||||
|
||||
rcu_read_lock();
|
||||
cl = rcu_dereference(from->hwsp_cacheline);
|
||||
if (i915_request_completed(from)) /* confirm cacheline is valid */
|
||||
goto unlock;
|
||||
if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
|
||||
goto unlock; /* seqno wrapped and completed! */
|
||||
if (unlikely(i915_request_completed(from)))
|
||||
|
@@ -3358,7 +3358,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;

-	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+		GEN8_PIPE_CDCLK_CRC_DONE;
 	u32 de_pipe_enables;
 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
 	u32 de_port_enables;
@@ -3369,13 +3370,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 		de_misc_masked |= GEN8_DE_MISC_GSE;

 	if (INTEL_GEN(dev_priv) >= 9) {
-		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
 				  GEN9_AUX_CHANNEL_D;
 		if (IS_GEN9_LP(dev_priv))
 			de_port_masked |= BXT_DE_PORT_GMBUS;
-	} else {
-		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 	}

 	if (INTEL_GEN(dev_priv) >= 11)
@@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj,

 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

+	spin_lock(&obj->vma.lock);
+
 	if (i915_is_ggtt(vm)) {
 		if (unlikely(overflows_type(vma->size, u32)))
-			goto err_vma;
+			goto err_unlock;

 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
 						      i915_gem_object_get_tiling(obj),
 						      i915_gem_object_get_stride(obj));
 		if (unlikely(vma->fence_size < vma->size || /* overflow */
 			     vma->fence_size > vm->total))
-			goto err_vma;
+			goto err_unlock;

 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

@@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj,
 		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
 	}

-	spin_lock(&obj->vma.lock);
-
 	rb = NULL;
 	p = &obj->vma.tree.rb_node;
 	while (*p) {
@@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj,

 	return vma;

+err_unlock:
+	spin_unlock(&obj->vma.lock);
 err_vma:
 	i915_vma_free(vma);
 	return ERR_PTR(-E2BIG);
@@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
 		return ret;

 	ret = qxl_release_reserve_list(release, true);
-	if (ret)
+	if (ret) {
+		qxl_release_free(qdev, release);
 		return ret;
-
+	}
 	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_SURFACE_CMD_CREATE;
 	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
 	/* no need to add a release to the fence for this surface bo,
 	   since it is only released when we ask to destroy the surface
 	   and it would never signal otherwise */
-	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

 	surf->hw_surf_alloc = true;
 	spin_lock(&qdev->surf_id_idr_lock);
@@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
 	cmd->surface_id = id;
 	qxl_release_unmap(qdev, release, &cmd->release_info);

-	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

 	return 0;
 }
@@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
 	cmd->u.set.visible = 1;
 	qxl_release_unmap(qdev, release, &cmd->release_info);

-	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);

 	return ret;

@@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	cmd->u.position.y = plane->state->crtc_y + fb->hot_y;

 	qxl_release_unmap(qdev, release, &cmd->release_info);
-	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);

 	if (old_cursor_bo != NULL)
 		qxl_bo_unpin(old_cursor_bo);
@@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
 	cmd->type = QXL_CURSOR_HIDE;
 	qxl_release_unmap(qdev, release, &cmd->release_info);

-	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 }

 static void qxl_update_dumb_head(struct qxl_device *qdev,
@@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 		goto out_release_backoff;

 	rects = drawable_set_clipping(qdev, num_clips, clips_bo);
-	if (!rects)
+	if (!rects) {
+		ret = -EINVAL;
 		goto out_release_backoff;
-
+	}
 	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);

 	drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 	}
 	qxl_bo_kunmap(clips_bo);

-	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);

 out_release_backoff:
 	if (ret)
@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
 		break;
 	default:
 		DRM_ERROR("unsupported image bit depth\n");
-		return -EINVAL; /* TODO: cleanup */
+		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+		return -EINVAL;
 	}
 	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
 	image->u.bitmap.x = width;
@@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 			apply_surf_reloc(qdev, &reloc_info[i]);
 	}

+	qxl_release_fence_buffer_objects(release);
 	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
-	if (ret)
-		qxl_release_backoff_reserve_list(release);
-	else
-		qxl_release_fence_buffer_objects(release);

 out_free_bos:
 out_free_release:
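All of the qxl hunks above enforce a single ordering rule: the release's buffer objects are fenced before the command is pushed to the ring, so the host can never complete a command whose objects are not yet fenced. As a hedged userspace analogue (not qxl code), the same publish-last ordering with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

struct cmd {
	int payload;
	int fenced;			/* must be set up before publication */
};

static struct cmd slot;
static _Atomic int published;

/* Producer: complete all setup (the "fence"), then publish (the "push").
 * Publishing first would let a consumer observe fenced == 0. */
static void submit(int payload)
{
	slot.payload = payload;
	slot.fenced = 1;					/* prepare */
	atomic_store_explicit(&published, 1, memory_order_release); /* push */
}

int main(void)
{
	submit(42);
	if (atomic_load_explicit(&published, memory_order_acquire))
		printf("payload %d, fenced=%d\n", slot.payload, slot.fenced);
	return 0;
}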
@@ -53,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 		      events_clear, &events_clear);
 }

-static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
-				       uint32_t ctx_id)
-{
-	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
-	virtio_gpu_notify(vgdev);
-	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
-}
-
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
 			       void (*work_func)(struct work_struct *work))
 {
@@ -275,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
-	struct virtio_gpu_fpriv *vfpriv;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

 	if (!vgdev->has_virgl_3d)
 		return;

-	vfpriv = file->driver_priv;
-	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+	if (vfpriv->context_created) {
+		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
+		virtio_gpu_notify(vgdev);
+	}
+
+	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
 	mutex_destroy(&vfpriv->context_lock);
 	kfree(vfpriv);
 	file->driver_priv = NULL;
@@ -184,11 +184,7 @@ void hv_synic_enable_regs(unsigned int cpu)

 	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
 	shared_sint.masked = false;
-	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
-		shared_sint.auto_eoi = false;
-	else
-		shared_sint.auto_eoi = true;
-
+	shared_sint.auto_eoi = hv_recommend_using_aeoi();
 	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

 	/* Enable the global synic bit */
@@ -286,8 +286,8 @@ TRACE_EVENT(vmbus_send_tl_connect_request,
 		    __field(int, ret)
 		    ),
 	    TP_fast_assign(
-		    memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
-		    memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+		    export_guid(__entry->guest_id, &msg->guest_endpoint_id);
+		    export_guid(__entry->host_id, &msg->host_service_id);
 		    __entry->ret = ret;
 		    ),
 	    TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
@@ -978,6 +978,9 @@ static int vmbus_resume(struct device *child_device)

 	return drv->resume(dev);
 }
+#else
+#define vmbus_suspend NULL
+#define vmbus_resume NULL
 #endif /* CONFIG_PM_SLEEP */

 /*
@@ -997,11 +1000,22 @@ static void vmbus_device_release(struct device *device)
 }

 /*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
+ * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
+ *
+ * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
+ * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
+ * is no way to wake up a Generation-2 VM.
+ *
+ * The other 4 ops are for hibernation.
 */
 static const struct dev_pm_ops vmbus_pm = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
+	.suspend_noirq = NULL,
+	.resume_noirq = NULL,
+	.freeze_noirq = vmbus_suspend,
+	.thaw_noirq = vmbus_resume,
+	.poweroff_noirq = vmbus_suspend,
+	.restore_noirq = vmbus_resume,
 };

 /* The one and only one */
@@ -2281,6 +2295,9 @@ static int vmbus_bus_resume(struct device *dev)

 	return 0;
 }
+#else
+#define vmbus_bus_suspend NULL
+#define vmbus_bus_resume NULL
 #endif /* CONFIG_PM_SLEEP */

 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
@@ -2291,16 +2308,24 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

 /*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
- * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
- * pci "noirq" restore callback runs before "non-noirq" callbacks (see
+ * Note: we must use the "no_irq" ops, otherwise hibernation can not work with
+ * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
+ * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
- * resume callback must also run via the "noirq" callbacks.
+ * resume callback must also run via the "noirq" ops.
+ *
+ * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
+ * earlier in this file before vmbus_pm.
 */
 static const struct dev_pm_ops vmbus_bus_pm = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
+	.suspend_noirq = NULL,
+	.resume_noirq = NULL,
+	.freeze_noirq = vmbus_bus_suspend,
+	.thaw_noirq = vmbus_bus_resume,
+	.poweroff_noirq = vmbus_bus_suspend,
+	.restore_noirq = vmbus_bus_resume
 };

 static struct acpi_driver vmbus_acpi_driver = {
@@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
 	if (!privdata)
 		return -ENOMEM;

+	privdata->pci_dev = pci_dev;
 	rc = amd_mp2_pci_init(privdata, pci_dev);
 	if (rc)
 		return rc;

 	mutex_init(&privdata->c2p_lock);
-	privdata->pci_dev = pci_dev;

 	pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
 	pm_runtime_use_autosuspend(&pci_dev->dev);
@@ -603,6 +603,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
 	/* Ack all interrupts except for Rx done */
 	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
 	       bus->base + ASPEED_I2C_INTR_STS_REG);
+	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
 	irq_remaining = irq_received;

 #if IS_ENABLED(CONFIG_I2C_SLAVE)
@@ -645,9 +646,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
 			   irq_received, irq_handled);

 	/* Ack Rx done */
-	if (irq_received & ASPEED_I2CD_INTR_RX_DONE)
+	if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
 		writel(ASPEED_I2CD_INTR_RX_DONE,
 		       bus->base + ASPEED_I2C_INTR_STS_REG);
+		readl(bus->base + ASPEED_I2C_INTR_STS_REG);
+	}
 	spin_unlock(&bus->lock);
 	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
 }
@@ -360,6 +360,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
 			value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
 			i2c_slave_event(iproc_i2c->slave,
 					I2C_SLAVE_WRITE_RECEIVED, &value);
+			if (rx_status == I2C_SLAVE_RX_END)
+				i2c_slave_event(iproc_i2c->slave,
+						I2C_SLAVE_STOP, &value);
 		}
 	} else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
 		/* Master read other than start */
@@ -996,13 +996,14 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
 	do {
 		u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);

-		if (status)
+		if (status) {
 			tegra_i2c_isr(i2c_dev->irq, i2c_dev);

-		if (completion_done(complete)) {
-			s64 delta = ktime_ms_delta(ktimeout, ktime);
+			if (completion_done(complete)) {
+				s64 delta = ktime_ms_delta(ktimeout, ktime);

-			return msecs_to_jiffies(delta) ?: 1;
+				return msecs_to_jiffies(delta) ?: 1;
+			}
 		}

 		ktime = ktime_get();
@@ -1029,18 +1030,14 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
 		disable_irq(i2c_dev->irq);

 		/*
-		 * Under some rare circumstances (like running KASAN +
-		 * NFS root) CPU, which handles interrupt, may stuck in
-		 * uninterruptible state for a significant time. In this
-		 * case we will get timeout if I2C transfer is running on
-		 * a sibling CPU, despite of IRQ being raised.
-		 *
-		 * In order to handle this rare condition, the IRQ status
-		 * needs to be checked after timeout.
+		 * There is a chance that completion may happen after IRQ
+		 * synchronization, which is done by disable_irq().
 		 */
-		if (ret == 0)
-			ret = tegra_i2c_poll_completion_timeout(i2c_dev,
-								complete, 0);
+		if (ret == 0 && completion_done(complete)) {
+			dev_warn(i2c_dev->dev,
+				 "completion done after timeout\n");
+			ret = 1;
+		}
 	}

 	return ret;
@@ -1219,15 +1216,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 		time_left = tegra_i2c_wait_completion_timeout(
 				i2c_dev, &i2c_dev->dma_complete, xfer_time);

-		/*
-		 * Synchronize DMA first, since dmaengine_terminate_sync()
-		 * performs synchronization after the transfer's termination
-		 * and we want to get a completion if transfer succeeded.
-		 */
-		dmaengine_synchronize(i2c_dev->msg_read ?
-				      i2c_dev->rx_dma_chan :
-				      i2c_dev->tx_dma_chan);
-
 		dmaengine_terminate_sync(i2c_dev->msg_read ?
 					 i2c_dev->rx_dma_chan :
 					 i2c_dev->tx_dma_chan);
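The middle hunk narrows the after-timeout handling to one precise case: a completion can land between the timeout and the interrupt synchronization done by disable_irq(), so a timed-out wait is re-checked once before being treated as failure. A small pthreads sketch of re-checking a completion flag after a timed wait (illustrative only, no Tegra specifics):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
static bool done;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	done = true;			/* may land "just after" the timeout */
	pthread_cond_signal(&c);
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec dl;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &dl);
	dl.tv_nsec += 1000 * 1000;	/* ~1 ms budget */
	if (dl.tv_nsec >= 1000000000L) {
		dl.tv_sec++;
		dl.tv_nsec -= 1000000000L;
	}

	pthread_create(&t, NULL, worker, NULL);
	pthread_mutex_lock(&m);
	while (!done && ret == 0)
		ret = pthread_cond_timedwait(&c, &m, &dl);
	/* Key step: on timeout, look at the flag once more instead of
	 * declaring failure; the signal may have raced the deadline. */
	if (ret != 0 && done) {
		printf("completion done after timeout\n");
		ret = 0;
	}
	pthread_mutex_unlock(&m);
	pthread_join(t, NULL);
	return ret ? 1 : 0;
}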
@@ -862,7 +862,7 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,

 	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
 				  &cm.local_id_next, GFP_KERNEL);
-	if (ret)
+	if (ret < 0)
 		goto error;
 	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

@@ -1828,11 +1828,9 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,

 static void cm_format_rej(struct cm_rej_msg *rej_msg,
 			  struct cm_id_private *cm_id_priv,
-			  enum ib_cm_rej_reason reason,
-			  void *ari,
-			  u8 ari_length,
-			  const void *private_data,
-			  u8 private_data_len)
+			  enum ib_cm_rej_reason reason, void *ari,
+			  u8 ari_length, const void *private_data,
+			  u8 private_data_len, enum ib_cm_state state)
 {
 	lockdep_assert_held(&cm_id_priv->lock);

@@ -1840,7 +1838,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
 	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
 		be32_to_cpu(cm_id_priv->id.remote_id));

-	switch(cm_id_priv->id.state) {
+	switch (state) {
 	case IB_CM_REQ_RCVD:
 		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
 		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
@@ -1905,8 +1903,9 @@ static void cm_dup_req_handler(struct cm_work *work,
 			      cm_id_priv->private_data_len);
 		break;
 	case IB_CM_TIMEWAIT:
-		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
-			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
+		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
+			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
+			      IB_CM_TIMEWAIT);
 		break;
 	default:
 		goto unlock;
@@ -2904,6 +2903,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 			      u8 ari_length, const void *private_data,
 			      u8 private_data_len)
 {
+	enum ib_cm_state state = cm_id_priv->id.state;
 	struct ib_mad_send_buf *msg;
 	int ret;

@@ -2913,7 +2913,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
 		return -EINVAL;

-	switch (cm_id_priv->id.state) {
+	switch (state) {
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
 	case IB_CM_REQ_RCVD:
@@ -2925,7 +2925,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 		if (ret)
 			return ret;
 		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
-			      ari, ari_length, private_data, private_data_len);
+			      ari, ari_length, private_data, private_data_len,
+			      state);
 		break;
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
@@ -2934,7 +2935,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
 		if (ret)
 			return ret;
 		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
-			      ari, ari_length, private_data, private_data_len);
+			      ari, ari_length, private_data, private_data_len,
+			      state);
 		break;
 	default:
 		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
@@ -360,7 +360,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
 	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
 	 * that release is never done while a call to lookup is possible.
 	 */
-	if (f->f_op != fd_type->fops) {
+	if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
 		fput(f);
 		return ERR_PTR(-EBADF);
 	}
@@ -474,16 +474,15 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
 	filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
 				  fd_type->flags);
 	if (IS_ERR(filp)) {
+		uverbs_uobject_put(uobj);
 		uobj = ERR_CAST(filp);
-		goto err_uobj;
+		goto err_fd;
 	}
 	uobj->object = filp;

 	uobj->id = new_fd;
 	return uobj;

-err_uobj:
-	uverbs_uobject_put(uobj);
 err_fd:
 	put_unused_fd(new_fd);
 	return uobj;
@@ -679,7 +678,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
 			     enum rdma_lookup_mode mode)
 {
 	assert_uverbs_usecnt(uobj, mode);
-	uobj->uapi_object->type_class->lookup_put(uobj, mode);
 	/*
 	 * In order to unlock an object, either decrease its usecnt for
 	 * read access or zero it in case of exclusive access. See
@@ -696,6 +694,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
 		break;
 	}

+	uobj->uapi_object->type_class->lookup_put(uobj, mode);
 	/* Pairs with the kref obtained by type->lookup_get */
 	uverbs_uobject_put(uobj);
 }
@@ -820,6 +820,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 		ret = mmget_not_zero(mm);
 		if (!ret) {
 			list_del_init(&priv->list);
+			if (priv->entry) {
+				rdma_user_mmap_entry_put(priv->entry);
+				priv->entry = NULL;
+			}
 			mm = NULL;
 			continue;
 		}
@@ -1046,7 +1046,7 @@ i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
 	u64 header;

 	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
-	if (wqe)
+	if (!wqe)
 		return I40IW_ERR_RING_FULL;

 	set_64bit_val(wqe, 32, feat_mem->pa);
@@ -1499,8 +1499,9 @@ static int __mlx4_ib_create_default_rules(
 	int i;

 	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+		union ib_flow_spec ib_spec = {};
 		int ret;
-		union ib_flow_spec ib_spec;
+
 		switch (pdefault_rules->rules_create_list[i]) {
 		case 0:
 			/* no rule */
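The mlx4 hunk replaces an uninitialized stack union with an empty initializer, so bytes the selected flow-spec member does not cover are never stack garbage. A minimal illustration (C11 spells the initializer {0}; the kernel uses {}):

#include <stdio.h>

union spec {
	struct { int type; int len; } hdr;
	unsigned char raw[32];
};

int main(void)
{
	union spec a = {0};	/* every byte zeroed, like the mlx4 fix */
	union spec b;		/* uninitialized: bytes beyond the fields
				 * assigned below keep stack garbage */

	b.hdr.type = 1;
	b.hdr.len = 8;
	(void)b;		/* reading b.raw[31] here would be undefined */

	printf("a.raw[31] = %u (guaranteed 0)\n", a.raw[31]);
	return 0;
}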
@@ -5558,7 +5558,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
 	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
 	rdma_ah_set_static_rate(ah_attr,
 				path->static_rate ? path->static_rate - 5 : 0);
-	if (path->grh_mlid & (1 << 7)) {
+
+	if (path->grh_mlid & (1 << 7) ||
+	    ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
 		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);

 		rdma_ah_set_grh(ah_attr, NULL,
@@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
 		cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
-		if (!cq->ip) {
-			err = -ENOMEM;
+		if (IS_ERR(cq->ip)) {
+			err = PTR_ERR(cq->ip);
 			goto bail_wc;
 		}

@@ -154,7 +154,7 @@ done:
 * @udata: user data (must be valid!)
 * @obj: opaque pointer to a cq, wq etc
 *
- * Return: rvt_mmap struct on success
+ * Return: rvt_mmap struct on success, ERR_PTR on failure
 */
 struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
 					   struct ib_udata *udata, void *obj)
@@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,

 	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
 	if (!ip)
-		return ip;
+		return ERR_PTR(-ENOMEM);

 	size = PAGE_ALIGN(size);

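rvt_create_mmap_info() now returns an encoded error pointer instead of NULL, and the rvt_create_cq/qp/srq callers above switch from !ptr checks to IS_ERR()/PTR_ERR(). A self-contained sketch of the convention, simplified from the kernel's err.h (the create_info() helper is invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno in an (invalid) pointer value. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *create_info(size_t size)
{
	void *ip = malloc(size);

	if (!ip)
		return ERR_PTR(-12);	/* -ENOMEM */
	return ip;
}

int main(void)
{
	void *ip = create_info(64);

	if (IS_ERR(ip)) {		/* a '!ip' check would miss this */
		fprintf(stderr, "error %ld\n", PTR_ERR(ip));
		return 1;
	}
	free(ip);
	return 0;
}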
@@ -1244,8 +1244,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,

 			qp->ip = rvt_create_mmap_info(rdi, s, udata,
 						      qp->r_rq.wq);
-			if (!qp->ip) {
-				ret = ERR_PTR(-ENOMEM);
+			if (IS_ERR(qp->ip)) {
+				ret = ERR_CAST(qp->ip);
 				goto bail_qpn;
 			}

@@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
 		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

 		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
-		if (!srq->ip) {
-			ret = -ENOMEM;
+		if (IS_ERR(srq->ip)) {
+			ret = PTR_ERR(srq->ip);
 			goto bail_wq;
 		}

@@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
 {
 	struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
 	struct siw_device *sdev = to_siw_dev(pd->device);
-	struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
+	struct siw_mem *mem;
 	int rv = 0;

 	siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);

-	if (unlikely(!mem || !base_mr)) {
+	if (unlikely(!base_mr)) {
 		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
 		return -EINVAL;
 	}
+
 	if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
 		pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
-		rv = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
+
+	mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
+	if (unlikely(!mem)) {
+		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
+		return -EINVAL;
+	}
+
 	if (unlikely(mem->pd != pd)) {
 		pr_warn("siw: fastreg: PD mismatch\n");
 		rv = -EINVAL;
@@ -362,7 +362,7 @@ config IPMMU_VMSA

 config SPAPR_TCE_IOMMU
 	bool "sPAPR TCE IOMMU Support"
-	depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST)
+	depends on PPC_POWERNV || PPC_PSERIES
 	select IOMMU_API
 	help
 	  Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -457,7 +457,7 @@ config S390_AP_IOMMU

 config MTK_IOMMU
 	bool "MTK IOMMU Support"
-	depends on ARM || ARM64 || COMPILE_TEST
-	depends on HAS_DMA
+	depends on ARCH_MEDIATEK || COMPILE_TEST
 	select ARM_DMA_USE_IOMMU
 	select IOMMU_API
@@ -2936,7 +2936,7 @@ static int __init parse_amd_iommu_intr(char *str)
 {
 	for (; *str; ++str) {
 		if (strncmp(str, "legacy", 6) == 0) {
-			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
 			break;
 		}
 		if (strncmp(str, "vapic", 5) == 0) {
@@ -371,11 +371,11 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */

-#ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
 int intel_iommu_sm = 1;
 #else
 int intel_iommu_sm;
-#endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */

 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
@@ -184,6 +184,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)

 static void dev_iommu_free(struct device *dev)
 {
+	iommu_fwspec_free(dev);
 	kfree(dev->iommu);
 	dev->iommu = NULL;
 }
@@ -814,8 +814,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
 	qcom_iommu->dev = dev;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res)
+	if (res) {
 		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qcom_iommu->local_base))
+			return PTR_ERR(qcom_iommu->local_base);
+	}

 	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
 	if (IS_ERR(qcom_iommu->iface_clk)) {
@@ -585,10 +585,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)

 	/* Do we need to select a new pgpath? */
 	pgpath = READ_ONCE(m->current_pgpath);
-	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
-	if (!pgpath || !queue_io)
+	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

+	/* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
+	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+
 	if ((pgpath && queue_io) ||
 	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 		/* Queue for the daemon to resubmit */
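The multipath fix re-reads MPATHF_QUEUE_IO after choose_pgpath(), because that call can clear the bit; caching the pre-call value reintroduces exactly the stale read the new comment warns about. A tiny sketch of the re-read pattern with C11 atomics (all names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long flags = 1;	/* bit 0: "queue io" */

static int choose_path(void)
{
	/* Selecting a path may clear the flag, like choose_pgpath(). */
	atomic_fetch_and(&flags, ~1UL);
	return 7;
}

int main(void)
{
	bool queue_io = atomic_load(&flags) & 1;
	int path = queue_io ? choose_path() : 3;

	/* Re-read after the call; the cached 'queue_io' is now stale. */
	queue_io = atomic_load(&flags) & 1;
	printf("path %d, queue_io=%d\n", path, queue_io);
	return 0;
}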
@@ -435,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
 	fio->level++;

 	if (type == DM_VERITY_BLOCK_TYPE_METADATA)
-		block += v->data_blocks;
+		block = block - v->hash_start + v->data_blocks;

 	/*
 	 * For RS(M, N), the continuous FEC data is divided into blocks of N
@@ -931,6 +931,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 	return 0;
 }

+static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
+{
+	struct dm_io_region region;
+	struct dm_io_request req;
+
+	region.bdev = wc->ssd_dev->bdev;
+	region.sector = wc->start_sector;
+	region.count = n_sectors;
+	req.bi_op = REQ_OP_READ;
+	req.bi_op_flags = REQ_SYNC;
+	req.mem.type = DM_IO_VMA;
+	req.mem.ptr.vma = (char *)wc->memory_map;
+	req.client = wc->dm_io;
+	req.notify.fn = NULL;
+
+	return dm_io(&req, 1, &region, NULL);
+}
+
 static void writecache_resume(struct dm_target *ti)
 {
 	struct dm_writecache *wc = ti->private;
@@ -941,8 +959,18 @@ static void writecache_resume(struct dm_target *ti)

 	wc_lock(wc);

-	if (WC_MODE_PMEM(wc))
+	if (WC_MODE_PMEM(wc)) {
 		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+	} else {
+		r = writecache_read_metadata(wc, wc->metadata_sectors);
+		if (r) {
+			size_t sb_entries_offset;
+			writecache_error(wc, r, "unable to read metadata: %d", r);
+			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
+			memset((char *)wc->memory_map + sb_entries_offset, -1,
+			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
+		}
+	}

 	wc->tree = RB_ROOT;
 	INIT_LIST_HEAD(&wc->lru);
@@ -2102,6 +2130,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		ti->error = "Invalid block size";
 		goto bad;
 	}
+	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
+	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
+		r = -EINVAL;
+		ti->error = "Block size is smaller than device logical block size";
+		goto bad;
+	}
 	wc->block_size_bits = __ffs(wc->block_size);

 	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2200,8 +2234,6 @@ invalid_optional:
 			goto bad;
 		}
 	} else {
-		struct dm_io_region region;
-		struct dm_io_request req;
 		size_t n_blocks, n_metadata_blocks;
 		uint64_t n_bitmap_bits;

@@ -2258,19 +2290,9 @@ invalid_optional:
 			goto bad;
 		}

-		region.bdev = wc->ssd_dev->bdev;
-		region.sector = wc->start_sector;
-		region.count = wc->metadata_sectors;
-		req.bi_op = REQ_OP_READ;
-		req.bi_op_flags = REQ_SYNC;
-		req.mem.type = DM_IO_VMA;
-		req.mem.ptr.vma = (char *)wc->memory_map;
-		req.client = wc->dm_io;
-		req.notify.fn = NULL;
-
-		r = dm_io(&req, 1, &region, NULL);
+		r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
 		if (r) {
-			ti->error = "Unable to read metadata";
+			ti->error = "Unable to read first block of metadata";
 			goto bad;
 		}
 	}
@@ -878,7 +878,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
 * Issued High Priority Interrupt, and check for card status
 * until out-of prg-state.
 */
-int mmc_interrupt_hpi(struct mmc_card *card)
+static int mmc_interrupt_hpi(struct mmc_card *card)
 {
 	int err;
 	u32 status;
@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
@@ -349,12 +350,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 /* CQHCI is idle and should halt immediately, so set a small timeout */
 #define CQHCI_OFF_TIMEOUT 100

+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+	return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
 static void cqhci_off(struct mmc_host *mmc)
 {
 	struct cqhci_host *cq_host = mmc->cqe_private;
-	ktime_t timeout;
-	bool timed_out;
 	u32 reg;
+	int err;

 	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
 		return;
@@ -364,15 +369,9 @@ static void cqhci_off(struct mmc_host *mmc)

 	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

-	timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
-	while (1) {
-		timed_out = ktime_compare(ktime_get(), timeout) > 0;
-		reg = cqhci_readl(cq_host, CQHCI_CTL);
-		if ((reg & CQHCI_HALT) || timed_out)
-			break;
-	}
-
-	if (timed_out)
+	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+	if (err < 0)
 		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
 	else
 		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
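cqhci_off() drops its open-coded ktime loop in favour of readx_poll_timeout(). A hedged userspace equivalent of such a helper — polling a getter until a condition holds or a deadline passes, with one final read after the deadline — with every name below invented for illustration:

#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Poll read_fn(ctx) until cond(val) holds or timeout_us elapses; mirrors
 * the shape of readx_poll_timeout(): 0 on success, -110 (-ETIMEDOUT)
 * otherwise, with *val holding the last reading either way. */
static int poll_timeout(unsigned (*read_fn)(void *), void *ctx,
			unsigned *val, int (*cond)(unsigned),
			long long timeout_us)
{
	long long deadline = now_us() + timeout_us;

	for (;;) {
		*val = read_fn(ctx);
		if (cond(*val))
			return 0;
		if (now_us() > deadline)
			return cond(*val = read_fn(ctx)) ? 0 : -110;
	}
}

static unsigned read_reg(void *ctx) { return ++*(unsigned *)ctx; }
static int halted(unsigned v) { return v >= 3; }

int main(void)
{
	unsigned fake = 0, v;
	int err = poll_timeout(read_reg, &fake, &v, halted, 100000);

	printf("err=%d reg=%u\n", err, v);
	return 0;
}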
@@ -357,14 +357,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	meson_mx_mmc_start_cmd(mmc, mrq->cmd);
 }

-static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
-{
-	struct meson_mx_mmc_host *host = mmc_priv(mmc);
-	u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
-
-	return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
-}
-
 static void meson_mx_mmc_read_response(struct mmc_host *mmc,
 				       struct mmc_command *cmd)
 {
@@ -506,7 +498,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
 static struct mmc_host_ops meson_mx_mmc_ops = {
 	.request	= meson_mx_mmc_request,
 	.set_ios	= meson_mx_mmc_set_ios,
-	.card_busy	= meson_mx_mmc_card_busy,
 	.get_cd		= mmc_gpio_get_cd,
 	.get_ro		= mmc_gpio_get_ro,
 };
@@ -570,7 +561,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
 	mmc->f_max = clk_round_rate(host->cfg_div_clk,
 				    clk_get_rate(host->parent_clk));

-	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
 	mmc->ops = &meson_mx_mmc_ops;

 	ret = mmc_of_parse(mmc);
@@ -2087,6 +2087,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 		goto clk_disable;
 	}

+	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+
 	pm_runtime_get_noresume(&pdev->dev);
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
@@ -601,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
 	struct sdhci_pci_slot *slot = sdhci_priv(host);
 	struct intel_host *intel_host = sdhci_pci_priv(slot);

+	if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+		return 0;
+
 	return intel_host->drv_strength;
 }

@@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
 {
 	/* Wait for 5ms after set 1.8V signal enable bit */
 	usleep_range(5000, 5500);
+
+	/*
+	 * For some reason the controller's Host Control2 register reports
+	 * the bit representing 1.8V signaling as 0 when read after it was
+	 * written as 1. Subsequent read reports 1.
+	 *
+	 * Since this may cause some issues, do an empty read of the Host
+	 * Control2 register here to circumvent this.
+	 */
+	sdhci_readw(host, SDHCI_HOST_CONTROL2);
 }

 static const struct sdhci_ops sdhci_xenon_ops = {
@@ -3642,6 +3642,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)

 	return;
 out_put_disk:
+	/* prevent double queue cleanup */
+	ns->disk->queue = NULL;
 	put_disk(ns->disk);
 out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
@@ -3732,6 +3732,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	}
 	qla2x00_wait_for_hba_ready(base_vha);

+	/*
+	 * if UNLOADING flag is already set, then continue unload,
+	 * where it was set first.
+	 */
+	if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+		return;
+
 	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
 	    IS_QLA28XX(ha)) {
 		if (ha->flags.fw_started)
@@ -3750,15 +3757,6 @@ qla2x00_remove_one(struct pci_dev *pdev)

 	qla2x00_wait_for_sess_deletion(base_vha);

-	/*
-	 * if UNLOAD flag is already set, then continue unload,
-	 * where it was set first.
-	 */
-	if (test_bit(UNLOADING, &base_vha->dpc_flags))
-		return;
-
-	set_bit(UNLOADING, &base_vha->dpc_flags);
-
 	qla_nvme_delete(base_vha);

 	dma_free_coherent(&ha->pdev->dev,
@@ -4864,6 +4862,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
 	struct qla_work_evt *e;
 	uint8_t bail;

+	if (test_bit(UNLOADING, &vha->dpc_flags))
+		return NULL;
+
 	QLA_VHA_MARK_BUSY(vha, bail);
 	if (bail)
 		return NULL;
@@ -6628,13 +6629,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 	struct pci_dev *pdev = ha->pdev;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

-	/*
-	 * if UNLOAD flag is already set, then continue unload,
-	 * where it was set first.
-	 */
-	if (test_bit(UNLOADING, &base_vha->dpc_flags))
-		return;
-
 	ql_log(ql_log_warn, base_vha, 0x015b,
 	       "Disabling adapter.\n");

@@ -6645,9 +6639,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 		return;
 	}

-	qla2x00_wait_for_sess_deletion(base_vha);
+	/*
+	 * if UNLOADING flag is already set, then continue unload,
+	 * where it was set first.
+	 */
+	if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+		return;

-	set_bit(UNLOADING, &base_vha->dpc_flags);
+	qla2x00_wait_for_sess_deletion(base_vha);

 	qla2x00_delete_all_vps(ha, base_vha);
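Both qla2xxx unload paths now gate on test_and_set_bit(), so checking and claiming the UNLOADING flag is one atomic step and exactly one caller performs the teardown. The same once-only idiom with C11 atomics (driver names reused only as labels):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long dpc_flags;
#define UNLOADING_BIT 0UL

/* Returns nonzero if the bit was already set (someone else is unloading);
 * a separate test_bit + set_bit pair would leave a race window between
 * the check and the claim. */
static int test_and_set_unloading(void)
{
	unsigned long old = atomic_fetch_or(&dpc_flags, 1UL << UNLOADING_BIT);

	return (old >> UNLOADING_BIT) & 1;
}

static void remove_one(const char *who)
{
	if (test_and_set_unloading()) {
		printf("%s: unload already in progress, bailing\n", who);
		return;
	}
	printf("%s: performing teardown exactly once\n", who);
}

int main(void)
{
	remove_one("remove_one");
	remove_one("disable_board_on_pci_error");
	return 0;
}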
@@ -2284,6 +2284,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		switch (oldstate) {
 		case SDEV_RUNNING:
 		case SDEV_CREATED_BLOCK:
+		case SDEV_QUIESCE:
 		case SDEV_OFFLINE:
 			break;
 		default:
@@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
 			target_to_linux_sector(dev, cmd->t_task_lba),
 			target_to_linux_sector(dev,
 				sbc_get_write_same_sectors(cmd)),
-			GFP_KERNEL, false);
+			GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
 	if (ret)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
