Linux 5.7-rc4

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl6vPf8eHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiG3ooH/0QNxSzms8GgWzm+
 VVg7c293rMJHKDwkzYTP2N308V7WtrSzEYfoN8We70mvSGPIg2Lc4xmTtuNSvF0r
 tyGYHP75WdSEb+eqcsw4diMz6bPGuNYQPLYOsgIsFeCogiSk8U9MAdOrophWA4MZ
 oCmqVupVjimzE5Tye1SVMH5apvEFzapCT5huOkdvuRQ7x6/pQvnHDm/yJg9++ouV
 ICDRjhP4dLspRzoEykmHR/eiNGUFJj4WoPYGLOYX5tvW9CVtwbGuvWAc4aXaUdHx
 4rxD5DQbvdK41lt5yqLnqsAahLDbdwmON0lEbU4U83wYFlJNOOeMs15wDR8rzVVy
 EH6M0uA=
 =UcvZ
 -----END PGP SIGNATURE-----

Merge tag 'v5.7-rc4' into core

Linux 5.7-rc4
Joerg Roedel 2020-05-13 12:01:33 +02:00
commit ec9b40cffd
162 changed files with 1289 additions and 604 deletions


@ -182,12 +182,15 @@ fix_padding
space-efficient. If this option is not present, large padding is space-efficient. If this option is not present, large padding is
used - that is for compatibility with older kernels. used - that is for compatibility with older kernels.
allow_discards
Allow block discard requests (a.k.a. TRIM) for the integrity device.
Discards are only allowed to devices using internal hash.
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
be changed when reloading the target (load an inactive table and swap the allow_discards can be changed when reloading the target (load an inactive
tables with suspend and resume). The other arguments should not be changed table and swap the tables with suspend and resume). The other arguments
when reloading the target because the layout of disk data depend on them should not be changed when reloading the target because the layout of disk
and the reloaded target would be non-functional. data depend on them and the reloaded target would be non-functional.
The layout of the formatted block device: The layout of the formatted block device:


@ -22,9 +22,7 @@ properties:
const: socionext,uniphier-xdmac const: socionext,uniphier-xdmac
reg: reg:
items: maxItems: 1
- description: XDMAC base register region (offset and length)
- description: XDMAC extension register region (offset and length)
interrupts: interrupts:
maxItems: 1 maxItems: 1
@ -49,12 +47,13 @@ required:
- reg - reg
- interrupts - interrupts
- "#dma-cells" - "#dma-cells"
- dma-channels
examples: examples:
- | - |
xdmac: dma-controller@5fc10000 { xdmac: dma-controller@5fc10000 {
compatible = "socionext,uniphier-xdmac"; compatible = "socionext,uniphier-xdmac";
reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>; reg = <0x5fc10000 0x5300>;
interrupts = <0 188 4>; interrupts = <0 188 4>;
#dma-cells = <2>; #dma-cells = <2>;
dma-channels = <16>; dma-channels = <16>;


@ -3657,7 +3657,7 @@ L: linux-btrfs@vger.kernel.org
S: Maintained S: Maintained
W: http://btrfs.wiki.kernel.org/ W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/ Q: http://patchwork.kernel.org/project/linux-btrfs/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
F: Documentation/filesystems/btrfs.rst F: Documentation/filesystems/btrfs.rst
F: fs/btrfs/ F: fs/btrfs/
F: include/linux/btrfs* F: include/linux/btrfs*


@ -2,7 +2,7 @@
VERSION = 5 VERSION = 5
PATCHLEVEL = 7 PATCHLEVEL = 7
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc3 EXTRAVERSION = -rc4
NAME = Kleptomaniac Octopus NAME = Kleptomaniac Octopus
# *DOCUMENTATION* # *DOCUMENTATION*


@ -32,7 +32,7 @@ UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n KCOV_INSTRUMENT := n
CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables
ifneq ($(c-gettimeofday-y),) ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)


@ -60,7 +60,7 @@ config RISCV
select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_KERNEL_RWX if MMU
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT select SPARSEMEM_STATIC if 32BIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU


@ -102,7 +102,7 @@ void sbi_shutdown(void)
{ {
sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0); sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
} }
EXPORT_SYMBOL(sbi_set_timer); EXPORT_SYMBOL(sbi_shutdown);
/** /**
* sbi_clear_ipi() - Clear any pending IPIs for the calling hart. * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
@ -113,7 +113,7 @@ void sbi_clear_ipi(void)
{ {
sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0); sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
} }
EXPORT_SYMBOL(sbi_shutdown); EXPORT_SYMBOL(sbi_clear_ipi);
/** /**
* sbi_set_timer_v01() - Program the timer for next timer event. * sbi_set_timer_v01() - Program the timer for next timer event.
@ -167,6 +167,11 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
return result; return result;
} }
static void sbi_set_power_off(void)
{
pm_power_off = sbi_shutdown;
}
#else #else
static void __sbi_set_timer_v01(uint64_t stime_value) static void __sbi_set_timer_v01(uint64_t stime_value)
{ {
@ -191,6 +196,8 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
return 0; return 0;
} }
static void sbi_set_power_off(void) {}
#endif /* CONFIG_RISCV_SBI_V01 */ #endif /* CONFIG_RISCV_SBI_V01 */
static void __sbi_set_timer_v02(uint64_t stime_value) static void __sbi_set_timer_v02(uint64_t stime_value)
@ -540,16 +547,12 @@ static inline long sbi_get_firmware_version(void)
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION); return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
} }
static void sbi_power_off(void)
{
sbi_shutdown();
}
int __init sbi_init(void) int __init sbi_init(void)
{ {
int ret; int ret;
pm_power_off = sbi_power_off; sbi_set_power_off();
ret = sbi_get_spec_version(); ret = sbi_get_spec_version();
if (ret > 0) if (ret > 0)
sbi_spec_version = ret; sbi_spec_version = ret;
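
The net effect of this hunk is that sbi_init() no longer open-codes the power-off hook: sbi_set_power_off() registers sbi_shutdown as pm_power_off when the legacy v0.1 extensions are built in, and is an empty stub otherwise, so the caller needs no #ifdef. A minimal standalone C sketch of that stub-versus-implementation shape (the names below are illustrative stand-ins, not the kernel symbols):

#include <stdio.h>

/* Stand-in for the kernel's pm_power_off function pointer. */
static void (*power_off_hook)(void);

#ifdef HAVE_LEGACY_SBI                  /* analogue of CONFIG_RISCV_SBI_V01 */
static void legacy_shutdown(void)
{
	puts("SBI v0.1 shutdown ecall");
}

static void set_power_off(void)
{
	power_off_hook = legacy_shutdown;
}
#else
static void set_power_off(void) { }     /* feature absent: register nothing */
#endif

int main(void)
{
	set_power_off();                    /* callers stay free of #ifdefs */
	if (power_off_hook)
		power_off_hook();
	else
		puts("no power-off hook registered");
	return 0;
}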


@ -12,6 +12,8 @@
#include <linux/stacktrace.h> #include <linux/stacktrace.h>
#include <linux/ftrace.h> #include <linux/ftrace.h>
register unsigned long sp_in_global __asm__("sp");
#ifdef CONFIG_FRAME_POINTER #ifdef CONFIG_FRAME_POINTER
struct stackframe { struct stackframe {
@ -19,8 +21,6 @@ struct stackframe {
unsigned long ra; unsigned long ra;
}; };
register unsigned long sp_in_global __asm__("sp");
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(unsigned long, void *), void *arg) bool (*fn)(unsigned long, void *), void *arg)
{ {


@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold) $(call if_changed,vdsold)
# We also create a special relocatable object that should mirror the symbol # We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld -R we can then refer to # table and layout of the linked DSO. With ld --just-symbols we can then
# these symbols in the kernel code rather than hand-coded addresses. # refer to these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-Wl,--build-id -Wl,--hash-style=both -Wl,--build-id -Wl,--hash-style=both
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold) $(call if_changed,vdsold)
LDFLAGS_vdso-syms.o := -r -R LDFLAGS_vdso-syms.o := -r --just-symbols
$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
$(call if_changed,ld) $(call if_changed,ld)


@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
{ {
mm_segment_t old_fs; mm_segment_t old_fs;
unsigned long asce, cr; unsigned long asce, cr;
unsigned long flags;
old_fs = current->thread.mm_segment; old_fs = current->thread.mm_segment;
if (old_fs & 1) if (old_fs & 1)
return old_fs; return old_fs;
/* protect against a concurrent page table upgrade */
local_irq_save(flags);
current->thread.mm_segment |= 1; current->thread.mm_segment |= 1;
asce = S390_lowcore.kernel_asce; asce = S390_lowcore.kernel_asce;
if (likely(old_fs == USER_DS)) { if (likely(old_fs == USER_DS)) {
@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
__ctl_load(asce, 7, 7); __ctl_load(asce, 7, 7);
set_cpu_flag(CIF_ASCE_SECONDARY); set_cpu_flag(CIF_ASCE_SECONDARY);
} }
local_irq_restore(flags);
return old_fs; return old_fs;
} }
EXPORT_SYMBOL(enable_sacf_uaccess); EXPORT_SYMBOL(enable_sacf_uaccess);


@ -70,8 +70,20 @@ static void __crst_table_upgrade(void *arg)
{ {
struct mm_struct *mm = arg; struct mm_struct *mm = arg;
if (current->active_mm == mm) /* we must change all active ASCEs to avoid the creation of new TLBs */
set_user_asce(mm); if (current->active_mm == mm) {
S390_lowcore.user_asce = mm->context.asce;
if (current->thread.mm_segment == USER_DS) {
__ctl_load(S390_lowcore.user_asce, 1, 1);
/* Mark user-ASCE present in CR1 */
clear_cpu_flag(CIF_ASCE_PRIMARY);
}
if (current->thread.mm_segment == USER_DS_SACF) {
__ctl_load(S390_lowcore.user_asce, 7, 7);
/* enable_sacf_uaccess does all or nothing */
WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
}
}
__tlb_flush_local(); __tlb_flush_local();
} }


@ -73,7 +73,8 @@ static int hv_cpu_init(unsigned int cpu)
struct page *pg; struct page *pg;
input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
pg = alloc_page(GFP_KERNEL); /* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
pg = alloc_page(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
if (unlikely(!pg)) if (unlikely(!pg))
return -ENOMEM; return -ENOMEM;
*input_arg = page_address(pg); *input_arg = page_address(pg);
@ -254,6 +255,7 @@ static int __init hv_pci_init(void)
static int hv_suspend(void) static int hv_suspend(void)
{ {
union hv_x64_msr_hypercall_contents hypercall_msr; union hv_x64_msr_hypercall_contents hypercall_msr;
int ret;
/* /*
* Reset the hypercall page as it is going to be invalidated * Reset the hypercall page as it is going to be invalidated
@ -270,12 +272,17 @@ static int hv_suspend(void)
hypercall_msr.enable = 0; hypercall_msr.enable = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
return 0; ret = hv_cpu_die(0);
return ret;
} }
static void hv_resume(void) static void hv_resume(void)
{ {
union hv_x64_msr_hypercall_contents hypercall_msr; union hv_x64_msr_hypercall_contents hypercall_msr;
int ret;
ret = hv_cpu_init(0);
WARN_ON(ret);
/* Re-enable the hypercall page */ /* Re-enable the hypercall page */
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@ -288,6 +295,7 @@ static void hv_resume(void)
hv_hypercall_pg_saved = NULL; hv_hypercall_pg_saved = NULL;
} }
/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
static struct syscore_ops hv_syscore_ops = { static struct syscore_ops hv_syscore_ops = {
.suspend = hv_suspend, .suspend = hv_suspend,
.resume = hv_resume, .resume = hv_resume,


@ -35,6 +35,8 @@ typedef int (*hyperv_fill_flush_list_func)(
rdmsrl(HV_X64_MSR_SINT0 + int_num, val) rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
#define hv_set_synint_state(int_num, val) \ #define hv_set_synint_state(int_num, val) \
wrmsrl(HV_X64_MSR_SINT0 + int_num, val) wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
#define hv_recommend_using_aeoi() \
(!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))
#define hv_get_crash_ctl(val) \ #define hv_get_crash_ctl(val) \
rdmsrl(HV_X64_MSR_CRASH_CTL, val) rdmsrl(HV_X64_MSR_CRASH_CTL, val)


@ -496,7 +496,7 @@ int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev)
if (!disk_part_scan_enabled(disk)) if (!disk_part_scan_enabled(disk))
return 0; return 0;
if (bdev->bd_part_count || bdev->bd_openers > 1) if (bdev->bd_part_count)
return -EBUSY; return -EBUSY;
res = invalidate_partition(disk, 0); res = invalidate_partition(disk, 0);
if (res) if (res)


@ -273,13 +273,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
end: end:
if (result) { if (result) {
dev_warn(&device->dev, "Failed to change power state to %s\n", dev_warn(&device->dev, "Failed to change power state to %s\n",
acpi_power_state_string(state)); acpi_power_state_string(target_state));
} else { } else {
device->power.state = target_state; device->power.state = target_state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device [%s] transitioned to %s\n", "Device [%s] transitioned to %s\n",
device->pnp.bus_id, device->pnp.bus_id,
acpi_power_state_string(state))); acpi_power_state_string(target_state)));
} }
return result; return result;


@ -1059,7 +1059,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
update_turbo_state(); update_turbo_state();
if (global.turbo_disabled) { if (global.turbo_disabled) {
pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock); mutex_unlock(&intel_pstate_driver_lock);
return -EPERM; return -EPERM;


@ -963,10 +963,12 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
struct aead_edesc *edesc; struct aead_edesc *edesc;
int ecode = 0; int ecode = 0;
bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = rctx->edesc; edesc = rctx->edesc;
has_bklog = edesc->bklog;
if (err) if (err)
ecode = caam_jr_strstatus(jrdev, err); ecode = caam_jr_strstatus(jrdev, err);
@ -979,7 +981,7 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
* If no backlog flag, the completion of the request is done * If no backlog flag, the completion of the request is done
* by CAAM, not crypto engine. * by CAAM, not crypto engine.
*/ */
if (!edesc->bklog) if (!has_bklog)
aead_request_complete(req, ecode); aead_request_complete(req, ecode);
else else
crypto_finalize_aead_request(jrp->engine, req, ecode); crypto_finalize_aead_request(jrp->engine, req, ecode);
@ -995,10 +997,12 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
int ivsize = crypto_skcipher_ivsize(skcipher); int ivsize = crypto_skcipher_ivsize(skcipher);
int ecode = 0; int ecode = 0;
bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = rctx->edesc; edesc = rctx->edesc;
has_bklog = edesc->bklog;
if (err) if (err)
ecode = caam_jr_strstatus(jrdev, err); ecode = caam_jr_strstatus(jrdev, err);
@ -1028,7 +1032,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
* If no backlog flag, the completion of the request is done * If no backlog flag, the completion of the request is done
* by CAAM, not crypto engine. * by CAAM, not crypto engine.
*/ */
if (!edesc->bklog) if (!has_bklog)
skcipher_request_complete(req, ecode); skcipher_request_complete(req, ecode);
else else
crypto_finalize_skcipher_request(jrp->engine, req, ecode); crypto_finalize_skcipher_request(jrp->engine, req, ecode);
@ -1711,7 +1715,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
if (ivsize || mapped_dst_nents > 1) if (ivsize || mapped_dst_nents > 1)
sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
mapped_dst_nents); mapped_dst_nents - 1 + !!ivsize);
if (sec4_sg_bytes) { if (sec4_sg_bytes) {
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,


@ -583,10 +583,12 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
struct caam_hash_state *state = ahash_request_ctx(req); struct caam_hash_state *state = ahash_request_ctx(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int ecode = 0; int ecode = 0;
bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = state->edesc; edesc = state->edesc;
has_bklog = edesc->bklog;
if (err) if (err)
ecode = caam_jr_strstatus(jrdev, err); ecode = caam_jr_strstatus(jrdev, err);
@ -603,7 +605,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
* If no backlog flag, the completion of the request is done * If no backlog flag, the completion of the request is done
* by CAAM, not crypto engine. * by CAAM, not crypto engine.
*/ */
if (!edesc->bklog) if (!has_bklog)
req->base.complete(&req->base, ecode); req->base.complete(&req->base, ecode);
else else
crypto_finalize_hash_request(jrp->engine, req, ecode); crypto_finalize_hash_request(jrp->engine, req, ecode);
@ -632,10 +634,12 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
struct caam_hash_state *state = ahash_request_ctx(req); struct caam_hash_state *state = ahash_request_ctx(req);
int digestsize = crypto_ahash_digestsize(ahash); int digestsize = crypto_ahash_digestsize(ahash);
int ecode = 0; int ecode = 0;
bool has_bklog;
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = state->edesc; edesc = state->edesc;
has_bklog = edesc->bklog;
if (err) if (err)
ecode = caam_jr_strstatus(jrdev, err); ecode = caam_jr_strstatus(jrdev, err);
@ -663,7 +667,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
* If no backlog flag, the completion of the request is done * If no backlog flag, the completion of the request is done
* by CAAM, not crypto engine. * by CAAM, not crypto engine.
*/ */
if (!edesc->bklog) if (!has_bklog)
req->base.complete(&req->base, ecode); req->base.complete(&req->base, ecode);
else else
crypto_finalize_hash_request(jrp->engine, req, ecode); crypto_finalize_hash_request(jrp->engine, req, ecode);


@ -121,11 +121,13 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct rsa_edesc *edesc; struct rsa_edesc *edesc;
int ecode = 0; int ecode = 0;
bool has_bklog;
if (err) if (err)
ecode = caam_jr_strstatus(dev, err); ecode = caam_jr_strstatus(dev, err);
edesc = req_ctx->edesc; edesc = req_ctx->edesc;
has_bklog = edesc->bklog;
rsa_pub_unmap(dev, edesc, req); rsa_pub_unmap(dev, edesc, req);
rsa_io_unmap(dev, edesc, req); rsa_io_unmap(dev, edesc, req);
@ -135,7 +137,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
* If no backlog flag, the completion of the request is done * If no backlog flag, the completion of the request is done
* by CAAM, not crypto engine. * by CAAM, not crypto engine.
*/ */
if (!edesc->bklog) if (!has_bklog)
akcipher_request_complete(req, ecode); akcipher_request_complete(req, ecode);
else else
crypto_finalize_akcipher_request(jrp->engine, req, ecode); crypto_finalize_akcipher_request(jrp->engine, req, ecode);
@ -152,11 +154,13 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc; struct rsa_edesc *edesc;
int ecode = 0; int ecode = 0;
bool has_bklog;
if (err) if (err)
ecode = caam_jr_strstatus(dev, err); ecode = caam_jr_strstatus(dev, err);
edesc = req_ctx->edesc; edesc = req_ctx->edesc;
has_bklog = edesc->bklog;
switch (key->priv_form) { switch (key->priv_form) {
case FORM1: case FORM1:
@ -176,7 +180,7 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
* If no backlog flag, the completion of the request is done * If no backlog flag, the completion of the request is done
* by CAAM, not crypto engine. * by CAAM, not crypto engine.
*/ */
if (!edesc->bklog) if (!has_bklog)
akcipher_request_complete(req, ecode); akcipher_request_complete(req, ecode);
else else
crypto_finalize_akcipher_request(jrp->engine, req, ecode); crypto_finalize_akcipher_request(jrp->engine, req, ecode);
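
All three CAAM hunks above (caamalg, caamhash, caampkc) apply the same use-after-free fix: the descriptor is torn down before the final completion decision, so the bklog flag is copied into a local has_bklog first, and the later branch uses the copy instead of dereferencing freed memory. A standalone sketch of that "capture the flag before anything can free the descriptor" pattern, with invented names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct edesc {
	bool bklog;              /* was the request backlogged? */
	/* ... DMA mappings, scatter/gather tables, ... */
};

/* Tear-down path that frees the descriptor before the request completes. */
static void unmap_and_free(struct edesc *edesc)
{
	free(edesc);
}

static void crypt_done(struct edesc *edesc)
{
	/* Copy the flag out *before* anything that can free edesc. */
	bool has_bklog = edesc->bklog;

	unmap_and_free(edesc);   /* edesc is dangling from here on */

	/* Branch on the saved copy, never on edesc->bklog. */
	if (!has_bklog)
		puts("completed directly (no backlog)");
	else
		puts("completed via the crypto engine backlog");
}

int main(void)
{
	struct edesc *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->bklog = true;
	crypt_done(e);
	return 0;
}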


@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file,
return ret; return ret;
case DMA_BUF_SET_NAME: case DMA_BUF_SET_NAME_A:
case DMA_BUF_SET_NAME_B:
return dma_buf_set_name(dmabuf, (const char __user *)arg); return dma_buf_set_name(dmabuf, (const char __user *)arg);
default: default:
@ -655,8 +656,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* calls attach() of dma_buf_ops to allow device-specific attach functionality * calls attach() of dma_buf_ops to allow device-specific attach functionality
* @dmabuf: [in] buffer to attach device to. * @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached. * @dev: [in] device to be attached.
* @importer_ops [in] importer operations for the attachment * @importer_ops: [in] importer operations for the attachment
* @importer_priv [in] importer private pointer for the attachment * @importer_priv: [in] importer private pointer for the attachment
* *
* Returns struct dma_buf_attachment pointer for this attachment. Attachments * Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach(). * must be cleaned up by calling dma_buf_detach().


@ -241,7 +241,8 @@ config FSL_RAID
config HISI_DMA config HISI_DMA
tristate "HiSilicon DMA Engine support" tristate "HiSilicon DMA Engine support"
depends on ARM64 || (COMPILE_TEST && PCI_MSI) depends on ARM64 || COMPILE_TEST
depends on PCI_MSI
select DMA_ENGINE select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS select DMA_VIRTUAL_CHANNELS
help help


@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev)
struct dma_chan_dev *chan_dev; struct dma_chan_dev *chan_dev;
chan_dev = container_of(dev, typeof(*chan_dev), device); chan_dev = container_of(dev, typeof(*chan_dev), device);
if (atomic_dec_and_test(chan_dev->idr_ref)) {
ida_free(&dma_ida, chan_dev->dev_id);
kfree(chan_dev->idr_ref);
}
kfree(chan_dev); kfree(chan_dev);
} }
@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
} }
static int __dma_async_device_channel_register(struct dma_device *device, static int __dma_async_device_channel_register(struct dma_device *device,
struct dma_chan *chan, struct dma_chan *chan)
int chan_id)
{ {
int rc = 0; int rc = 0;
int chancnt = device->chancnt;
atomic_t *idr_ref;
struct dma_chan *tchan;
tchan = list_first_entry_or_null(&device->channels,
struct dma_chan, device_node);
if (!tchan)
return -ENODEV;
if (tchan->dev) {
idr_ref = tchan->dev->idr_ref;
} else {
idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
if (!idr_ref)
return -ENOMEM;
atomic_set(idr_ref, 0);
}
chan->local = alloc_percpu(typeof(*chan->local)); chan->local = alloc_percpu(typeof(*chan->local));
if (!chan->local) if (!chan->local)
@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
* When the chan_id is a negative value, we are dynamically adding * When the chan_id is a negative value, we are dynamically adding
* the channel. Otherwise we are static enumerating. * the channel. Otherwise we are static enumerating.
*/ */
chan->chan_id = chan_id < 0 ? chancnt : chan_id; mutex_lock(&device->chan_mutex);
chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
mutex_unlock(&device->chan_mutex);
if (chan->chan_id < 0) {
pr_err("%s: unable to alloc ida for chan: %d\n",
__func__, chan->chan_id);
goto err_out;
}
chan->dev->device.class = &dma_devclass; chan->dev->device.class = &dma_devclass;
chan->dev->device.parent = device->dev; chan->dev->device.parent = device->dev;
chan->dev->chan = chan; chan->dev->chan = chan;
chan->dev->idr_ref = idr_ref;
chan->dev->dev_id = device->dev_id; chan->dev->dev_id = device->dev_id;
atomic_inc(idr_ref);
dev_set_name(&chan->dev->device, "dma%dchan%d", dev_set_name(&chan->dev->device, "dma%dchan%d",
device->dev_id, chan->chan_id); device->dev_id, chan->chan_id);
rc = device_register(&chan->dev->device); rc = device_register(&chan->dev->device);
if (rc) if (rc)
goto err_out; goto err_out_ida;
chan->client_count = 0; chan->client_count = 0;
device->chancnt = chan->chan_id + 1; device->chancnt++;
return 0; return 0;
err_out_ida:
mutex_lock(&device->chan_mutex);
ida_free(&device->chan_ida, chan->chan_id);
mutex_unlock(&device->chan_mutex);
err_out: err_out:
free_percpu(chan->local); free_percpu(chan->local);
kfree(chan->dev); kfree(chan->dev);
if (atomic_dec_return(idr_ref) == 0)
kfree(idr_ref);
return rc; return rc;
} }
@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
{ {
int rc; int rc;
rc = __dma_async_device_channel_register(device, chan, -1); rc = __dma_async_device_channel_register(device, chan);
if (rc < 0) if (rc < 0)
return rc; return rc;
@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
device->chancnt--; device->chancnt--;
chan->dev->chan = NULL; chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex); mutex_unlock(&dma_list_mutex);
mutex_lock(&device->chan_mutex);
ida_free(&device->chan_ida, chan->chan_id);
mutex_unlock(&device->chan_mutex);
device_unregister(&chan->dev->device); device_unregister(&chan->dev->device);
free_percpu(chan->local); free_percpu(chan->local);
} }
@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
*/ */
int dma_async_device_register(struct dma_device *device) int dma_async_device_register(struct dma_device *device)
{ {
int rc, i = 0; int rc;
struct dma_chan* chan; struct dma_chan* chan;
if (!device) if (!device)
@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
if (rc != 0) if (rc != 0)
return rc; return rc;
mutex_init(&device->chan_mutex);
ida_init(&device->chan_ida);
/* represent channels in sysfs. Probably want devs too */ /* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) { list_for_each_entry(chan, &device->channels, device_node) {
rc = __dma_async_device_channel_register(device, chan, i++); rc = __dma_async_device_channel_register(device, chan);
if (rc < 0) if (rc < 0)
goto err_out; goto err_out;
} }
@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
*/ */
dma_cap_set(DMA_PRIVATE, device->cap_mask); dma_cap_set(DMA_PRIVATE, device->cap_mask);
dma_channel_rebalance(); dma_channel_rebalance();
ida_free(&dma_ida, device->dev_id);
dma_device_put(device); dma_device_put(device);
mutex_unlock(&dma_list_mutex); mutex_unlock(&dma_list_mutex);
} }
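
The dmaengine rework above drops the shared idr_ref refcount and gives each dma_device its own chan_ida guarded by chan_mutex: a channel ID is allocated under the mutex at register time and released both on the register error path and in __dma_async_device_channel_unregister(). A standalone analogue of that per-device, mutex-guarded ID allocator with a symmetric free on every exit path (the bitmap below is a toy stand-in for the kernel's IDA):

#include <pthread.h>
#include <stdio.h>

#define MAX_CHANS 32

struct dma_device_like {
	pthread_mutex_t chan_mutex;
	unsigned int chan_bitmap;        /* toy stand-in for struct ida */
};

static int chan_id_alloc(struct dma_device_like *dev)
{
	int id = -1;

	pthread_mutex_lock(&dev->chan_mutex);
	for (int i = 0; i < MAX_CHANS; i++) {
		if (!(dev->chan_bitmap & (1u << i))) {
			dev->chan_bitmap |= 1u << i;
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&dev->chan_mutex);
	return id;
}

static void chan_id_free(struct dma_device_like *dev, int id)
{
	pthread_mutex_lock(&dev->chan_mutex);
	dev->chan_bitmap &= ~(1u << id);
	pthread_mutex_unlock(&dev->chan_mutex);
}

static int channel_register(struct dma_device_like *dev)
{
	int id = chan_id_alloc(dev);

	if (id < 0)
		return -1;
	printf("registered chan %d\n", id);
	/* Any later failure must call chan_id_free(dev, id) before returning,
	 * mirroring the err_out_ida label in the hunk above. */
	return id;
}

int main(void)
{
	struct dma_device_like dev = { PTHREAD_MUTEX_INITIALIZER, 0 };
	int id = channel_register(&dev);

	if (id >= 0)
		chan_id_free(&dev, id);  /* analogue of channel_unregister() */
	return 0;
}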


@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
struct dmatest_thread *thread; struct dmatest_thread *thread;
list_for_each_entry(thread, &dtc->threads, node) { list_for_each_entry(thread, &dtc->threads, node) {
if (!thread->done) if (!thread->done && !thread->pending)
return true; return true;
} }
} }
@ -662,8 +662,8 @@ static int dmatest_func(void *data)
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
ktime = ktime_get(); ktime = ktime_get();
while (!kthread_should_stop() while (!(kthread_should_stop() ||
&& !(params->iterations && total_tests >= params->iterations)) { (params->iterations && total_tests >= params->iterations))) {
struct dma_async_tx_descriptor *tx = NULL; struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um; struct dmaengine_unmap_data *um;
dma_addr_t *dsts; dma_addr_t *dsts;


@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
size); size);
tdmac->desc_arr = NULL; tdmac->desc_arr = NULL;
if (tdmac->status == DMA_ERROR)
tdmac->status = DMA_COMPLETE;
return; return;
} }
@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc) if (!desc)
goto err_out; goto err_out;
mmp_tdma_config_write(chan, direction, &tdmac->slave_config); if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
goto err_out;
while (buf < buf_len) { while (buf < buf_len) {
desc = &tdmac->desc_arr[i]; desc = &tdmac->desc_arr[i];


@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
} }
pci_set_master(pdev); pci_set_master(pdev);
pd->dma.dev = &pdev->dev;
err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
if (err) { if (err) {
@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_free_irq; goto err_free_irq;
} }
pd->dma.dev = &pdev->dev;
INIT_LIST_HEAD(&pd->dma.channels); INIT_LIST_HEAD(&pd->dma.channels);


@ -816,6 +816,13 @@ static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
static void tegra_dma_synchronize(struct dma_chan *dc) static void tegra_dma_synchronize(struct dma_chan *dc)
{ {
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
int err;
err = pm_runtime_get_sync(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
return;
}
/* /*
* CPU, which handles interrupt, could be busy in * CPU, which handles interrupt, could be busy in
@ -825,6 +832,8 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc)); wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
tasklet_kill(&tdc->tasklet); tasklet_kill(&tdc->tasklet);
pm_runtime_put(tdc->tdma->dev);
} }
static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc, static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,


@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
soc_ep_map = &j721e_ep_map; soc_ep_map = &j721e_ep_map;
} else { } else {
pr_err("PSIL: No compatible machine found for map\n"); pr_err("PSIL: No compatible machine found for map\n");
mutex_unlock(&ep_map_mutex);
return ERR_PTR(-ENOTSUPP); return ERR_PTR(-ENOTSUPP);
} }
pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name); pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
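
This one-liner releases ep_map_mutex on the error path before returning ERR_PTR(-ENOTSUPP); without it the early return leaves the mutex held and every subsequent lookup would block forever. A small standalone sketch of the same "unlock on every return path" rule, using a pthread mutex as a stand-in:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *soc_map;          /* set once a compatible SoC is found */

static int get_ep_config(int have_match)
{
	pthread_mutex_lock(&map_mutex);

	if (!soc_map) {
		if (have_match) {
			soc_map = "j721e";
		} else {
			/* The error path must drop the lock before returning,
			 * otherwise the next caller deadlocks on map_mutex. */
			pthread_mutex_unlock(&map_mutex);
			return -ENOTSUP;
		}
	}

	pthread_mutex_unlock(&map_mutex);
	printf("using map for %s\n", soc_map);
	return 0;
}

int main(void)
{
	if (get_ep_config(0) == -ENOTSUP)
		puts("no compatible machine, lock released cleanly");
	get_ep_config(1);             /* would hang here if the lock had leaked */
	return 0;
}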


@ -1230,16 +1230,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
return ret; return ret;
spin_lock_irqsave(&chan->lock, flags); spin_lock_irqsave(&chan->lock, flags);
if (!list_empty(&chan->active_list)) {
desc = list_last_entry(&chan->active_list, desc = list_last_entry(&chan->active_list,
struct xilinx_dma_tx_descriptor, node); struct xilinx_dma_tx_descriptor, node);
/* /*
* VDMA and simple mode do not support residue reporting, so the * VDMA and simple mode do not support residue reporting, so the
* residue field will always be 0. * residue field will always be 0.
*/ */
if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
residue = xilinx_dma_get_residue(chan, desc); residue = xilinx_dma_get_residue(chan, desc);
}
spin_unlock_irqrestore(&chan->lock, flags); spin_unlock_irqrestore(&chan->lock, flags);
dma_set_residue(txstate, residue); dma_set_residue(txstate, residue);


@ -85,9 +85,10 @@
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
* - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
* - 3.36.0 - Allow reading more status registers on si/cik * - 3.36.0 - Allow reading more status registers on si/cik
* - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
*/ */
#define KMS_DRIVER_MAJOR 3 #define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 36 #define KMS_DRIVER_MINOR 37
#define KMS_DRIVER_PATCHLEVEL 0 #define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0; int amdgpu_vram_limit = 0;


@ -73,6 +73,22 @@
#define SDMA_OP_AQL_COPY 0 #define SDMA_OP_AQL_COPY 0
#define SDMA_OP_AQL_BARRIER_OR 0 #define SDMA_OP_AQL_BARRIER_OR 0
#define SDMA_GCR_RANGE_IS_PA (1 << 18)
#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16)
#define SDMA_GCR_GL2_WB (1 << 15)
#define SDMA_GCR_GL2_INV (1 << 14)
#define SDMA_GCR_GL2_DISCARD (1 << 13)
#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11)
#define SDMA_GCR_GL2_US (1 << 10)
#define SDMA_GCR_GL1_INV (1 << 9)
#define SDMA_GCR_GLV_INV (1 << 8)
#define SDMA_GCR_GLK_INV (1 << 7)
#define SDMA_GCR_GLK_WB (1 << 6)
#define SDMA_GCR_GLM_INV (1 << 5)
#define SDMA_GCR_GLM_WB (1 << 4)
#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2)
#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0)
/*define for op field*/ /*define for op field*/
#define SDMA_PKT_HEADER_op_offset 0 #define SDMA_PKT_HEADER_op_offset 0
#define SDMA_PKT_HEADER_op_mask 0x000000FF #define SDMA_PKT_HEADER_op_mask 0x000000FF


@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job); unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
/* Invalidate L2, because if we don't do it, we might get stale cache
* lines from previous IBs.
*/
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
SDMA_GCR_GL2_WB |
SDMA_GCR_GLM_INV |
SDMA_GCR_GLM_WB) << 16);
amdgpu_ring_write(ring, 0xffffff80);
amdgpu_ring_write(ring, 0xffff);
/* An IB packet must end on a 8 DW boundary--the next dword /* An IB packet must end on a 8 DW boundary--the next dword
* must be on a 8-dword boundary. Our IB packet below is 6 * must be on a 8-dword boundary. Our IB packet below is 6
* dwords long, thus add x number of NOPs, such that, in * dwords long, thus add x number of NOPs, such that, in
@ -1595,7 +1607,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib, .emit_ib = sdma_v5_0_ring_emit_ib,
.emit_fence = sdma_v5_0_ring_emit_fence, .emit_fence = sdma_v5_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync, .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
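
The five dwords emitted for the GCR request are what the emit_ib_size bump from "7 + 6" to "5 + 7 + 6" accounts for. Using the SDMA_GCR_* bit definitions added in the header above, the flag dword works out as below; this is only a standalone re-computation of the value the ring writes, not driver code:

#include <stdint.h>
#include <stdio.h>

#define SDMA_GCR_GL2_WB   (1 << 15)
#define SDMA_GCR_GL2_INV  (1 << 14)
#define SDMA_GCR_GLM_INV  (1 << 5)
#define SDMA_GCR_GLM_WB   (1 << 4)

int main(void)
{
	/* Third dword of the GCR_REQ packet: the cache-control flags are
	 * shifted into the upper half, mirroring the "<< 16" in
	 * sdma_v5_0_ring_emit_ib() above. */
	uint32_t w = (uint32_t)(SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
				SDMA_GCR_GLM_INV | SDMA_GCR_GLM_WB) << 16;

	printf("GCR flag dword = 0x%08x\n", w);   /* prints 0xc0300000 */
	return 0;
}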


@ -3340,7 +3340,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
const union dc_tiling_info *tiling_info, const union dc_tiling_info *tiling_info,
const uint64_t info, const uint64_t info,
struct dc_plane_dcc_param *dcc, struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address) struct dc_plane_address *address,
bool force_disable_dcc)
{ {
struct dc *dc = adev->dm.dc; struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input; struct dc_dcc_surface_param input;
@ -3352,6 +3353,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
memset(&input, 0, sizeof(input)); memset(&input, 0, sizeof(input));
memset(&output, 0, sizeof(output)); memset(&output, 0, sizeof(output));
if (force_disable_dcc)
return 0;
if (!offset) if (!offset)
return 0; return 0;
@ -3401,7 +3405,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
union dc_tiling_info *tiling_info, union dc_tiling_info *tiling_info,
struct plane_size *plane_size, struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc, struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address) struct dc_plane_address *address,
bool force_disable_dcc)
{ {
const struct drm_framebuffer *fb = &afb->base; const struct drm_framebuffer *fb = &afb->base;
int ret; int ret;
@ -3507,7 +3512,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
ret = fill_plane_dcc_attributes(adev, afb, format, rotation, ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
plane_size, tiling_info, plane_size, tiling_info,
tiling_flags, dcc, address); tiling_flags, dcc, address,
force_disable_dcc);
if (ret) if (ret)
return ret; return ret;
} }
@ -3599,7 +3605,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state, const struct drm_plane_state *plane_state,
const uint64_t tiling_flags, const uint64_t tiling_flags,
struct dc_plane_info *plane_info, struct dc_plane_info *plane_info,
struct dc_plane_address *address) struct dc_plane_address *address,
bool force_disable_dcc)
{ {
const struct drm_framebuffer *fb = plane_state->fb; const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb = const struct amdgpu_framebuffer *afb =
@ -3681,7 +3688,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->rotation, tiling_flags, plane_info->rotation, tiling_flags,
&plane_info->tiling_info, &plane_info->tiling_info,
&plane_info->plane_size, &plane_info->plane_size,
&plane_info->dcc, address); &plane_info->dcc, address,
force_disable_dcc);
if (ret) if (ret)
return ret; return ret;
@ -3704,6 +3712,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_info plane_info; struct dc_plane_info plane_info;
uint64_t tiling_flags; uint64_t tiling_flags;
int ret; int ret;
bool force_disable_dcc = false;
ret = fill_dc_scaling_info(plane_state, &scaling_info); ret = fill_dc_scaling_info(plane_state, &scaling_info);
if (ret) if (ret)
@ -3718,9 +3727,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
if (ret) if (ret)
return ret; return ret;
force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
&plane_info, &plane_info,
&dc_plane_state->address); &dc_plane_state->address,
force_disable_dcc);
if (ret) if (ret)
return ret; return ret;
@ -5342,6 +5353,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
uint64_t tiling_flags; uint64_t tiling_flags;
uint32_t domain; uint32_t domain;
int r; int r;
bool force_disable_dcc = false;
dm_plane_state_old = to_dm_plane_state(plane->state); dm_plane_state_old = to_dm_plane_state(plane->state);
dm_plane_state_new = to_dm_plane_state(new_state); dm_plane_state_new = to_dm_plane_state(new_state);
@ -5400,11 +5412,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
fill_plane_buffer_attributes( fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation, adev, afb, plane_state->format, plane_state->rotation,
tiling_flags, &plane_state->tiling_info, tiling_flags, &plane_state->tiling_info,
&plane_state->plane_size, &plane_state->dcc, &plane_state->plane_size, &plane_state->dcc,
&plane_state->address); &plane_state->address,
force_disable_dcc);
} }
return 0; return 0;
@ -6676,7 +6690,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_plane_info_and_addr( fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags, dm->adev, new_plane_state, tiling_flags,
&bundle->plane_infos[planes_count], &bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address); &bundle->flip_addrs[planes_count].address,
false);
DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
bundle->plane_infos[planes_count].dcc.enable);
bundle->surface_updates[planes_count].plane_info = bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count]; &bundle->plane_infos[planes_count];
@ -8096,7 +8115,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
ret = fill_dc_plane_info_and_addr( ret = fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags, dm->adev, new_plane_state, tiling_flags,
plane_info, plane_info,
&flip_addr->address); &flip_addr->address,
false);
if (ret) if (ret)
goto cleanup; goto cleanup;


@ -2908,6 +2908,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
sizeof(hpd_irq_dpcd_data), sizeof(hpd_irq_dpcd_data),
"Status: "); "Status: ");
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
link->dc->hwss.blank_stream(pipe_ctx);
}
for (i = 0; i < MAX_PIPES; i++) { for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
@ -2927,6 +2933,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_link_reallocate_mst_payload(link); dc_link_reallocate_mst_payload(link);
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
}
status = false; status = false;
if (out_link_loss) if (out_link_loss)
*out_link_loss = true; *out_link_loss = true;
@ -4227,6 +4239,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
void dpcd_set_source_specific_data(struct dc_link *link) void dpcd_set_source_specific_data(struct dc_link *link)
{ {
const uint32_t post_oui_delay = 30; // 30ms const uint32_t post_oui_delay = 30; // 30ms
uint8_t dspc = 0;
enum dc_status ret = DC_ERROR_UNEXPECTED;
ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
sizeof(dspc));
if (ret != DC_OK) {
DC_LOG_ERROR("Error in DP aux read transaction,"
" not writing source specific data\n");
return;
}
/* Return if OUI unsupported */
if (!(dspc & DP_OUI_SUPPORT))
return;
if (!link->dc->vendor_signature.is_valid) { if (!link->dc->vendor_signature.is_valid) {
struct dpcd_amd_signature amd_signature; struct dpcd_amd_signature amd_signature;


@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream); return dc_stream_get_status_from_state(dc->current_state, stream);
} }
static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
#if defined(CONFIG_DRM_AMD_DC_DCN)
unsigned int vupdate_line;
unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int us_per_line;
if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
return;
if (vpos >= vupdate_line)
return;
us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
lines_to_vupdate = vupdate_line - vpos;
us_to_vupdate = lines_to_vupdate * us_per_line;
/* 70 us is a conservative estimate of cursor update time*/
if (us_to_vupdate < 70)
udelay(us_to_vupdate);
}
#endif
}
/** /**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) { if (!pipe_to_program) {
pipe_to_program = pipe_ctx; pipe_to_program = pipe_ctx;
dc->hwss.cursor_lock(dc, pipe_to_program, true);
delay_cursor_until_vupdate(pipe_ctx, dc);
dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
} }
dc->hwss.set_cursor_attribute(pipe_ctx); dc->hwss.set_cursor_attribute(pipe_ctx);
@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
} }
if (pipe_to_program) if (pipe_to_program)
dc->hwss.pipe_control_lock(dc, pipe_to_program, false); dc->hwss.cursor_lock(dc, pipe_to_program, false);
return true; return true;
} }
@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
if (!pipe_to_program) { if (!pipe_to_program) {
pipe_to_program = pipe_ctx; pipe_to_program = pipe_ctx;
dc->hwss.cursor_lock(dc, pipe_to_program, true);
delay_cursor_until_vupdate(pipe_ctx, dc);
dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
} }
dc->hwss.set_cursor_position(pipe_ctx); dc->hwss.set_cursor_position(pipe_ctx);
} }
if (pipe_to_program) if (pipe_to_program)
dc->hwss.pipe_control_lock(dc, pipe_to_program, false); dc->hwss.cursor_lock(dc, pipe_to_program, false);
return true; return true;
} }


@ -2757,6 +2757,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_plane = dce110_power_down_fe, .disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock, .pipe_control_lock = dce_pipe_control_lock,
.interdependent_update_lock = NULL, .interdependent_update_lock = NULL,
.cursor_lock = dce_pipe_control_lock,
.prepare_bandwidth = dce110_prepare_bandwidth, .prepare_bandwidth = dce110_prepare_bandwidth,
.optimize_bandwidth = dce110_optimize_bandwidth, .optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr, .set_drr = set_drr,


@ -1625,6 +1625,16 @@ void dcn10_pipe_control_lock(
hws->funcs.verify_allow_pstate_change_high(dc); hws->funcs.verify_allow_pstate_change_high(dc);
} }
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
if (!pipe || pipe->top_pipe)
return;
dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
pipe->stream_res.opp->inst, lock);
}
static bool wait_for_reset_trigger_to_occur( static bool wait_for_reset_trigger_to_occur(
struct dc_context *dc_ctx, struct dc_context *dc_ctx,
struct timing_generator *tg) struct timing_generator *tg)


@ -49,6 +49,7 @@ void dcn10_pipe_control_lock(
struct dc *dc, struct dc *dc,
struct pipe_ctx *pipe, struct pipe_ctx *pipe,
bool lock); bool lock);
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
void dcn10_blank_pixel_data( void dcn10_blank_pixel_data(
struct dc *dc, struct dc *dc,
struct pipe_ctx *pipe_ctx, struct pipe_ctx *pipe_ctx,


@ -50,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_audio_stream = dce110_disable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn10_disable_plane, .disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn10_pipe_control_lock, .pipe_control_lock = dcn10_pipe_control_lock,
.cursor_lock = dcn10_cursor_lock,
.interdependent_update_lock = dcn10_lock_all_pipes, .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn10_prepare_bandwidth, .prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth, .optimize_bandwidth = dcn10_optimize_bandwidth,


@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id); REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
/* Configure VUPDATE lock set for this MPCC to map to the OPP */
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
/* update mpc tree mux setting */ /* update mpc tree mux setting */
if (tree->opp_list == insert_above_mpcc) { if (tree->opp_list == insert_above_mpcc) {
/* insert the toppest mpcc */ /* insert the toppest mpcc */
@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
/* mark this mpcc as not in use */ /* mark this mpcc as not in use */
mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id); mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
} }
} }
@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
} }
@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
MPCC_BUSY, &s->busy); MPCC_BUSY, &s->busy);
} }
void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
}
static const struct mpc_funcs dcn10_mpc_funcs = { static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state, .read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane, .insert_plane = mpc1_insert_plane,
@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.update_blending = mpc1_update_blending, .update_blending = mpc1_update_blending,
.cursor_lock = mpc1_cursor_lock,
.set_denorm = NULL, .set_denorm = NULL,
.set_denorm_clamp = NULL, .set_denorm_clamp = NULL,
.set_output_csc = NULL, .set_output_csc = NULL,


@ -39,11 +39,12 @@
SRII(MPCC_BG_G_Y, MPCC, inst),\ SRII(MPCC_BG_G_Y, MPCC, inst),\
SRII(MPCC_BG_R_CR, MPCC, inst),\ SRII(MPCC_BG_R_CR, MPCC, inst),\
SRII(MPCC_BG_B_CB, MPCC, inst),\ SRII(MPCC_BG_B_CB, MPCC, inst),\
SRII(MPCC_BG_B_CB, MPCC, inst),\ SRII(MPCC_SM_CONTROL, MPCC, inst),\
SRII(MPCC_SM_CONTROL, MPCC, inst) SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)
#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \ #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
SRII(MUX, MPC_OUT, inst) SRII(MUX, MPC_OUT, inst),\
VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)
#define MPC_COMMON_REG_VARIABLE_LIST \ #define MPC_COMMON_REG_VARIABLE_LIST \
uint32_t MPCC_TOP_SEL[MAX_MPCC]; \ uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
@ -55,7 +56,9 @@
uint32_t MPCC_BG_R_CR[MAX_MPCC]; \ uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
uint32_t MPCC_BG_B_CB[MAX_MPCC]; \ uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \ uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
uint32_t MUX[MAX_OPP]; uint32_t MUX[MAX_OPP]; \
uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
uint32_t CUR[MAX_OPP];
#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\ SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
@ -78,7 +81,8 @@
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh) SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)
#define MPC_REG_FIELD_LIST(type) \ #define MPC_REG_FIELD_LIST(type) \
type MPCC_TOP_SEL;\ type MPCC_TOP_SEL;\
@ -101,7 +105,9 @@
type MPCC_SM_FIELD_ALT;\ type MPCC_SM_FIELD_ALT;\
type MPCC_SM_FORCE_NEXT_FRAME_POL;\ type MPCC_SM_FORCE_NEXT_FRAME_POL;\
type MPCC_SM_FORCE_NEXT_TOP_POL;\ type MPCC_SM_FORCE_NEXT_TOP_POL;\
type MPC_OUT_MUX; type MPC_OUT_MUX;\
type MPCC_UPDATE_LOCK_SEL;\
type CUR_VUPDATE_LOCK_SET;
struct dcn_mpc_registers { struct dcn_mpc_registers {
MPC_COMMON_REG_VARIABLE_LIST MPC_COMMON_REG_VARIABLE_LIST
@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
int mpcc_inst, int mpcc_inst,
struct mpcc_state *s); struct mpcc_state *s);
void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
#endif #endif


@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id {
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name mm ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
mm ## reg_name ## 0 ## _ ## block ## id
/* set field/register/bitfield name */
#define SFRB(field_name, reg_name, bitfield, post_fix)\
.field_name = reg_name ## __ ## bitfield ## post_fix
/* NBIO */ /* NBIO */
#define NBIO_BASE_INNER(seg) \ #define NBIO_BASE_INNER(seg) \
NBIF_BASE__INST0_SEG ## seg NBIF_BASE__INST0_SEG ## seg
@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = {
}; };
static const struct dcn_mpc_shift mpc_shift = { static const struct dcn_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
}; };
static const struct dcn_mpc_mask mpc_mask = { static const struct dcn_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK), MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
}; };
#define tg_regs(id)\ #define tg_regs(id)\


@ -2294,7 +2294,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
REG_WRITE(REFCLK_CNTL, 0); if (REG(REFCLK_CNTL))
REG_WRITE(REFCLK_CNTL, 0);
// //


@ -52,6 +52,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
 	.disable_plane = dcn20_disable_plane,
 	.pipe_control_lock = dcn20_pipe_control_lock,
 	.interdependent_update_lock = dcn10_lock_all_pipes,
+	.cursor_lock = dcn10_cursor_lock,
 	.prepare_bandwidth = dcn20_prepare_bandwidth,
 	.optimize_bandwidth = dcn20_optimize_bandwidth,
 	.update_bandwidth = dcn20_update_bandwidth,


@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
 	.mpc_init = mpc1_mpc_init,
 	.mpc_init_single_inst = mpc1_mpc_init_single_inst,
 	.update_blending = mpc2_update_blending,
+	.cursor_lock = mpc1_cursor_lock,
 	.get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
 	.wait_for_idle = mpc2_assert_idle_mpcc,
 	.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,


@ -179,7 +179,8 @@
 	SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
 	SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
 	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
-	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+	SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
 
 /*
  * DCN2 MPC_OCSC debug status register:


@ -508,6 +508,10 @@ enum dcn20_clk_src_array_id {
 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 					mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
 	NBIO_BASE__INST0_SEG ## seg


@ -53,6 +53,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
 	.disable_plane = dcn20_disable_plane,
 	.pipe_control_lock = dcn20_pipe_control_lock,
 	.interdependent_update_lock = dcn10_lock_all_pipes,
+	.cursor_lock = dcn10_cursor_lock,
 	.prepare_bandwidth = dcn20_prepare_bandwidth,
 	.optimize_bandwidth = dcn20_optimize_bandwidth,
 	.update_bandwidth = dcn20_update_bandwidth,


@ -284,7 +284,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.dram_channel_width_bytes = 4,
 	.fabric_datapath_to_dcn_data_return_bytes = 32,
 	.dcn_downspread_percent = 0.5,
-	.downspread_percent = 0.5,
+	.downspread_percent = 0.38,
 	.dram_page_open_time_ns = 50.0,
 	.dram_rw_turnaround_time_ns = 17.5,
 	.dram_return_buffer_per_channel_bytes = 8192,
@ -340,6 +340,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 					mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
 	NBIF0_BASE__INST0_SEG ## seg
@ -1374,64 +1378,49 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
{ {
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table; struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, j, k; struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
int closest_clk_lvl; unsigned int i, j, closest_clk_lvl;
// Default clock levels are used for diags, which may lead to overclocking. // Default clock levels are used for diags, which may lead to overclocking.
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) { if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count; dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels; dcn2_1_soc.num_chans = bw_params->num_channels;
/* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */ ASSERT(clk_table->num_entries);
dcn2_1_soc.clock_limits[0].state = 0; for (i = 0; i < clk_table->num_entries; i++) {
dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz; /* loop backwards*/
dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz; for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
/*
* Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
* as indicator
*/
closest_clk_lvl = -1;
/* index currently being filled */
k = 1;
for (i = 1; i < clk_table->num_entries; i++) {
/* loop backwards, skip duplicate state*/
for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j; closest_clk_lvl = j;
break; break;
} }
} }
/* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/ clock_limits[i].state = i;
if (closest_clk_lvl != -1) { clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
dcn2_1_soc.clock_limits[k].state = i; clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz; clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
k++; }
} for (i = 0; i < clk_table->num_entries; i++)
dcn2_1_soc.clock_limits[i] = clock_limits[i];
if (clk_table->num_entries) {
dcn2_1_soc.num_states = clk_table->num_entries;
/* duplicate last level */
dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
} }
dcn2_1_soc.num_states = k;
} }
/* duplicate last level */
dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
} }


@ -210,6 +210,22 @@ struct mpc_funcs {
struct mpcc_blnd_cfg *blnd_cfg, struct mpcc_blnd_cfg *blnd_cfg,
int mpcc_id); int mpcc_id);
/*
* Lock cursor updates for the specified OPP.
* OPP defines the set of MPCC that are locked together for cursor.
*
* Parameters:
* [in] mpc - MPC context.
* [in] opp_id - The OPP to lock cursor updates on
* [in] lock - lock/unlock the OPP
*
* Return: void
*/
void (*cursor_lock)(
struct mpc *mpc,
int opp_id,
bool lock);
struct mpcc* (*get_mpcc_for_dpp)( struct mpcc* (*get_mpcc_for_dpp)(
struct mpc_tree *tree, struct mpc_tree *tree,
int dpp_id); int dpp_id);


@ -86,6 +86,7 @@ struct hw_sequencer_funcs {
struct dc_state *context, bool lock); struct dc_state *context, bool lock);
void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx, void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
bool flip_immediate); bool flip_immediate);
void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);
/* Timing Related */ /* Timing Related */
void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,


@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
if (!hwmgr) if (!hwmgr)
return -EINVAL; return -EINVAL;
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability) if (!(hwmgr->not_vf && amdgpu_dpm) ||
!hwmgr->hwmgr_func->get_asic_baco_capability)
return 0; return 0;
mutex_lock(&hwmgr->smu_lock); mutex_lock(&hwmgr->smu_lock);
@ -1452,8 +1453,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
if (!hwmgr) if (!hwmgr)
return -EINVAL; return -EINVAL;
if (!(hwmgr->not_vf && amdgpu_dpm) || if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
!hwmgr->hwmgr_func->get_asic_baco_state)
return 0; return 0;
mutex_lock(&hwmgr->smu_lock); mutex_lock(&hwmgr->smu_lock);
@ -1470,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
if (!hwmgr) if (!hwmgr)
return -EINVAL; return -EINVAL;
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state) if (!(hwmgr->not_vf && amdgpu_dpm) ||
!hwmgr->hwmgr_func->set_asic_baco_state)
return 0; return 0;
mutex_lock(&hwmgr->smu_lock); mutex_lock(&hwmgr->smu_lock);


@ -3442,8 +3442,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 	drm_dp_queue_down_tx(mgr, txmsg);
 
 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
-	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
-		ret = -EIO;
+	if (ret > 0) {
+		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+			ret = -EIO;
+		else
+			ret = size;
+	}
 
 	kfree(txmsg);
 fail_put:


@ -5111,7 +5111,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
 	struct drm_display_mode *mode;
 	unsigned pixel_clock = (timings->pixel_clock[0] |
 				(timings->pixel_clock[1] << 8) |
-				(timings->pixel_clock[2] << 16));
+				(timings->pixel_clock[2] << 16)) + 1;
 	unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
 	unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
 	unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
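
For context (illustrative only, not part of the patch): assuming the DisplayID convention that this 24-bit field encodes the pixel clock in 10 kHz units minus one, the "+ 1" added above recovers the intended rate as in this hypothetical helper:

/* Hypothetical helper, for illustration only. */
static unsigned int displayid_detailed_clock_khz(const unsigned char raw[3])
{
	/* raw[] holds (pixel clock / 10 kHz) - 1, little endian */
	unsigned int stored = raw[0] | (raw[1] << 8) | (raw[2] << 16);

	return (stored + 1) * 10;	/* {0x01, 0x3A, 0x00} -> 148500 kHz (148.5 MHz) */
}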


@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride) int tiling_mode, unsigned int stride)
{ {
struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
struct i915_vma *vma; struct i915_vma *vma, *vn;
LIST_HEAD(unbind);
int ret = 0; int ret = 0;
if (tiling_mode == I915_TILING_NONE) if (tiling_mode == I915_TILING_NONE)
return 0; return 0;
mutex_lock(&ggtt->vm.mutex); mutex_lock(&ggtt->vm.mutex);
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) { for_each_ggtt_vma(vma, obj) {
GEM_BUG_ON(vma->vm != &ggtt->vm);
if (i915_vma_fence_prepare(vma, tiling_mode, stride)) if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue; continue;
ret = __i915_vma_unbind(vma); list_move(&vma->vm_link, &unbind);
if (ret)
break;
} }
spin_unlock(&obj->vma.lock);
list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
ret = __i915_vma_unbind(vma);
if (ret) {
/* Restore the remaining vma on an error */
list_splice(&unbind, &ggtt->vm.bound_list);
break;
}
}
mutex_unlock(&ggtt->vm.mutex); mutex_unlock(&ggtt->vm.mutex);
return ret; return ret;
@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
} }
mutex_unlock(&obj->mm.lock); mutex_unlock(&obj->mm.lock);
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) { for_each_ggtt_vma(vma, obj) {
vma->fence_size = vma->fence_size =
i915_gem_fence_size(i915, vma->size, tiling, stride); i915_gem_fence_size(i915, vma->size, tiling, stride);
@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
if (vma->fence) if (vma->fence)
vma->fence->dirty = true; vma->fence->dirty = true;
} }
spin_unlock(&obj->vma.lock);
obj->tiling_and_stride = tiling | stride; obj->tiling_and_stride = tiling | stride;
i915_gem_object_unlock(obj); i915_gem_object_unlock(obj);


@ -1477,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg)
unsigned int page_size = BIT(first); unsigned int page_size = BIT(first);
obj = i915_gem_object_create_internal(dev_priv, page_size); obj = i915_gem_object_create_internal(dev_priv, page_size);
if (IS_ERR(obj)) if (IS_ERR(obj)) {
return PTR_ERR(obj); err = PTR_ERR(obj);
goto out_vm;
}
vma = i915_vma_instance(obj, vm, NULL); vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
@ -1531,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg)
} }
obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE); obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
if (IS_ERR(obj)) if (IS_ERR(obj)) {
return PTR_ERR(obj); err = PTR_ERR(obj);
goto out_vm;
}
vma = i915_vma_instance(obj, vm, NULL); vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {

View File

@ -521,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from,
rcu_read_lock(); rcu_read_lock();
cl = rcu_dereference(from->hwsp_cacheline); cl = rcu_dereference(from->hwsp_cacheline);
if (i915_request_completed(from)) /* confirm cacheline is valid */
goto unlock;
if (unlikely(!i915_active_acquire_if_busy(&cl->active))) if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
goto unlock; /* seqno wrapped and completed! */ goto unlock; /* seqno wrapped and completed! */
if (unlikely(i915_request_completed(from))) if (unlikely(i915_request_completed(from)))


@ -3358,7 +3358,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{ {
struct intel_uncore *uncore = &dev_priv->uncore; struct intel_uncore *uncore = &dev_priv->uncore;
u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables; u32 de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables; u32 de_port_enables;
@ -3369,13 +3370,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_misc_masked |= GEN8_DE_MISC_GSE; de_misc_masked |= GEN8_DE_MISC_GSE;
if (INTEL_GEN(dev_priv) >= 9) { if (INTEL_GEN(dev_priv) >= 9) {
de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D; GEN9_AUX_CHANNEL_D;
if (IS_GEN9_LP(dev_priv)) if (IS_GEN9_LP(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS; de_port_masked |= BXT_DE_PORT_GMBUS;
} else {
de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
} }
if (INTEL_GEN(dev_priv) >= 11) if (INTEL_GEN(dev_priv) >= 11)


@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
spin_lock(&obj->vma.lock);
if (i915_is_ggtt(vm)) { if (i915_is_ggtt(vm)) {
if (unlikely(overflows_type(vma->size, u32))) if (unlikely(overflows_type(vma->size, u32)))
goto err_vma; goto err_unlock;
vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
i915_gem_object_get_tiling(obj), i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj)); i915_gem_object_get_stride(obj));
if (unlikely(vma->fence_size < vma->size || /* overflow */ if (unlikely(vma->fence_size < vma->size || /* overflow */
vma->fence_size > vm->total)) vma->fence_size > vm->total))
goto err_vma; goto err_unlock;
GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj,
__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
} }
spin_lock(&obj->vma.lock);
rb = NULL; rb = NULL;
p = &obj->vma.tree.rb_node; p = &obj->vma.tree.rb_node;
while (*p) { while (*p) {
@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj,
return vma; return vma;
err_unlock:
spin_unlock(&obj->vma.lock);
err_vma: err_vma:
i915_vma_free(vma); i915_vma_free(vma);
return ERR_PTR(-E2BIG); return ERR_PTR(-E2BIG);


@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
return ret; return ret;
ret = qxl_release_reserve_list(release, true); ret = qxl_release_reserve_list(release, true);
if (ret) if (ret) {
qxl_release_free(qdev, release);
return ret; return ret;
}
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE; cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA; cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
/* no need to add a release to the fence for this surface bo, /* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface since it is only released when we ask to destroy the surface
and it would never signal otherwise */ and it would never signal otherwise */
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
surf->hw_surf_alloc = true; surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock); spin_lock(&qdev->surf_id_idr_lock);
@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id; cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
return 0; return 0;
} }


@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
cmd->u.set.visible = 1; cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
return ret; return ret;
@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cmd->u.position.y = plane->state->crtc_y + fb->hot_y; cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
if (old_cursor_bo != NULL) if (old_cursor_bo != NULL)
qxl_bo_unpin(old_cursor_bo); qxl_bo_unpin(old_cursor_bo);
@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
cmd->type = QXL_CURSOR_HIDE; cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
} }
static void qxl_update_dumb_head(struct qxl_device *qdev, static void qxl_update_dumb_head(struct qxl_device *qdev,


@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
goto out_release_backoff; goto out_release_backoff;
rects = drawable_set_clipping(qdev, num_clips, clips_bo); rects = drawable_set_clipping(qdev, num_clips, clips_bo);
if (!rects) if (!rects) {
ret = -EINVAL;
goto out_release_backoff; goto out_release_backoff;
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS; drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
} }
qxl_bo_kunmap(clips_bo); qxl_bo_kunmap(clips_bo);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_release_backoff: out_release_backoff:
if (ret) if (ret)


@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
 		break;
 	default:
 		DRM_ERROR("unsupported image bit depth\n");
-		return -EINVAL; /* TODO: cleanup */
+		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+		return -EINVAL;
 	}
 	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
 	image->u.bitmap.x = width;


@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 		apply_surf_reloc(qdev, &reloc_info[i]);
 	}
 
+	qxl_release_fence_buffer_objects(release);
 	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
-	if (ret)
-		qxl_release_backoff_reserve_list(release);
-	else
-		qxl_release_fence_buffer_objects(release);
 
 out_free_bos:
 out_free_release:


@ -53,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
events_clear, &events_clear); events_clear, &events_clear);
} }
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t ctx_id)
{
virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
virtio_gpu_notify(vgdev);
ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
void (*work_func)(struct work_struct *work)) void (*work_func)(struct work_struct *work))
{ {
@ -275,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file) void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{ {
struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv; struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
if (!vgdev->has_virgl_3d) if (!vgdev->has_virgl_3d)
return; return;
vfpriv = file->driver_priv; if (vfpriv->context_created) {
virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
virtio_gpu_notify(vgdev);
}
virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id); ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
mutex_destroy(&vfpriv->context_lock); mutex_destroy(&vfpriv->context_lock);
kfree(vfpriv); kfree(vfpriv);
file->driver_priv = NULL; file->driver_priv = NULL;


@ -184,11 +184,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
 	shared_sint.masked = false;
-	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
-		shared_sint.auto_eoi = false;
-	else
-		shared_sint.auto_eoi = true;
-
+	shared_sint.auto_eoi = hv_recommend_using_aeoi();
 	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
 	/* Enable the global synic bit */


@ -286,8 +286,8 @@ TRACE_EVENT(vmbus_send_tl_connect_request,
 		__field(int, ret)
 	),
 	TP_fast_assign(
-		memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
-		memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+		export_guid(__entry->guest_id, &msg->guest_endpoint_id);
+		export_guid(__entry->host_id, &msg->host_service_id);
 		__entry->ret = ret;
 	),
 	TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "


@ -978,6 +978,9 @@ static int vmbus_resume(struct device *child_device)
 		return drv->resume(dev);
 }
+#else
+#define vmbus_suspend NULL
+#define vmbus_resume NULL
 #endif /* CONFIG_PM_SLEEP */
 
 /*
@ -997,11 +1000,22 @@ static void vmbus_device_release(struct device *device)
 }
 
 /*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
+ * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
+ *
+ * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
+ * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
+ * is no way to wake up a Generation-2 VM.
+ *
+ * The other 4 ops are for hibernation.
  */
 static const struct dev_pm_ops vmbus_pm = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
+	.suspend_noirq = NULL,
+	.resume_noirq = NULL,
+	.freeze_noirq = vmbus_suspend,
+	.thaw_noirq = vmbus_resume,
+	.poweroff_noirq = vmbus_suspend,
+	.restore_noirq = vmbus_resume,
 };
 
 /* The one and only one */
@ -2281,6 +2295,9 @@ static int vmbus_bus_resume(struct device *dev)
 	return 0;
 }
+#else
+#define vmbus_bus_suspend NULL
+#define vmbus_bus_resume NULL
 #endif /* CONFIG_PM_SLEEP */
 
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
@ -2291,16 +2308,24 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
 
 /*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
- * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
- * pci "noirq" restore callback runs before "non-noirq" callbacks (see
+ * Note: we must use the "no_irq" ops, otherwise hibernation can not work with
+ * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
+ * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
  * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
- * resume callback must also run via the "noirq" callbacks.
+ * resume callback must also run via the "noirq" ops.
+ *
+ * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
+ * earlier in this file before vmbus_pm.
 */
 static const struct dev_pm_ops vmbus_bus_pm = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
+	.suspend_noirq = NULL,
+	.resume_noirq = NULL,
+	.freeze_noirq = vmbus_bus_suspend,
+	.thaw_noirq = vmbus_bus_resume,
+	.poweroff_noirq = vmbus_bus_suspend,
+	.restore_noirq = vmbus_bus_resume
 };
 
 static struct acpi_driver vmbus_acpi_driver = {


@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
if (!privdata) if (!privdata)
return -ENOMEM; return -ENOMEM;
privdata->pci_dev = pci_dev;
rc = amd_mp2_pci_init(privdata, pci_dev); rc = amd_mp2_pci_init(privdata, pci_dev);
if (rc) if (rc)
return rc; return rc;
mutex_init(&privdata->c2p_lock); mutex_init(&privdata->c2p_lock);
privdata->pci_dev = pci_dev;
pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000); pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
pm_runtime_use_autosuspend(&pci_dev->dev); pm_runtime_use_autosuspend(&pci_dev->dev);


@ -603,6 +603,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
/* Ack all interrupts except for Rx done */ /* Ack all interrupts except for Rx done */
writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE, writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
bus->base + ASPEED_I2C_INTR_STS_REG); bus->base + ASPEED_I2C_INTR_STS_REG);
readl(bus->base + ASPEED_I2C_INTR_STS_REG);
irq_remaining = irq_received; irq_remaining = irq_received;
#if IS_ENABLED(CONFIG_I2C_SLAVE) #if IS_ENABLED(CONFIG_I2C_SLAVE)
@ -645,9 +646,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
irq_received, irq_handled); irq_received, irq_handled);
/* Ack Rx done */ /* Ack Rx done */
if (irq_received & ASPEED_I2CD_INTR_RX_DONE) if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
writel(ASPEED_I2CD_INTR_RX_DONE, writel(ASPEED_I2CD_INTR_RX_DONE,
bus->base + ASPEED_I2C_INTR_STS_REG); bus->base + ASPEED_I2C_INTR_STS_REG);
readl(bus->base + ASPEED_I2C_INTR_STS_REG);
}
spin_unlock(&bus->lock); spin_unlock(&bus->lock);
return irq_remaining ? IRQ_NONE : IRQ_HANDLED; return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
} }


@ -360,6 +360,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK); value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
i2c_slave_event(iproc_i2c->slave, i2c_slave_event(iproc_i2c->slave,
I2C_SLAVE_WRITE_RECEIVED, &value); I2C_SLAVE_WRITE_RECEIVED, &value);
if (rx_status == I2C_SLAVE_RX_END)
i2c_slave_event(iproc_i2c->slave,
I2C_SLAVE_STOP, &value);
} }
} else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) { } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
/* Master read other than start */ /* Master read other than start */


@ -996,13 +996,14 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
do { do {
u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS); u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
if (status) if (status) {
tegra_i2c_isr(i2c_dev->irq, i2c_dev); tegra_i2c_isr(i2c_dev->irq, i2c_dev);
if (completion_done(complete)) { if (completion_done(complete)) {
s64 delta = ktime_ms_delta(ktimeout, ktime); s64 delta = ktime_ms_delta(ktimeout, ktime);
return msecs_to_jiffies(delta) ?: 1; return msecs_to_jiffies(delta) ?: 1;
}
} }
ktime = ktime_get(); ktime = ktime_get();
@ -1029,18 +1030,14 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
disable_irq(i2c_dev->irq); disable_irq(i2c_dev->irq);
/* /*
* Under some rare circumstances (like running KASAN + * There is a chance that completion may happen after IRQ
* NFS root) CPU, which handles interrupt, may stuck in * synchronization, which is done by disable_irq().
* uninterruptible state for a significant time. In this
* case we will get timeout if I2C transfer is running on
* a sibling CPU, despite of IRQ being raised.
*
* In order to handle this rare condition, the IRQ status
* needs to be checked after timeout.
*/ */
if (ret == 0) if (ret == 0 && completion_done(complete)) {
ret = tegra_i2c_poll_completion_timeout(i2c_dev, dev_warn(i2c_dev->dev,
complete, 0); "completion done after timeout\n");
ret = 1;
}
} }
return ret; return ret;
@ -1219,15 +1216,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
time_left = tegra_i2c_wait_completion_timeout( time_left = tegra_i2c_wait_completion_timeout(
i2c_dev, &i2c_dev->dma_complete, xfer_time); i2c_dev, &i2c_dev->dma_complete, xfer_time);
/*
* Synchronize DMA first, since dmaengine_terminate_sync()
* performs synchronization after the transfer's termination
* and we want to get a completion if transfer succeeded.
*/
dmaengine_synchronize(i2c_dev->msg_read ?
i2c_dev->rx_dma_chan :
i2c_dev->tx_dma_chan);
dmaengine_terminate_sync(i2c_dev->msg_read ? dmaengine_terminate_sync(i2c_dev->msg_read ?
i2c_dev->rx_dma_chan : i2c_dev->rx_dma_chan :
i2c_dev->tx_dma_chan); i2c_dev->tx_dma_chan);


@ -862,7 +862,7 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
 	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
 				  &cm.local_id_next, GFP_KERNEL);
-	if (ret)
+	if (ret < 0)
 		goto error;
 	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
@ -1828,11 +1828,9 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
static void cm_format_rej(struct cm_rej_msg *rej_msg, static void cm_format_rej(struct cm_rej_msg *rej_msg,
struct cm_id_private *cm_id_priv, struct cm_id_private *cm_id_priv,
enum ib_cm_rej_reason reason, enum ib_cm_rej_reason reason, void *ari,
void *ari, u8 ari_length, const void *private_data,
u8 ari_length, u8 private_data_len, enum ib_cm_state state)
const void *private_data,
u8 private_data_len)
{ {
lockdep_assert_held(&cm_id_priv->lock); lockdep_assert_held(&cm_id_priv->lock);
@ -1840,7 +1838,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
be32_to_cpu(cm_id_priv->id.remote_id)); be32_to_cpu(cm_id_priv->id.remote_id));
switch(cm_id_priv->id.state) { switch (state) {
case IB_CM_REQ_RCVD: case IB_CM_REQ_RCVD:
IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0)); IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
@ -1905,8 +1903,9 @@ static void cm_dup_req_handler(struct cm_work *work,
cm_id_priv->private_data_len); cm_id_priv->private_data_len);
break; break;
case IB_CM_TIMEWAIT: case IB_CM_TIMEWAIT:
cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0); IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
IB_CM_TIMEWAIT);
break; break;
default: default:
goto unlock; goto unlock;
@ -2904,6 +2903,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
u8 ari_length, const void *private_data, u8 ari_length, const void *private_data,
u8 private_data_len) u8 private_data_len)
{ {
enum ib_cm_state state = cm_id_priv->id.state;
struct ib_mad_send_buf *msg; struct ib_mad_send_buf *msg;
int ret; int ret;
@ -2913,7 +2913,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
(ari && ari_length > IB_CM_REJ_ARI_LENGTH)) (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
return -EINVAL; return -EINVAL;
switch (cm_id_priv->id.state) { switch (state) {
case IB_CM_REQ_SENT: case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REQ_RCVD:
case IB_CM_REQ_RCVD: case IB_CM_REQ_RCVD:
@ -2925,7 +2925,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
if (ret) if (ret)
return ret; return ret;
cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
ari, ari_length, private_data, private_data_len); ari, ari_length, private_data, private_data_len,
state);
break; break;
case IB_CM_REP_SENT: case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD: case IB_CM_MRA_REP_RCVD:
@ -2934,7 +2935,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
if (ret) if (ret)
return ret; return ret;
cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
ari, ari_length, private_data, private_data_len); ari, ari_length, private_data, private_data_len,
state);
break; break;
default: default:
pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__, pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,


@ -360,7 +360,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
 	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
 	 * that release is never done while a call to lookup is possible.
 	 */
-	if (f->f_op != fd_type->fops) {
+	if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
 		fput(f);
 		return ERR_PTR(-EBADF);
 	}
@ -474,16 +474,15 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL, filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
fd_type->flags); fd_type->flags);
if (IS_ERR(filp)) { if (IS_ERR(filp)) {
uverbs_uobject_put(uobj);
uobj = ERR_CAST(filp); uobj = ERR_CAST(filp);
goto err_uobj; goto err_fd;
} }
uobj->object = filp; uobj->object = filp;
uobj->id = new_fd; uobj->id = new_fd;
return uobj; return uobj;
err_uobj:
uverbs_uobject_put(uobj);
err_fd: err_fd:
put_unused_fd(new_fd); put_unused_fd(new_fd);
return uobj; return uobj;
@ -679,7 +678,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
enum rdma_lookup_mode mode) enum rdma_lookup_mode mode)
{ {
assert_uverbs_usecnt(uobj, mode); assert_uverbs_usecnt(uobj, mode);
uobj->uapi_object->type_class->lookup_put(uobj, mode);
/* /*
* In order to unlock an object, either decrease its usecnt for * In order to unlock an object, either decrease its usecnt for
* read access or zero it in case of exclusive access. See * read access or zero it in case of exclusive access. See
@ -696,6 +694,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
break; break;
} }
uobj->uapi_object->type_class->lookup_put(uobj, mode);
/* Pairs with the kref obtained by type->lookup_get */ /* Pairs with the kref obtained by type->lookup_get */
uverbs_uobject_put(uobj); uverbs_uobject_put(uobj);
} }


@ -820,6 +820,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 		ret = mmget_not_zero(mm);
 		if (!ret) {
 			list_del_init(&priv->list);
+			if (priv->entry) {
+				rdma_user_mmap_entry_put(priv->entry);
+				priv->entry = NULL;
+			}
 			mm = NULL;
 			continue;
 		}


@ -1046,7 +1046,7 @@ i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
 	u64 header;
 
 	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
-	if (wqe)
+	if (!wqe)
 		return I40IW_ERR_RING_FULL;
 
 	set_64bit_val(wqe, 32, feat_mem->pa);


@ -1499,8 +1499,9 @@ static int __mlx4_ib_create_default_rules(
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+		union ib_flow_spec ib_spec = {};
 		int ret;
-		union ib_flow_spec ib_spec;
+
 		switch (pdefault_rules->rules_create_list[i]) {
 		case 0:
 			/* no rule */


@ -5558,7 +5558,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
 	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
 	rdma_ah_set_static_rate(ah_attr,
 				path->static_rate ? path->static_rate - 5 : 0);
-	if (path->grh_mlid & (1 << 7)) {
+
+	if (path->grh_mlid & (1 << 7) ||
+	    ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
 		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
 
 		rdma_ah_set_grh(ah_attr, NULL,


@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
 		cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
-		if (!cq->ip) {
-			err = -ENOMEM;
+		if (IS_ERR(cq->ip)) {
+			err = PTR_ERR(cq->ip);
 			goto bail_wc;
 		}


@ -154,7 +154,7 @@ done:
  * @udata: user data (must be valid!)
  * @obj: opaque pointer to a cq, wq etc
  *
- * Return: rvt_mmap struct on success
+ * Return: rvt_mmap struct on success, ERR_PTR on failure
  */
 struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
 					   struct ib_udata *udata, void *obj)
@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
 	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
 	if (!ip)
-		return ip;
+		return ERR_PTR(-ENOMEM);
 
 	size = PAGE_ALIGN(size);
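
For context (illustrative only, not part of the patch): a minimal sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention that rvt_create_mmap_info() and its callers are converted to above; the structure and function names below are made up for the example.

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };	/* stand-in for struct rvt_mmap_info */

/* Return a real error code instead of NULL so callers can propagate it. */
static struct foo *alloc_foo(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	return f ? f : ERR_PTR(-ENOMEM);
}

static int use_foo(void)
{
	struct foo *f = alloc_foo();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* e.g. -ENOMEM, nothing is lost */
	kfree(f);
	return 0;
}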


@ -1244,8 +1244,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 			qp->ip = rvt_create_mmap_info(rdi, s, udata,
 						      qp->r_rq.wq);
-			if (!qp->ip) {
-				ret = ERR_PTR(-ENOMEM);
+			if (IS_ERR(qp->ip)) {
+				ret = ERR_CAST(qp->ip);
 				goto bail_qpn;
 			}


@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
 		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
 
 		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
-		if (!srq->ip) {
-			ret = -ENOMEM;
+		if (IS_ERR(srq->ip)) {
+			ret = PTR_ERR(srq->ip);
 			goto bail_wq;
 		}


@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
{ {
struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr; struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
struct siw_device *sdev = to_siw_dev(pd->device); struct siw_device *sdev = to_siw_dev(pd->device);
struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); struct siw_mem *mem;
int rv = 0; int rv = 0;
siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey); siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
if (unlikely(!mem || !base_mr)) { if (unlikely(!base_mr)) {
pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
return -EINVAL; return -EINVAL;
} }
if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) { if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey); pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
rv = -EINVAL; return -EINVAL;
goto out;
} }
mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
if (unlikely(!mem)) {
pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
return -EINVAL;
}
if (unlikely(mem->pd != pd)) { if (unlikely(mem->pd != pd)) {
pr_warn("siw: fastreg: PD mismatch\n"); pr_warn("siw: fastreg: PD mismatch\n");
rv = -EINVAL; rv = -EINVAL;


@ -362,7 +362,7 @@ config IPMMU_VMSA
 config SPAPR_TCE_IOMMU
 	bool "sPAPR TCE IOMMU Support"
-	depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST)
+	depends on PPC_POWERNV || PPC_PSERIES
 	select IOMMU_API
 	help
 	  Enables bits of IOMMU API required by VFIO. The iommu_ops
@ -457,7 +457,7 @@ config S390_AP_IOMMU
 config MTK_IOMMU
 	bool "MTK IOMMU Support"
-	depends on ARM || ARM64 || COMPILE_TEST
+	depends on HAS_DMA
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 	select ARM_DMA_USE_IOMMU
 	select IOMMU_API


@ -2936,7 +2936,7 @@ static int __init parse_amd_iommu_intr(char *str)
 {
 	for (; *str; ++str) {
 		if (strncmp(str, "legacy", 6) == 0) {
-			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
 			break;
 		}
 		if (strncmp(str, "vapic", 5) == 0) {


@ -371,11 +371,11 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
 
-#ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
 int intel_iommu_sm = 1;
 #else
 int intel_iommu_sm;
-#endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
 
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);


@ -184,6 +184,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
 static void dev_iommu_free(struct device *dev)
 {
+	iommu_fwspec_free(dev);
 	kfree(dev->iommu);
 	dev->iommu = NULL;
 }


@ -814,8 +814,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
qcom_iommu->dev = dev; qcom_iommu->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res) if (res) {
qcom_iommu->local_base = devm_ioremap_resource(dev, res); qcom_iommu->local_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qcom_iommu->local_base))
return PTR_ERR(qcom_iommu->local_base);
}
qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
if (IS_ERR(qcom_iommu->iface_clk)) { if (IS_ERR(qcom_iommu->iface_clk)) {


@ -585,10 +585,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 	/* Do we need to select a new pgpath? */
 	pgpath = READ_ONCE(m->current_pgpath);
-	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
-	if (!pgpath || !queue_io)
+	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 
+	/* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
+	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+
 	if ((pgpath && queue_io) ||
 	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 		/* Queue for the daemon to resubmit */


@ -435,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
 	fio->level++;
 
 	if (type == DM_VERITY_BLOCK_TYPE_METADATA)
-		block += v->data_blocks;
+		block = block - v->hash_start + v->data_blocks;
 
 	/*
 	 * For RS(M, N), the continuous FEC data is divided into blocks of N


@ -931,6 +931,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
return 0; return 0;
} }
static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
struct dm_io_region region;
struct dm_io_request req;
region.bdev = wc->ssd_dev->bdev;
region.sector = wc->start_sector;
region.count = n_sectors;
req.bi_op = REQ_OP_READ;
req.bi_op_flags = REQ_SYNC;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map;
req.client = wc->dm_io;
req.notify.fn = NULL;
return dm_io(&req, 1, &region, NULL);
}
static void writecache_resume(struct dm_target *ti) static void writecache_resume(struct dm_target *ti)
{ {
struct dm_writecache *wc = ti->private; struct dm_writecache *wc = ti->private;
@ -941,8 +959,18 @@ static void writecache_resume(struct dm_target *ti)
wc_lock(wc); wc_lock(wc);
if (WC_MODE_PMEM(wc)) if (WC_MODE_PMEM(wc)) {
persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
} else {
r = writecache_read_metadata(wc, wc->metadata_sectors);
if (r) {
size_t sb_entries_offset;
writecache_error(wc, r, "unable to read metadata: %d", r);
sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
memset((char *)wc->memory_map + sb_entries_offset, -1,
(wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
}
}
wc->tree = RB_ROOT; wc->tree = RB_ROOT;
INIT_LIST_HEAD(&wc->lru); INIT_LIST_HEAD(&wc->lru);
@ -2102,6 +2130,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Invalid block size"; ti->error = "Invalid block size";
goto bad; goto bad;
} }
if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
r = -EINVAL;
ti->error = "Block size is smaller than device logical block size";
goto bad;
}
wc->block_size_bits = __ffs(wc->block_size); wc->block_size_bits = __ffs(wc->block_size);
wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@ -2200,8 +2234,6 @@ invalid_optional:
goto bad; goto bad;
} }
} else { } else {
struct dm_io_region region;
struct dm_io_request req;
size_t n_blocks, n_metadata_blocks; size_t n_blocks, n_metadata_blocks;
uint64_t n_bitmap_bits; uint64_t n_bitmap_bits;
@ -2258,19 +2290,9 @@ invalid_optional:
goto bad; goto bad;
} }
region.bdev = wc->ssd_dev->bdev; r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
region.sector = wc->start_sector;
region.count = wc->metadata_sectors;
req.bi_op = REQ_OP_READ;
req.bi_op_flags = REQ_SYNC;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map;
req.client = wc->dm_io;
req.notify.fn = NULL;
r = dm_io(&req, 1, &region, NULL);
if (r) { if (r) {
ti->error = "Unable to read metadata"; ti->error = "Unable to read first block of metadata";
goto bad; goto bad;
} }
} }


@ -878,7 +878,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
  * Issued High Priority Interrupt, and check for card status
  * until out-of prg-state.
  */
-int mmc_interrupt_hpi(struct mmc_card *card)
+static int mmc_interrupt_hpi(struct mmc_card *card)
 {
 	int err;
 	u32 status;


@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
@ -349,12 +350,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 /* CQHCI is idle and should halt immediately, so set a small timeout */
 #define CQHCI_OFF_TIMEOUT 100
 
+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+	return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
 static void cqhci_off(struct mmc_host *mmc)
 {
 	struct cqhci_host *cq_host = mmc->cqe_private;
-	ktime_t timeout;
-	bool timed_out;
 	u32 reg;
+	int err;
 
 	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
 		return;
@ -364,15 +369,9 @@ static void cqhci_off(struct mmc_host *mmc)
 	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
 
-	timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
-	while (1) {
-		timed_out = ktime_compare(ktime_get(), timeout) > 0;
-		reg = cqhci_readl(cq_host, CQHCI_CTL);
-		if ((reg & CQHCI_HALT) || timed_out)
-			break;
-	}
-
-	if (timed_out)
+	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+	if (err < 0)
 		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
 	else
 		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
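
For context (illustrative only, not part of the patch): a minimal sketch of the readx_poll_timeout() helper from <linux/iopoll.h> that replaces the open-coded ktime loop above, reusing the cqhci_read_ctl() accessor and the CQHCI_HALT/CQHCI_OFF_TIMEOUT definitions from this driver.

#include <linux/iopoll.h>

/*
 * readx_poll_timeout(op, args, val, cond, sleep_us, timeout_us) repeatedly
 * evaluates op(args) into val until cond is true or timeout_us elapses;
 * it returns 0 on success and -ETIMEDOUT otherwise.  sleep_us == 0 means
 * busy-polling, which is what cqhci_off() above relies on.
 */
static int cqhci_wait_for_halt(struct cqhci_host *cq_host)
{
	u32 reg;

	return readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				  reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
}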


@ -357,14 +357,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
meson_mx_mmc_start_cmd(mmc, mrq->cmd); meson_mx_mmc_start_cmd(mmc, mrq->cmd);
} }
static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
{
struct meson_mx_mmc_host *host = mmc_priv(mmc);
u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
}
static void meson_mx_mmc_read_response(struct mmc_host *mmc, static void meson_mx_mmc_read_response(struct mmc_host *mmc,
struct mmc_command *cmd) struct mmc_command *cmd)
{ {
@ -506,7 +498,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
static struct mmc_host_ops meson_mx_mmc_ops = { static struct mmc_host_ops meson_mx_mmc_ops = {
.request = meson_mx_mmc_request, .request = meson_mx_mmc_request,
.set_ios = meson_mx_mmc_set_ios, .set_ios = meson_mx_mmc_set_ios,
.card_busy = meson_mx_mmc_card_busy,
.get_cd = mmc_gpio_get_cd, .get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro, .get_ro = mmc_gpio_get_ro,
}; };
@ -570,7 +561,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
mmc->f_max = clk_round_rate(host->cfg_div_clk, mmc->f_max = clk_round_rate(host->cfg_div_clk,
clk_get_rate(host->parent_clk)); clk_get_rate(host->parent_clk));
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
mmc->ops = &meson_mx_mmc_ops; mmc->ops = &meson_mx_mmc_ops;
ret = mmc_of_parse(mmc); ret = mmc_of_parse(mmc);


@ -2087,6 +2087,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable; goto clk_disable;
} }
msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
pm_runtime_get_noresume(&pdev->dev); pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev); pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev); pm_runtime_enable(&pdev->dev);


@@ -601,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
 	struct sdhci_pci_slot *slot = sdhci_priv(host);
 	struct intel_host *intel_host = sdhci_pci_priv(slot);
 
+	if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+		return 0;
+
 	return intel_host->drv_strength;
 }
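The added guard only honors the host's preferred drive strength when the card actually advertises it: mmc_driver_type_mask() turns a drive-strength index into a single bit that can be tested against the card_drv support mask, and the function falls back to type 0, the default, otherwise. A hedged stand-alone sketch of that selection logic, with a local macro standing in for the kernel helper:

    #include <stdio.h>

    /* Stand-in for the kernel's mmc_driver_type_mask(n): index -> bit. */
    #define drv_type_mask(n) (1u << (n))

    /* Return the host's preferred drive strength if the card supports it,
     * otherwise fall back to the default type 0. */
    static int select_drive_strength(int host_pref, unsigned int card_drv)
    {
        if (!(drv_type_mask(host_pref) & card_drv))
            return 0;
        return host_pref;
    }

    int main(void)
    {
        unsigned int card_drv = drv_type_mask(0) | drv_type_mask(1);

        printf("%d\n", select_drive_strength(1, card_drv)); /* 1: supported */
        printf("%d\n", select_drive_strength(4, card_drv)); /* 0: fallback  */
        return 0;
    }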

View File

@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
{ {
/* Wait for 5ms after set 1.8V signal enable bit */ /* Wait for 5ms after set 1.8V signal enable bit */
usleep_range(5000, 5500); usleep_range(5000, 5500);
/*
* For some reason the controller's Host Control2 register reports
* the bit representing 1.8V signaling as 0 when read after it was
* written as 1. Subsequent read reports 1.
*
* Since this may cause some issues, do an empty read of the Host
* Control2 register here to circumvent this.
*/
sdhci_readw(host, SDHCI_HOST_CONTROL2);
} }
static const struct sdhci_ops sdhci_xenon_ops = { static const struct sdhci_ops sdhci_xenon_ops = {

View File

@@ -3642,6 +3642,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	return;
  out_put_disk:
+	/* prevent double queue cleanup */
+	ns->disk->queue = NULL;
 	put_disk(ns->disk);
  out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
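The new comment states the intent: the namespace's request queue is also released through another path on this error exit, so the pointer is cleared before put_disk() to keep the disk release code from dropping it a second time. A generic user-space sketch of the same "detach the reference once another path owns the cleanup" discipline; the structures and helpers below are invented for the demo.

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int dummy; };
    struct disk  { struct queue *queue; };

    /* Releases the disk and, if still attached, its queue. */
    static void put_disk(struct disk *d)
    {
        free(d->queue);   /* free(NULL) is a harmless no-op */
        free(d);
    }

    int main(void)
    {
        struct disk *d = calloc(1, sizeof(*d));
        struct queue *q = calloc(1, sizeof(*q));

        d->queue = q;

        /* Error path: the queue is cleaned up by its own owner... */
        free(q);
        /* ...so detach it before put_disk(), preventing a double free. */
        d->queue = NULL;
        put_disk(d);

        printf("no double free\n");
        return 0;
    }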

View File

@@ -3732,6 +3732,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	}
 	qla2x00_wait_for_hba_ready(base_vha);
 
+	/*
+	 * if UNLOADING flag is already set, then continue unload,
+	 * where it was set first.
+	 */
+	if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+		return;
+
 	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
 	    IS_QLA28XX(ha)) {
 		if (ha->flags.fw_started)
@@ -3750,15 +3757,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 	qla2x00_wait_for_sess_deletion(base_vha);
 
-	/*
-	 * if UNLOAD flag is already set, then continue unload,
-	 * where it was set first.
-	 */
-	if (test_bit(UNLOADING, &base_vha->dpc_flags))
-		return;
-
-	set_bit(UNLOADING, &base_vha->dpc_flags);
-
 	qla_nvme_delete(base_vha);
 
 	dma_free_coherent(&ha->pdev->dev,
@@ -4864,6 +4862,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
 	struct qla_work_evt *e;
 	uint8_t bail;
 
+	if (test_bit(UNLOADING, &vha->dpc_flags))
+		return NULL;
+
 	QLA_VHA_MARK_BUSY(vha, bail);
 	if (bail)
 		return NULL;
@@ -6628,13 +6629,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 	struct pci_dev *pdev = ha->pdev;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	/*
-	 * if UNLOAD flag is already set, then continue unload,
-	 * where it was set first.
-	 */
-	if (test_bit(UNLOADING, &base_vha->dpc_flags))
-		return;
-
 	ql_log(ql_log_warn, base_vha, 0x015b,
 	    "Disabling adapter.\n");
@@ -6645,9 +6639,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 		return;
 	}
 
-	qla2x00_wait_for_sess_deletion(base_vha);
+	/*
+	 * if UNLOADING flag is already set, then continue unload,
+	 * where it was set first.
+	 */
+	if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+		return;
 
-	set_bit(UNLOADING, &base_vha->dpc_flags);
+	qla2x00_wait_for_sess_deletion(base_vha);
 
 	qla2x00_delete_all_vps(ha, base_vha);
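The recurring pattern across these hunks is replacing a separate test_bit()/set_bit() pair with a single test_and_set_bit(UNLOADING, ...): the atomic read-modify-write guarantees that exactly one of the competing teardown paths (PCI remove versus the board-disable work) proceeds past the check, where the two-step version leaves a window in which both can continue. A user-space analogue using C11 atomics, with a made-up flag word rather than the driver's dpc_flags:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define UNLOADING_BIT (1u << 0)

    static atomic_uint dpc_flags;

    /* Returns true if the bit was already set (someone else is unloading). */
    static bool test_and_set_unloading(void)
    {
        unsigned int old = atomic_fetch_or(&dpc_flags, UNLOADING_BIT);

        return old & UNLOADING_BIT;
    }

    static void teardown(const char *who)
    {
        if (test_and_set_unloading()) {
            printf("%s: unload already in progress, backing off\n", who);
            return;
        }
        printf("%s: performing the unload\n", who);
    }

    int main(void)
    {
        teardown("remove_one");          /* wins the race */
        teardown("disable_board_work");  /* sees the flag, backs off */
        return 0;
    }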

View File

@@ -2284,6 +2284,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		switch (oldstate) {
 		case SDEV_RUNNING:
 		case SDEV_CREATED_BLOCK:
+		case SDEV_QUIESCE:
 		case SDEV_OFFLINE:
 			break;
 		default:
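scsi_device_set_state() validates transitions with nested switches: the outer switch picks the requested new state and the inner switch enumerates which old states may reach it, everything else falling through to an illegal-transition error; the one-line hunk simply whitelists SDEV_QUIESCE as another legal starting state for this transition. A compact sketch of that validation idiom, with a made-up three-state machine rather than the SCSI device state table:

    #include <stdbool.h>
    #include <stdio.h>

    enum dev_state { DEV_RUNNING, DEV_QUIESCE, DEV_BLOCK };

    /* Returns true when moving from old_state to new_state is legal. */
    static bool state_change_ok(enum dev_state old_state,
                                enum dev_state new_state)
    {
        switch (new_state) {
        case DEV_BLOCK:
            switch (old_state) {
            case DEV_RUNNING:
            case DEV_QUIESCE:   /* newly whitelisted predecessor */
                return true;
            default:
                return false;
            }
        case DEV_RUNNING:
            return old_state == DEV_BLOCK || old_state == DEV_QUIESCE;
        default:
            return old_state == DEV_RUNNING;
        }
    }

    int main(void)
    {
        printf("QUIESCE -> BLOCK: %s\n",
               state_change_ok(DEV_QUIESCE, DEV_BLOCK) ? "ok" : "illegal");
        printf("BLOCK -> QUIESCE: %s\n",
               state_change_ok(DEV_BLOCK, DEV_QUIESCE) ? "ok" : "illegal");
        return 0;
    }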

View File

@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
target_to_linux_sector(dev, cmd->t_task_lba), target_to_linux_sector(dev, cmd->t_task_lba),
target_to_linux_sector(dev, target_to_linux_sector(dev,
sbc_get_write_same_sectors(cmd)), sbc_get_write_same_sectors(cmd)),
GFP_KERNEL, false); GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
if (ret) if (ret)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
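The one-liner swaps the literal false for BLKDEV_ZERO_NOUNMAP: the last argument of blkdev_issue_zeroout() is a flags word, and BLKDEV_ZERO_NOUNMAP asks the block layer to actually write zeroes rather than satisfy the request by unmapping the range. Passing a bare boolean where flags are expected compiles but hides the intent, as the generic sketch below illustrates; the helper and flag names in it are invented for the demo, not the block layer's API.

    #include <stdio.h>

    /* Demo flags for a hypothetical zero-out helper. */
    #define ZERO_NOUNMAP    (1u << 0)   /* really write zeroes, don't unmap */
    #define ZERO_NOFALLBACK (1u << 1)

    static int issue_zeroout(unsigned long start, unsigned long nr,
                             unsigned int flags)
    {
        if (flags & ZERO_NOUNMAP)
            printf("writing zeroes to [%lu, %lu)\n", start, start + nr);
        else
            printf("unmapping [%lu, %lu)\n", start, start + nr);
        return 0;
    }

    int main(void)
    {
        /* issue_zeroout(0, 8, 0) would compile just as well, but the named
         * constant states what the caller actually wants. */
        return issue_zeroout(0, 8, ZERO_NOUNMAP);
    }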

Some files were not shown because too many files have changed in this diff.