mirror of https://github.com/torvalds/linux.git
synced 2024-12-25 12:21:37 +00:00

commit e243f39685

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -13389,6 +13389,7 @@ F:	net/core/drop_monitor.c
 NETWORKING DRIVERS
 M:	"David S. Miller" <davem@davemloft.net>
 M:	Jakub Kicinski <kuba@kernel.org>
+M:	Paolo Abeni <pabeni@redhat.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 Q:	https://patchwork.kernel.org/project/netdevbpf/list/

@@ -13435,6 +13436,7 @@ F:	tools/testing/selftests/drivers/net/dsa/
 NETWORKING [GENERAL]
 M:	"David S. Miller" <davem@davemloft.net>
 M:	Jakub Kicinski <kuba@kernel.org>
+M:	Paolo Abeni <pabeni@redhat.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 Q:	https://patchwork.kernel.org/project/netdevbpf/list/
Makefile | 2

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Superb Owl
 
 # *DOCUMENTATION*
@@ -25,7 +25,13 @@ enum {
 	SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
 };
 
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
 void spectre_v2_update_state(unsigned int state, unsigned int methods);
+#else
+static inline void spectre_v2_update_state(unsigned int state,
+					   unsigned int methods)
+{}
+#endif
 
 int spectre_bhb_update_vectors(unsigned int method);
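Note: the hunk above uses a common kernel header idiom — when the config option is off, callers still compile because the header supplies an empty static inline stub instead of the real declaration. A minimal standalone sketch of the pattern (names here are illustrative, not from the kernel; with -DCONFIG_FEATURE the real definition would come from a separate object file):

	/* config_stub_demo.c - builds and runs as-is (stub path) */
	#include <stdio.h>

	#ifdef CONFIG_FEATURE
	void feature_update_state(unsigned int state, unsigned int methods);
	#else
	/* no-op stub: call sites need no #ifdef of their own */
	static inline void feature_update_state(unsigned int state,
						unsigned int methods) {}
	#endif

	int main(void)
	{
		feature_update_state(1, 2);	/* always compiles */
		printf("stub pattern ok\n");
		return 0;
	}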
@@ -1040,9 +1040,9 @@ vector_bhb_loop8_\name:
 
 	@ bhb workaround
 	mov	r0, #8
-1:	b	. + 4
+3:	b	. + 4
 	subs	r0, r0, #1
-	bne	1b
+	bne	3b
 	dsb
 	isb
 	b	2b
@@ -9,7 +9,7 @@ long soft_nmi_interrupt(struct pt_regs *regs);
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif
 
-#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
+#ifdef CONFIG_NMI_IPI
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
 					   bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
@@ -2,6 +2,7 @@ menu "CPU errata selection"
 
 config RISCV_ERRATA_ALTERNATIVE
 	bool "RISC-V alternative scheme"
+	depends on !XIP_KERNEL
 	default y
 	help
 	  This Kconfig allows the kernel to automatically patch the
@@ -14,8 +14,8 @@ config SOC_SIFIVE
 	select CLK_SIFIVE
 	select CLK_SIFIVE_PRCI
 	select SIFIVE_PLIC
-	select RISCV_ERRATA_ALTERNATIVE
-	select ERRATA_SIFIVE
+	select RISCV_ERRATA_ALTERNATIVE if !XIP_KERNEL
+	select ERRATA_SIFIVE if !XIP_KERNEL
 	help
 	  This enables support for SiFive SoC platform hardware.
 
@@ -13,6 +13,19 @@
 #include <linux/pgtable.h>
 #include <asm/sections.h>
 
+/*
+ * The auipc+jalr instruction pair can reach any PC-relative offset
+ * in the range [-2^31 - 2^11, 2^31 - 2^11)
+ */
+static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
+{
+#ifdef CONFIG_32BIT
+	return true;
+#else
+	return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11));
+#endif
+}
+
 static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
 {
 	if (v != (u32)v) {

@@ -95,7 +108,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
 	ptrdiff_t offset = (void *)v - (void *)location;
 	s32 hi20;
 
-	if (offset != (s32)offset) {
+	if (!riscv_insn_valid_32bit_offset(offset)) {
 		pr_err(
 			"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
 			me->name, (long long)v, location);

@@ -197,10 +210,9 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
 				       Elf_Addr v)
 {
 	ptrdiff_t offset = (void *)v - (void *)location;
-	s32 fill_v = offset;
 	u32 hi20, lo12;
 
-	if (offset != fill_v) {
+	if (!riscv_insn_valid_32bit_offset(offset)) {
 		/* Only emit the plt entry if offset over 32-bit range */
 		if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
 			offset = module_emit_plt_entry(me, v);

@@ -224,10 +236,9 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
 				   Elf_Addr v)
 {
 	ptrdiff_t offset = (void *)v - (void *)location;
-	s32 fill_v = offset;
 	u32 hi20, lo12;
 
-	if (offset != fill_v) {
+	if (!riscv_insn_valid_32bit_offset(offset)) {
 		pr_err(
 			"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
 			me->name, (long long)v, location);
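Note: the asymmetric bounds come from jalr's sign-extended low 12 bits, which skew the plain 32-bit range by +-2^11. A small host-side sketch of the same check (assumes 64-bit long, as on riscv64):

	#include <stdio.h>
	#include <stddef.h>

	/* same bounds as riscv_insn_valid_32bit_offset() on 64-bit */
	static int valid_32bit_offset(ptrdiff_t val)
	{
		return (-(1L << 31) - (1L << 11)) <= val &&
		       val < ((1L << 31) - (1L << 11));
	}

	int main(void)
	{
		printf("%d\n", valid_32bit_offset(0));				/* 1 */
		printf("%d\n", valid_32bit_offset((1L << 31) - (1L << 11)));	/* 0: upper bound excluded */
		printf("%d\n", valid_32bit_offset(-(1L << 31) - (1L << 11)));	/* 1: lower bound included */
		return 0;
	}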
@@ -12,6 +12,30 @@
 #include "encls.h"
 #include "sgx.h"
 
+/*
+ * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
+ * follow right after the EPC data in the backing storage. In addition to the
+ * visible enclave pages, there's one extra page slot for SECS, before PCMD
+ * structs.
+ */
+static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
+							    unsigned long page_index)
+{
+	pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);
+
+	return epc_end_off + page_index * sizeof(struct sgx_pcmd);
+}
+
+/*
+ * Free a page from the backing storage in the given page index.
+ */
+static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
+{
+	struct inode *inode = file_inode(encl->backing);
+
+	shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
+}
+
 /*
  * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
  * Pages" in the SDM.

@@ -22,9 +46,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 {
 	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
 	struct sgx_encl *encl = encl_page->encl;
+	pgoff_t page_index, page_pcmd_off;
 	struct sgx_pageinfo pginfo;
 	struct sgx_backing b;
-	pgoff_t page_index;
+	bool pcmd_page_empty;
+	u8 *pcmd_page;
 	int ret;
 
 	if (secs_page)

@@ -32,14 +58,16 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 	else
 		page_index = PFN_DOWN(encl->size);
 
+	page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
+
 	ret = sgx_encl_get_backing(encl, page_index, &b);
 	if (ret)
 		return ret;
 
 	pginfo.addr = encl_page->desc & PAGE_MASK;
 	pginfo.contents = (unsigned long)kmap_atomic(b.contents);
-	pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
-			  b.pcmd_offset;
+	pcmd_page = kmap_atomic(b.pcmd);
+	pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;
 
 	if (secs_page)
 		pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);

@@ -55,11 +83,24 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 		ret = -EFAULT;
 	}
 
-	kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
+	memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
+
+	/*
+	 * The area for the PCMD in the page was zeroed above. Check if the
+	 * whole page is now empty meaning that all PCMD's have been zeroed:
+	 */
+	pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);
+
+	kunmap_atomic(pcmd_page);
 	kunmap_atomic((void *)(unsigned long)pginfo.contents);
 
 	sgx_encl_put_backing(&b, false);
 
+	sgx_encl_truncate_backing_page(encl, page_index);
+
+	if (pcmd_page_empty)
+		sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
+
 	return ret;
 }
 
@@ -579,7 +620,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
 int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
 			 struct sgx_backing *backing)
 {
-	pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
+	pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
 	struct page *contents;
 	struct page *pcmd;
 

@@ -587,7 +628,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
 	if (IS_ERR(contents))
 		return PTR_ERR(contents);
 
-	pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
+	pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
 	if (IS_ERR(pcmd)) {
 		put_page(contents);
 		return PTR_ERR(pcmd);

@@ -596,9 +637,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
 	backing->page_index = page_index;
 	backing->contents = contents;
 	backing->pcmd = pcmd;
-	backing->pcmd_offset =
-		(page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
-		sizeof(struct sgx_pcmd);
+	backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);
 
 	return 0;
 }
@@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt);
  */
 void __init e820__reserve_setup_data(void)
 {
+	struct setup_indirect *indirect;
 	struct setup_data *data;
-	u64 pa_data;
+	u64 pa_data, pa_next;
+	u32 len;
 
 	pa_data = boot_params.hdr.setup_data;
 	if (!pa_data)

@@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(void)
 
 	while (pa_data) {
 		data = early_memremap(pa_data, sizeof(*data));
+		if (!data) {
+			pr_warn("e820: failed to memremap setup_data entry\n");
+			return;
+		}
+
+		len = sizeof(*data);
+		pa_next = data->next;
+
 		e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
 
 		/*

@@ -1015,18 +1025,27 @@ void __init e820__reserve_setup_data(void)
 					sizeof(*data) + data->len,
 					E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
 
-		if (data->type == SETUP_INDIRECT &&
-		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-			e820__range_update(((struct setup_indirect *)data->data)->addr,
-					   ((struct setup_indirect *)data->data)->len,
-					   E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
-			e820__range_update_kexec(((struct setup_indirect *)data->data)->addr,
-						 ((struct setup_indirect *)data->data)->len,
-						 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+		if (data->type == SETUP_INDIRECT) {
+			len += data->len;
+			early_memunmap(data, sizeof(*data));
+			data = early_memremap(pa_data, len);
+			if (!data) {
+				pr_warn("e820: failed to memremap indirect setup_data\n");
+				return;
+			}
+
+			indirect = (struct setup_indirect *)data->data;
+
+			if (indirect->type != SETUP_INDIRECT) {
+				e820__range_update(indirect->addr, indirect->len,
+						   E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+				e820__range_update_kexec(indirect->addr, indirect->len,
+							 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+			}
 		}
 
-		pa_data = data->next;
-		early_memunmap(data, sizeof(*data));
+		pa_data = pa_next;
+		early_memunmap(data, len);
 	}
 
 	e820__update_table(e820_table);
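Note: this and the following setup_data hunks all apply the same fix pattern — map only sizeof(struct setup_data) first, and only after reading data->len remap the full header-plus-payload before dereferencing data->data. A schematic sketch of the shape (the map/unmap callbacks and the indirect constant stand in for early_memremap()/early_memunmap() and SETUP_INDIRECT; not kernel API):

	#define DEMO_SETUP_INDIRECT 0x80000000u	/* stand-in value */

	struct setup_data_hdr {
		unsigned long long next;
		unsigned int type;
		unsigned int len;
		/* payload (data[]) follows */
	};

	void walk_one_entry(unsigned long long pa,
			    void *(*map)(unsigned long long pa, unsigned int len),
			    void (*unmap)(void *p, unsigned int len))
	{
		unsigned int len = sizeof(struct setup_data_hdr);
		struct setup_data_hdr *data = map(pa, len);

		if (!data)
			return;

		if (data->type == DEMO_SETUP_INDIRECT) {
			len += data->len;		/* payload size known only now */
			unmap(data, sizeof(*data));
			data = map(pa, len);		/* remap header + payload together */
			if (!data)
				return;
			/* the indirect descriptor after the header is now mapped */
		}

		unmap(data, len);
	}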
@@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *parent, int no,
 
 static int __init create_setup_data_nodes(struct dentry *parent)
 {
+	struct setup_indirect *indirect;
 	struct setup_data_node *node;
 	struct setup_data *data;
-	int error;
+	u64 pa_data, pa_next;
 	struct dentry *d;
-	u64 pa_data;
+	int error;
+	u32 len;
 	int no = 0;
 
 	d = debugfs_create_dir("setup_data", parent);

@@ -112,12 +114,29 @@ static int __init create_setup_data_nodes(struct dentry *parent)
 			error = -ENOMEM;
 			goto err_dir;
 		}
+		pa_next = data->next;
 
-		if (data->type == SETUP_INDIRECT &&
-		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-			node->paddr = ((struct setup_indirect *)data->data)->addr;
-			node->type = ((struct setup_indirect *)data->data)->type;
-			node->len = ((struct setup_indirect *)data->data)->len;
+		if (data->type == SETUP_INDIRECT) {
+			len = sizeof(*data) + data->len;
+			memunmap(data);
+			data = memremap(pa_data, len, MEMREMAP_WB);
+			if (!data) {
+				kfree(node);
+				error = -ENOMEM;
+				goto err_dir;
+			}
+
+			indirect = (struct setup_indirect *)data->data;
+
+			if (indirect->type != SETUP_INDIRECT) {
+				node->paddr = indirect->addr;
+				node->type = indirect->type;
+				node->len = indirect->len;
+			} else {
+				node->paddr = pa_data;
+				node->type = data->type;
+				node->len = data->len;
+			}
 		} else {
 			node->paddr = pa_data;
 			node->type = data->type;

@@ -125,7 +144,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
 		}
 
 		create_setup_data_node(d, no, node);
-		pa_data = data->next;
+		pa_data = pa_next;
 
 		memunmap(data);
 		no++;
@@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr, u64 *paddr)
 
 static int __init get_setup_data_size(int nr, size_t *size)
 {
-	int i = 0;
+	u64 pa_data = boot_params.hdr.setup_data, pa_next;
+	struct setup_indirect *indirect;
 	struct setup_data *data;
-	u64 pa_data = boot_params.hdr.setup_data;
+	int i = 0;
+	u32 len;
 
 	while (pa_data) {
 		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
 		if (!data)
 			return -ENOMEM;
+		pa_next = data->next;
+
 		if (nr == i) {
-			if (data->type == SETUP_INDIRECT &&
-			    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
-				*size = ((struct setup_indirect *)data->data)->len;
-			else
+			if (data->type == SETUP_INDIRECT) {
+				len = sizeof(*data) + data->len;
+				memunmap(data);
+				data = memremap(pa_data, len, MEMREMAP_WB);
+				if (!data)
+					return -ENOMEM;
+
+				indirect = (struct setup_indirect *)data->data;
+
+				if (indirect->type != SETUP_INDIRECT)
+					*size = indirect->len;
+				else
+					*size = data->len;
+			} else {
 				*size = data->len;
+			}
+
 			memunmap(data);
 			return 0;
 		}
 
-		pa_data = data->next;
+		pa_data = pa_next;
 		memunmap(data);
 		i++;
 	}

@@ -120,9 +135,11 @@ static int __init get_setup_data_size(int nr, size_t *size)
 static ssize_t type_show(struct kobject *kobj,
 			 struct kobj_attribute *attr, char *buf)
 {
+	struct setup_indirect *indirect;
+	struct setup_data *data;
 	int nr, ret;
 	u64 paddr;
-	struct setup_data *data;
+	u32 len;
 
 	ret = kobj_to_setup_data_nr(kobj, &nr);
 	if (ret)

@@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject *kobj,
 	if (!data)
 		return -ENOMEM;
 
-	if (data->type == SETUP_INDIRECT)
-		ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type);
-	else
+	if (data->type == SETUP_INDIRECT) {
+		len = sizeof(*data) + data->len;
+		memunmap(data);
+		data = memremap(paddr, len, MEMREMAP_WB);
+		if (!data)
+			return -ENOMEM;
+
+		indirect = (struct setup_indirect *)data->data;
+
+		ret = sprintf(buf, "0x%x\n", indirect->type);
+	} else {
 		ret = sprintf(buf, "0x%x\n", data->type);
+	}
+
 	memunmap(data);
 	return ret;
 }

@@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(struct file *fp,
 			char *buf,
 			loff_t off, size_t count)
 {
+	struct setup_indirect *indirect;
+	struct setup_data *data;
 	int nr, ret = 0;
 	u64 paddr, len;
-	struct setup_data *data;
 	void *p;
 
 	ret = kobj_to_setup_data_nr(kobj, &nr);

@@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(struct file *fp,
 	if (!data)
 		return -ENOMEM;
 
-	if (data->type == SETUP_INDIRECT &&
-	    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-		paddr = ((struct setup_indirect *)data->data)->addr;
-		len = ((struct setup_indirect *)data->data)->len;
+	if (data->type == SETUP_INDIRECT) {
+		len = sizeof(*data) + data->len;
+		memunmap(data);
+		data = memremap(paddr, len, MEMREMAP_WB);
+		if (!data)
+			return -ENOMEM;
+
+		indirect = (struct setup_indirect *)data->data;
+
+		if (indirect->type != SETUP_INDIRECT) {
+			paddr = indirect->addr;
+			len = indirect->len;
+		} else {
+			/*
+			 * Even though this is technically undefined, return
+			 * the data as though it is a normal setup_data struct.
+			 * This will at least allow it to be inspected.
+			 */
+			paddr += sizeof(*data);
+			len = data->len;
+		}
 	} else {
 		paddr += sizeof(*data);
 		len = data->len;
@@ -273,6 +273,14 @@ int module_finalize(const Elf_Ehdr *hdr,
 			retpolines = s;
 	}
 
+	/*
+	 * See alternative_instructions() for the ordering rules between the
+	 * various patching types.
+	 */
+	if (para) {
+		void *pseg = (void *)para->sh_addr;
+		apply_paravirt(pseg, pseg + para->sh_size);
+	}
 	if (retpolines) {
 		void *rseg = (void *)retpolines->sh_addr;
 		apply_retpolines(rseg, rseg + retpolines->sh_size);

@@ -290,11 +298,6 @@ int module_finalize(const Elf_Ehdr *hdr,
 					    tseg, tseg + text->sh_size);
 	}
 
-	if (para) {
-		void *pseg = (void *)para->sh_addr;
-		apply_paravirt(pseg, pseg + para->sh_size);
-	}
-
 	/* make jump label nops */
 	jump_label_apply_nops(me);
 
@@ -369,21 +369,41 @@ static void __init parse_setup_data(void)
 
 static void __init memblock_x86_reserve_range_setup_data(void)
 {
+	struct setup_indirect *indirect;
 	struct setup_data *data;
-	u64 pa_data;
+	u64 pa_data, pa_next;
+	u32 len;
 
 	pa_data = boot_params.hdr.setup_data;
 	while (pa_data) {
 		data = early_memremap(pa_data, sizeof(*data));
+		if (!data) {
+			pr_warn("setup: failed to memremap setup_data entry\n");
+			return;
+		}
+
+		len = sizeof(*data);
+		pa_next = data->next;
+
 		memblock_reserve(pa_data, sizeof(*data) + data->len);
 
-		if (data->type == SETUP_INDIRECT &&
-		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
-			memblock_reserve(((struct setup_indirect *)data->data)->addr,
-					 ((struct setup_indirect *)data->data)->len);
+		if (data->type == SETUP_INDIRECT) {
+			len += data->len;
+			early_memunmap(data, sizeof(*data));
+			data = early_memremap(pa_data, len);
+			if (!data) {
+				pr_warn("setup: failed to memremap indirect setup_data\n");
+				return;
+			}
 
-		pa_data = data->next;
-		early_memunmap(data, sizeof(*data));
+			indirect = (struct setup_indirect *)data->data;
+
+			if (indirect->type != SETUP_INDIRECT)
+				memblock_reserve(indirect->addr, indirect->len);
+		}
+
+		pa_data = pa_next;
+		early_memunmap(data, len);
 	}
 }
 
@@ -659,6 +659,7 @@ static bool do_int3(struct pt_regs *regs)
 
 	return res == NOTIFY_STOP;
 }
+NOKPROBE_SYMBOL(do_int3);
 
 static void do_int3_user(struct pt_regs *regs)
 {
@@ -615,6 +615,7 @@ static bool memremap_is_efi_data(resource_size_t phys_addr,
 static bool memremap_is_setup_data(resource_size_t phys_addr,
 				   unsigned long size)
 {
+	struct setup_indirect *indirect;
 	struct setup_data *data;
 	u64 paddr, paddr_next;
 

@@ -627,6 +628,10 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 
 		data = memremap(paddr, sizeof(*data),
 				MEMREMAP_WB | MEMREMAP_DEC);
+		if (!data) {
+			pr_warn("failed to memremap setup_data entry\n");
+			return false;
+		}
 
 		paddr_next = data->next;
 		len = data->len;

@@ -636,10 +641,21 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 			return true;
 		}
 
-		if (data->type == SETUP_INDIRECT &&
-		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-			paddr = ((struct setup_indirect *)data->data)->addr;
-			len = ((struct setup_indirect *)data->data)->len;
+		if (data->type == SETUP_INDIRECT) {
+			memunmap(data);
+			data = memremap(paddr, sizeof(*data) + len,
+					MEMREMAP_WB | MEMREMAP_DEC);
+			if (!data) {
+				pr_warn("failed to memremap indirect setup_data\n");
+				return false;
+			}
+
+			indirect = (struct setup_indirect *)data->data;
+
+			if (indirect->type != SETUP_INDIRECT) {
+				paddr = indirect->addr;
+				len = indirect->len;
+			}
 		}
 
 		memunmap(data);

@@ -660,22 +676,51 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 						unsigned long size)
 {
+	struct setup_indirect *indirect;
 	struct setup_data *data;
 	u64 paddr, paddr_next;
 
 	paddr = boot_params.hdr.setup_data;
 	while (paddr) {
-		unsigned int len;
+		unsigned int len, size;
 
 		if (phys_addr == paddr)
 			return true;
 
 		data = early_memremap_decrypted(paddr, sizeof(*data));
+		if (!data) {
+			pr_warn("failed to early memremap setup_data entry\n");
+			return false;
+		}
+
+		size = sizeof(*data);
 
 		paddr_next = data->next;
 		len = data->len;
 
-		early_memunmap(data, sizeof(*data));
+		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+			early_memunmap(data, sizeof(*data));
+			return true;
+		}
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
-			return true;
+		if (data->type == SETUP_INDIRECT) {
+			size += len;
+			early_memunmap(data, sizeof(*data));
+			data = early_memremap_decrypted(paddr, size);
+			if (!data) {
+				pr_warn("failed to early memremap indirect setup_data\n");
+				return false;
+			}
+
+			indirect = (struct setup_indirect *)data->data;
+
+			if (indirect->type != SETUP_INDIRECT) {
+				paddr = indirect->addr;
+				len = indirect->len;
+			}
+		}
+
+		early_memunmap(data, size);
+
+		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
+			return true;
@@ -1377,11 +1377,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
 	if (info->valid & ACPI_VALID_HID) {
 		acpi_add_id(pnp, info->hardware_id.string);
 		pnp->type.platform_id = 1;
-		if (info->valid & ACPI_VALID_CID) {
-			cid_list = &info->compatible_id_list;
-			for (i = 0; i < cid_list->count; i++)
-				acpi_add_id(pnp, cid_list->ids[i].string);
-		}
+	}
+	if (info->valid & ACPI_VALID_CID) {
+		cid_list = &info->compatible_id_list;
+		for (i = 0; i < cid_list->count; i++)
+			acpi_add_id(pnp, cid_list->ids[i].string);
 	}
 	if (info->valid & ACPI_VALID_ADR) {
 		pnp->bus_address = info->address;
@@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
 	skb_data3 = skb->data[3];
 	paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
 			       DMA_TO_DEVICE);
+	if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
+		return enq_next;
 	ENI_PRV_PADDR(skb) = paddr;
 	/* prepare DMA queue entries */
 	j = 0;
@@ -8,6 +8,7 @@
 #include <linux/clk.h>
 #include <linux/crypto.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>

@@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
 {
 	unsigned int currsize = 0;
 	u32 val;
+	int ret;
 
 	/* read random data from hardware */
 	do {
-		val = readl_relaxed(rng->base + PRNG_STATUS);
-		if (!(val & PRNG_STATUS_DATA_AVAIL))
-			break;
+		ret = readl_poll_timeout(rng->base + PRNG_STATUS, val,
+					 val & PRNG_STATUS_DATA_AVAIL,
+					 200, 10000);
+		if (ret)
+			return ret;
 
 		val = readl_relaxed(rng->base + PRNG_DATA_OUT);
 		if (!val)
-			break;
+			return -EINVAL;
 
 		if ((max - currsize) >= WORD_SZ) {
 			memcpy(data, &val, WORD_SZ);

@@ -61,11 +65,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
 		} else {
 			/* copy only remaining bytes */
 			memcpy(data, &val, max - currsize);
-			break;
 		}
 	} while (currsize < max);
 
-	return currsize;
+	return 0;
 }
 
 static int qcom_rng_generate(struct crypto_rng *tfm,

@@ -87,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm,
 	mutex_unlock(&rng->lock);
 	clk_disable_unprepare(rng->clk);
 
-	return 0;
+	return ret;
 }
 
 static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
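Note: readl_poll_timeout() (linux/iopoll.h) re-reads a register until a condition holds, sleeping between reads, and returns -ETIMEDOUT once the deadline passes — so the driver above now waits up to 10000us (sampling every 200us) for PRNG_STATUS_DATA_AVAIL instead of giving up on the first empty read. A rough userspace approximation of those semantics (sketch only; assumes sleep_us stays below one second):

	#include <errno.h>
	#include <time.h>

	/* poll read_reg() every sleep_us until (val & mask), up to timeout_us */
	static int poll_timeout(unsigned int (*read_reg)(void), unsigned int mask,
				long sleep_us, long timeout_us, unsigned int *val)
	{
		struct timespec ts = { 0, sleep_us * 1000L };
		long waited = 0;

		for (;;) {
			*val = read_reg();
			if (*val & mask)
				return 0;
			if (waited >= timeout_us)
				return -ETIMEDOUT;
			nanosleep(&ts, NULL);
			waited += sleep_us;
		}
	}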
@@ -24,7 +24,7 @@ static bool dump_properties __initdata;
 static int __init dump_properties_enable(char *arg)
 {
 	dump_properties = true;
-	return 0;
+	return 1;
 }
 
 __setup("dump_apple_properties", dump_properties_enable);
@@ -212,7 +212,7 @@ static int __init efivar_ssdt_setup(char *str)
 		memcpy(efivar_ssdt, str, strlen(str));
 	else
 		pr_warn("efivar_ssdt: name too long: %s\n", str);
-	return 0;
+	return 1;
 }
 __setup("efivar_ssdt=", efivar_ssdt_setup);
 
@@ -1701,6 +1701,11 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
  */
 int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset)
 {
+#ifdef CONFIG_PINCTRL
+	if (list_empty(&gc->gpiodev->pin_ranges))
+		return 0;
+#endif
+
 	return pinctrl_gpio_request(gc->gpiodev->base + offset);
 }
 EXPORT_SYMBOL_GPL(gpiochip_generic_request);

@@ -1712,6 +1717,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
  */
 void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset)
 {
+#ifdef CONFIG_PINCTRL
+	if (list_empty(&gc->gpiodev->pin_ranges))
+		return;
+#endif
+
 	pinctrl_gpio_free(gc->gpiodev->base + offset);
 }
 EXPORT_SYMBOL_GPL(gpiochip_generic_free);
@@ -1406,6 +1406,13 @@ static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private
 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
 }
 
+static inline u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+	return IS_ALDERLAKE_P(dev_priv) ?
+	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
+	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
+}
+
 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

@@ -1510,7 +1517,13 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 val = PSR2_MAN_TRK_CTL_ENABLE;
+	u32 val = 0;
+
+	if (!IS_ALDERLAKE_P(dev_priv))
+		val = PSR2_MAN_TRK_CTL_ENABLE;
+
+	/* SF partial frame enable has to be set even on full update */
+	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
 
 	if (full_update) {
 		/*

@@ -1530,7 +1543,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
 	} else {
 		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
 
-		val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
 	}
@@ -4829,6 +4829,7 @@ enum {
 #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val)	REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
 #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK		REG_GENMASK(12, 0)
 #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val)		REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+#define  ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE		REG_BIT(31)
 #define  ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME		REG_BIT(14)
 #define  ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME		REG_BIT(13)
 
@@ -106,6 +106,7 @@ config DRM_PANEL_EDP
 	depends on PM
 	select VIDEOMODE_HELPERS
 	select DRM_DP_AUX_BUS
+	select DRM_DP_HELPER
 	help
 	  DRM panel driver for dumb eDP panels that need at most a regulator and
 	  a GPIO to be powered up. Optionally a backlight can be attached so
@@ -111,10 +111,10 @@
 /* format 13 is semi-planar YUV411 VUVU */
 #define SUN8I_MIXER_FBFMT_YUV411	14
 /* format 15 doesn't exist */
-/* format 16 is P010 YVU */
-#define SUN8I_MIXER_FBFMT_P010_YUV	17
-/* format 18 is P210 YVU */
-#define SUN8I_MIXER_FBFMT_P210_YUV	19
+#define SUN8I_MIXER_FBFMT_P010_YUV	16
+/* format 17 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV	18
+/* format 19 is P210 YVU */
 /* format 20 is packed YVU444 10-bit */
 /* format 21 is packed YUV444 10-bit */
 
@@ -1908,7 +1908,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
 
 	cb_data.card = card;
 	cb_data.status = 0;
-	err = __mmc_poll_for_busy(card->host, MMC_BLK_TIMEOUT_MS,
+	err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
 				  &mmc_blk_busy_cb, &cb_data);
 
 	/*
@@ -1962,7 +1962,7 @@ static int mmc_sleep(struct mmc_host *host)
 		goto out_release;
 	}
 
-	err = __mmc_poll_for_busy(host, timeout_ms, &mmc_sleep_busy_cb, host);
+	err = __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_sleep_busy_cb, host);
 
 out_release:
 	mmc_retune_release(host);
@@ -21,6 +21,8 @@
 
 #define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
 #define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
+#define MMC_OP_COND_PERIOD_US		(1 * 1000) /* 1ms */
+#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */
 
 static const u8 tuning_blk_pattern_4bit[] = {
 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,

@@ -232,7 +234,9 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
 
-	err = __mmc_poll_for_busy(host, 1000, &__mmc_send_op_cond_cb, &cb_data);
+	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
+				  MMC_OP_COND_TIMEOUT_MS,
+				  &__mmc_send_op_cond_cb, &cb_data);
 	if (err)
 		return err;
 

@@ -495,13 +499,14 @@ static int mmc_busy_cb(void *cb_data, bool *busy)
 	return 0;
 }
 
-int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+			unsigned int timeout_ms,
 			int (*busy_cb)(void *cb_data, bool *busy),
 			void *cb_data)
 {
 	int err;
 	unsigned long timeout;
-	unsigned int udelay = 32, udelay_max = 32768;
+	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
 	bool expired = false;
 	bool busy = false;
 

@@ -546,7 +551,7 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 	cb_data.retry_crc_err = retry_crc_err;
 	cb_data.busy_cmd = busy_cmd;
 
-	return __mmc_poll_for_busy(host, timeout_ms, &mmc_busy_cb, &cb_data);
+	return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
 }
 EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
 
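Note: as the hunk suggests, __mmc_poll_for_busy() roughly doubles its sleep period between polls, capped at udelay_max; the new period_us argument only seeds the first step, and callers passing 0 keep the old 32us default. A tiny sketch of what that backoff schedule looks like (illustration only, not the kernel loop):

	#include <stdio.h>

	int main(void)
	{
		unsigned int period_us = 1000;	/* e.g. MMC_OP_COND_PERIOD_US */
		unsigned int udelay = period_us ? period_us : 32;
		unsigned int udelay_max = 32768;

		/* print the first few poll intervals */
		for (int i = 0; i < 8; i++) {
			printf("poll %d: sleep %u us\n", i, udelay);
			if (udelay < udelay_max)
				udelay *= 2;
		}
		return 0;
	}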
@@ -41,7 +41,8 @@ int mmc_can_ext_csd(struct mmc_card *card);
 int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
 bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
 			  unsigned int timeout_ms);
-int __mmc_poll_for_busy(struct mmc_host *host, unsigned int timeout_ms,
+int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
+			unsigned int timeout_ms,
 			int (*busy_cb)(void *cb_data, bool *busy),
 			void *cb_data);
 int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
@@ -1672,7 +1672,7 @@ static int sd_poweroff_notify(struct mmc_card *card)
 
 	cb_data.card = card;
 	cb_data.reg_buf = reg_buf;
-	err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
+	err = __mmc_poll_for_busy(card->host, 0, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
 				  &sd_busy_poweroff_notify_cb, &cb_data);
 
 out:
@@ -173,6 +173,8 @@ struct meson_host {
 	int irq;
 
 	bool vqmmc_enabled;
+	bool needs_pre_post_req;
+
 };
 
 #define CMD_CFG_LENGTH_MASK GENMASK(8, 0)

@@ -663,6 +665,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc,
 	struct meson_host *host = mmc_priv(mmc);
 
 	host->cmd = NULL;
+	if (host->needs_pre_post_req)
+		meson_mmc_post_req(mmc, mrq, 0);
 	mmc_request_done(host->mmc, mrq);
 }
 

@@ -880,7 +884,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data
 static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct meson_host *host = mmc_priv(mmc);
-	bool needs_pre_post_req = mrq->data &&
+	host->needs_pre_post_req = mrq->data &&
 			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
 
 	/*

@@ -896,22 +900,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 		}
 	}
 
-	if (needs_pre_post_req) {
+	if (host->needs_pre_post_req) {
 		meson_mmc_get_transfer_mode(mmc, mrq);
 		if (!meson_mmc_desc_chain_mode(mrq->data))
-			needs_pre_post_req = false;
+			host->needs_pre_post_req = false;
 	}
 
-	if (needs_pre_post_req)
+	if (host->needs_pre_post_req)
 		meson_mmc_pre_req(mmc, mrq);
 
 	/* Stop execution */
 	writel(0, host->regs + SD_EMMC_START);
 
 	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);
-
-	if (needs_pre_post_req)
-		meson_mmc_post_req(mmc, mrq, 0);
 }
 
 static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
@@ -122,12 +122,23 @@ static const struct of_device_id ksz8795_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
 
+static const struct spi_device_id ksz8795_spi_ids[] = {
+	{ "ksz8765" },
+	{ "ksz8794" },
+	{ "ksz8795" },
+	{ "ksz8863" },
+	{ "ksz8873" },
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids);
+
 static struct spi_driver ksz8795_spi_driver = {
 	.driver = {
 		.name	= "ksz8795-switch",
 		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(ksz8795_dt_ids),
 	},
+	.id_table = ksz8795_spi_ids,
 	.probe	= ksz8795_spi_probe,
 	.remove	= ksz8795_spi_remove,
 	.shutdown = ksz8795_spi_shutdown,
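Note: registering a spi_device_id table alongside the OF match table matters for module autoloading — the SPI core derives the uevent modalias from spi_device_id names, so a driver matched by compatible string alone may never be auto-loaded. A minimal skeleton of the pattern (driver and device names here are hypothetical):

	#include <linux/module.h>
	#include <linux/spi/spi.h>

	static int demo_probe(struct spi_device *spi)
	{
		return 0;
	}

	static const struct of_device_id demo_of_ids[] = {
		{ .compatible = "vendor,demo-switch" },
		{ }
	};
	MODULE_DEVICE_TABLE(of, demo_of_ids);

	/* names must match the of_device_id entries minus the vendor prefix */
	static const struct spi_device_id demo_spi_ids[] = {
		{ "demo-switch" },
		{ }
	};
	MODULE_DEVICE_TABLE(spi, demo_spi_ids);

	static struct spi_driver demo_driver = {
		.driver = {
			.name = "demo-switch",
			.of_match_table = demo_of_ids,
		},
		.id_table = demo_spi_ids,
		.probe = demo_probe,
	};
	module_spi_driver(demo_driver);

	MODULE_LICENSE("GPL");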
@@ -96,12 +96,24 @@ static const struct of_device_id ksz9477_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
 
+static const struct spi_device_id ksz9477_spi_ids[] = {
+	{ "ksz9477" },
+	{ "ksz9897" },
+	{ "ksz9893" },
+	{ "ksz9563" },
+	{ "ksz8563" },
+	{ "ksz9567" },
+	{ },
+};
+MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids);
+
 static struct spi_driver ksz9477_spi_driver = {
 	.driver = {
 		.name	= "ksz9477-switch",
 		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(ksz9477_dt_ids),
 	},
+	.id_table = ksz9477_spi_ids,
 	.probe	= ksz9477_spi_probe,
 	.remove	= ksz9477_spi_remove,
 	.shutdown = ksz9477_spi_shutdown,
@@ -1181,8 +1181,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 	alx->hw.mtu = mtu;
 	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
 	netdev_update_features(netdev);
-	if (netif_running(netdev))
+	if (netif_running(netdev)) {
+		mutex_lock(&alx->mtx);
 		alx_reinit(alx);
+		mutex_unlock(&alx->mtx);
+	}
 	return 0;
 }
 
@@ -2533,6 +2533,4 @@ void bnx2x_register_phc(struct bnx2x *bp);
  * Meant for implicit re-load flows.
  */
 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-int bnx2x_init_firmware(struct bnx2x *bp);
-void bnx2x_release_firmware(struct bnx2x *bp);
 #endif /* bnx2x.h */
@@ -2364,24 +2364,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
 	/* is another pf loaded on this engine? */
 	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
 	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
-		/* build my FW version dword */
-		u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
-			    (bp->fw_rev << 16) + (bp->fw_eng << 24);
+		u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
+		u32 loaded_fw;
 
 		/* read loaded FW from chip */
-		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+		loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
 
-		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
-		   loaded_fw, my_fw);
+		loaded_fw_major = loaded_fw & 0xff;
+		loaded_fw_minor = (loaded_fw >> 8) & 0xff;
+		loaded_fw_rev = (loaded_fw >> 16) & 0xff;
+		loaded_fw_eng = (loaded_fw >> 24) & 0xff;
+
+		DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
+		   loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
 
 		/* abort nic load if version mismatch */
-		if (my_fw != loaded_fw) {
+		if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
+		    loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
+		    loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
+		    loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
 			if (print_err)
-				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
-					  loaded_fw, my_fw);
+				BNX2X_ERR("loaded FW incompatible. Aborting\n");
 			else
-				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
-					       loaded_fw, my_fw);
+				BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
 
 			return -EBUSY;
 		}
 	}
@@ -12319,15 +12319,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 	bnx2x_read_fwinfo(bp);
 
-	if (IS_PF(bp)) {
-		rc = bnx2x_init_firmware(bp);
-
-		if (rc) {
-			bnx2x_free_mem_bp(bp);
-			return rc;
-		}
-	}
-
 	func = BP_FUNC(bp);
 
 	/* need to reset chip if undi was active */

@@ -12340,7 +12331,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 	rc = bnx2x_prev_unload(bp);
 	if (rc) {
-		bnx2x_release_firmware(bp);
 		bnx2x_free_mem_bp(bp);
 		return rc;
 	}

@@ -13409,7 +13399,7 @@ do { \
 			     (u8 *)bp->arr, len); \
 } while (0)
 
-int bnx2x_init_firmware(struct bnx2x *bp)
+static int bnx2x_init_firmware(struct bnx2x *bp)
 {
 	const char *fw_file_name, *fw_file_name_v15;
 	struct bnx2x_fw_file_hdr *fw_hdr;

@@ -13509,7 +13499,7 @@ request_firmware_exit:
 	return rc;
 }
 
-void bnx2x_release_firmware(struct bnx2x *bp)
+static void bnx2x_release_firmware(struct bnx2x *bp)
 {
 	kfree(bp->init_ops_offsets);
 	kfree(bp->init_ops);

@@ -14026,7 +14016,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 	return 0;
 
 init_one_freemem:
-	bnx2x_release_firmware(bp);
 	bnx2x_free_mem_bp(bp);
 
 init_one_exit:
@@ -2287,8 +2287,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 		dma_length_status = status->length_status;
 		if (dev->features & NETIF_F_RXCSUM) {
 			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
-			skb->csum = (__force __wsum)ntohs(rx_csum);
-			skb->ip_summed = CHECKSUM_COMPLETE;
+			if (rx_csum) {
+				skb->csum = (__force __wsum)ntohs(rx_csum);
+				skb->ip_summed = CHECKSUM_COMPLETE;
+			}
 		}
 
 		/* DMA flags and length are still valid no matter how
@@ -2705,6 +2705,13 @@ restart_watchdog:
 	queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
 }
 
+/**
+ * iavf_disable_vf - disable VF
+ * @adapter: board private structure
+ *
+ * Set communication failed flag and free all resources.
+ * NOTE: This function is expected to be called with crit_lock being held.
+ **/
 static void iavf_disable_vf(struct iavf_adapter *adapter)
 {
 	struct iavf_mac_filter *f, *ftmp;

@@ -2759,7 +2766,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
 	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
 	iavf_shutdown_adminq(&adapter->hw);
 	adapter->netdev->flags &= ~IFF_UP;
-	mutex_unlock(&adapter->crit_lock);
 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
 	iavf_change_state(adapter, __IAVF_DOWN);
 	wake_up(&adapter->down_waitqueue);

@@ -4778,6 +4784,13 @@ static void iavf_remove(struct pci_dev *pdev)
 	struct iavf_hw *hw = &adapter->hw;
 	int err;
 
+	/* When reboot/shutdown is in progress no need to do anything
+	 * as the adapter is already REMOVE state that was set during
+	 * iavf_shutdown() callback.
+	 */
+	if (adapter->state == __IAVF_REMOVE)
+		return;
+
 	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
 	/* Wait until port initialization is complete.
 	 * There are flows where register/unregister netdev may race.
@@ -4921,7 +4921,6 @@ static void ice_remove(struct pci_dev *pdev)
 	ice_devlink_unregister_params(pf);
 	set_bit(ICE_DOWN, pf->state);
 
-	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
 	ice_deinit_lag(pf);
 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
 		ice_ptp_release(pf);

@@ -4931,6 +4930,7 @@ static void ice_remove(struct pci_dev *pdev)
 	ice_remove_arfs(pf);
 	ice_setup_mc_magic_wake(pf);
 	ice_vsi_release_all(pf);
+	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
 	ice_set_wake(pf);
 	ice_free_irq_msix_misc(pf);
 	ice_for_each_vsi(pf, i) {

@@ -6179,8 +6179,9 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
 		u64 pkts = 0, bytes = 0;
 
 		ring = READ_ONCE(rings[i]);
-		if (ring)
-			ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+		if (!ring)
+			continue;
+		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
 		vsi_stats->tx_packets += pkts;
 		vsi_stats->tx_bytes += bytes;
 		vsi->tx_restart += ring->tx_stats.restart_q;
@@ -61,6 +61,12 @@ static int ocelot_chain_to_block(int chain, bool ingress)
  */
 static int ocelot_chain_to_lookup(int chain)
 {
+	/* Backwards compatibility with older, single-chain tc-flower
+	 * offload support in Ocelot
+	 */
+	if (chain == 0)
+		return 0;
+
 	return (chain / VCAP_LOOKUP) % 10;
 }
 

@@ -69,7 +75,15 @@ static int ocelot_chain_to_lookup(int chain)
  */
 static int ocelot_chain_to_pag(int chain)
 {
-	int lookup = ocelot_chain_to_lookup(chain);
+	int lookup;
+
+	/* Backwards compatibility with older, single-chain tc-flower
+	 * offload support in Ocelot
+	 */
+	if (chain == 0)
+		return 0;
+
+	lookup = ocelot_chain_to_lookup(chain);
 
 	/* calculate PAG value as chain index relative to the first PAG */
 	return chain - VCAP_IS2_CHAIN(lookup, 0);
@@ -1587,6 +1587,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
 	pcpu_sum = kvmalloc_array(num_possible_cpus(),
 				  sizeof(struct netvsc_ethtool_pcpu_stats),
 				  GFP_KERNEL);
+	if (!pcpu_sum)
+		return;
+
 	netvsc_get_pcpu_stats(dev, pcpu_sum);
 	for_each_present_cpu(cpu) {
 		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
@@ -187,6 +187,13 @@ static const struct regmap_config mscc_miim_regmap_config = {
 	.reg_stride	= 4,
 };
 
+static const struct regmap_config mscc_miim_phy_regmap_config = {
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+	.name		= "phy",
+};
+
 int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
 		    struct regmap *mii_regmap, int status_offset)
 {

@@ -250,7 +257,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
 	}
 
 	phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs,
-					   &mscc_miim_regmap_config);
+					   &mscc_miim_phy_regmap_config);
 	if (IS_ERR(phy_regmap)) {
 		dev_err(&pdev->dev, "Unable to create phy register regmap\n");
 		return PTR_ERR(phy_regmap);
@@ -1687,8 +1687,8 @@ static int marvell_suspend(struct phy_device *phydev)
 	int err;
 
 	/* Suspend the fiber mode first */
-	if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
-			       phydev->supported)) {
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+			      phydev->supported)) {
 		err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
 		if (err < 0)
 			goto error;

@@ -1722,8 +1722,8 @@ static int marvell_resume(struct phy_device *phydev)
 	int err;
 
 	/* Resume the fiber mode first */
-	if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
-			       phydev->supported)) {
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+			      phydev->supported)) {
 		err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
 		if (err < 0)
 			goto error;
@@ -2685,3 +2685,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
 MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver");
 MODULE_AUTHOR("Nagaraju Lakkaraju");
 MODULE_LICENSE("Dual MIT/GPL");
+
+MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW);
+MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW);
@@ -2611,36 +2611,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 		ath10k_mac_handle_beacon(ar, skb);
 
 	if (ieee80211_is_beacon(hdr->frame_control) ||
-	    ieee80211_is_probe_resp(hdr->frame_control)) {
-		struct ieee80211_mgmt *mgmt = (void *)skb->data;
-		enum cfg80211_bss_frame_type ftype;
-		u8 *ies;
-		int ies_ch;
-
+	    ieee80211_is_probe_resp(hdr->frame_control))
 		status->boottime_ns = ktime_get_boottime_ns();
 
-		if (!ar->scan_channel)
-			goto drop;
-
-		ies = mgmt->u.beacon.variable;
-
-		if (ieee80211_is_beacon(mgmt->frame_control))
-			ftype = CFG80211_BSS_FTYPE_BEACON;
-		else
-			ftype = CFG80211_BSS_FTYPE_PRESP;
-
-		ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable,
-							 skb_tail_pointer(skb) - ies,
-							 sband->band, ftype);
-
-		if (ies_ch > 0 && ies_ch != channel) {
-			ath10k_dbg(ar, ATH10K_DBG_MGMT,
-				   "channel mismatched ds channel %d scan channel %d\n",
-				   ies_ch, channel);
-			goto drop;
-		}
-	}
-
 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
 		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
 		   skb, skb->len,

@@ -2654,10 +2627,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 	ieee80211_rx_ni(ar->hw, skb);
 
 	return 0;
-
-drop:
-	dev_kfree_skb(skb);
-	return 0;
 }
 
 static int freq_to_idx(struct ath10k *ar, int freq)
@@ -1170,7 +1170,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
 		goto done;
 	}
 
-	if (msg.size == 0) {
+	if ((msg.type == VHOST_IOTLB_UPDATE ||
+	     msg.type == VHOST_IOTLB_INVALIDATE) &&
+	     msg.size == 0) {
 		ret = -EINVAL;
 		goto done;
 	}
@@ -753,7 +753,8 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
 
 	/* Iterating over all connections for all CIDs to find orphans is
 	 * inefficient.  Room for improvement here. */
-	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+	vsock_for_each_connected_socket(&vhost_transport.transport,
+					vhost_vsock_reset_orphans);
 
 	/* Don't check the owner, because we are in the release path, so we
 	 * need to stop the vsock device in any case.
@@ -703,7 +703,7 @@ static int afs_writepages_region(struct address_space *mapping,
 	struct folio *folio;
 	struct page *head_page;
 	ssize_t ret;
-	int n;
+	int n, skips = 0;
 
 	_enter("%llx,%llx,", start, end);
 

@@ -754,8 +754,15 @@ static int afs_writepages_region(struct address_space *mapping,
 #ifdef CONFIG_AFS_FSCACHE
 				folio_wait_fscache(folio);
 #endif
+			} else {
+				start += folio_size(folio);
 			}
 			folio_put(folio);
+			if (wbc->sync_mode == WB_SYNC_NONE) {
+				if (skips >= 5 || need_resched())
+					break;
+				skips++;
+			}
 			continue;
 		}
 
@@ -28,6 +28,11 @@ struct cachefiles_xattr {
 static const char cachefiles_xattr_cache[] =
 	XATTR_USER_PREFIX "CacheFiles.cache";
 
+struct cachefiles_vol_xattr {
+	__be32	reserved;	/* Reserved, should be 0 */
+	__u8	data[];		/* netfs volume coherency data */
+} __packed;
+
 /*
  * set the state xattr on a cache file
  */

@@ -185,6 +190,7 @@ void cachefiles_prepare_to_write(struct fscache_cookie *cookie)
  */
 bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
 {
+	struct cachefiles_vol_xattr *buf;
 	unsigned int len = volume->vcookie->coherency_len;
 	const void *p = volume->vcookie->coherency;
 	struct dentry *dentry = volume->dentry;

@@ -192,10 +198,17 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
 
 	_enter("%x,#%d", volume->vcookie->debug_id, len);
 
+	len += sizeof(*buf);
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf)
+		return false;
+	buf->reserved = cpu_to_be32(0);
+	memcpy(buf->data, p, len);
+
 	ret = cachefiles_inject_write_error();
 	if (ret == 0)
 		ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
-				   p, len, 0);
+				   buf, len, 0);
 	if (ret < 0) {
 		trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret,
 					   cachefiles_trace_setxattr_error);

@@ -209,6 +222,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
 				    cachefiles_coherency_vol_set_ok);
 	}
 
+	kfree(buf);
 	_leave(" = %d", ret);
 	return ret == 0;
 }

@@ -218,7 +232,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
  */
 int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
 {
-	struct cachefiles_xattr *buf;
+	struct cachefiles_vol_xattr *buf;
 	struct dentry *dentry = volume->dentry;
 	unsigned int len = volume->vcookie->coherency_len;
 	const void *p = volume->vcookie->coherency;

@@ -228,6 +242,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
 
 	_enter("");
 
+	len += sizeof(*buf);
 	buf = kmalloc(len, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;

@@ -245,7 +260,9 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
 				  "Failed to read xattr with error %zd", xlen);
 		}
 		why = cachefiles_coherency_vol_check_xattr;
-	} else if (memcmp(buf->data, p, len) != 0) {
+	} else if (buf->reserved != cpu_to_be32(0)) {
+		why = cachefiles_coherency_vol_check_resv;
+	} else if (memcmp(buf->data, p, len - sizeof(*buf)) != 0) {
 		why = cachefiles_coherency_vol_check_cmp;
 	} else {
 		why = cachefiles_coherency_vol_check_ok;
@@ -1105,17 +1105,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 		goto read_super_error;
 	}
 
-	root = d_make_root(inode);
-	if (!root) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto read_super_error;
-	}
-
-	sb->s_root = root;
-
-	ocfs2_complete_mount_recovery(osb);
-
 	osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL,
 						&ocfs2_kset->kobj);
 	if (!osb->osb_dev_kset) {

@@ -1133,6 +1122,17 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 		goto read_super_error;
 	}
 
+	root = d_make_root(inode);
+	if (!root) {
+		status = -ENOMEM;
+		mlog_errno(status);
+		goto read_super_error;
+	}
+
+	sb->s_root = root;
+
+	ocfs2_complete_mount_recovery(osb);
+
 	if (ocfs2_mount_local(osb))
 		snprintf(nodestr, sizeof(nodestr), "local");
 	else
fs/pipe.c | 11

@@ -253,7 +253,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 	 */
 	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
 	for (;;) {
-		unsigned int head = pipe->head;
+		/* Read ->head with a barrier vs post_one_notification() */
+		unsigned int head = smp_load_acquire(&pipe->head);
 		unsigned int tail = pipe->tail;
 		unsigned int mask = pipe->ring_size - 1;
 

@@ -831,10 +832,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 	int i;
 
 #ifdef CONFIG_WATCH_QUEUE
-	if (pipe->watch_queue) {
+	if (pipe->watch_queue)
 		watch_queue_clear(pipe->watch_queue);
-		put_watch_queue(pipe->watch_queue);
-	}
 #endif
 
 	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);

@@ -844,6 +843,10 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 		if (buf->ops)
 			pipe_buf_release(pipe, buf);
 	}
+#ifdef CONFIG_WATCH_QUEUE
+	if (pipe->watch_queue)
+		put_watch_queue(pipe->watch_queue);
+#endif
 	if (pipe->tmp_page)
 		__free_page(pipe->tmp_page);
 	kfree(pipe->bufs);
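Note: the smp_load_acquire() on pipe->head pairs with the release store the writer side uses when publishing a new head, so a reader that observes the new head index also observes the buffer contents written before it. A generic two-thread sketch of the idiom, with C11 atomics standing in for the kernel macros:

	#include <stdatomic.h>

	static int payload;
	static atomic_uint head;

	void producer(void)
	{
		payload = 42;	/* fill the slot first */
		atomic_store_explicit(&head, 1, memory_order_release);	/* then publish */
	}

	int consumer(void)
	{
		if (atomic_load_explicit(&head, memory_order_acquire) == 1)
			return payload;	/* guaranteed to see 42 */
		return -1;	/* not published yet */
	}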
@@ -52,6 +52,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
 	case ARPHRD_VOID:
 	case ARPHRD_NONE:
 	case ARPHRD_RAWIP:
+	case ARPHRD_PIMREG:
 		return false;
 	default:
 		return true;
@@ -28,7 +28,8 @@ struct watch_type_filter {
 struct watch_filter {
 	union {
 		struct rcu_head	rcu;
-		unsigned long	type_filter[2];	/* Bitmask of accepted types */
+		/* Bitmask of accepted types */
+		DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
 	};
 	u32			nr_filters;	/* Number of filters */
 	struct watch_type_filter filters[];
@@ -205,7 +205,8 @@ struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
 struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
 					 struct sockaddr_vm *dst);
 void vsock_remove_sock(struct vsock_sock *vsk);
-void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+void vsock_for_each_connected_socket(struct vsock_transport *transport,
+				     void (*fn)(struct sock *sk));
 int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
 bool vsock_find_cid(unsigned int cid);
 
@@ -97,7 +97,6 @@ struct nf_conn {
 	unsigned long status;
 
 	u16		cpu;
-	u16		local_origin:1;
 	possible_net_t ct_net;
 
 #if IS_ENABLED(CONFIG_NF_NAT)
@ -56,6 +56,7 @@ enum cachefiles_coherency_trace {
cachefiles_coherency_set_ok,
cachefiles_coherency_vol_check_cmp,
cachefiles_coherency_vol_check_ok,
cachefiles_coherency_vol_check_resv,
cachefiles_coherency_vol_check_xattr,
cachefiles_coherency_vol_set_fail,
cachefiles_coherency_vol_set_ok,

@ -139,6 +140,7 @@ enum cachefiles_error_trace {
EM(cachefiles_coherency_set_ok, "SET ok ") \
EM(cachefiles_coherency_vol_check_cmp, "VOL BAD cmp ") \
EM(cachefiles_coherency_vol_check_ok, "VOL OK ") \
EM(cachefiles_coherency_vol_check_resv, "VOL BAD resv") \
EM(cachefiles_coherency_vol_check_xattr,"VOL BAD xatt") \
EM(cachefiles_coherency_vol_set_fail, "VOL SET fail") \
E_(cachefiles_coherency_vol_set_ok, "VOL SET ok ")

@ -16,6 +16,7 @@ CONFIG_SYMBOLIC_ERRNAME=y
#
# Compile-time checks and compiler options
#
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_FRAME_WARN=2048

@ -7790,7 +7790,7 @@ int ftrace_is_dead(void)

/**
* register_ftrace_function - register a function for profiling
* @ops - ops structure that holds the function for profiling.
* @ops: ops structure that holds the function for profiling.
*
* Register a function to be called by all functions in the
* kernel.

@ -7817,7 +7817,7 @@ EXPORT_SYMBOL_GPL(register_ftrace_function);

/**
* unregister_ftrace_function - unregister a function for profiling.
* @ops - ops structure that holds the function to unregister
* @ops: ops structure that holds the function to unregister
*
* Unregister a function that was added to be called by ftrace profiling.
*/

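Both ftrace hunks are kernel-doc syntax fixes: a parameter description must be introduced as '@name:' with a colon, and scripts/kernel-doc warns on the dash form. For reference, a well-formed kernel-doc comment for a made-up function:

/**
 * my_register_handler - attach a handler to a (hypothetical) event list
 * @handler: callback invoked for each event
 * @priority: lower values run first
 *
 * Return: 0 on success or a negative errno on failure.
 */
int my_register_handler(int (*handler)(void *data), int priority);
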
@ -1386,6 +1386,26 @@ static int run_osnoise(void)
osnoise_stop_tracing();
}

/*
* In some cases, notably when running on a nohz_full CPU with
* a stopped tick, PREEMPT_RCU has no way to account for QSs.
* This will eventually cause unwarranted noise as PREEMPT_RCU
* will force preemption as the means of ending the current
* grace period. We avoid this problem by calling
* rcu_momentary_dyntick_idle(), which performs a zero duration
* EQS allowing PREEMPT_RCU to end the current grace period.
* This call shouldn't be wrapped inside an RCU critical
* section.
*
* Note that in non PREEMPT_RCU kernels QSs are handled through
* cond_resched()
*/
if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
local_irq_disable();
rcu_momentary_dyntick_idle();
local_irq_enable();
}

/*
* For the non-preemptive kernel config: let threads run, if
* they so wish.

@ -2200,6 +2220,17 @@ static void osnoise_workload_stop(void)
if (osnoise_has_registered_instances())
return;

/*
* If callbacks were already disabled in a previous stop
* call, there is no need to disable them again.
*
* For instance, this happens when tracing is stopped via:
* echo 0 > tracing_on
* echo nop > current_tracer.
*/
if (!trace_osnoise_callback_enabled)
return;

trace_osnoise_callback_enabled = false;
/*
* Make sure that ftrace_nmi_enter/exit() see

@ -54,6 +54,7 @@ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
bit += page->index;

set_bit(bit, wqueue->notes_bitmap);
generic_pipe_buf_release(pipe, buf);
}

// No try_steal function => no stealing

@ -112,7 +113,7 @@ static bool post_one_notification(struct watch_queue *wqueue,
buf->offset = offset;
buf->len = len;
buf->flags = PIPE_BUF_FLAG_WHOLE;
pipe->head = head + 1;
smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
spin_unlock_irq(&pipe->rd_wait.lock);

@ -219,7 +220,6 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
struct page **pages;
unsigned long *bitmap;
unsigned long user_bufs;
unsigned int bmsize;
int ret, i, nr_pages;

if (!wqueue)

@ -243,7 +243,8 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
goto error;
}

ret = pipe_resize_ring(pipe, nr_notes);
nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
if (ret < 0)
goto error;

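Two things change in this hunk: nr_notes is first rounded up to a whole number of pages' worth of notes, and the pipe ring is sized with roundup_pow_of_two(). The ring indexing uses head & (pipe->ring_size - 1) (visible in the pipe_read() hunk above), and that mask only behaves like a modulo when the size is a power of two. A small standalone illustration with a hand-rolled helper (not the kernel's):

#include <stdio.h>

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static unsigned long my_roundup_pow_of_two(unsigned long n)
{
    unsigned long p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned long nr_notes = 100; /* arbitrary note count */
    unsigned long ring = my_roundup_pow_of_two(nr_notes);

    /* with a pow-of-2 ring, masking is equivalent to a modulo */
    printf("ring=%lu, 130 %% ring = %lu, 130 & (ring-1) = %lu\n",
           ring, 130 % ring, 130 & (ring - 1));
    return 0;
}
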
@ -258,17 +259,15 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
}

bmsize = (nr_notes + BITS_PER_LONG - 1) / BITS_PER_LONG;
bmsize *= sizeof(unsigned long);
bitmap = kmalloc(bmsize, GFP_KERNEL);
bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
if (!bitmap)
goto error_p;

memset(bitmap, 0xff, bmsize);
bitmap_fill(bitmap, nr_notes);
wqueue->notes = pages;
wqueue->notes_bitmap = bitmap;
wqueue->nr_pages = nr_pages;
wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
wqueue->nr_notes = nr_notes;
return 0;

error_p:

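bitmap_alloc(), bitmap_fill() and (in a later hunk) bitmap_free() replace the open-coded BITS_PER_LONG arithmetic, kmalloc() and 0xff memset. As far as I can tell from lib/bitmap, the allocation is simply BITS_TO_LONGS(nbits) longs, so the behaviour is equivalent; the sizing just lives in one audited helper. Userspace approximations of the helpers used here (calloc stands in for kmalloc):

#include <limits.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *my_bitmap_alloc(unsigned int nbits)
{
    return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

static void my_bitmap_fill(unsigned long *map, unsigned int nbits)
{
    /* fill whole words; the kernel helper works word-wise too, and
     * users are expected to ignore bits past nbits */
    memset(map, 0xff, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}

int main(void)
{
    unsigned long *map = my_bitmap_alloc(126);

    if (!map)
        return 1;
    my_bitmap_fill(map, 126);
    free(map);
    return 0;
}
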
@ -320,7 +319,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
tf[i].info_mask & WATCH_INFO_LENGTH)
goto err_filter;
/* Ignore any unknown types */
if (tf[i].type >= sizeof(wfilter->type_filter) * 8)
if (tf[i].type >= WATCH_TYPE__NR)
continue;
nr_filter++;
}

@ -336,7 +335,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,

q = wfilter->filters;
for (i = 0; i < filter.nr_filters; i++) {
if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG)
if (tf[i].type >= WATCH_TYPE__NR)
continue;

q->type = tf[i].type;

@ -371,6 +370,7 @@ static void __put_watch_queue(struct kref *kref)

for (i = 0; i < wqueue->nr_pages; i++)
__free_page(wqueue->notes[i]);
bitmap_free(wqueue->notes_bitmap);

wfilter = rcu_access_pointer(wqueue->filter);
if (wfilter)

@ -566,7 +566,7 @@ void watch_queue_clear(struct watch_queue *wqueue)
rcu_read_lock();
spin_lock_bh(&wqueue->lock);

/* Prevent new additions and prevent notifications from happening */
/* Prevent new notifications from being stored. */
wqueue->defunct = true;

while (!hlist_empty(&wqueue->watches)) {

@ -478,7 +478,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* __read_swap_cache_async(), which has set SWAP_HAS_CACHE
* in swap_map, but not yet added its page to swap cache.
*/
cond_resched();
schedule_timeout_uninterruptible(1);
}

/*

@ -1497,6 +1497,7 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
const char *user_protocol;

master = of_find_net_device_by_node(ethernet);
of_node_put(ethernet);
if (!master)
return -EPROBE_DEFER;

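The added of_node_put() drops the reference that the earlier lookup took on the ethernet node: device-tree getters return a node with an elevated refcount that the caller must release on every path, including the -EPROBE_DEFER one. A toy model of that contract (hand-rolled stand-ins, not the real OF API):

#include <stdio.h>

struct device_node { int refcount; };

/* stand-in for an OF lookup that hands the caller a reference */
static struct device_node *fake_parse_phandle(struct device_node *n)
{
    if (n)
        n->refcount++;
    return n;
}

static void fake_node_put(struct device_node *n)
{
    if (n)
        n->refcount--;
}

static int parse_port(struct device_node *dn)
{
    struct device_node *ethernet = fake_parse_phandle(dn);

    if (!ethernet)
        return -1;
    /* ... use the node to find the master device ... */
    fake_node_put(ethernet); /* put on every path once done */
    return 0;
}

int main(void)
{
    struct device_node dn = { .refcount = 1 };

    parse_port(&dn);
    printf("refcount back to %d\n", dn.refcount); /* 1: balanced */
    return 0;
}
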
@ -812,8 +812,7 @@ int esp6_input_done2(struct sk_buff *skb, int err)
struct tcphdr *th;

offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);

if (offset < 0) {
if (offset == -1) {
err = -EINVAL;
goto out;
}

@ -1475,8 +1475,8 @@ static int __ip6_append_data(struct sock *sk,
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;

if (mtu < fragheaderlen ||
((mtu - fragheaderlen) & ~7) + fragheaderlen < sizeof(struct frag_hdr))
if (mtu <= fragheaderlen ||
((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr))
goto emsgsize;

maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -

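Tightening '<' to '<=' catches the degenerate boundary: with mtu equal to fragheaderlen, the old test still passed even though zero payload bytes fit, and the maxfraglen arithmetic that follows would operate on an empty fragment. A quick standalone check with made-up header sizes:

#include <stdio.h>

int main(void)
{
    /* illustrative sizes: 8-byte fragment header, headers fill the MTU */
    unsigned int frag_hdr_len = 8;
    unsigned int mtu = 60, fragheaderlen = 60;

    /* old check: 60 < 60 is false, so the path was NOT rejected */
    printf("old rejects: %d\n",
           mtu < fragheaderlen ||
           ((mtu - fragheaderlen) & ~7u) + fragheaderlen < frag_hdr_len);
    /* new check: 60 <= 60 rejects the degenerate case up front */
    printf("new rejects: %d\n",
           mtu <= fragheaderlen ||
           ((mtu - fragheaderlen) & ~7u) + fragheaderlen <= frag_hdr_len);
    return 0;
}
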
@ -1699,7 +1699,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad

xfrm_probe_algs();

supp_skb = compose_sadb_supported(hdr, GFP_KERNEL);
supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
if (!supp_skb) {
if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
pfk->registered &= ~(1<<hdr->sadb_msg_satype);

@ -1757,9 +1757,6 @@ resolve_normal_ct(struct nf_conn *tmpl,
return 0;
if (IS_ERR(h))
return PTR_ERR(h);

ct = nf_ct_tuplehash_to_ctrack(h);
ct->local_origin = state->hook == NF_INET_LOCAL_OUT;
}
ct = nf_ct_tuplehash_to_ctrack(h);

@ -494,38 +494,6 @@ another_round:
goto another_round;
}

static bool tuple_force_port_remap(const struct nf_conntrack_tuple *tuple)
{
u16 sp, dp;

switch (tuple->dst.protonum) {
case IPPROTO_TCP:
sp = ntohs(tuple->src.u.tcp.port);
dp = ntohs(tuple->dst.u.tcp.port);
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
sp = ntohs(tuple->src.u.udp.port);
dp = ntohs(tuple->dst.u.udp.port);
break;
default:
return false;
}

/* IANA: System port range: 1-1023,
* user port range: 1024-49151,
* private port range: 49152-65535.
*
* Linux default ephemeral port range is 32768-60999.
*
* Enforce port remapping if sport is significantly lower
* than dport to prevent NAT port shadowing, i.e.
* accidental match of 'new' inbound connection vs.
* existing outbound one.
*/
return sp < 16384 && dp >= 32768;
}

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
* we change the source to map into the range. For NF_INET_PRE_ROUTING
* and NF_INET_LOCAL_OUT, we change the destination to map into the

@ -539,17 +507,11 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
bool random_port = range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL;
const struct nf_conntrack_zone *zone;
struct net *net = nf_ct_net(ct);

zone = nf_ct_zone(ct);

if (maniptype == NF_NAT_MANIP_SRC &&
!random_port &&
!ct->local_origin)
random_port = tuple_force_port_remap(orig_tuple);

/* 1) If this srcip/proto/src-proto-part is currently mapped,
* and that same mapping gives a unique tuple within the given
* range, use that.

@ -558,7 +520,8 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
* So far, we don't do local source mappings, so multiple
* manips not an issue.
*/
if (maniptype == NF_NAT_MANIP_SRC && !random_port) {
if (maniptype == NF_NAT_MANIP_SRC &&
!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
/* try the original tuple first */
if (in_range(orig_tuple, range)) {
if (!nf_nat_used_tuple(orig_tuple, ct)) {

@ -582,7 +545,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
*/

/* Only bother mapping if it's not already in range and unique */
if (!random_port) {
if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
l4proto_in_range(tuple, maniptype,

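These nf_nat_core.c hunks (together with the nf_conn.local_origin removal above) back out the port-shadowing heuristic. For reference, the dropped predicate is easy to reproduce standalone; the thresholds come straight from the deleted comment:

#include <stdio.h>

/* The removed heuristic, reproduced standalone: remap when the source
 * port is well below the ephemeral range but the destination port is
 * inside it (Linux default ephemeral range: 32768-60999). */
static int force_port_remap(unsigned short sp, unsigned short dp)
{
    return sp < 16384 && dp >= 32768;
}

int main(void)
{
    /* DNS-style flow: src 53 -> dst 40000 would have been remapped */
    printf("53 -> 40000: %d\n", force_port_remap(53, 40000));
    /* ordinary client flow: src 51000 -> dst 443 left alone */
    printf("51000 -> 443: %d\n", force_port_remap(51000, 443));
    return 0;
}
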
@ -8287,6 +8287,12 @@ void nf_tables_trans_destroy_flush_work(void)
}
EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);

static bool nft_expr_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
return false;
}

static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain)
{
const struct nft_expr *expr, *last;

@ -8334,8 +8340,7 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
nft_rule_for_each_expr(expr, last, rule) {
track.cur = expr;

if (expr->ops->reduce &&
expr->ops->reduce(&track, expr)) {
if (nft_expr_reduce(&track, expr)) {
expr = track.cur;
continue;
}

@ -2318,8 +2318,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
copy_skb = skb_get(skb);
skb_head = skb->data;
}
if (copy_skb)
if (copy_skb) {
memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
skb_set_owner_r(copy_skb, sk);
}
}
snaplen = po->rx_ring.frame_size - macoff;
if ((int)snaplen < 0) {

@ -3464,6 +3467,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
sock_recv_ts_and_drops(msg, sk, skb);

if (msg->msg_name) {
const size_t max_len = min(sizeof(skb->cb),
sizeof(struct sockaddr_storage));
int copy_len;

/* If the address length field is there to be filled

@ -3486,6 +3491,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
msg->msg_namelen = sizeof(struct sockaddr_ll);
}
}
if (WARN_ON_ONCE(copy_len > max_len)) {
copy_len = max_len;
msg->msg_namelen = copy_len;
}
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}

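The new max_len bound and WARN_ON_ONCE() clamp mean the sockaddr copy can never exceed either the skb control buffer or a sockaddr_storage, so a bogus protocol-supplied address length cannot drive the memcpy() past the cb area. The shape of the defence, reduced to plain C with illustrative sizes:

#include <stdio.h>
#include <string.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    /* illustrative stand-ins for skb->cb and the user-facing address */
    char cb[48] = "illustrative payload";
    char name[128];
    size_t max_len = min(sizeof(cb), sizeof(name));
    size_t copy_len = 64; /* hypothetical over-long address length */

    if (copy_len > max_len) { /* the defensive clamp from the hunk */
        fprintf(stderr, "clamping %zu to %zu\n", copy_len, max_len);
        copy_len = max_len;
    }
    memcpy(name, cb, copy_len); /* now provably in bounds */
    return 0;
}
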
@ -334,7 +334,8 @@ void vsock_remove_sock(struct vsock_sock *vsk)
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
void vsock_for_each_connected_socket(struct vsock_transport *transport,
void (*fn)(struct sock *sk))
{
int i;

@ -343,8 +344,12 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
connected_table)
connected_table) {
if (vsk->transport != transport)
continue;

fn(sk_vsock(vsk));
}
}

spin_unlock_bh(&vsock_table_lock);

@ -24,6 +24,7 @@
static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
struct virtio_device *vdev;

@ -384,7 +385,8 @@ static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
switch (le32_to_cpu(event->id)) {
case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
virtio_vsock_update_guest_cid(vsock);
vsock_for_each_connected_socket(virtio_vsock_reset_sock);
vsock_for_each_connected_socket(&virtio_transport.transport,
virtio_vsock_reset_sock);
break;
}
}

@ -662,7 +664,8 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
synchronize_rcu();

/* Reset all connected sockets when the device disappears */
vsock_for_each_connected_socket(virtio_vsock_reset_sock);
vsock_for_each_connected_socket(&virtio_transport.transport,
virtio_vsock_reset_sock);

/* Stop all work handlers to make sure no one is accessing the device,
* so we can safely call virtio_reset_device().

@ -75,6 +75,8 @@ static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;

static int PROTOCOL_OVERRIDE = -1;

static struct vsock_transport vmci_transport; /* forward declaration */

/* Helper function to convert from a VMCI error code to a VSock error code. */

static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)

@ -882,7 +884,8 @@ static void vmci_transport_qp_resumed_cb(u32 sub_id,
const struct vmci_event_data *e_data,
void *client_data)
{
vsock_for_each_connected_socket(vmci_transport_handle_detach);
vsock_for_each_connected_socket(&vmci_transport,
vmci_transport_handle_detach);
}

static void vmci_transport_recv_pkt_work(struct work_struct *work)

@ -281,6 +281,11 @@ struct kvm_arm_copy_mte_tags {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)

#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2

/* SVE registers */
#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)

@ -204,7 +204,7 @@
/* FREE! ( 7*32+10) */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */

@ -106,7 +106,7 @@ static void nest_epollfd(void)
printinfo("Nesting level(s): %d\n", nested);

epollfdp = calloc(nested, sizeof(int));
if (!epollfd)
if (!epollfdp)
err(EXIT_FAILURE, "calloc");

for (i = 0; i < nested; i++) {

@ -1648,6 +1648,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
{
struct parse_events_term *term;
struct list_head *list = NULL;
struct list_head *orig_head = NULL;
struct perf_pmu *pmu = NULL;
int ok = 0;
char *config;

@ -1674,7 +1675,6 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
}
list_add_tail(&term->list, head);

/* Add it for all PMUs that support the alias */
list = malloc(sizeof(struct list_head));
if (!list)

@ -1687,13 +1687,15 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,

list_for_each_entry(alias, &pmu->aliases, list) {
if (!strcasecmp(alias->name, str)) {
parse_events_copy_term_list(head, &orig_head);
if (!parse_events_add_pmu(parse_state, list,
pmu->name, head,
pmu->name, orig_head,
true, true)) {
pr_debug("%s -> %s/%s/\n", str,
pmu->name, alias->str);
ok++;
}
parse_events_terms__delete(orig_head);
}
}
}

@ -2193,7 +2195,7 @@ int perf_pmu__test_parse_init(void)
for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
tmp->type = symbols[i].type;
tmp->symbol = strdup(symbols[i].symbol);
if (!list->symbol)
if (!tmp->symbol)
goto err_free;
}

@ -880,9 +880,8 @@ EOF
return $ksft_skip
fi

# test default behaviour. Packet from ns1 to ns0 is not redirected
# due to automatic port translation.
test_port_shadow "default" "ROUTER"
# test default behaviour. Packet from ns1 to ns0 is redirected to ns2.
test_port_shadow "default" "CLIENT"

# test packet filter based mitigation: prevent forwarding of
# packets claiming to come from the service port.

@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for vm selftests

LOCAL_HDRS += $(selfdir)/vm/local_config.h $(top_srcdir)/mm/gup_test.h

include local_config.mk

uname_M := $(shell uname -m 2>/dev/null || echo not)

@ -140,10 +142,6 @@ endif

$(OUTPUT)/mlock-random-test $(OUTPUT)/memfd_secret: LDLIBS += -lcap

$(OUTPUT)/gup_test: ../../../../mm/gup_test.h

$(OUTPUT)/hmm-tests: local_config.h

# HMM_EXTRA_LIBS may get set in local_config.mk, or it may be left empty.
$(OUTPUT)/hmm-tests: LDLIBS += $(HMM_EXTRA_LIBS)