Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit fc993be36f
Author: Jakub Kicinski <kuba@kernel.org>
Date: 2021-12-02 11:44:56 -08:00
221 changed files with 3008 additions and 1455 deletions


@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon)
--------------------------------
ksmbd.mountd is a userspace process that transfers the user account and password that
are registered using ksmbd.adduser(part of utils for user space). Further it
are registered using ksmbd.adduser (part of utils for user space). Further it
allows sharing of information parameters parsed from smb.conf to ksmbd in the
kernel. For the execution part it has a daemon which runs continuously and is
connected to the kernel interface using a netlink socket; it waits for the
requests(dcerpc and share/user info). It handles RPC calls (at a minimum few
requests (dcerpc and share/user info). It handles RPC calls (at a minimum few
dozen) that are most important for a file server, such as NetShareEnum and
NetServerGetInfo. The complete DCE/RPC response is prepared in user space and
passed over to the associated kernel thread for the client.
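As a rough illustration of that daemon loop — a minimal user-space sketch only, in which the netlink protocol number and the handle_request() helper are placeholders rather than ksmbd.mountd's actual IPC::

  #include <linux/netlink.h>
  #include <sys/socket.h>
  #include <sys/types.h>
  #include <unistd.h>

  /* Hypothetical handler for the dcerpc and share/user info requests. */
  extern void handle_request(int fd, const void *buf, ssize_t len);

  static void ipc_loop(int nl_proto)
  {
          struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
          char buf[8192];
          ssize_t n;
          int fd = socket(AF_NETLINK, SOCK_RAW, nl_proto);

          if (fd < 0)
                  return;
          if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                  goto out;
          for (;;) {
                  n = recv(fd, buf, sizeof(buf), 0); /* block for a request */
                  if (n <= 0)
                          break;
                  handle_request(fd, buf, n);
          }
  out:
          close(fd);
  }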
@ -154,11 +154,11 @@ Each layer
1. Enable all component prints
# sudo ksmbd.control -d "all"
2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma)
2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma)
# sudo ksmbd.control -d "smb"
3. Show what prints are enable.
# cat/sys/class/ksmbd-control/debug
3. Show what prints are enabled.
# cat /sys/class/ksmbd-control/debug
[smb] auth vfs oplock ipc conn [rdma]
4. Disable prints:


@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
=================================
NETWORK FILESYSTEM HELPER LIBRARY
Network Filesystem Helper Library
=================================
.. Contents:
@ -37,22 +37,22 @@ into a common call framework.
The following services are provided:
* Handles transparent huge pages (THPs).
* Handle folios that span multiple pages.
* Insulates the netfs from VM interface changes.
* Insulate the netfs from VM interface changes.
* Allows the netfs to arbitrarily split reads up into pieces, even ones that
don't match page sizes or page alignments and that may cross pages.
* Allow the netfs to arbitrarily split reads up into pieces, even ones that
don't match folio sizes or folio alignments and that may cross folios.
* Allows the netfs to expand a readahead request in both directions to meet
its needs.
* Allow the netfs to expand a readahead request in both directions to meet its
needs.
* Allows the netfs to partially fulfil a read, which will then be resubmitted.
* Allow the netfs to partially fulfil a read, which will then be resubmitted.
* Handles local caching, allowing cached data and server-read data to be
* Handle local caching, allowing cached data and server-read data to be
interleaved for a single request.
* Handles clearing of bufferage that aren't on the server.
* Handle clearing of bufferage that aren't on the server.
* Handle retrying of reads that failed, switching reads from the cache to the
server as necessary.
@ -70,22 +70,22 @@ Read Helper Functions
Three read helpers are provided::
* void netfs_readahead(struct readahead_control *ractl,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
* int netfs_readpage(struct file *file,
struct page *page,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
* int netfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos,
unsigned int len,
unsigned int flags,
struct page **_page,
void **_fsdata,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
void netfs_readahead(struct readahead_control *ractl,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
int netfs_readpage(struct file *file,
struct folio *folio,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
int netfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos,
unsigned int len,
unsigned int flags,
struct folio **_folio,
void **_fsdata,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
Each corresponds to a VM operation, with the addition of a couple of parameters
for the use of the read helpers:
@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
For ->readahead() and ->readpage(), the network filesystem should just jump
into the corresponding read helper; whereas for ->write_begin(), it may be a
little more complicated as the network filesystem might want to flush
conflicting writes or track dirty data and needs to put the acquired page if an
error occurs after calling the helper.
conflicting writes or track dirty data and needs to put the acquired folio if
an error occurs after calling the helper.
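For the simple cases this glue really is just a jump into the helper; a minimal sketch, assuming a hypothetical filesystem "myfs" whose netfs_read_request_ops table myfs_req_ops is sketched after the operations structure below::

  static int myfs_readpage(struct file *file, struct page *page)
  {
          struct folio *folio = page_folio(page);

          return netfs_readpage(file, folio, &myfs_req_ops, NULL);
  }

A ->write_begin() implementation would call netfs_write_begin() in the same way and, should its own post-processing then fail, must unlock and put the folio the helper returned.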
The helpers manage the read request, calling back into the network filesystem
through the supplied table of operations. Waits will be performed as
@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
void (*issue_op)(struct netfs_read_subrequest *subreq);
bool (*is_still_valid)(struct netfs_read_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
struct page *page, void **_fsdata);
struct folio *folio, void **_fsdata);
void (*done)(struct netfs_read_request *rreq);
void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};
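Wired up, a filesystem's table and its one required method here might look like the following — a minimal sketch in which every myfs_* name is hypothetical; the contract that matters is that the I/O started from issue_op() eventually reports completion through netfs_subreq_terminated()::

  #include <linux/netfs.h>
  #include <linux/uio.h>

  /* Hypothetical transport call; its completion handler is expected to end
   * with netfs_subreq_terminated(subreq, transferred_or_error, was_async). */
  void myfs_fetch_data(struct netfs_read_subrequest *subreq,
                       struct iov_iter *iter);

  static void myfs_issue_op(struct netfs_read_subrequest *subreq)
  {
          struct iov_iter iter;

          /* Refer to the inode range directly; no bvec table needed. */
          iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
                          subreq->start, subreq->len);
          myfs_fetch_data(subreq, &iter);
  }

  static const struct netfs_read_request_ops myfs_req_ops = {
          .issue_op       = myfs_issue_op,
  };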
@ -313,13 +313,14 @@ The operations are as follows:
There is no return value; the netfs_subreq_terminated() function should be
called to indicate whether or not the operation succeeded and how much data
it transferred. The filesystem also should not deal with setting pages
it transferred. The filesystem also should not deal with setting folios
uptodate, unlocking them or dropping their refs - the helpers need to deal
with this as they have to coordinate with copying to the local cache.
Note that the helpers have the pages locked, but not pinned. It is possible
to use the ITER_XARRAY iov iterator to refer to the range of the inode that
is being operated upon without the need to allocate large bvec tables.
Note that the helpers have the folios locked, but not pinned. It is
possible to use the ITER_XARRAY iov iterator to refer to the range of the
inode that is being operated upon without the need to allocate large bvec
tables.
* ``is_still_valid()``
@ -330,15 +331,15 @@ The operations are as follows:
* ``check_write_begin()``
[Optional] This is called from the netfs_write_begin() helper once it has
allocated/grabbed the page to be modified to allow the filesystem to flush
allocated/grabbed the folio to be modified to allow the filesystem to flush
conflicting state before allowing it to be modified.
It should return 0 if everything is now fine, -EAGAIN if the page should be
It should return 0 if everything is now fine, -EAGAIN if the folio should be
regrabbed and any other error code to abort the operation.
* ``done``
[Optional] This is called after the pages in the request have all been
[Optional] This is called after the folios in the request have all been
unlocked (and marked uptodate if applicable).
* ``cleanup``
@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
* If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
end of the slice instead of reissuing.
* Once the data is read, the pages that have been fully read/cleared:
* Once the data is read, the folios that have been fully read/cleared:
* Will be marked uptodate.
@ -398,11 +399,11 @@ The read helpers work by the following general procedure:
* Unlocked
* Any pages that need writing to the cache will then have DIO writes issued.
* Any folios that need writing to the cache will then have DIO writes issued.
* Synchronous operations will wait for reading to be complete.
* Writes to the cache will proceed asynchronously and the pages will have the
* Writes to the cache will proceed asynchronously and the folios will have the
PG_fscache mark removed when that completes.
* The request structures will be cleaned up when everything has completed.
@ -452,6 +453,9 @@ operation table looks like the following::
netfs_io_terminated_t term_func,
void *term_func_priv);
int (*prepare_write)(struct netfs_cache_resources *cres,
loff_t *_start, size_t *_len, loff_t i_size);
int (*write)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
@ -509,6 +513,14 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.
* ``prepare_write()``
[Required] Called to adjust a write to the cache and check that there is
sufficient space in the cache. The start and length values indicate the
size of the write that netfslib is proposing, and this can be adjusted by
the cache to respect DIO boundaries. The file size is passed for
information; a sketch of such an adjustment follows this method list.
* ``write()``
[Required] Called to write to the cache. The start file offset is given
@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
cache.
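A minimal sketch of such a prepare_write() adjustment, assuming a 512-byte DIO granule and a hypothetical space check mycache_has_space()::

  static int mycache_prepare_write(struct netfs_cache_resources *cres,
                                   loff_t *_start, size_t *_len,
                                   loff_t i_size)
  {
          loff_t start = round_down(*_start, 512);
          size_t len = round_up(*_start + *_len, 512) - start;

          if (!mycache_has_space(cres, start, len))    /* hypothetical */
                  return -ENOBUFS;

          *_start = start;
          *_len = len;
          return 0;
  }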
API Function Reference
======================
.. kernel-doc:: include/linux/netfs.h
.. kernel-doc:: fs/netfs/read_helper.c


@ -15994,6 +15994,7 @@ F: arch/mips/generic/board-ranchu.c
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: drivers/char/random.c
@ -16638,7 +16639,8 @@ F: drivers/iommu/s390-iommu.c
S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
M: Alexandra Winter <wintera@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@ -16649,7 +16651,8 @@ F: net/iucv/
S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
M: Alexandra Winter <wintera@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Gobble Gobble
# *DOCUMENTATION*


@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
and vice-versa 32-bit applications to call 64-bit mmap().
Required for applications doing different bitness syscalls.
config PAGE_SIZE_LESS_THAN_64KB
def_bool y
depends on !ARM64_64K_PAGES
depends on !IA64_PAGE_SIZE_64KB
depends on !PAGE_SIZE_64KB
depends on !PARISC_PAGE_SIZE_64KB
depends on !PPC_64K_PAGES
depends on !PPC_256K_PAGES
depends on !PAGE_SIZE_256KB
# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
# is not in legacy mode (compat task, unlimited stack size or


@ -91,7 +91,7 @@
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
/* TCR_EL2 Registers bits */
#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS_SHIFT 16
#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
@ -276,7 +276,7 @@
#define CPTR_EL2_TFP_SHIFT 10
/* Hyp Coprocessor Trap Register */
#define CPTR_EL2_TCPAC (1 << 31)
#define CPTR_EL2_TCPAC (1U << 31)
#define CPTR_EL2_TAM (1 << 30)
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
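The 1U is load-bearing: with 32-bit ints, (1 << 31) shifts into the sign bit, which is undefined behaviour in C, whereas (1U << 31) is well-defined and equals 0x80000000 — hence its use in the RES1 and TCPAC definitions above.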


@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
/*
* Allow the hypervisor to handle the exit with an exit handler if it has one.
*
@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
*/
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
/*
* Save PSTATE early so that we can evaluate the vcpu mode
* early on.
*/
vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
/*
* Check whether we want to repaint the state one way or
* another.
*/
early_exit_filter(vcpu, exit_code);
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);


@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
/*
* Guest PSTATE gets saved at guest fixup time in all
* cases. We still need to handle the nVHE host side here.
*/
if (!has_vhe() && ctxt->__hyp_running_vcpu)
ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);


@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
* Returns false if the guest ran in AArch32 when it shouldn't have, and
* thus should exit to the host, or true if the guest run loop can continue.
*/
static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
vcpu->arch.target = -1;
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
return false;
}
return true;
}
/* Switch to the guest for legacy non-VHE systems */
@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
/* Jump in the fire! */
exit_code = __guest_enter(vcpu);
if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
break;
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));


@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
return hyp_exit_handlers;
}
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
}
/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{


@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT
config PGTABLE_LEVELS
int
default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
default 3 if 64BIT && !PAGE_SIZE_64KB
default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
default 2
config MIPS_AUTO_PFN_OFFSET


@ -52,7 +52,7 @@ endif
vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o
vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o
targets := $(notdir $(vmlinuzobjs-y))


@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
/* All Loongson processors covered here define ExcCode 16 as GSExc. */
c->options |= MIPS_CPU_GSEXCEX;
@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
panic("Unknown Loongson Processor ID!");
break;
}
decode_configs(c);
}
#else
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }


@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_puts(m, " tx39_cache");
if (cpu_has_octeon_cache)
seq_puts(m, " octeon_cache");
if (cpu_has_fpu)
if (raw_cpu_has_fpu)
seq_puts(m, " fpu");
if (cpu_has_32fpr)
seq_puts(m, " 32fpr");


@ -202,11 +202,11 @@ vmap_stack_overflow:
mfspr r1, SPRN_SPRG_THREAD
lwz r1, TASK_CPU - THREAD(r1)
slwi r1, r1, 3
addis r1, r1, emergency_ctx@ha
addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
#else
lis r1, emergency_ctx@ha
lis r1, emergency_ctx-PAGE_OFFSET@ha
#endif
lwz r1, emergency_ctx@l(r1)
lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
EXCEPTION_PROLOG_2 0 vmap_stack_overflow
prepare_transfer_to_handler


@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
"r" (0) : "memory");
}
asm volatile("ptesync": : :"memory");
// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
} else {
for (set = 0; set < kvm->arch.tlb_sets; ++set) {
@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
rb += PPC_BIT(51); /* increment set number */
}
asm volatile("ptesync": : :"memory");
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
if (cpu_has_feature(CPU_FTR_ARCH_300))
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
}
}


@ -12,14 +12,12 @@
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <asm/csr.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>
#ifdef CONFIG_64BIT
#define KVM_MAX_VCPUS (1U << 16)
#else
#define KVM_MAX_VCPUS (1U << 9)
#endif
#define KVM_MAX_VCPUS \
((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
#define KVM_HALT_POLL_NS_DEFAULT 500000
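Worked example, assuming the privileged spec's hgatp layout (14-bit VMID field at HGATP_VMID_SHIFT = 44 on RV64; 7-bit field on RV32): KVM_MAX_VCPUS evaluates to ((1 << 14) - 1) + 1 = 16384 on RV64 and ((1 << 7) - 1) + 1 = 128 on RV32, in place of the old fixed 1U << 16 and 1U << 9 limits.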


@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
phys_addr_t size = slot->npages << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
stage2_unmap_range(kvm, gpa, size, false);
spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,


@ -102,12 +102,6 @@ extern void switch_fpu_return(void);
*/
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
/*
* Tasks that are not using SVA have mm->pasid set to zero to note that they
* will not have the valid bit set in MSR_IA32_PASID while they are running.
*/
#define PASID_DISABLED 0
/* Trap handling */
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);


@ -742,7 +742,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}
static char *prepare_command_line(void)
static char * __init prepare_command_line(void)
{
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE


@ -81,7 +81,6 @@ struct kvm_ioapic {
unsigned long irq_states[IOAPIC_NUM_PINS];
struct kvm_io_device dev;
struct kvm *kvm;
void (*ack_notifier)(void *opaque, int irq);
spinlock_t lock;
struct rtc_status rtc_status;
struct delayed_work eoi_inject;


@ -56,7 +56,6 @@ struct kvm_pic {
struct kvm_io_device dev_master;
struct kvm_io_device dev_slave;
struct kvm_io_device dev_elcr;
void (*ack_notifier)(void *opaque, int irq);
unsigned long irq_states[PIC_NUM_PINS];
};


@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
if (apic->vcpu->arch.apicv_active)
if (kvm_x86_ops.sync_pir_to_irr)
highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);


@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
if (is_tdp_mmu_enabled(kvm))
flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
return flush;
}
@ -2173,10 +2173,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
iterator->shadow_addr = root;
iterator->level = vcpu->arch.mmu->shadow_root_level;
if (iterator->level == PT64_ROOT_4LEVEL &&
if (iterator->level >= PT64_ROOT_4LEVEL &&
vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
!vcpu->arch.mmu->direct_map)
--iterator->level;
iterator->level = PT32E_ROOT_LEVEL;
if (iterator->level == PT32E_ROOT_LEVEL) {
/*
@ -4855,7 +4855,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
struct kvm_mmu_role_regs regs = {
.cr0 = cr0,
.cr4 = cr4,
.cr4 = cr4 & ~X86_CR4_PKE,
.efer = efer,
};
union kvm_mmu_role new_role;
@ -4919,7 +4919,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->direct_map = false;
update_permission_bitmask(context, true);
update_pkru_bitmask(context);
context->pkru_mask = 0;
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
@ -5025,6 +5025,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
/*
* Invalidate all MMU roles to force them to reinitialize as CPUID
* information is factored into reserved bit calculations.
*
* Correctly handling multiple vCPU models (with respect to paging and
* physical address properties) in a single VM would require tracking
* all relevant CPUID information in kvm_mmu_page_role. That is very
* undesirable as it would increase the memory requirements for
* gfn_track (see struct kvm_mmu_page_role comments). For now that
* problem is swept under the rug; KVM's CPUID API is horrific and
* it's all but impossible to solve it without introducing a new API.
*/
vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@ -5032,24 +5040,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_mmu_reset_context(vcpu);
/*
* KVM does not correctly handle changing guest CPUID after KVM_RUN, as
* MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
* tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
* faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
* sweep the problem under the rug.
*
* KVM's horrific CPUID ABI makes the problem all but impossible to
* solve, as correctly handling multiple vCPU models (with respect to
* paging and physical address properties) in a single VM would require
* tracking all relevant CPUID information in kvm_mmu_page_role. That
* is very undesirable as it would double the memory requirements for
* gfn_track (see struct kvm_mmu_page_role comments), and in practice
* no sane VMM mucks with the core vCPU model on the fly.
* Changing guest CPUID after KVM_RUN is forbidden, see the comment in
* kvm_arch_vcpu_ioctl().
*/
if (vcpu->arch.last_vmentry_cpu != -1) {
pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
}
KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@ -5369,7 +5363,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@ -5854,8 +5848,6 @@ restart:
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot)
{
bool flush = false;
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
/*
@ -5863,17 +5855,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
* logging at a 4k granularity and never creates collapsible
* 2m SPTEs during dirty logging.
*/
flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
if (flush)
if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
if (flush)
kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
read_unlock(&kvm->mmu_lock);
}
}
@ -6182,23 +6171,46 @@ void kvm_mmu_module_exit(void)
mmu_audit_disable();
}
/*
* Calculate the effective recovery period, accounting for '0' meaning "let KVM
* select a halving time of 1 hour". Returns true if recovery is enabled.
*/
static bool calc_nx_huge_pages_recovery_period(uint *period)
{
/*
* Use READ_ONCE to get the params, this may be called outside of the
* param setters, e.g. by the kthread to compute its next timeout.
*/
bool enabled = READ_ONCE(nx_huge_pages);
uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
if (!enabled || !ratio)
return false;
*period = READ_ONCE(nx_huge_pages_recovery_period_ms);
if (!*period) {
/* Make sure the period is not less than one second. */
ratio = min(ratio, 3600u);
*period = 60 * 60 * 1000 / ratio;
}
return true;
}
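Worked example, assuming the in-tree defaults for the module parameters: with nx_huge_pages_recovery_ratio = 60 and nx_huge_pages_recovery_period_ms = 0, the computed period is 60 * 60 * 1000 / 60 = 60000 ms, i.e. 1/60 of the NX huge pages are reclaimed each minute; the min(ratio, 3600u) clamp keeps the derived period from ever dropping below one second.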
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
{
bool was_recovery_enabled, is_recovery_enabled;
uint old_period, new_period;
int err;
was_recovery_enabled = nx_huge_pages_recovery_ratio;
old_period = nx_huge_pages_recovery_period_ms;
was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
err = param_set_uint(val, kp);
if (err)
return err;
is_recovery_enabled = nx_huge_pages_recovery_ratio;
new_period = nx_huge_pages_recovery_period_ms;
is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
if (is_recovery_enabled &&
(!was_recovery_enabled || old_period > new_period)) {
struct kvm *kvm;
@ -6262,18 +6274,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
static long get_nx_lpage_recovery_timeout(u64 start_time)
{
uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
bool enabled;
uint period;
if (!period && ratio) {
/* Make sure the period is not less than one second. */
ratio = min(ratio, 3600u);
period = 60 * 60 * 1000 / ratio;
}
enabled = calc_nx_huge_pages_recovery_period(&period);
return READ_ONCE(nx_huge_pages) && ratio
? start_time + msecs_to_jiffies(period) - get_jiffies_64()
: MAX_SCHEDULE_TIMEOUT;
return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
: MAX_SCHEDULE_TIMEOUT;
}
static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)


@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
int level = sp->role.level;
gfn_t base_gfn = sp->gfn;
u64 old_child_spte;
u64 *sptep;
gfn_t gfn;
int i;
trace_kvm_mmu_prepare_zap_page(sp);
@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
tdp_mmu_unlink_page(kvm, sp, shared);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
sptep = rcu_dereference(pt) + i;
gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
u64 *sptep = rcu_dereference(pt) + i;
gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
u64 old_child_spte;
if (shared) {
/*
@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
shared);
}
kvm_flush_remote_tlbs_with_address(kvm, gfn,
kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
KVM_PAGES_PER_HPAGE(level + 1));
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@ -1033,9 +1031,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
{
struct kvm_mmu_page *root;
for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
flush |= zap_gfn_range(kvm, root, range->start, range->end,
range->may_block, flush, false);
for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
flush = zap_gfn_range(kvm, root, range->start, range->end,
range->may_block, flush, false);
return flush;
}
@ -1364,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
* Clear leaf entries which could be replaced by large mappings, for
* GFNs within the slot.
*/
static bool zap_collapsible_spte_range(struct kvm *kvm,
static void zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root,
const struct kvm_memory_slot *slot,
bool flush)
const struct kvm_memory_slot *slot)
{
gfn_t start = slot->base_gfn;
gfn_t end = start + slot->npages;
@ -1378,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
tdp_root_for_each_pte(iter, root, start, end) {
retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
flush = false;
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
}
if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
@ -1393,6 +1388,7 @@ retry:
pfn, PG_LEVEL_NUM))
continue;
/* Note, a successful atomic zap also does a remote TLB flush. */
if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
/*
* The iter must explicitly re-read the SPTE because
@ -1401,30 +1397,24 @@ retry:
iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
goto retry;
}
flush = true;
}
rcu_read_unlock();
return flush;
}
/*
* Clear non-leaf entries (and free associated page tables) which could
* be replaced by large mappings, for GFNs within the slot.
*/
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot,
bool flush)
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot)
{
struct kvm_mmu_page *root;
lockdep_assert_held_read(&kvm->mmu_lock);
for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
flush = zap_collapsible_spte_range(kvm, root, slot, flush);
return flush;
zap_collapsible_spte_range(kvm, root, slot);
}
/*


@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn, unsigned long mask,
bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot,
bool flush);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,


@ -989,16 +989,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
struct vcpu_svm *svm = to_svm(vcpu);
int cpu = get_cpu();
WARN_ON(cpu != vcpu->cpu);
svm->avic_is_running = is_run;
if (!kvm_vcpu_apicv_active(vcpu))
return;
if (is_run)
avic_vcpu_load(vcpu, vcpu->cpu);
else
avic_vcpu_put(vcpu);
if (kvm_vcpu_apicv_active(vcpu)) {
if (is_run)
avic_vcpu_load(vcpu, cpu);
else
avic_vcpu_put(vcpu);
}
put_cpu();
}
void svm_vcpu_blocking(struct kvm_vcpu *vcpu)


@ -1543,28 +1543,50 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
return false;
}
static int sev_lock_for_migration(struct kvm *kvm)
static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
int r = -EBUSY;
if (dst_kvm == src_kvm)
return -EINVAL;
/*
* Bail if this VM is already involved in a migration to avoid deadlock
* between two VMs trying to migrate to/from each other.
* Bail if these VMs are already involved in a migration to avoid
* deadlock between two VMs trying to migrate to/from each other.
*/
if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
return -EBUSY;
mutex_lock(&kvm->lock);
if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
goto release_dst;
r = -EINTR;
if (mutex_lock_killable(&dst_kvm->lock))
goto release_src;
if (mutex_lock_killable(&src_kvm->lock))
goto unlock_dst;
return 0;
unlock_dst:
mutex_unlock(&dst_kvm->lock);
release_src:
atomic_set_release(&src_sev->migration_in_progress, 0);
release_dst:
atomic_set_release(&dst_sev->migration_in_progress, 0);
return r;
}
static void sev_unlock_after_migration(struct kvm *kvm)
static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
mutex_unlock(&kvm->lock);
atomic_set_release(&sev->migration_in_progress, 0);
mutex_unlock(&dst_kvm->lock);
mutex_unlock(&src_kvm->lock);
atomic_set_release(&dst_sev->migration_in_progress, 0);
atomic_set_release(&src_sev->migration_in_progress, 0);
}
@ -1607,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
dst->asid = src->asid;
dst->handle = src->handle;
dst->pages_locked = src->pages_locked;
dst->enc_context_owner = src->enc_context_owner;
src->asid = 0;
src->active = false;
src->handle = 0;
src->pages_locked = 0;
src->enc_context_owner = NULL;
INIT_LIST_HEAD(&dst->regions_list);
list_replace_init(&src->regions_list, &dst->regions_list);
list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
}
static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
@ -1666,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
bool charged = false;
int ret;
ret = sev_lock_for_migration(kvm);
if (ret)
return ret;
if (sev_guest(kvm)) {
ret = -EINVAL;
goto out_unlock;
}
source_kvm_file = fget(source_fd);
if (!file_is_kvm(source_kvm_file)) {
ret = -EBADF;
@ -1682,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
}
source_kvm = source_kvm_file->private_data;
ret = sev_lock_for_migration(source_kvm);
ret = sev_lock_two_vms(kvm, source_kvm);
if (ret)
goto out_fput;
if (!sev_guest(source_kvm)) {
if (sev_guest(kvm) || !sev_guest(source_kvm)) {
ret = -EINVAL;
goto out_source;
goto out_unlock;
}
src_sev = &to_kvm_svm(source_kvm)->sev_info;
/*
* VMs mirroring src's encryption context rely on it to keep the
* ASID allocated, but below we are clearing src_sev->asid.
*/
if (src_sev->num_mirrored_vms) {
ret = -EBUSY;
goto out_unlock;
}
dst_sev->misc_cg = get_current_misc_cg();
cg_cleanup_sev = dst_sev;
if (dst_sev->misc_cg != src_sev->misc_cg) {
@ -1728,13 +1752,11 @@ out_dst_cgroup:
sev_misc_cg_uncharge(cg_cleanup_sev);
put_misc_cg(cg_cleanup_sev->misc_cg);
cg_cleanup_sev->misc_cg = NULL;
out_source:
sev_unlock_after_migration(source_kvm);
out_unlock:
sev_unlock_two_vms(kvm, source_kvm);
out_fput:
if (source_kvm_file)
fput(source_kvm_file);
out_unlock:
sev_unlock_after_migration(kvm);
return ret;
}
@ -1953,76 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
struct file *source_kvm_file;
struct kvm *source_kvm;
struct kvm_sev_info source_sev, *mirror_sev;
struct kvm_sev_info *source_sev, *mirror_sev;
int ret;
source_kvm_file = fget(source_fd);
if (!file_is_kvm(source_kvm_file)) {
ret = -EBADF;
goto e_source_put;
goto e_source_fput;
}
source_kvm = source_kvm_file->private_data;
mutex_lock(&source_kvm->lock);
ret = sev_lock_two_vms(kvm, source_kvm);
if (ret)
goto e_source_fput;
if (!sev_guest(source_kvm)) {
/*
* Mirrors of mirrors should work, but let's not get silly. Also
* disallow out-of-band SEV/SEV-ES init if the target is already an
* SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
* created after SEV/SEV-ES initialization, e.g. to init intercepts.
*/
if (sev_guest(kvm) || !sev_guest(source_kvm) ||
is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
ret = -EINVAL;
goto e_source_unlock;
goto e_unlock;
}
/* Mirrors of mirrors should work, but let's not get silly */
if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
ret = -EINVAL;
goto e_source_unlock;
}
memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
sizeof(source_sev));
/*
* The mirror kvm holds an enc_context_owner ref so its asid can't
* disappear until we're done with it
*/
source_sev = &to_kvm_svm(source_kvm)->sev_info;
kvm_get_kvm(source_kvm);
fput(source_kvm_file);
mutex_unlock(&source_kvm->lock);
mutex_lock(&kvm->lock);
/*
* Disallow out-of-band SEV/SEV-ES init if the target is already an
* SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
* created after SEV/SEV-ES initialization, e.g. to init intercepts.
*/
if (sev_guest(kvm) || kvm->created_vcpus) {
ret = -EINVAL;
goto e_mirror_unlock;
}
source_sev->num_mirrored_vms++;
/* Set enc_context_owner and copy its encryption context over */
mirror_sev = &to_kvm_svm(kvm)->sev_info;
mirror_sev->enc_context_owner = source_kvm;
mirror_sev->active = true;
mirror_sev->asid = source_sev.asid;
mirror_sev->fd = source_sev.fd;
mirror_sev->es_active = source_sev.es_active;
mirror_sev->handle = source_sev.handle;
mirror_sev->asid = source_sev->asid;
mirror_sev->fd = source_sev->fd;
mirror_sev->es_active = source_sev->es_active;
mirror_sev->handle = source_sev->handle;
INIT_LIST_HEAD(&mirror_sev->regions_list);
ret = 0;
/*
* Do not copy ap_jump_table: the mirror does not share the same KVM
* contexts as the original, and they may have different memory views.
*/
mutex_unlock(&kvm->lock);
return 0;
e_mirror_unlock:
mutex_unlock(&kvm->lock);
kvm_put_kvm(source_kvm);
return ret;
e_source_unlock:
mutex_unlock(&source_kvm->lock);
e_source_put:
e_unlock:
sev_unlock_two_vms(kvm, source_kvm);
e_source_fput:
if (source_kvm_file)
fput(source_kvm_file);
return ret;
@ -2034,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm)
struct list_head *head = &sev->regions_list;
struct list_head *pos, *q;
WARN_ON(sev->num_mirrored_vms);
if (!sev_guest(kvm))
return;
/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
if (is_mirroring_enc_context(kvm)) {
kvm_put_kvm(sev->enc_context_owner);
struct kvm *owner_kvm = sev->enc_context_owner;
struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
mutex_lock(&owner_kvm->lock);
if (!WARN_ON(!owner_sev->num_mirrored_vms))
owner_sev->num_mirrored_vms--;
mutex_unlock(&owner_kvm->lock);
kvm_put_kvm(owner_kvm);
return;
}
mutex_lock(&kvm->lock);
/*
* Ensure that all guest tagged cache entries are flushed before
* releasing the pages back to the system for use. CLFLUSH will
@ -2064,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm)
}
}
mutex_unlock(&kvm->lock);
sev_unbind_asid(kvm, sev->handle);
sev_asid_free(sev);
}


@ -4651,7 +4651,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.load_eoi_exitmap = svm_load_eoi_exitmap,
.hwapic_irr_update = svm_hwapic_irr_update,
.hwapic_isr_update = svm_hwapic_isr_update,
.sync_pir_to_irr = kvm_lapic_find_highest_irr,
.apicv_post_state_restore = avic_post_state_restore,
.set_tss_addr = svm_set_tss_addr,


@ -79,6 +79,7 @@ struct kvm_sev_info {
struct list_head regions_list; /* List of registered regions */
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
struct kvm *enc_context_owner; /* Owner of copied encryption context */
unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
struct misc_cg *misc_cg; /* For misc cgroup accounting */
atomic_t migration_in_progress;
};


@ -1162,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
WARN_ON(!enable_vpid);
/*
* If VPID is enabled and used by vmc12, but L2 does not have a unique
* TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
* a VPID for L2, flush the current context as the effective ASID is
* common to both L1 and L2.
*
* Defer the flush so that it runs after vmcs02.EPTP has been set by
* KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
* redundant flushes further down the nested pipeline.
*
* If a TLB flush isn't required due to any of the above, and vpid12 is
* changing then the new "virtual" VPID (vpid12) will reuse the same
* "real" VPID (vpid02), and so needs to be flushed. There's no direct
* mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
* all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
* guest-physical mappings, so there is no need to sync the nEPT MMU.
* VPID is enabled and in use by vmcs12. If vpid12 is changing, then
* emulate a guest TLB flush as KVM does not track vpid12 history nor
* is the VPID incorporated into the MMU context. I.e. KVM must assume
* that the new vpid12 has never been used and thus represents a new
* guest ASID that cannot have entries in the TLB.
*/
if (!nested_has_guest_tlb_tag(vcpu)) {
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
} else if (is_vmenter &&
vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
vpid_sync_context(nested_get_vpid02(vcpu));
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
return;
}
/*
* If VPID is enabled, used by vmcs12, and vpid12 is not changing but
* does not have a unique TLB tag (ASID), i.e. EPT is disabled and
* KVM was unable to allocate a VPID for L2, flush the current context
* as the effective ASID is common to both L1 and L2.
*/
if (!nested_has_guest_tlb_tag(vcpu))
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
@ -3344,8 +3341,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
};
u32 failed_index;
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
kvm_service_local_tlb_flush_requests(vcpu);
evaluate_pending_interrupts = exec_controls_get(vmx) &
(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
@ -4502,9 +4498,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
(void)nested_get_evmcs_page(vcpu);
}
/* Service the TLB flush request for L2 before switching to L1. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
/* Service pending TLB flush requests for L2 before switching to L1. */
kvm_service_local_tlb_flush_requests(vcpu);
/*
* VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
@ -4857,6 +4852,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (!vmx->nested.cached_vmcs12)
goto out_cached_vmcs12;
vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
if (!vmx->nested.cached_shadow_vmcs12)
goto out_cached_shadow_vmcs12;
@ -5289,8 +5285,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
struct vmcs_hdr hdr;
if (ghc->gpa != vmptr &&
kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
/*
* Reads from an unbacked page return all 1s,
* which means that the 32 bits located at the


@ -5,6 +5,7 @@
#include <asm/cpu.h>
#include "lapic.h"
#include "irq.h"
#include "posted_intr.h"
#include "trace.h"
#include "vmx.h"
@ -77,13 +78,18 @@ after_clear_sn:
pi_set_on(pi_desc);
}
static bool vmx_can_use_vtd_pi(struct kvm *kvm)
{
return irqchip_in_kernel(kvm) && enable_apicv &&
kvm_arch_has_assigned_device(kvm) &&
irq_remapping_cap(IRQ_POSTING_CAP);
}
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
!kvm_vcpu_apicv_active(vcpu))
if (!vmx_can_use_vtd_pi(vcpu->kvm))
return;
/* Set SN when the vCPU is preempted */
@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
!kvm_vcpu_apicv_active(vcpu))
if (!vmx_can_use_vtd_pi(vcpu->kvm))
return 0;
WARN_ON(irqs_disabled());
@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
struct vcpu_data vcpu_info;
int idx, ret = 0;
if (!kvm_arch_has_assigned_device(kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
!kvm_vcpu_apicv_active(kvm->vcpus[0]))
if (!vmx_can_use_vtd_pi(kvm))
return 0;
idx = srcu_read_lock(&kvm->irq_srcu);


@ -2918,6 +2918,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
}
}
static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu))
return nested_get_vpid02(vcpu);
return to_vmx(vcpu)->vpid;
}
static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
@ -2930,31 +2937,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
if (enable_ept)
ept_sync_context(construct_eptp(vcpu, root_hpa,
mmu->shadow_root_level));
else if (!is_guest_mode(vcpu))
vpid_sync_context(to_vmx(vcpu)->vpid);
else
vpid_sync_context(nested_get_vpid02(vcpu));
vpid_sync_context(vmx_get_current_vpid(vcpu));
}
static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
/*
* vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
* vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
* vmx_flush_tlb_guest() for an explanation of why this is ok.
*/
vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
}
static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
/*
* vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
* or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit
* are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
* vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
* vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are
* required to flush GVA->{G,H}PA mappings from the TLB if vpid is
* disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
* i.e. no explicit INVVPID is necessary.
*/
vpid_sync_context(to_vmx(vcpu)->vpid);
vpid_sync_context(vmx_get_current_vpid(vcpu));
}
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
@ -6262,9 +6267,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
bool max_irr_updated;
bool got_posted_interrupt;
if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
return -EIO;
if (pi_test_on(&vmx->pi_desc)) {
@ -6274,22 +6279,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
* But on x86 this is just a compiler barrier anyway.
*/
smp_mb__after_atomic();
max_irr_updated =
got_posted_interrupt =
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
/*
* If we are running L2 and L1 has a new pending interrupt
* which can be injected, this may cause a vmexit or it may
* be injected into L2. Either way, this interrupt will be
* processed via KVM_REQ_EVENT, not RVI, because we do not use
* virtual interrupt delivery to inject L1 interrupts into L2.
*/
if (is_guest_mode(vcpu) && max_irr_updated)
kvm_make_request(KVM_REQ_EVENT, vcpu);
} else {
max_irr = kvm_lapic_find_highest_irr(vcpu);
got_posted_interrupt = false;
}
vmx_hwapic_irr_update(vcpu, max_irr);
/*
* Newly recognized interrupts are injected via either virtual interrupt
* delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is
* disabled in two cases:
*
* 1) If L2 is running and the vCPU has a new pending interrupt. If L1
* wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
* VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected
* into L2, but KVM doesn't use virtual interrupt delivery to inject
* interrupts into L2, and so KVM_REQ_EVENT is again needed.
*
* 2) If APICv is disabled for this vCPU, assigned devices may still
* attempt to post interrupts. The posted interrupt vector will cause
* a VM-Exit and the subsequent entry will call sync_pir_to_irr.
*/
if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
vmx_set_rvi(max_irr);
else if (got_posted_interrupt)
kvm_make_request(KVM_REQ_EVENT, vcpu);
return max_irr;
}
@ -7761,10 +7777,10 @@ static __init int hardware_setup(void)
ple_window_shrink = 0;
}
if (!cpu_has_vmx_apicv()) {
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
if (!enable_apicv)
vmx_x86_ops.sync_pir_to_irr = NULL;
}
if (cpu_has_vmx_tsc_scaling()) {
kvm_has_tsc_control = true;


@ -3258,6 +3258,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
static_call(kvm_x86_tlb_flush_current)(vcpu);
}
/*
* Service "local" TLB flush requests, which are specific to the current MMU
* context. In addition to the generic event handling in vcpu_enter_guest(),
* TLB flushes that are targeted at an MMU context also need to be serviced
* prior before nested VM-Enter/VM-Exit.
*/
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
{
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
kvm_vcpu_flush_tlb_guest(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
static void record_steal_time(struct kvm_vcpu *vcpu)
{
struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
@ -4133,6 +4156,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SGX_ATTRIBUTE:
#endif
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
case KVM_CAP_SREGS2:
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
case KVM_CAP_VCPU_ATTRIBUTES:
@ -4448,8 +4472,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
if (vcpu->arch.apicv_active)
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
return kvm_apic_get_state(vcpu, s);
}
@ -5124,6 +5147,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_cpuid __user *cpuid_arg = argp;
struct kvm_cpuid cpuid;
/*
* KVM does not correctly handle changing guest CPUID after KVM_RUN, as
* MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
* tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
* faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
* the core vCPU model on the fly, so fail.
*/
r = -EINVAL;
if (vcpu->arch.last_vmentry_cpu != -1)
goto out;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
goto out;
@ -5134,6 +5168,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
/*
* KVM_SET_CPUID{,2} after KVM_RUN is forbidden, see the comment in
* KVM_SET_CPUID case above.
*/
r = -EINVAL;
if (vcpu->arch.last_vmentry_cpu != -1)
goto out;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
goto out;
@ -9528,8 +9570,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
else {
if (vcpu->arch.apicv_active)
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (ioapic_in_kernel(vcpu->kvm))
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
}
@ -9648,10 +9689,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/* Flushing all ASIDs flushes the current ASID... */
kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
kvm_vcpu_flush_tlb_guest(vcpu);
kvm_service_local_tlb_flush_requests(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@ -9802,10 +9840,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/*
* This handles the case where a posted interrupt was
* notified with kvm_vcpu_kick.
* notified with kvm_vcpu_kick. Assigned devices can
* use the POSTED_INTR_VECTOR even if APICv is disabled,
* so do it even if APICv is disabled on this vCPU.
*/
if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
if (kvm_lapic_enabled(vcpu))
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (kvm_vcpu_exit_request(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
@ -9849,8 +9889,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
break;
if (vcpu->arch.apicv_active)
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
if (kvm_lapic_enabled(vcpu))
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (unlikely(kvm_vcpu_exit_request(vcpu))) {
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;


@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
@ -185,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
static_call(kvm_x86_tlb_flush_current)(vcpu);
}
static inline int is_pae(struct kvm_vcpu *vcpu)
{
return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);


@ -860,13 +860,14 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
if (iob->need_ts)
__blk_mq_end_request_acct(rq, now);
rq_qos_done(rq->q, rq);
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
if (!refcount_dec_and_test(&rq->ref))
continue;
blk_crypto_free_request(rq);
blk_pm_mark_last_busy(rq);
rq_qos_done(rq->q, rq);
if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
if (cur_hctx)


@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
unsigned int num;
int num;
int qid = hctx->queue_num;
bool notify = false;
blk_status_t status;
@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,


@ -1853,12 +1853,14 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
#ifdef CONFIG_ZRAM_WRITEBACK
static const struct block_device_operations zram_wb_devops = {
.open = zram_open,
.submit_bio = zram_submit_bio,
.swap_slot_free_notify = zram_slot_free_notify,
.owner = THIS_MODULE
};
#endif
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);


@ -191,6 +191,8 @@ struct ipmi_user {
struct work_struct remove_work;
};
static struct workqueue_struct *remove_work_wq;
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
__acquires(user->release_barrier)
{
@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref)
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
/* SRCU cleanup must happen in task context. */
schedule_work(&user->remove_work);
queue_work(remove_work_wq, &user->remove_work);
}
static void _ipmi_destroy_user(struct ipmi_user *user)
@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
/* We didn't find a user, deliver an error response. */
ipmi_inc_stat(intf, unhandled_commands);
msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
msg->data[1] = msg->rsp[2];
msg->data[2] = msg->rsp[4] & ~0x3;
msg->data[0] = (netfn + 1) << 2;
msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
msg->data[1] = msg->rsp[1]; /* Addr */
msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
msg->data[3] = cmd;
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
@ -4455,13 +4459,24 @@ return_unspecified:
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 3;
} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
/* commands must have at least 3 bytes, responses 4. */
if (is_cmd && (msg->rsp_size < 3)) {
/* commands must have at least 4 bytes, responses 5. */
if (is_cmd && (msg->rsp_size < 4)) {
ipmi_inc_stat(intf, invalid_commands);
goto out;
}
if (!is_cmd && (msg->rsp_size < 4))
goto return_unspecified;
if (!is_cmd && (msg->rsp_size < 5)) {
ipmi_inc_stat(intf, invalid_ipmb_responses);
/* Construct a valid error response. */
msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
msg->rsp[0] |= (1 << 2); /* Make it a response */
msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
msg->rsp[1] = msg->data[1]; /* Addr */
msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
msg->rsp[3] = msg->data[3]; /* Cmd */
msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 5;
}
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
if (rv) {
rv->done = free_smi_msg;
rv->user_data = NULL;
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
return rv;
@ -5383,6 +5399,13 @@ static int ipmi_init_msghandler(void)
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
if (!remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
goto out;
}
initialized = true;
out:
@ -5408,6 +5431,8 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
destroy_workqueue(remove_work_wq);
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);

View File

@ -929,10 +929,8 @@ static int __init amd_iommu_v2_init(void)
{
int ret;
pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
if (!amd_iommu_v2_supported()) {
pr_info("AMD IOMMUv2 functionality not available on this system\n");
pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
/*
* Load anyway to provide the symbols to other modules
* which may use AMD IOMMUv2 optionally.
@ -947,6 +945,8 @@ static int __init amd_iommu_v2_init(void)
amd_iommu_register_ppr_notifier(&ppr_nb);
pr_info("AMD IOMMUv2 loaded and initialized\n");
return 0;
out:

View File

@ -144,6 +144,7 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
{
struct dmar_drhd_unit *d;
struct intel_iommu *i;
int rc = 0;
rcu_read_lock();
if (list_empty(&dmar_drhd_units))
@ -169,11 +170,11 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
*/
if (intel_cap_smts_sanity() &&
!intel_cap_flts_sanity() && !intel_cap_slts_sanity())
return -EOPNOTSUPP;
rc = -EOPNOTSUPP;
out:
rcu_read_unlock();
return 0;
return rc;
}
int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)

View File

@ -1339,13 +1339,11 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
pte = &pte[pfn_level_offset(pfn, level)];
do {
unsigned long level_pfn;
unsigned long level_pfn = pfn & level_mask(level);
if (!dma_pte_present(pte))
goto next;
level_pfn = pfn & level_mask(level);
/* If range covers entire pagetable, free it */
if (start_pfn <= level_pfn &&
last_pfn >= level_pfn + level_size(level) - 1) {
@ -1366,7 +1364,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
freelist);
}
next:
pfn += level_size(level);
pfn = level_pfn + level_size(level);
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
if (first_pte)

View File

@ -200,8 +200,8 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte)
#define DTE_HI_MASK2 GENMASK(7, 4)
#define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */
#define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */
#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36)
#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32)
#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32)
#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36)
static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
{
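
The swap above pairs each physical-address field with the shift that actually moves it into the DTE: per the comments, PA bits [35:32] go with DTE_HI_SHIFT1 (24) and PA bits [39:36] with DTE_HI_SHIFT2 (32). A hedged round-trip sketch — the PAGE_DESC_HI_SHIFT values are not shown in this hunk, so pairing them with 24/32 is an assumption inferred from those comments:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for GENMASK_ULL(h, l). */
#define MASK(h, l)	(((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1ULL))

/* PA bits [35:32] live in DTE bits [11:8] (shift 24), and PA bits
 * [39:36] live in DTE bits [7:4] (shift 32) -- which is why
 * PAGE_DESC_HI_MASK1 must be GENMASK_ULL(35, 32), pairing with the
 * first shift, as the fix above makes it.
 */
static uint32_t pa_to_dte_hi(uint64_t pa)
{
	return (uint32_t)(((pa & MASK(35, 32)) >> 24) |
			  ((pa & MASK(39, 36)) >> 32));
}

static uint64_t dte_hi_to_pa(uint32_t dte)
{
	return (((uint64_t)dte & MASK(11, 8)) << 24) |
	       (((uint64_t)dte & MASK(7, 4)) << 32);
}

int main(void)
{
	uint64_t pa = 0xAB00000000ULL & MASK(39, 32); /* high bits only */

	printf("round trip ok: %d\n", dte_hi_to_pa(pa_to_dte_hi(pa)) == pa);
	return 0;
}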

View File

@ -567,9 +567,7 @@ config XEN_NETDEV_BACKEND
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \
PPC_64K_PAGES)
depends on PAGE_SIZE_LESS_THAN_64KB
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the

View File

@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, b53_spi_of_match);
static const struct spi_device_id b53_spi_ids[] = {
{ .name = "bcm5325" },
{ .name = "bcm5365" },
{ .name = "bcm5395" },
{ .name = "bcm5397" },
{ .name = "bcm5398" },
{ .name = "bcm53115" },
{ .name = "bcm53125" },
{ .name = "bcm53128" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, b53_spi_ids);
static struct spi_driver b53_spi_driver = {
.driver = {
.name = "b53-switch",
@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = {
.probe = b53_spi_probe,
.remove = b53_spi_remove,
.shutdown = b53_spi_shutdown,
.id_table = b53_spi_ids,
};
module_spi_driver(b53_spi_driver);

View File

@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
u16 status, u16 lpa,
u16 ctrl, u16 status, u16 lpa,
struct phylink_link_state *state)
{
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
/* The Speed and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
* still get here on link up. But we want to set an_complete
* only if AN was enabled, thus we look at BMCR_ANENABLE.
* (According to 802.3-2008 section 22.2.4.2.10, we should be
* able to get this same value from BMSR_ANEGCAPABLE, but tests
* show that these Marvell PHYs don't conform to this part of
* the specification - BMSR_ANEGCAPABLE is simply always 1.)
*/
state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
dev_err(chip->dev, "invalid PHY speed\n");
return -EINVAL;
}
} else if (state->link &&
state->interface != PHY_INTERFACE_MODE_SGMII) {
/* If Speed and Duplex Resolved register is 0 and link is up, it
* means that AN was enabled, but link partner had it disabled
* and the PHY invoked the Auto-Negotiation Bypass feature and
* linked anyway.
*/
state->duplex = DUPLEX_FULL;
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
state->speed = SPEED_2500;
else
state->speed = SPEED_1000;
} else {
state->link = false;
}
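
A toy model of the resulting state decode — the flag names are invented here and stand in for the MV88E6390_SGMII_PHY_STATUS_* bits and BMCR_ANENABLE used above:

#include <stdbool.h>
#include <stdio.h>

struct state { bool link, an_complete; };

static struct state decode(bool link_bit, bool resolved, bool an_enabled,
			   bool is_sgmii)
{
	struct state st = { 0 };

	if (resolved) {		/* speed/duplex valid: trust the bits */
		st.link = link_bit;
		st.an_complete = an_enabled;	/* only if AN was on */
	} else if (link_bit && !is_sgmii) {
		/* AN bypass: partner had AN off, link came up anyway */
		st.link = true;
	}			/* else: report link down */
	return st;
}

int main(void)
{
	struct state st = decode(true, false, true, false);

	printf("bypass link=%d an_complete=%d\n", st.link, st.an_complete);
	return 0;
}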
@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
u16 lpa, status;
u16 lpa, status, ctrl;
int err;
err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
return err;
}
err = mv88e6352_serdes_read(chip, 0x11, &status);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@ -883,9 +912,16 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
u16 lpa, status;
u16 lpa, status, ctrl;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6390_SGMII_BMCR, &ctrl);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
return err;
}
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6390_SGMII_PHY_STATUS, &status);
if (err) {
@ -900,7 +936,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
@ -1271,9 +1307,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
bool on)
{
u16 reg, pcs;
u16 reg;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6393X_SERDES_CTRL1, &reg);
if (err)
return err;
if (on)
reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
MV88E6393X_SERDES_CTRL1_RX_PDOWN);
else
reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
MV88E6393X_SERDES_CTRL1_RX_PDOWN;
return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
MV88E6393X_SERDES_CTRL1, reg);
}
static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
{
u16 reg;
int err;
/* mv88e6393x family errata 4.6:
@ -1284,26 +1342,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
* It seems that after this workaround the SERDES is automatically
* powered up (the bit is cleared), so power it down.
*/
if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
lane == MV88E6393X_PORT10_LANE) {
err = mv88e6390_serdes_read(chip, lane,
MDIO_MMD_PHYXS,
MV88E6393X_SERDES_POC, &reg);
if (err)
return err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6393X_SERDES_POC, &reg);
if (err)
return err;
reg &= ~MV88E6393X_SERDES_POC_PDOWN;
reg |= MV88E6393X_SERDES_POC_RESET;
reg &= ~MV88E6393X_SERDES_POC_PDOWN;
reg |= MV88E6393X_SERDES_POC_RESET;
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
MV88E6393X_SERDES_POC, reg);
if (err)
return err;
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
MV88E6393X_SERDES_POC, reg);
if (err)
return err;
err = mv88e6390_serdes_power_sgmii(chip, lane, false);
if (err)
return err;
}
err = mv88e6390_serdes_power_sgmii(chip, lane, false);
if (err)
return err;
return mv88e6393x_serdes_power_lane(chip, lane, false);
}
int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
{
int err;
err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
if (err)
return err;
err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
if (err)
return err;
return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
}
static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
{
u16 reg, pcs;
int err;
/* mv88e6393x family errata 4.8:
* When a SERDES port is operating in 1000BASE-X or SGMII mode link may
@ -1334,38 +1411,149 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
MV88E6393X_ERRATA_4_8_REG, reg);
}
int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
u8 cmode)
{
static const struct {
u16 dev, reg, val, mask;
} fixes[] = {
{ MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
{ MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
{ MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
{ MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
{ MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
{ MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
{ MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
};
int err, i;
u16 reg;
/* mv88e6393x family errata 5.2:
* For optimal signal integrity the following sequence should be applied
* to SERDES operating in 10G mode. These registers only apply to 10G
* operation and have no effect on other speeds.
*/
if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER)
return 0;
for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
fixes[i].reg, &reg);
if (err)
return err;
reg &= ~fixes[i].mask;
reg |= fixes[i].val;
err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
fixes[i].reg, reg);
if (err)
return err;
}
return 0;
}
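
The errata 5.2 loop above is a plain table-driven read-modify-write pass. A standalone sketch of the pattern with a stubbed register file (names and the in-memory register array are invented):

#include <stdint.h>
#include <stdio.h>

static uint16_t regs[0x10000];	/* stub standing in for MDIO accessors */

struct fix { uint16_t reg, val, mask; };

/* For each entry: keep the bits outside 'mask', force 'val' inside it --
 * the same new = (old & ~mask) | val sequence the errata loop performs.
 */
static void apply_fixes(const struct fix *fixes, int n)
{
	for (int i = 0; i < n; i++) {
		uint16_t reg = regs[fixes[i].reg];

		reg &= ~fixes[i].mask;
		reg |= fixes[i].val;
		regs[fixes[i].reg] = reg;
	}
}

int main(void)
{
	static const struct fix fixes[] = {
		{ 0x80a2, 0x8000, 0xff7f },	/* new = (old & ~0xff7f) | 0x8000 */
	};

	regs[0x80a2] = 0x1234;
	apply_fixes(fixes, 1);
	printf("0x%04x\n", regs[0x80a2]);	/* prints 0x8000 */
	return 0;
}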
static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
int lane, u8 cmode, bool on)
{
u16 reg;
int err;
err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
return 0;
/* Inband AN is broken on Amethyst in 2500base-x mode when set by
* the standard mechanism (via cmode).
* We can get around this by configuring the PCS mode to 1000base-x
* and then writing value 0x58 to register 1e.8000. (This must be done
* while SerDes receiver and transmitter are disabled, which is the
* case when this function is called.)
* It seems that when we do this configuration to 2500base-x mode (by
* changing PCS mode to 1000base-x and frequency to 3.125 GHz from
* 1.25 GHz) and then configure to sgmii or 1000base-x, the device
* thinks that it already has SerDes at 1.25 GHz and does not change
* the 1e.8000 register, leaving SerDes at 3.125 GHz.
* To avoid this, change PCS mode back to 2500base-x when disabling
* SerDes from 2500base-x mode.
*/
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6393X_SERDES_POC, &reg);
if (err)
return err;
err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
if (on)
reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
MV88E6393X_SERDES_POC_AN;
else
reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
reg |= MV88E6393X_SERDES_POC_RESET;
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
MV88E6393X_SERDES_POC, reg);
if (err)
return err;
return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
if (err)
return err;
return 0;
}
int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool on)
{
u8 cmode = chip->ports[port].cmode;
int err;
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
if (on) {
err = mv88e6393x_serdes_erratum_4_8(chip, lane);
if (err)
return err;
err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
if (err)
return err;
err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
true);
if (err)
return err;
err = mv88e6393x_serdes_power_lane(chip, lane, true);
if (err)
return err;
}
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
return mv88e6390_serdes_power_sgmii(chip, lane, on);
err = mv88e6390_serdes_power_sgmii(chip, lane, on);
break;
case MV88E6393X_PORT_STS_CMODE_5GBASER:
case MV88E6393X_PORT_STS_CMODE_10GBASER:
return mv88e6390_serdes_power_10g(chip, lane, on);
err = mv88e6390_serdes_power_10g(chip, lane, on);
break;
}
return 0;
if (err)
return err;
if (!on) {
err = mv88e6393x_serdes_power_lane(chip, lane, false);
if (err)
return err;
err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
false);
}
return err;
}

View File

@ -93,6 +93,10 @@
#define MV88E6393X_SERDES_POC_PCS_MASK 0x0007
#define MV88E6393X_SERDES_POC_RESET BIT(15)
#define MV88E6393X_SERDES_POC_PDOWN BIT(5)
#define MV88E6393X_SERDES_POC_AN BIT(3)
#define MV88E6393X_SERDES_CTRL1 0xf003
#define MV88E6393X_SERDES_CTRL1_TX_PDOWN BIT(9)
#define MV88E6393X_SERDES_CTRL1_RX_PDOWN BIT(8)
#define MV88E6393X_ERRATA_4_8_REG 0xF074
#define MV88E6393X_ERRATA_4_8_BIT BIT(14)

View File

@ -107,6 +107,7 @@
#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
/* Family-specific data and limits */
#define RTL8365MB_PHYADDRMAX 7
#define RTL8365MB_NUM_PHYREGS 32
#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
@ -176,7 +177,7 @@
#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5)
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5)
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
#define RTL8365MB_PHY_BASE 0x2000
#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
u16 val;
int ret;
if (phy > RTL8365MB_PHYADDRMAX)
return -EINVAL;
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
u32 ocp_addr;
int ret;
if (phy > RTL8365MB_PHYADDRMAX)
return -EINVAL;
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;

View File

@ -40,10 +40,12 @@
#define AQ_DEVICE_ID_AQC113DEV 0x00C0
#define AQ_DEVICE_ID_AQC113CS 0x94C0
#define AQ_DEVICE_ID_AQC113CA 0x34C0
#define AQ_DEVICE_ID_AQC114CS 0x93C0
#define AQ_DEVICE_ID_AQC113 0x04C0
#define AQ_DEVICE_ID_AQC113C 0x14C0
#define AQ_DEVICE_ID_AQC115C 0x12C0
#define AQ_DEVICE_ID_AQC116C 0x11C0
#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
@ -53,20 +55,19 @@
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
#define AQ_NIC_RATE_5GSR BIT(2)
#define AQ_NIC_RATE_2G5 BIT(3)
#define AQ_NIC_RATE_1G BIT(4)
#define AQ_NIC_RATE_100M BIT(5)
#define AQ_NIC_RATE_10M BIT(6)
#define AQ_NIC_RATE_1G_HALF BIT(7)
#define AQ_NIC_RATE_100M_HALF BIT(8)
#define AQ_NIC_RATE_10M_HALF BIT(9)
#define AQ_NIC_RATE_2G5 BIT(2)
#define AQ_NIC_RATE_1G BIT(3)
#define AQ_NIC_RATE_100M BIT(4)
#define AQ_NIC_RATE_10M BIT(5)
#define AQ_NIC_RATE_1G_HALF BIT(6)
#define AQ_NIC_RATE_100M_HALF BIT(7)
#define AQ_NIC_RATE_10M_HALF BIT(8)
#define AQ_NIC_RATE_EEE_10G BIT(10)
#define AQ_NIC_RATE_EEE_5G BIT(11)
#define AQ_NIC_RATE_EEE_2G5 BIT(12)
#define AQ_NIC_RATE_EEE_1G BIT(13)
#define AQ_NIC_RATE_EEE_100M BIT(14)
#define AQ_NIC_RATE_EEE_10G BIT(9)
#define AQ_NIC_RATE_EEE_5G BIT(10)
#define AQ_NIC_RATE_EEE_2G5 BIT(11)
#define AQ_NIC_RATE_EEE_1G BIT(12)
#define AQ_NIC_RATE_EEE_100M BIT(13)
#define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\
AQ_NIC_RATE_EEE_5G |\
AQ_NIC_RATE_EEE_2G5 |\

View File

@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
};
struct aq_stats_s {
u64 brc;
u64 btc;
u64 uprc;
u64 mprc;
u64 bprc;

View File

@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
aq_macsec_init(self);
#endif
mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
mutex_unlock(&self->fwreq_mutex);
if (err)
goto err_exit;
if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
// If DT has none or an invalid one, ask device for MAC address
mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
mutex_unlock(&self->fwreq_mutex);
eth_hw_addr_set(self->ndev, addr);
if (err)
goto err_exit;
if (!is_valid_ether_addr(self->ndev->dev_addr) ||
!aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
netdev_warn(self->ndev, "MAC is invalid, will use random.");
eth_hw_addr_random(self->ndev);
if (is_valid_ether_addr(addr) &&
aq_nic_is_valid_ether_addr(addr)) {
eth_hw_addr_set(self->ndev, addr);
} else {
netdev_warn(self->ndev, "MAC is invalid, will use random.");
eth_hw_addr_random(self->ndev);
}
}
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data[++i] = stats->mbtc;
data[++i] = stats->bbrc;
data[++i] = stats->bbtc;
data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
if (stats->brc)
data[++i] = stats->brc;
else
data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
if (stats->btc)
data[++i] = stats->btc;
else
data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
data[++i] = stats->dma_pkt_rc;
data[++i] = stats->dma_pkt_tc;
data[++i] = stats->dma_oct_rc;

View File

@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
{}
};
@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
{ AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);

View File

@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
{
unsigned int count;
WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
"Invalid tc %u (#rx=%u, #tx=%u)\n",
tc, self->rx_rings, self->tx_rings);
if (!aq_vec_is_valid_tc(self, tc))
return 0;

View File

@ -867,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct aq_stats_s *cs = &self->curr_stats;
struct aq_stats_s curr_stats = *cs;
struct hw_atl_utils_mbox mbox;
bool corrupted_stats = false;
hw_atl_utils_mpi_read_stats(self, &mbox);
#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
mbox.stats._N_ - self->last_stats._N_)
#define AQ_SDELTA(_N_) \
do { \
if (!corrupted_stats && \
((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
else \
corrupted_stats = true; \
} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
@ -892,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(bbrc);
AQ_SDELTA(bbtc);
AQ_SDELTA(dpc);
if (!corrupted_stats)
*cs = curr_stats;
}
#undef AQ_SDELTA
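
The reworked AQ_SDELTA() folds a delta in only while every delta in the snapshot is non-negative; one backwards-running firmware counter invalidates the whole batch. A compact sketch of that rule:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Add (cur - last) to *acc only while no delta in the batch has gone
 * negative; a single negative delta marks the whole snapshot corrupt,
 * mirroring the AQ_SDELTA() macro above.
 */
static void accumulate(uint64_t *acc, uint64_t cur, uint64_t last,
		       bool *corrupted)
{
	if (!*corrupted && (int64_t)(cur - last) >= 0)
		*acc += cur - last;
	else
		*corrupted = true;
}

int main(void)
{
	uint64_t rx = 100;
	bool corrupted = false;

	accumulate(&rx, 5000, 4000, &corrupted);	/* +1000 */
	accumulate(&rx, 10, 4000, &corrupted);		/* counter ran backwards */
	printf("rx=%llu corrupted=%d\n",
	       (unsigned long long)rx, corrupted);
	return 0;
}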

View File

@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5G)
rate |= FW2X_RATE_5G;
if (speed & AQ_NIC_RATE_5GSR)
rate |= FW2X_RATE_5G;
if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;

View File

@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
AQ_NIC_RATE_5G |
AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_1G_HALF |
AQ_NIC_RATE_100M |
AQ_NIC_RATE_100M_HALF |
AQ_NIC_RATE_10M |
AQ_NIC_RATE_10M_HALF,
AQ_NIC_RATE_10M,
};
const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
DEFAULT_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M |
AQ_NIC_RATE_10M,
};
const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
DEFAULT_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M |
AQ_NIC_RATE_10M,
};
static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)

View File

@ -9,6 +9,8 @@
#include "aq_common.h"
extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
extern const struct aq_hw_ops hw_atl2_ops;
#endif /* HW_ATL2_H */

View File

@ -239,7 +239,8 @@ struct version_s {
u8 minor;
u16 build;
} phy;
u32 rsvd;
u32 drv_iface_ver:4;
u32 rsvd:28;
};
struct link_status_s {
@ -424,7 +425,7 @@ struct cable_diag_status_s {
u16 rsvd2;
};
struct statistics_s {
struct statistics_a0_s {
struct {
u32 link_up;
u32 link_down;
@ -457,6 +458,33 @@ struct statistics_s {
u32 reserve_fw_gap;
};
struct __packed statistics_b0_s {
u64 rx_good_octets;
u64 rx_pause_frames;
u64 rx_good_frames;
u64 rx_errors;
u64 rx_unicast_frames;
u64 rx_multicast_frames;
u64 rx_broadcast_frames;
u64 tx_good_octets;
u64 tx_pause_frames;
u64 tx_good_frames;
u64 tx_errors;
u64 tx_unicast_frames;
u64 tx_multicast_frames;
u64 tx_broadcast_frames;
u32 main_loop_cycles;
};
struct __packed statistics_s {
union __packed {
struct statistics_a0_s a0;
struct statistics_b0_s b0;
};
};
struct filter_caps_s {
u8 l2_filters_base_index:6;
u8 flexible_filter_mask:2;
@ -545,7 +573,7 @@ struct management_status_s {
u32 rsvd5;
};
struct fw_interface_out {
struct __packed fw_interface_out {
struct transaction_counter_s transaction_id;
struct version_s version;
struct link_status_s link_status;
@ -569,7 +597,6 @@ struct fw_interface_out {
struct core_dump_s core_dump;
u32 rsvd11;
struct statistics_s stats;
u32 rsvd12;
struct filter_caps_s filter_caps;
struct device_caps_s device_caps;
u32 rsvd13;
@ -592,6 +619,9 @@ struct fw_interface_out {
#define AQ_HOST_MODE_LOW_POWER 3U
#define AQ_HOST_MODE_SHUTDOWN 4U
#define AQ_A2_FW_INTERFACE_A0 0
#define AQ_A2_FW_INTERFACE_B0 1
int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
int hw_atl2_utils_soft_reset(struct aq_hw_s *self);

View File

@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
if (cnt > AQ_A2_FW_READ_TRY_MAX)
return -ETIME;
if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
udelay(1);
mdelay(1);
} while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
{
link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
link_options->rate_N5G = link_options->rate_5G;
link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
link_options->rate_N2P5G = link_options->rate_2P5G;
link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
rate |= AQ_NIC_RATE_10G;
if (lkp_link_caps->rate_5G)
rate |= AQ_NIC_RATE_5G;
if (lkp_link_caps->rate_N5G)
rate |= AQ_NIC_RATE_5GSR;
if (lkp_link_caps->rate_2P5G)
rate |= AQ_NIC_RATE_2G5;
if (lkp_link_caps->rate_1G)
@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
return 0;
}
static int aq_a2_fw_update_stats(struct aq_hw_s *self)
static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
struct statistics_s *stats)
{
struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
struct statistics_s stats;
struct aq_stats_s *cs = &self->curr_stats;
struct aq_stats_s curr_stats = *cs;
bool corrupted_stats = false;
hw_atl2_shared_buffer_read_safe(self, stats, &stats);
#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
stats.msm._F_ - priv->last_stats.msm._F_)
#define AQ_SDELTA(_N, _F) \
do { \
if (!corrupted_stats && \
((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
else \
corrupted_stats = true; \
} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc, rx_unicast_frames);
@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
AQ_SDELTA(mbtc, tx_multicast_octets);
AQ_SDELTA(bbrc, rx_broadcast_octets);
AQ_SDELTA(bbtc, tx_broadcast_octets);
if (!corrupted_stats)
*cs = curr_stats;
}
#undef AQ_SDELTA
self->curr_stats.dma_pkt_rc =
hw_atl_stats_rx_dma_good_pkt_counter_get(self);
self->curr_stats.dma_pkt_tc =
hw_atl_stats_tx_dma_good_pkt_counter_get(self);
self->curr_stats.dma_oct_rc =
hw_atl_stats_rx_dma_good_octet_counter_get(self);
self->curr_stats.dma_oct_tc =
hw_atl_stats_tx_dma_good_octet_counter_get(self);
self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
}
static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
struct statistics_s *stats)
{
struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
struct aq_stats_s *cs = &self->curr_stats;
struct aq_stats_s curr_stats = *cs;
bool corrupted_stats = false;
#define AQ_SDELTA(_N, _F) \
do { \
if (!corrupted_stats && \
((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
else \
corrupted_stats = true; \
} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc, rx_unicast_frames);
AQ_SDELTA(mprc, rx_multicast_frames);
AQ_SDELTA(bprc, rx_broadcast_frames);
AQ_SDELTA(erpr, rx_errors);
AQ_SDELTA(brc, rx_good_octets);
AQ_SDELTA(uptc, tx_unicast_frames);
AQ_SDELTA(mptc, tx_multicast_frames);
AQ_SDELTA(bptc, tx_broadcast_frames);
AQ_SDELTA(erpt, tx_errors);
AQ_SDELTA(btc, tx_good_octets);
if (!corrupted_stats)
*cs = curr_stats;
}
#undef AQ_SDELTA
}
static int aq_a2_fw_update_stats(struct aq_hw_s *self)
{
struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
struct aq_stats_s *cs = &self->curr_stats;
struct statistics_s stats;
struct version_s version;
int err;
err = hw_atl2_shared_buffer_read_safe(self, version, &version);
if (err)
return err;
err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
if (err)
return err;
if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
aq_a2_fill_a0_stats(self, &stats);
else
aq_a2_fill_b0_stats(self, &stats);
cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
memcpy(&priv->last_stats, &stats, sizeof(stats));
@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
hw_atl2_shared_buffer_read_safe(self, version, &version);
/* A2 FW version is stored in reverse order */
return version.mac.major << 24 |
version.mac.minor << 16 |
version.mac.build;
return version.bundle.major << 24 |
version.bundle.minor << 16 |
version.bundle.build;
}
int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,

View File

@ -4550,6 +4550,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_portal_free(priv->mc_io);
destroy_workqueue(priv->dpaa2_ptp_wq);
dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
free_netdev(net_dev);

View File

@ -628,17 +628,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
old_buff_size = adapter->prev_rx_buf_sz;
new_buff_size = adapter->cur_rx_buf_sz;
/* Require buff size to be exactly same for now */
if (old_buff_size != new_buff_size)
return false;
if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
return true;
if (old_num_pools < adapter->min_rx_queues ||
old_num_pools > adapter->max_rx_queues ||
old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
old_pool_size > adapter->max_rx_add_entries_per_subcrq)
if (old_buff_size != new_buff_size ||
old_num_pools != new_num_pools ||
old_pool_size != new_pool_size)
return false;
return true;
@ -874,17 +866,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
old_mtu = adapter->prev_mtu;
new_mtu = adapter->req_mtu;
/* Require MTU to be exactly same to reuse pools for now */
if (old_mtu != new_mtu)
return false;
if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
return true;
if (old_num_pools < adapter->min_tx_queues ||
old_num_pools > adapter->max_tx_queues ||
old_pool_size < adapter->min_tx_entries_per_subcrq ||
old_pool_size > adapter->max_tx_entries_per_subcrq)
if (old_mtu != new_mtu ||
old_num_pools != new_num_pools ||
old_pool_size != new_pool_size)
return false;
return true;

View File

@ -383,6 +383,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
while (i--) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
rx_desc++;
xdp++;

View File

@ -7454,7 +7454,7 @@ static int mvpp2_probe(struct platform_device *pdev)
shared = num_present_cpus() - priv->nthreads;
if (shared > 0)
bitmap_fill(&priv->lock_map,
bitmap_set(&priv->lock_map, 0,
min_t(int, shared, MVPP2_MAX_THREADS));
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
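
bitmap_fill() writes whole longs, so it can set more bits than asked for; bitmap_set() touches exactly the requested range, which is the point of the fix above. A simplified single-word demonstration (the two helpers below model, not reproduce, the kernel ones):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void fill_rounds_up(unsigned long *map, unsigned int nbits)
{
	(void)nbits;	/* bitmap_fill() rounds up to a whole long anyway */
	*map = ~0UL;
}

static void set_exact(unsigned long *map, unsigned int start,
		      unsigned int nbits)
{
	/* bitmap_set() touches exactly [start, start + nbits). */
	*map |= ((nbits < BITS_PER_LONG ? (1UL << nbits) : 0) - 1) << start;
}

int main(void)
{
	unsigned long a = 0, b = 0;

	fill_rounds_up(&a, 3);	/* all bits of the word set, not 3 */
	set_exact(&b, 0, 3);	/* exactly bits 0..2 */
	printf("fill=%lx set=%lx\n", a, b);
	return 0;
}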

View File

@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
break;
default:
return err;
goto free_regions;
}
mw->mbox_wq = alloc_workqueue(name,

View File

@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);

View File

@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
bool carry_xdp_prog)
{
struct bpf_prog *xdp_prog;
int i, t;
int i, t, ret;
mlx4_en_copy_priv(tmp, priv, prof);
ret = mlx4_en_copy_priv(tmp, priv, prof);
if (ret) {
en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
__func__);
return ret;
}
if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,

View File

@ -341,6 +341,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_SF:
case MLX5_CMD_OP_DESTROY_UCTX:
case MLX5_CMD_OP_DESTROY_UMEM:
case MLX5_CMD_OP_MODIFY_RQT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@ -446,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_TIS:
case MLX5_CMD_OP_QUERY_TIS:
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
case MLX5_CMD_OP_CREATE_FLOW_TABLE:

View File

@ -13,6 +13,9 @@ struct mlx5e_rx_res {
unsigned int max_nch;
u32 drop_rqn;
struct mlx5e_packet_merge_param pkt_merge_param;
struct rw_semaphore pkt_merge_param_sem;
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
if (err)
goto out;
/* Separated from the channels RQs, does not share pkt_merge state with them */
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
inner_ft_support);
@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
res->pkt_merge_param = *init_pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
if (err)
goto err_out;
@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
if (!builder)
return -ENOMEM;
down_write(&res->pkt_merge_param_sem);
res->pkt_merge_param = *pkt_merge_param;
mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
}
}
up_write(&res->pkt_merge_param_sem);
mlx5e_tir_builder_free(builder);
return final_err;
}
@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *
{
return mlx5e_rss_get_hash(res->rss[0]);
}
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
struct mlx5e_tir *tir)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_tir_builder *builder;
u32 rqtn;
int err;
builder = mlx5e_tir_builder_alloc(false);
if (!builder)
return -ENOMEM;
rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
inner_ft_support);
mlx5e_tir_builder_build_direct(builder);
mlx5e_tir_builder_build_tls(builder);
down_read(&res->pkt_merge_param_sem);
mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
err = mlx5e_tir_init(tir, builder, res->mdev, false);
up_read(&res->pkt_merge_param_sem);
mlx5e_tir_builder_free(builder);
return err;
}

View File

@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
/* RQTN getters for modules that create their own TIRs */
u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
/* Workaround for hairpin */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
/* Accel TIRs */
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
struct mlx5e_tir *tir);
#endif /* __MLX5_EN_RX_RES_H__ */

View File

@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
eseg->swp_inner_l4_offset =
(skb->csum_start + skb->head - skb->data) / 2;
if (skb->protocol == htons(ETH_P_IPV6))
if (inner_ip_hdr(skb)->version == 6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
break;
default:

View File

@ -100,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
return resp_list;
}
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
{
struct mlx5e_tir_builder *builder;
int err;
builder = mlx5e_tir_builder_alloc(false);
if (!builder)
return -ENOMEM;
mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
mlx5e_tir_builder_build_direct(builder);
mlx5e_tir_builder_build_tls(builder);
err = mlx5e_tir_init(tir, builder, mdev, false);
mlx5e_tir_builder_free(builder);
return err;
}
static void accel_rule_handle_work(struct work_struct *work)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
@ -609,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int rxq, err;
u32 rqtn;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
@ -635,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
if (err)
goto err_create_tir;

View File

@ -1086,6 +1086,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_sw),
&MLX5E_STATS_GRP(ipsec_hw),
#endif
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)

View File

@ -544,13 +544,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
u16 klm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
struct page *page = shampo->last_page;
u64 addr = shampo->last_addr;
struct mlx5e_dma_info *dma_info;
struct mlx5e_umr_wqe *umr_wqe;
int headroom;
int headroom, i;
headroom = rq->buff.headroom;
new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
@ -602,9 +602,7 @@ update_klm:
err_unmap:
while (--i >= 0) {
if (--index < 0)
index = shampo->hd_per_wq - 1;
dma_info = &shampo->info[index];
dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
mlx5e_page_release(rq, dma_info, true);
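
The type change from u16 to int matters because the err_unmap unwind loop relies on --i going negative; an unsigned counter wraps instead. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned short u = 0;

	/* With an unsigned counter, --u wraps to 65535 instead of going
	 * negative, so a `while (--i >= 0)` unwind loop never exits --
	 * which is why the hunk above turns `u16 i` into `int i`.
	 */
	--u;
	printf("after --u: %u (still >= 0)\n", u);	/* 65535 */
	return 0;
}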

View File

@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
/* If the vports' min rate divider is 0 but their group has bw_share
* configured, we need to set bw_share for the vports to a minimal value.
*/
if (!group_level && !max_guarantee && group->bw_share)
if (!group_level && !max_guarantee && group && group->bw_share)
return 1;
return 0;
}
@ -419,7 +419,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
return err;
/* Recalculate bw share weights of old and new groups */
if (vport->qos.bw_share) {
if (vport->qos.bw_share || new_group->bw_share) {
esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
esw_qos_normalize_vports_min_rate(esw, new_group, extack);
}

View File

@ -329,14 +329,25 @@ static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
bool result = false;
int i;
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
/* Indirect table is supported only for flows whose in_port is the
* uplink and whose destinations are vports on the same eswitch as
* the uplink; return false if at least one destination doesn't meet
* this criterion.
*/
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
if (esw_attr->dests[i].rep &&
mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
esw_attr->dests[i].mdev))
return true;
return false;
esw_attr->dests[i].mdev)) {
result = true;
} else {
result = false;
break;
}
}
return result;
}
static int
@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
struct mlx5_eswitch *esw = master->priv.eswitch;
struct mlx5_flow_table_attr ft_attr = {
.max_fte = 1, .prio = 0, .level = 0,
.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
};
struct mlx5_flow_namespace *egress_ns;
struct mlx5_flow_table *acl;

View File

@ -840,6 +840,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@ -907,8 +910,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
if (mlx5_core_is_pf(dev))
queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
return 0;

View File

@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
if (port_sel->tunnel)
mlx5_destroy_ttc_table(port_sel->inner.ttc);
mlx5_lag_destroy_definers(ldev);
memset(port_sel, 0, sizeof(*port_sel));
}

View File

@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
dev->timeouts->to[type] = val;
}
static void tout_set_def_val(struct mlx5_core_dev *dev)
void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
{
int i;
for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
tout_set(dev, tout_def_sw_val[i], i);
}
@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev)
if (!dev->timeouts)
return -ENOMEM;
tout_set_def_val(dev);
return 0;
}

View File

@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)

View File

@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
err = mlx5_tout_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
return err;
}
mlx5_tout_set_def_val(dev);
/* wait for firmware to accept initialization segments configurations
*/
@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (err) {
mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
goto err_tout_cleanup;
return err;
}
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
goto err_tout_cleanup;
return err;
}
mlx5_tout_query_iseg(dev);
@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
mlx5_set_driver_version(dev);
mlx5_start_health_poll(dev);
err = mlx5_query_hca_caps(dev);
if (err) {
mlx5_core_err(dev, "query hca failed\n");
goto stop_health;
goto reclaim_boot_pages;
}
mlx5_start_health_poll(dev);
return 0;
stop_health:
mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
err_disable_hca:
@ -1094,8 +1088,6 @@ err_disable_hca:
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
err_tout_cleanup:
mlx5_tout_cleanup(dev);
return err;
}
@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
mlx5_tout_cleanup(dev);
return 0;
}
@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_tout_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
goto err_timeout_init;
}
err = mlx5_health_init(dev);
if (err)
goto err_health_init;
@ -1501,6 +1498,8 @@ err_adev_init:
err_pagealloc_init:
mlx5_health_cleanup(dev);
err_health_init:
mlx5_tout_cleanup(dev);
err_timeout_init:
debugfs_remove(dev->priv.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_tout_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);

View File

@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
static int __init sonic_probe1(struct net_device *dev)
static int sonic_probe1(struct net_device *dev)
{
unsigned int silicon_revision;
struct sonic_local *lp = netdev_priv(dev);

View File

@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_ADD_RCV_RINGS);
err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_ADD_RCV_RINGS);
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to alloc mbx args %d\n", err);
return err;
}
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */

View File

@ -5532,8 +5532,6 @@ static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);
bool sph_en;
u32 chan;
/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
@ -5545,10 +5543,13 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
sph_en = (priv->hw->rx_csum > 0) && priv->sph;
if (priv->sph_cap) {
bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
u32 chan;
for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
}
return 0;
}

View File

@ -2398,7 +2398,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
if (dev->domain_data.phyirq > 0)
phydev->irq = dev->domain_data.phyirq;
else
phydev->irq = 0;
phydev->irq = PHY_POLL;
netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
/* set to AUTOMDIX */

View File

@ -3425,7 +3425,6 @@ static struct virtio_driver virtio_net_driver = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,

View File

@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
/* strip the ethernet header added for pass through VRF device */
__skb_pull(skb, skb_network_offset(skb));
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
ret = vrf_ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
dev->stats.tx_errors++;
@ -579,6 +580,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
RT_SCOPE_LINK);
}
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
vrf_dev->stats.tx_errors++;

View File

@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
return exact;
}
static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
{
node->parent_bit_packed = (unsigned long)parent | bit;
rcu_assign_pointer(*parent, node);

View File

@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev)
{
struct wg_device *wg = netdev_priv(dev);
struct wg_peer *peer;
struct sk_buff *skb;
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list) {
@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev)
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
}
mutex_unlock(&wg->device_update_lock);
skb_queue_purge(&wg->incoming_handshakes);
while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
kfree_skb(skb);
atomic_set(&wg->handshake_queue_len, 0);
wg_socket_reinit(wg, NULL, NULL);
return 0;
}
@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev)
destroy_workqueue(wg->handshake_receive_wq);
destroy_workqueue(wg->handshake_send_wq);
destroy_workqueue(wg->packet_crypt_wq);
wg_packet_queue_free(&wg->decrypt_queue);
wg_packet_queue_free(&wg->encrypt_queue);
wg_packet_queue_free(&wg->handshake_queue, true);
wg_packet_queue_free(&wg->decrypt_queue, false);
wg_packet_queue_free(&wg->encrypt_queue, false);
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
skb_queue_purge(&wg->incoming_handshakes);
free_percpu(dev->tstats);
free_percpu(wg->incoming_handshakes_worker);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
skb_queue_head_init(&wg->incoming_handshakes);
wg_allowedips_init(&wg->peer_allowedips);
wg_cookie_checker_init(&wg->cookie_checker, wg);
INIT_LIST_HEAD(&wg->peer_list);
@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (!dev->tstats)
goto err_free_index_hashtable;
wg->incoming_handshakes_worker =
wg_packet_percpu_multicore_worker_alloc(
wg_packet_handshake_receive_worker, wg);
if (!wg->incoming_handshakes_worker)
goto err_free_tstats;
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_receive_wq)
goto err_free_incoming_handshakes;
goto err_free_tstats;
wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (ret < 0)
goto err_free_encrypt_queue;
ret = wg_ratelimiter_init();
ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
MAX_QUEUED_INCOMING_HANDSHAKES);
if (ret < 0)
goto err_free_decrypt_queue;
ret = wg_ratelimiter_init();
if (ret < 0)
goto err_free_handshake_queue;
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
err_uninit_ratelimiter:
wg_ratelimiter_uninit();
err_free_handshake_queue:
wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
wg_packet_queue_free(&wg->decrypt_queue);
wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
wg_packet_queue_free(&wg->encrypt_queue);
wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
destroy_workqueue(wg->handshake_receive_wq);
err_free_incoming_handshakes:
free_percpu(wg->incoming_handshakes_worker);
err_free_tstats:
free_percpu(dev->tstats);
err_free_index_hashtable:
@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = {
static void wg_netns_pre_exit(struct net *net)
{
struct wg_device *wg;
struct wg_peer *peer;
rtnl_lock();
list_for_each_entry(wg, &device_list, device_list) {
@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net)
mutex_lock(&wg->device_update_lock);
rcu_assign_pointer(wg->creating_net, NULL);
wg_socket_reinit(wg, NULL, NULL);
list_for_each_entry(peer, &wg->peer_list, peer_list)
wg_socket_clear_peer_endpoint_src(peer);
mutex_unlock(&wg->device_update_lock);
}
}

View File

@ -39,21 +39,18 @@ struct prev_queue {
struct wg_device {
struct net_device *dev;
struct crypt_queue encrypt_queue, decrypt_queue;
struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue;
struct sock __rcu *sock4, *sock6;
struct net __rcu *creating_net;
struct noise_static_identity static_identity;
struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
struct workqueue_struct *packet_crypt_wq;
struct sk_buff_head incoming_handshakes;
int incoming_handshake_cpu;
struct multicore_worker __percpu *incoming_handshakes_worker;
struct workqueue_struct *packet_crypt_wq, *handshake_receive_wq, *handshake_send_wq;
struct cookie_checker cookie_checker;
struct pubkey_hashtable *peer_hashtable;
struct index_hashtable *index_hashtable;
struct allowedips peer_allowedips;
struct mutex device_update_lock, socket_update_lock;
struct list_head device_list, peer_list;
atomic_t handshake_queue_len;
unsigned int num_peers, device_update_gen;
u32 fwmark;
u16 incoming_port;

View File

@ -17,7 +17,7 @@
#include <linux/genetlink.h>
#include <net/rtnetlink.h>
static int __init mod_init(void)
static int __init wg_mod_init(void)
{
int ret;
@ -60,7 +60,7 @@ err_allowedips:
return ret;
}
static void __exit mod_exit(void)
static void __exit wg_mod_exit(void)
{
wg_genetlink_uninit();
wg_device_uninit();
@ -68,8 +68,8 @@ static void __exit mod_exit(void)
wg_allowedips_slab_uninit();
}
module_init(mod_init);
module_exit(mod_exit);
module_init(wg_mod_init);
module_exit(wg_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("WireGuard secure network tunnel");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");

View File

@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
return 0;
}
void wg_packet_queue_free(struct crypt_queue *queue)
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
free_percpu(queue->worker);
WARN_ON(!__ptr_ring_empty(&queue->ring));
ptr_ring_cleanup(&queue->ring, NULL);
WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
}
#define NEXT(skb) ((skb)->prev)

View File

@ -23,7 +23,7 @@ struct sk_buff;
/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

View File

@ -176,12 +176,12 @@ int wg_ratelimiter_init(void)
(1U << 14) / sizeof(struct hlist_head)));
max_entries = table_size * 8;
table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
if (unlikely(!table_v4))
goto err_kmemcache;
#if IS_ENABLED(CONFIG_IPV6)
table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
if (unlikely(!table_v6)) {
kvfree(table_v4);
goto err_kmemcache;
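
kvcalloc() rejects a multiplication that would overflow, where the open-coded kvzalloc(table_size * sizeof(*table_v4)) would silently wrap to a small allocation. A userspace analogue of the guard:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* calloc-style overflow guard: refuse when n * size wraps, which is
 * what kvcalloc() adds over the open-coded kvzalloc(n * size).
 */
static void *checked_alloc(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;	/* would overflow: fail, don't wrap */
	return calloc(n, size);
}

int main(void)
{
	/* The naive product wraps on 64-bit and would "succeed". */
	void *p = checked_alloc(SIZE_MAX / 8, 16);

	printf("overflowing request -> %p\n", p);	/* (nil) */
	free(p);
	return 0;
}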

View File

@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
return;
}
under_load = skb_queue_len(&wg->incoming_handshakes) >=
MAX_QUEUED_INCOMING_HANDSHAKES / 8;
under_load = atomic_read(&wg->handshake_queue_len) >=
MAX_QUEUED_INCOMING_HANDSHAKES / 8;
if (under_load) {
last_under_load = ktime_get_coarse_boottime_ns();
} else if (last_under_load) {
@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
void wg_packet_handshake_receive_worker(struct work_struct *work)
{
struct wg_device *wg = container_of(work, struct multicore_worker,
work)->ptr;
struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
struct sk_buff *skb;
while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
wg_receive_handshake_packet(wg, skb);
dev_kfree_skb(skb);
atomic_dec(&wg->handshake_queue_len);
cond_resched();
}
}
@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
int cpu;
int cpu, ret = -EBUSY;
if (skb_queue_len(&wg->incoming_handshakes) >
MAX_QUEUED_INCOMING_HANDSHAKES ||
unlikely(!rng_is_initialized())) {
if (unlikely(!rng_is_initialized()))
goto drop;
if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
}
} else
ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
if (ret) {
drop:
net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
wg->dev->name, skb);
goto err;
}
skb_queue_tail(&wg->incoming_handshakes, skb);
/* Queues up a call to packet_process_queued_handshake_
* packets(skb):
*/
cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
atomic_inc(&wg->handshake_queue_len);
cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
/* Queues up a call to packet_process_queued_handshake_packets(skb): */
queue_work_on(cpu, wg->handshake_receive_wq,
&per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
&per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
break;
}
case cpu_to_le32(MESSAGE_DATA):
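
Taken together, the receive-path hunks replace the unbounded skb_queue_tail() with admission control on the ring: the RNG check short-circuits first, a lightly loaded queue uses the ordinary bh-safe producer, and once the ring is more than half full the producer lock is only trylock'd, so CPUs hit by a handshake flood drop packets instead of serializing on the lock. handshake_queue_len is incremented only after a successful produce and decremented by the consuming worker. A condensed sketch of the admission logic, with the helper name invented for illustration::

    /* Returns true if skb was queued; under load, lock contention
     * causes an immediate drop rather than a spin on producer_lock.
     */
    static bool handshake_enqueue(struct ptr_ring *ring,
                                  struct sk_buff *skb, bool under_load)
    {
            int ret = -EBUSY;

            if (!under_load)
                    return ptr_ring_produce_bh(ring, skb) == 0;
            if (spin_trylock_bh(&ring->producer_lock)) {
                    ret = __ptr_ring_produce(ring, skb);
                    spin_unlock_bh(&ring->producer_lock);
            }
            return ret == 0;
    }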

View File

@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
{
write_lock_bh(&peer->endpoint_lock);
memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
dst_cache_reset(&peer->endpoint_cache);
dst_cache_reset_now(&peer->endpoint_cache);
write_unlock_bh(&peer->endpoint_lock);
}
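
dst_cache_reset() only bumps the cache's invalidation timestamp, so stale per-cpu entries are skipped on the next lookup but keep holding their dst references until then; dst_cache_reset_now(), introduced alongside this fix, releases the cached entries immediately, which matters when a stale entry would otherwise pin a device or namespace during teardown. The contrast as a sketch::

    dst_cache_reset(&peer->endpoint_cache);     /* lazy: ignored on next lookup */
    dst_cache_reset_now(&peer->endpoint_cache); /* eager: dst refs dropped now */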

View File

@ -86,6 +86,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-EINVAL);
goto out;
}
@ -105,6 +106,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
IWL_DEBUG_FW(trans,
"Couldn't allocate (more) reduce_power_data\n");
kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-ENOMEM);
goto out;
}
@ -134,6 +136,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
done:
if (!size) {
IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
/* Better safe than sorry, but 'reduce_power_data' should
* always be NULL if !size.
*/
kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-ENOENT);
goto out;
}
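
Each early exit from the REDUCE_POWER TLV walk now kfree()s the partially assembled buffer before overwriting the pointer with an ERR_PTR() code; previously the allocation leaked, since the caller receives only the error-encoded pointer and can never reach the original buffer. The general pattern as a hypothetical sketch (validate() stands in for the TLV length checks)::

    static void *build_blob(const u8 *data, size_t len)
    {
            u8 *buf = kzalloc(len, GFP_KERNEL);

            if (!buf)
                    return ERR_PTR(-ENOMEM);
            if (!validate(data, len)) {      /* stand-in check */
                    kfree(buf);              /* leaked before the fix */
                    return ERR_PTR(-EINVAL); /* caller sees only this */
            }
            return buf;
    }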

View File

@ -1313,23 +1313,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
for (retry = 0; retry <= max_retry; retry++) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
drv->dbgfs_op_mode = debugfs_create_dir(op->name,
drv->dbgfs_drv);
dbgfs_dir = drv->dbgfs_op_mode;
drv->dbgfs_op_mode = debugfs_create_dir(op->name,
drv->dbgfs_drv);
dbgfs_dir = drv->dbgfs_op_mode;
#endif
op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
op_mode = ops->start(drv->trans, drv->trans->cfg,
&drv->fw, dbgfs_dir);
if (op_mode)
return op_mode;
IWL_ERR(drv, "retry init count %d\n", retry);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (!op_mode) {
debugfs_remove_recursive(drv->dbgfs_op_mode);
drv->dbgfs_op_mode = NULL;
}
#endif
}
return op_mode;
return NULL;
}
static void _iwl_op_mode_stop(struct iwl_drv *drv)
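
_iwl_op_mode_start() becomes a bounded retry loop. The bound uses the !!flag * N idiom: max_retry is IWL_MAX_INIT_RETRY when the fw_restart module parameter is set and 0 otherwise, so retries are opt-in, and each failed attempt removes the debugfs directory it created so the next iteration starts clean. A minimal sketch of the idiom, with stand-in names::

    int retry, max_retry = !!restart_enabled * IWL_MAX_INIT_RETRY;

    for (retry = 0; retry <= max_retry; retry++) {
            op_mode = try_start();      /* stand-in for ops->start(...) */
            if (op_mode)
                    return op_mode;
            cleanup_attempt();          /* e.g. the per-attempt debugfs dir */
    }
    return NULL;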

View File

@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define IWL_EXPORT_SYMBOL(sym)
#endif
/* max retry for init flow */
#define IWL_MAX_INIT_RETRY 2
#endif /* __iwl_drv_h__ */

View File

@ -16,6 +16,7 @@
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>
#include "iwl-drv.h"
#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
@ -1117,9 +1118,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
ret = __iwl_mvm_mac_start(mvm);
/* we are starting the mac, not in an error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
max_retry = IWL_MAX_INIT_RETRY;
/*
* This prevents mac80211 recovery flows from triggering during
* init failures
*/
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}
for (retry = 0; retry <= max_retry; retry++) {
ret = __iwl_mvm_mac_start(mvm);
if (!ret)
break;
IWL_ERR(mvm, "mac start retry %d\n", retry);
}
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
return ret;
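
The mac80211-level start gains the same bounded retry, and additionally brackets the attempts with IWL_MVM_STATUS_STARTING so a failed attempt cannot trigger a concurrent mac80211 recovery while the loop is still retrying. The consumer of that bit is not in the hunks shown; presumably the driver's restart/error path tests it and bails out, roughly::

    /* Sketch (assumed consumer, not shown in this diff): skip the
     * recovery flow while a deliberate start attempt is iterating.
     */
    if (test_bit(IWL_MVM_STATUS_STARTING, &mvm->status))
            return; /* the start loop retries on its own */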

View File

@ -1123,6 +1123,8 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_STARTING: starting the mac,
* used to disable the restart flow while in the STARTING state
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@ -1134,6 +1136,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_STARTING,
};
/* Keep track of completed init configuration */

Some files were not shown because too many files have changed in this diff.