Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Both conflicts were simple overlapping changes. In the kaweth case, Eric
Dumazet's skb_cow() bug fix overlapped the conversion of the driver in
net-next to use in-netdev stats.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit fb796707d7
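The recurring change in the USB network driver hunks below is the skb_cow()
conversion mentioned above: the open-coded skb_copy_expand()-and-swap dance is
replaced by skb_cow_head(), which un-clones the skb and expands its headroom in
place only when necessary. A minimal sketch of the converted tx_fixup shape
(example_tx_fixup and EXAMPLE_TX_OVERHEAD are illustrative placeholders, not
taken from any one driver):

	/*
	 * Sketch of the skb_cow_head() pattern applied in the hunks below.
	 * On failure the skb must be freed here, because returning NULL
	 * tells usbnet that the skb was consumed.
	 */
	static struct sk_buff *example_tx_fixup(struct usbnet *dev,
						struct sk_buff *skb, gfp_t flags)
	{
		/* make the header writable with EXAMPLE_TX_OVERHEAD bytes of room */
		if (skb_cow_head(skb, EXAMPLE_TX_OVERHEAD)) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		__skb_push(skb, EXAMPLE_TX_OVERHEAD);
		/* ... fill in the device-specific TX header here ... */
		return skb;
	}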
@@ -44,13 +44,19 @@ Hip05 Example (note that Hip06 is the same except compatible):
 	};
 
 HiSilicon Hip06/Hip07 PCIe host bridge DT (almost-ECAM) description.
+
+Some BIOSes place the host controller in a mode where it is ECAM
+compliant for all devices other than the root complex. In such cases,
+the host controller should be described as below.
 
 The properties and their meanings are identical to those described in
 host-generic-pci.txt except as listed below.
 
 Properties of the host controller node that differ from
 host-generic-pci.txt:
 
-- compatible     : Must be "hisilicon,pcie-almost-ecam"
+- compatible     : Must be "hisilicon,hip06-pcie-ecam", or
+                   "hisilicon,hip07-pcie-ecam"
 
 - reg            : Two entries: First the ECAM configuration space for any
                    other bus underneath the root bus. Second, the base
@@ -59,7 +65,7 @@ host-generic-pci.txt:
 
 Example:
 	pcie0: pcie@a0090000 {
-		compatible = "hisilicon,pcie-almost-ecam";
+		compatible = "hisilicon,hip06-pcie-ecam";
 		reg = <0 0xb0000000 0 0x2000000>,  /* ECAM configuration space */
 		      <0 0xa0090000 0 0x10000>; /* host bridge registers */
 		bus-range = <0  31>;
MAINTAINERS | 18 ++++++++++++++++--

@@ -2592,12 +2592,26 @@ F:	include/uapi/linux/if_bonding.h
 
 BPF (Safe dynamic programs and tools)
 M:	Alexei Starovoitov <ast@kernel.org>
 M:	Daniel Borkmann <daniel@iogearbox.net>
 L:	netdev@vger.kernel.org
 L:	linux-kernel@vger.kernel.org
 S:	Supported
+F:	arch/x86/net/bpf_jit*
+F:	Documentation/networking/filter.txt
+F:	include/linux/bpf*
+F:	include/linux/filter.h
+F:	include/uapi/linux/bpf*
+F:	include/uapi/linux/filter.h
 F:	kernel/bpf/
-F:	tools/testing/selftests/bpf/
+F:	kernel/trace/bpf_trace.c
+F:	lib/test_bpf.c
+F:	net/bpf/
+F:	net/core/filter.c
+F:	net/sched/act_bpf.c
+F:	net/sched/cls_bpf.c
+F:	samples/bpf/
+F:	tools/net/bpf*
+F:	tools/testing/selftests/bpf/
 
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:	Michael Chan <michael.chan@broadcom.com>
@@ -8777,6 +8791,7 @@ W:	http://www.linuxfoundation.org/en/Net
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+B:	mailto:netdev@vger.kernel.org
 S:	Maintained
 F:	net/
 F:	include/net/
@@ -12491,7 +12506,6 @@ F:	drivers/clk/ti/
 F:	include/linux/clk/ti.h
 
 TI ETHERNET SWITCH DRIVER (CPSW)
-M:	Mugunthan V N <mugunthanvnm@ti.com>
 R:	Grygorii Strashko <grygorii.strashko@ti.com>
 L:	linux-omap@vger.kernel.org
 L:	netdev@vger.kernel.org
@@ -236,9 +236,9 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	mtctr reg;						\
 	bctr
 
-#define BRANCH_LINK_TO_FAR(reg, label)				\
-	__LOAD_FAR_HANDLER(reg, label);				\
-	mtctr reg;						\
+#define BRANCH_LINK_TO_FAR(label)				\
+	__LOAD_FAR_HANDLER(r12, label);				\
+	mtctr r12;						\
 	bctrl
 
 /*
@@ -265,7 +265,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define BRANCH_TO_COMMON(reg, label)				\
 	b	label
 
-#define BRANCH_LINK_TO_FAR(reg, label)				\
+#define BRANCH_LINK_TO_FAR(label)				\
 	bl	label
 
 #define BRANCH_TO_KVM(reg, label)				\
@@ -689,7 +689,7 @@ resume_kernel:
 
 	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
 
-	lwz	r3,GPR1(r1)
+	ld	r3,GPR1(r1)
 	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
 	mr	r4,r1			/* src:  current exception frame */
 	mr	r1,r3			/* Reroute the trampoline frame to r1 */
@@ -703,8 +703,8 @@ resume_kernel:
 	addi	r6,r6,8
 	bdnz	2b
 
-	/* Do real store operation to complete stwu */
-	lwz	r5,GPR1(r1)
+	/* Do real store operation to complete stdu */
+	ld	r5,GPR1(r1)
 	std	r8,0(r5)
 
 	/* Clear _TIF_EMULATE_STACK_STORE flag */
@@ -982,7 +982,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
 	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
 	EXCEPTION_PROLOG_COMMON_3(0xe60)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	BRANCH_LINK_TO_FAR(r4, hmi_exception_realmode)
+	BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
 	/* Windup the stack. */
 	/* Move original HSRR0 and HSRR1 into the respective regs */
 	ld	r9,_MSR(r1)
@@ -1051,6 +1051,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	if (!MACHINE_HAS_NX)
 		pte_val(entry) &= ~_PAGE_NOEXEC;
+	if (pte_present(entry))
+		pte_val(entry) &= ~_PAGE_UNUSED;
 	if (mm_has_pgste(mm))
 		ptep_set_pte_at(mm, addr, ptep, entry);
 	else
@@ -2928,8 +2928,17 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
 	if (!blk_qc_t_is_internal(cookie))
 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else
+	else {
 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+		/*
+		 * With scheduling, if the request has completed, we'll
+		 * get a NULL return here, as we clear the sched tag when
+		 * that happens. The request still remains valid, like always,
+		 * so we should be safe with just the NULL check.
+		 */
+		if (!rq)
+			return false;
+	}
 
 	return __blk_mq_poll(hctx, rq);
 }
@@ -1098,12 +1098,20 @@ int elevator_change(struct request_queue *q, const char *name)
 }
 EXPORT_SYMBOL(elevator_change);
 
+static inline bool elv_support_iosched(struct request_queue *q)
+{
+	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
+				BLK_MQ_F_NO_SCHED))
+		return false;
+	return true;
+}
+
 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 			  size_t count)
 {
 	int ret;
 
-	if (!(q->mq_ops || q->request_fn))
+	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
 		return count;
 
 	ret = __elevator_change(q, name);
@@ -1135,7 +1143,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 			len += sprintf(name+len, "[%s] ", elv->elevator_name);
 			continue;
 		}
-		if (__e->uses_mq && q->mq_ops)
+		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
 			len += sprintf(name+len, "%s ", __e->elevator_name);
 		else if (!__e->uses_mq && !q->mq_ops)
 			len += sprintf(name+len, "%s ", __e->elevator_name);
@@ -200,6 +200,7 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
 		return -EINVAL;
 
 	/* The state of the list is 'on' IFF all resources are 'on'. */
+	cur_state = 0;
 	list_for_each_entry(entry, list, node) {
 		struct acpi_power_resource *resource = entry->resource;
 		acpi_handle handle = resource->device.handle;
@@ -3969,7 +3969,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 	dd->tags.reserved_tags = 1;
 	dd->tags.cmd_size = sizeof(struct mtip_cmd);
 	dd->tags.numa_node = dd->numa_node;
-	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
 	dd->tags.driver_data = dd;
 	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
@@ -429,6 +429,13 @@ static const struct clk_div_table pll_divp_table[] = {
 	{ 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 }
 };
 
+static const struct clk_div_table pll_divq_table[] = {
+	{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
+	{ 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 },
+	{ 14, 14 }, { 15, 15 },
+	{ 0 }
+};
+
 static const struct clk_div_table pll_divr_table[] = {
 	{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
 };
@@ -496,9 +503,9 @@ struct stm32f4_div_data {
 
 #define MAX_PLL_DIV 3
 static const struct stm32f4_div_data div_data[MAX_PLL_DIV] = {
-	{ 16, 2, 0, pll_divp_table },
-	{ 24, 4, CLK_DIVIDER_ONE_BASED, NULL },
-	{ 28, 3, 0, pll_divr_table },
+	{ 16, 2, 0, pll_divp_table },
+	{ 24, 4, 0, pll_divq_table },
+	{ 28, 3, 0, pll_divr_table },
 };
 
 struct stm32f4_pll_data {
@@ -1,6 +1,7 @@
 config SUNXI_CCU
 	bool "Clock support for Allwinner SoCs"
 	depends on ARCH_SUNXI || COMPILE_TEST
+	select RESET_CONTROLLER
 	default ARCH_SUNXI
 
 if SUNXI_CCU
@@ -135,6 +136,7 @@ config SUN8I_V3S_CCU
 config SUN9I_A80_CCU
 	bool "Support for the Allwinner A80 CCU"
 	select SUNXI_CCU_DIV
+	select SUNXI_CCU_MULT
 	select SUNXI_CCU_GATE
 	select SUNXI_CCU_NKMP
 	select SUNXI_CCU_NM
@@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
 	.num_resets	= ARRAY_SIZE(sun8i_a33_ccu_resets),
 };
 
+static struct ccu_pll_nb sun8i_a33_pll_cpu_nb = {
+	.common	= &pll_cpux_clk.common,
+	/* copy from pll_cpux_clk */
+	.enable	= BIT(31),
+	.lock	= BIT(28),
+};
+
 static struct ccu_mux_nb sun8i_a33_cpu_nb = {
 	.common		= &cpux_clk.common,
 	.cm		= &cpux_clk.mux,
@@ -783,6 +790,10 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
 
 	sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
 
+	/* Gate then ungate PLL CPU after any rate changes */
+	ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
+
 	/* Reparent CPU during PLL CPU rate changes */
 	ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
 				  &sun8i_a33_cpu_nb);
 }
@@ -14,11 +14,13 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 #include <linux/slab.h>
 
 #include "ccu_common.h"
+#include "ccu_gate.h"
 #include "ccu_reset.h"
 
 static DEFINE_SPINLOCK(ccu_lock);
@@ -39,6 +41,53 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
 	WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
 }
 
+/*
+ * This clock notifier is called when the frequency of a PLL clock is
+ * changed. In common PLL designs, changes to the dividers take effect
+ * almost immediately, while changes to the multipliers (implemented
+ * as dividers in the feedback loop) take a few cycles to work into
+ * the feedback loop for the PLL to stabilize.
+ *
+ * Sometimes when the PLL clock rate is changed, the decrease in the
+ * divider is too much for the decrease in the multiplier to catch up.
+ * The PLL clock rate will spike, and in some cases, might lock up
+ * completely.
+ *
+ * This notifier callback will gate and then ungate the clock,
+ * effectively resetting it, so it proceeds to work. Care must be
+ * taken to reparent consumers to other temporary clocks during the
+ * rate change, and that this notifier callback must be the first
+ * to be registered.
+ */
+static int ccu_pll_notifier_cb(struct notifier_block *nb,
+			       unsigned long event, void *data)
+{
+	struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
+	int ret = 0;
+
+	if (event != POST_RATE_CHANGE)
+		goto out;
+
+	ccu_gate_helper_disable(pll->common, pll->enable);
+
+	ret = ccu_gate_helper_enable(pll->common, pll->enable);
+	if (ret)
+		goto out;
+
+	ccu_helper_wait_for_lock(pll->common, pll->lock);
+
+out:
+	return notifier_from_errno(ret);
+}
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
+{
+	pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;
+
+	return clk_notifier_register(pll_nb->common->hw.clk,
+				     &pll_nb->clk_nb);
+}
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
 		    const struct sunxi_ccu_desc *desc)
 {
@@ -83,6 +83,18 @@ struct sunxi_ccu_desc {
 
 void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock);
 
+struct ccu_pll_nb {
+	struct notifier_block	clk_nb;
+	struct ccu_common	*common;
+
+	u32	enable;
+	u32	lock;
+};
+
+#define to_ccu_pll_nb(_nb) container_of(_nb, struct ccu_pll_nb, clk_nb)
+
+int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb);
+
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
 		    const struct sunxi_ccu_desc *desc);
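Taken together, the three sunxi-ng hunks above introduce a small PLL-reset
notifier API: the header declares struct ccu_pll_nb and
ccu_pll_notifier_register(), ccu_common.c implements the gate-then-ungate
callback, and the sun8i-a33 driver is its first user. A hypothetical second
user would follow the same shape (the example_* names are illustrative; a real
driver would reference its own clock and CCU descriptor):

	/*
	 * Hypothetical user of the new notifier API, mirroring the
	 * sun8i-a33 hunk above.
	 */
	static struct ccu_pll_nb example_pll_cpu_nb = {
		.common	= &pll_cpux_clk.common,
		.enable	= BIT(31),	/* gate bit of the PLL register */
		.lock	= BIT(28),	/* lock status bit */
	};

	static void __init example_ccu_setup(struct device_node *node)
	{
		void __iomem *reg = of_iomap(node, 0);

		sunxi_ccu_probe(node, reg, &example_ccu_desc);

		/* per the comment in ccu_common.c, register this first */
		ccu_pll_notifier_register(&example_pll_cpu_nb);
	}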
@@ -2006,7 +2006,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
 		return;
 	case HID_DG_TOOLSERIALNUMBER:
 		wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
-		wacom_wac->serial[0] |= value;
+		wacom_wac->serial[0] |= (__u32)value;
 		return;
 	case WACOM_HID_WD_SENSE:
 		wacom_wac->hid_data.sense_state = value;
@@ -2176,6 +2176,16 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
 		wacom_wac->hid_data.cc_index = field->index;
 		wacom_wac->hid_data.cc_value_index = usage->usage_index;
 		break;
+	case HID_DG_CONTACTID:
+		if ((field->logical_maximum - field->logical_minimum) < touch_max) {
+			/*
+			 * The HID descriptor for G11 sensors leaves logical
+			 * maximum set to '1' despite it being a multitouch
+			 * device. Override to a sensible number.
+			 */
+			field->logical_maximum = 255;
+		}
+		break;
 	}
 }
@@ -1118,6 +1118,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
 * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
 * Avatar AVIU-145A2       0x361f00        ?               clickpad
 * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
 * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
 * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
 * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
@@ -1523,6 +1524,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
 		},
 	},
+	{
+		/* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+		},
+	},
 	{
 		/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
 		.matches = {
@@ -267,7 +267,7 @@ static void sdio_release_func(struct device *dev)
 	sdio_free_func_cis(func);
 
 	kfree(func->info);
-
+	kfree(func->tmpbuf);
 	kfree(func);
 }
 
@@ -282,6 +282,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
 	if (!func)
 		return ERR_PTR(-ENOMEM);
 
+	/*
+	 * allocate buffer separately to make sure it's properly aligned for
+	 * DMA usage (incl. 64 bit DMA)
+	 */
+	func->tmpbuf = kmalloc(4, GFP_KERNEL);
+	if (!func->tmpbuf) {
+		kfree(func);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	func->card = card;
 
 	device_initialize(&func->dev);
@@ -22,6 +22,7 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
@@ -1621,10 +1622,16 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
 
 	if (card->type == MMC_TYPE_SDIO ||
 	    card->type == MMC_TYPE_SD_COMBO) {
-		set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+			pm_runtime_get_noresume(mmc->parent);
+			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+		}
 		clk_en_a = clk_en_a_old & ~clken_low_pwr;
 	} else {
-		clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+		if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
+			pm_runtime_put_noidle(mmc->parent);
+			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+		}
 		clk_en_a = clk_en_a_old | clken_low_pwr;
 	}
@@ -830,6 +830,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
 
 	switch (uhs) {
 	case MMC_TIMING_UHS_SDR50:
+	case MMC_TIMING_UHS_DDR50:
 		pinctrl = imx_data->pins_100mhz;
 		break;
 	case MMC_TIMING_UHS_SDR104:
@@ -581,6 +581,13 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
 		   p_params->ets_cbs,
 		   p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
 
+	if (p_params->ets_enabled && !p_params->max_ets_tc) {
+		p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
+		DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+			   "ETS params: max_ets_tc is forced to %d\n",
+			   p_params->max_ets_tc);
+	}
+
 	/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
 	 * encoded in a type u32 array of size 2.
 	 */
@@ -999,6 +1006,8 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
 	u8 pfc_map = 0;
 	int i;
 
+	*pfc &= ~DCBX_PFC_ERROR_MASK;
+
 	if (p_params->pfc.willing)
 		*pfc |= DCBX_PFC_WILLING_MASK;
 	else
@@ -1253,7 +1262,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
 	struct qed_dcbx_get *dcbx_info;
 
-	dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+	dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
 	if (!dcbx_info)
 		return NULL;
 
@@ -2071,6 +2080,8 @@ static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
 	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
 		dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
 
+	dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
+
 	ptt = qed_ptt_acquire(hwfn);
 	if (!ptt)
 		return -EINVAL;
@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 		skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
 		skb_queue_tail(&dp83640->rx_queue, skb);
 		schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
-	} else {
-		netif_rx_ni(skb);
 	}
 
 	return true;
@@ -254,14 +254,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 	tx_overhead = 0x40;
 
 	len = skb->len;
-	if (skb_headroom(skb) < tx_overhead) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+	if (skb_cow_head(skb, tx_overhead)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	__skb_push(skb, tx_overhead);
@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 {
 	int len = skb->len;
 
-	if (skb_headroom(skb) < 2) {
-		struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+	if (skb_cow_head(skb, 2)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 	skb_push(skb, 2);
@@ -801,18 +801,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
 	}
 
 	/* We now decide whether we can put our special header into the sk_buff */
-	if (skb_cloned(skb) || skb_headroom(skb) < 2) {
-		/* no such luck - we make our own */
-		struct sk_buff *copied_skb;
-		copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
-		dev_kfree_skb_irq(skb);
-		skb = copied_skb;
-		if (!copied_skb) {
-			net->stats.tx_errors++;
-			netif_start_queue(net);
-			spin_unlock_irq(&kaweth->device_lock);
-			return NETDEV_TX_OK;
-		}
+	if (skb_cow_head(skb, 2)) {
+		net->stats.tx_errors++;
+		netif_start_queue(net);
+		spin_unlock_irq(&kaweth->device_lock);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
 	}
 
 	private_header = (__le16 *)__skb_push(skb, 2);
@@ -2608,14 +2608,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
 {
 	u32 tx_cmd_a, tx_cmd_b;
 
-	if (skb_headroom(skb) < TX_OVERHEAD) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	if (lan78xx_linearize(skb) < 0)
@@ -2204,13 +2204,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
 {
 	u32 tx_cmd_a, tx_cmd_b;
 
-	if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
-		struct sk_buff *skb2 =
-			skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
@@ -2002,13 +2002,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
 	/* We do not advertise SG, so skbs should be already linearized */
 	BUG_ON(skb_shinfo(skb)->nr_frags);
 
-	if (skb_headroom(skb) < overhead) {
-		struct sk_buff *skb2 = skb_copy_expand(skb,
-			overhead, 0, flags);
+	/* Make writable and expand header space by overhead if required */
+	if (skb_cow_head(skb, overhead)) {
+		/* Must deallocate here as returning NULL to indicate error
+		 * means the skb won't be deallocated in the caller.
+		 */
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	if (csum) {
@@ -457,14 +457,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
 	len = skb->len;
 
-	if (skb_headroom(skb) < SR_TX_OVERHEAD) {
-		struct sk_buff *skb2;
-
-		skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
+	if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
 		dev_kfree_skb_any(skb);
-		skb = skb2;
-		if (!skb)
-			return NULL;
+		return NULL;
 	}
 
 	__skb_push(skb, SR_TX_OVERHEAD);
@@ -1315,6 +1315,14 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 			if (target)
 				table->entries[state] = target;
 
+			/*
+			 * Don't allow transitions to the deepest state
+			 * if it's quirked off.
+			 */
+			if (state == ctrl->npss &&
+			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
+				continue;
+
 			/*
 			 * Is this state a useful non-operational state for
 			 * higher-power states to autonomously transition to?
@@ -1387,16 +1395,15 @@ struct nvme_core_quirk_entry {
 };
 
 static const struct nvme_core_quirk_entry core_quirks[] = {
-	/*
-	 * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes
-	 * the controller to go out to lunch.  It dies when the watchdog
-	 * timer reads CSTS and gets 0xffffffff.
-	 */
 	{
-		.vid = 0x144d,
-		.fr = "BXW75D0Q",
+		/*
+		 * This Toshiba device seems to die using any APST states.  See:
+		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
+		 */
+		.vid = 0x1179,
+		.mn = "THNSF5256GPUK TOSHIBA",
 		.quirks = NVME_QUIRK_NO_APST,
-	},
+	}
 };
 
 /* match is null-terminated but idstr is space-padded. */
@@ -83,6 +83,11 @@ enum nvme_quirks {
 	 * APST should not be used.
 	 */
 	NVME_QUIRK_NO_APST			= (1 << 4),
+
+	/*
+	 * The deepest sleep state should not be used.
+	 */
+	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),
 };
 
 /*
@@ -19,6 +19,7 @@
 #include <linux/blk-mq-pci.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
@@ -1943,10 +1944,31 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	return -ENODEV;
 }
 
+static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+{
+	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
+		/*
+		 * Several Samsung devices seem to drop off the PCIe bus
+		 * randomly when APST is on and uses the deepest sleep state.
+		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
+		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
+		 * 950 PRO 256GB", but it seems to be restricted to two Dell
+		 * laptops.
+		 */
+		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
+		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
+		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
+			return NVME_QUIRK_NO_DEEPEST_PS;
+	}
+
+	return 0;
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int node, result = -ENOMEM;
 	struct nvme_dev *dev;
+	unsigned long quirks = id->driver_data;
 
 	node = dev_to_node(&pdev->dev);
 	if (node == NUMA_NO_NODE)
@@ -1978,8 +2000,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto put_pci;
 
+	quirks |= check_dell_samsung_bug(pdev);
+
 	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-			id->driver_data);
+			quirks);
 	if (result)
 		goto release_pools;
@@ -380,9 +380,13 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
 
 static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
 	{
-		.compatible = "hisilicon,pcie-almost-ecam",
+		.compatible =  "hisilicon,hip06-pcie-ecam",
+		.data	    = (void *) &hisi_pcie_platform_ops,
+	},
+	{
+		.compatible =  "hisilicon,hip07-pcie-ecam",
 		.data       = (void *) &hisi_pcie_platform_ops,
 	},
 	{},
 };
@@ -297,14 +297,15 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 	}
 
 	/*
-	 * If the GPIO is configured as input, change the direction to output
-	 * and set the GPIO as active.
+	 * If the GPIO is not known to be already configured as output, that
+	 * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL,
+	 * change the direction to output and set the GPIO as active.
 	 * Do not force the GPIO to active when it was already output as it
 	 * could cause backlight flickering or we would enable the backlight too
 	 * early. Leave the decision of the initial backlight state for later.
 	 */
 	if (pb->enable_gpio &&
-	    gpiod_get_direction(pb->enable_gpio) == GPIOF_DIR_IN)
+	    gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT)
 		gpiod_direction_output(pb->enable_gpio, 1);
 
 	pb->power_supply = devm_regulator_get(&pdev->dev, "power");
@@ -1015,6 +1015,15 @@ cifs_dir_needs_close(struct cifsFileInfo *cfile)
 	return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
 }
 
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+	if (server->tcpStatus == CifsGood)
+		return true;
+
+	return false;
+}
+
 struct smb_version_operations smb1_operations = {
 	.send_cancel = send_nt_cancel,
 	.compare_fids = cifs_compare_fids,
@@ -1049,6 +1058,7 @@ struct smb_version_operations smb1_operations = {
 	.get_dfs_refer = CIFSGetDFSRefer,
 	.qfs_tcon = cifs_qfs_tcon,
 	.is_path_accessible = cifs_is_path_accessible,
+	.can_echo = cifs_can_echo,
 	.query_path_info = cifs_query_path_info,
 	.query_file_info = cifs_query_file_info,
 	.get_srv_inum = cifs_get_srv_inum,
@@ -2489,7 +2489,7 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
 
 int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
 {
-	if (op->opnum == OP_ILLEGAL)
+	if (op->opnum == OP_ILLEGAL || op->status == nfserr_notsupp)
 		return op_encode_hdr_size * sizeof(__be32);
 
 	BUG_ON(OPDESC(op)->op_rsize_bop == NULL);
@@ -91,6 +91,7 @@ slow:
 		return ERR_PTR(-ENOMEM);
 	}
 	d_instantiate(dentry, inode);
+	dentry->d_flags |= DCACHE_RCUACCESS;
 	dentry->d_fsdata = (void *)ns->ops;
 	d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
 	if (d) {
@@ -53,7 +53,7 @@ struct sdio_func {
 	unsigned int		state;		/* function state */
 #define SDIO_STATE_PRESENT	(1<<0)		/* present in sysfs */
 
-	u8			tmpbuf[4];	/* DMA:able scratch buffer */
+	u8			*tmpbuf;	/* DMA:able scratch buffer */
 
 	unsigned		num_info;	/* number of info strings */
 	const char		**info;		/* info strings */
@@ -35,7 +35,7 @@
 #define RTF_PREF(pref)	((pref) << 27)
 #define RTF_PREF_MASK	0x18000000
 
-#define RTF_PCPU	0x40000000
+#define RTF_PCPU	0x40000000	/* read-only: can not be set by user */
 #define RTF_LOCAL	0x80000000
@@ -3405,11 +3405,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *reader;
+	struct buffer_page *head_page;
+	struct buffer_page *commit_page;
+	unsigned commit;
 
 	cpu_buffer = iter->cpu_buffer;
 
-	return iter->head_page == cpu_buffer->commit_page &&
-		iter->head == rb_commit_index(cpu_buffer);
+	/* Remember, trace recording is off when iterator is in use */
+	reader = cpu_buffer->reader_page;
+	head_page = cpu_buffer->head_page;
+	commit_page = cpu_buffer->commit_page;
+	commit = rb_page_commit(commit_page);
+
+	return ((iter->head_page == commit_page && iter->head == commit) ||
+		(iter->head_page == reader && commit_page == head_page &&
+		 head_page->read == commit &&
+		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
@@ -6733,11 +6733,13 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
 		return ret;
 
  out_reg:
+	ret = alloc_snapshot(&global_trace);
+	if (ret < 0)
+		goto out;
+
 	ret = register_ftrace_function_probe(glob, ops, count);
 
-	if (ret >= 0)
-		alloc_snapshot(&global_trace);
-
+ out:
 	return ret < 0 ? ret : 0;
 }
@@ -184,9 +184,9 @@ void putback_movable_pages(struct list_head *l)
 			unlock_page(page);
 			put_page(page);
 		} else {
-			putback_lru_page(page);
 			dec_node_page_state(page, NR_ISOLATED_ANON +
 					page_is_file_cache(page));
+			putback_lru_page(page);
 		}
 	}
 }
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned, flags;
+	unsigned long nr_scanned;
 	bool isolated_pageblocks;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
@@ -1142,7 +1142,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
@@ -1150,9 +1150,8 @@ static void free_one_page(struct zone *zone,
 				unsigned int order,
 				int migratetype)
 {
-	unsigned long nr_scanned, flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	__count_vm_events(PGFREE, 1 << order);
+	unsigned long nr_scanned;
+	spin_lock(&zone->lock);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
 		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
 	__free_one_page(page, pfn, zone, order, migratetype);
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,6 +1239,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
 
@@ -1247,7 +1247,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+	local_irq_save(flags);
+	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			int migratetype, bool cold)
 {
 	int i, alloced = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	 * pages added to the pcp list.
 	 */
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 	return alloced;
 }
 
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
+	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (in_interrupt()) {
-		__free_pages_ok(page, 0);
-		return;
-	}
-
 	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_pcppage_migratetype(page, migratetype);
-	preempt_disable();
+	local_irq_save(flags);
+	__count_vm_event(PGFREE);
 
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
 		migratetype = MIGRATE_MOVABLE;
 	}
 
-	__count_vm_event(PGFREE);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	}
 
 out:
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /*
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
 	struct page *page;
 
-	VM_BUG_ON(in_interrupt());
-
 	do {
 		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,8 +2682,9 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct list_head *list;
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 	struct page *page;
+	unsigned long flags;
 
-	preempt_disable();
+	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[migratetype];
 	page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
@@ -2695,7 +2692,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
 	}
-	preempt_enable();
+	local_irq_restore(flags);
 	return page;
 }
 
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	unsigned long flags;
 	struct page *page;
 
-	if (likely(order == 0) && !in_interrupt()) {
+	if (likely(order == 0)) {
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				gfp_flags, migratetype);
 		goto out;
@@ -1768,8 +1768,7 @@ void __init init_mm_internals(void)
 {
 	int ret __maybe_unused;
 
-	mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
-				       WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
 
 #ifdef CONFIG_SMP
 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
 		struct netdev_queue *txq;
+		unsigned int q_index;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			kfree_skb(skb);
 			continue;
 		}
 
-		txq = skb_get_tx_queue(dev, skb);
-
 		local_irq_save(flags);
+		/* check if skb->queue_mapping is still valid */
+		q_index = skb_get_queue_mapping(skb);
+		if (unlikely(q_index >= dev->real_num_tx_queues)) {
+			q_index = q_index % dev->real_num_tx_queues;
+			skb_set_queue_mapping(skb, q_index);
+		}
+		txq = netdev_get_tx_queue(dev, q_index);
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
@@ -388,7 +388,6 @@ looped_back:
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 				  ((&hdr->segments_left) -
 				   skb_network_header(skb)));
-		kfree_skb(skb);
 		return -1;
 	}
@@ -774,7 +774,8 @@ failure:
  *	Delete a VIF entry
  */
 
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+		       struct list_head *head)
 {
 	struct mif_device *v;
 	struct net_device *dev;
@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
 					     dev->ifindex, &in6_dev->cnf);
 	}
 
-	if (v->flags & MIFF_REGISTER)
+	if ((v->flags & MIFF_REGISTER) && !notify)
 		unregister_netdevice_queue(dev, head);
 
 	dev_put(dev);
@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
 	struct mr6_table *mrt;
 	struct mif_device *v;
 	int ct;
-	LIST_HEAD(list);
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
 		v = &mrt->vif6_table[0];
 		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
 			if (v->dev == dev)
-				mif6_delete(mrt, ct, &list);
+				mif6_delete(mrt, ct, 1, NULL);
 		}
 	}
-	unregister_netdevice_many(&list);
 
 	return NOTIFY_DONE;
 }
@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
 	for (i = 0; i < mrt->maxvif; i++) {
 		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
 			continue;
-		mif6_delete(mrt, i, &list);
+		mif6_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1708,7 +1707,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
 			return -EFAULT;
 		rtnl_lock();
-		ret = mif6_delete(mrt, mifi, NULL);
+		ret = mif6_delete(mrt, mifi, 0, NULL);
 		rtnl_unlock();
 		return ret;
@@ -1854,6 +1854,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
 	int addr_type;
 	int err = -EINVAL;
 
+	/* RTF_PCPU is an internal flag; can not be set by userspace */
+	if (cfg->fc_flags & RTF_PCPU)
+		goto out;
+
 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
 		goto out;
 #ifndef CONFIG_IPV6_SUBTREES
@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
 		struct sr6_tlv *tlv;
 		unsigned int tlv_len;
 
+		if (trailing < sizeof(*tlv))
+			return false;
+
 		tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
 		tlv_len = sizeof(*tlv) + tlv->len;
@@ -63,8 +63,13 @@ struct pfkey_sock {
 		} u;
 		struct sk_buff	*skb;
 	} dump;
+	struct mutex dump_lock;
 };
 
+static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+			       xfrm_address_t *saddr, xfrm_address_t *daddr,
+			       u16 *family);
+
 static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
 {
 	return (struct pfkey_sock *)sk;
@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
 {
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 	struct sock *sk;
+	struct pfkey_sock *pfk;
 	int err;
 
 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
 	if (sk == NULL)
 		goto out;
 
+	pfk = pfkey_sk(sk);
+	mutex_init(&pfk->dump_lock);
+
 	sock->ops = &pfkey_ops;
 	sock_init_data(sock, sk);
 
@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
 	struct sadb_msg *hdr;
 	int rc;
 
+	mutex_lock(&pfk->dump_lock);
+	if (!pfk->dump.dump) {
+		rc = 0;
+		goto out;
+	}
+
 	rc = pfk->dump.dump(pfk);
-	if (rc == -ENOBUFS)
-		return 0;
+	if (rc == -ENOBUFS) {
+		rc = 0;
+		goto out;
+	}
 
 	if (pfk->dump.skb) {
-		if (!pfkey_can_dump(&pfk->sk))
-			return 0;
+		if (!pfkey_can_dump(&pfk->sk)) {
+			rc = 0;
+			goto out;
+		}
 
 		hdr = (struct sadb_msg *) pfk->dump.skb->data;
 		hdr->sadb_msg_seq = 0;
@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
 	}
 
 	pfkey_terminate_dump(pfk);
+
+out:
+	mutex_unlock(&pfk->dump_lock);
 	return rc;
 }
 
@@ -1793,19 +1815,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
 	struct xfrm_address_filter *filter = NULL;
 	struct pfkey_sock *pfk = pfkey_sk(sk);
 
-	if (pfk->dump.dump != NULL)
+	mutex_lock(&pfk->dump_lock);
+	if (pfk->dump.dump != NULL) {
+		mutex_unlock(&pfk->dump_lock);
 		return -EBUSY;
+	}
 
 	proto = pfkey_satype2proto(hdr->sadb_msg_satype);
-	if (proto == 0)
+	if (proto == 0) {
+		mutex_unlock(&pfk->dump_lock);
 		return -EINVAL;
+	}
 
 	if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
 		struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
 
 		filter = kmalloc(sizeof(*filter), GFP_KERNEL);
-		if (filter == NULL)
+		if (filter == NULL) {
+			mutex_unlock(&pfk->dump_lock);
 			return -ENOMEM;
+		}
 
 		memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
 		       sizeof(xfrm_address_t));
@@ -1821,6 +1850,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
 	pfk->dump.dump = pfkey_dump_sa;
 	pfk->dump.done = pfkey_dump_sa_done;
 	xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
+	mutex_unlock(&pfk->dump_lock);
 
 	return pfkey_do_dump(pfk);
 }
@@ -1913,19 +1943,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 
 	/* addresses present only in tunnel mode */
 	if (t->mode == XFRM_MODE_TUNNEL) {
-		u8 *sa = (u8 *) (rq + 1);
-		int family, socklen;
+		int err;
 
-		family = pfkey_sockaddr_extract((struct sockaddr *)sa,
-						&t->saddr);
-		if (!family)
-			return -EINVAL;
-
-		socklen = pfkey_sockaddr_len(family);
-		if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
-					   &t->id.daddr) != family)
-			return -EINVAL;
-		t->encap_family = family;
+		err = parse_sockaddr_pair(
+			(struct sockaddr *)(rq + 1),
+			rq->sadb_x_ipsecrequest_len - sizeof(*rq),
+			&t->saddr, &t->id.daddr, &t->encap_family);
+		if (err)
+			return err;
 	} else
 		t->encap_family = xp->family;
 
@@ -1945,7 +1970,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
 	if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
 		return -EINVAL;
 
-	while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+	while (len >= sizeof(*rq)) {
+		if (len < rq->sadb_x_ipsecrequest_len ||
+		    rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+			return -EINVAL;
+
 		if ((err = parse_ipsecrequest(xp, rq)) < 0)
 			return err;
 		len -= rq->sadb_x_ipsecrequest_len;
@@ -2408,7 +2437,6 @@ out:
 	return err;
 }
 
-#ifdef CONFIG_NET_KEY_MIGRATE
 static int pfkey_sockaddr_pair_size(sa_family_t family)
 {
 	return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2420,7 +2448,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
 {
 	int af, socklen;
 
-	if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+	if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
 		return -EINVAL;
 
 	af = pfkey_sockaddr_extract(sa, saddr);
@@ -2436,6 +2464,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
 	return 0;
 }
 
+#ifdef CONFIG_NET_KEY_MIGRATE
 static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 				    struct xfrm_migrate *m)
 {
@@ -2443,13 +2472,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 	struct sadb_x_ipsecrequest *rq2;
 	int mode;
 
-	if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-	    len < rq1->sadb_x_ipsecrequest_len)
+	if (len < sizeof(*rq1) ||
+	    len < rq1->sadb_x_ipsecrequest_len ||
+	    rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
 		return -EINVAL;
 
 	/* old endpoints */
 	err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
-				  rq1->sadb_x_ipsecrequest_len,
+				  rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
 				  &m->old_saddr, &m->old_daddr,
 				  &m->old_family);
 	if (err)
@@ -2458,13 +2488,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 	rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
 	len -= rq1->sadb_x_ipsecrequest_len;
 
-	if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-	    len < rq2->sadb_x_ipsecrequest_len)
+	if (len <= sizeof(*rq2) ||
+	    len < rq2->sadb_x_ipsecrequest_len ||
+	    rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
 		return -EINVAL;
 
 	/* new endpoints */
 	err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
-				  rq2->sadb_x_ipsecrequest_len,
+				  rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
 				  &m->new_saddr, &m->new_daddr,
 				  &m->new_family);
 	if (err)
@@ -2679,14 +2710,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
 {
 	struct pfkey_sock *pfk = pfkey_sk(sk);
 
-	if (pfk->dump.dump != NULL)
+	mutex_lock(&pfk->dump_lock);
+	if (pfk->dump.dump != NULL) {
+		mutex_unlock(&pfk->dump_lock);
 		return -EBUSY;
+	}
 
 	pfk->dump.msg_version = hdr->sadb_msg_version;
 	pfk->dump.msg_portid = hdr->sadb_msg_pid;
 	pfk->dump.dump = pfkey_dump_sp;
 	pfk->dump.done = pfkey_dump_sp_done;
 	xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+	mutex_unlock(&pfk->dump_lock);
 
 	return pfkey_do_dump(pfk);
 }
@@ -197,6 +197,51 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
 	return len;
 }
 
+static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
+					 struct sk_buff *skb,
+					 int rtap_vendor_space)
+{
+	struct {
+		struct ieee80211_hdr_3addr hdr;
+		u8 category;
+		u8 action_code;
+	} __packed action;
+
+	if (!sdata)
+		return;
+
+	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
+
+	if (skb->len < rtap_vendor_space + sizeof(action) +
+		       VHT_MUMIMO_GROUPS_DATA_LEN)
+		return;
+
+	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
+		return;
+
+	skb_copy_bits(skb, rtap_vendor_space, &action, sizeof(action));
+
+	if (!ieee80211_is_action(action.hdr.frame_control))
+		return;
+
+	if (action.category != WLAN_CATEGORY_VHT)
+		return;
+
+	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
+		return;
+
+	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
+		return;
+
+	skb = skb_copy(skb, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+	skb_queue_tail(&sdata->skb_queue, skb);
+	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
 /*
  * ieee80211_add_rx_radiotap_header - add radiotap header
 *
@@ -504,7 +549,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 	struct net_device *prev_dev = NULL;
 	int present_fcs_len = 0;
 	unsigned int rtap_vendor_space = 0;
-	struct ieee80211_mgmt *mgmt;
 	struct ieee80211_sub_if_data *monitor_sdata =
 		rcu_dereference(local->monitor_sdata);
 
@@ -551,6 +595,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 		return origskb;
 	}
 
+	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_vendor_space);
+
 	/* room for the radiotap header based on driver features */
 	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
 	needed_headroom = rt_hdrlen - rtap_vendor_space;
@@ -606,23 +652,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 		ieee80211_rx_stats(sdata->dev, skb->len);
 	}
 
-	mgmt = (void *)skb->data;
-	if (monitor_sdata &&
-	    skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
-	    ieee80211_is_action(mgmt->frame_control) &&
-	    mgmt->u.action.category == WLAN_CATEGORY_VHT &&
-	    mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
-	    is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
-	    ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
-		struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
-
-		if (mu_skb) {
-			mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
-			skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
-			ieee80211_queue_work(&local->hw, &monitor_sdata->work);
-		}
-	}
-
 	if (prev_dev) {
 		skb->dev = prev_dev;
 		netif_receive_skb(skb);
@@ -3598,6 +3627,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
 			    !ether_addr_equal(bssid, hdr->addr1))
 				return false;
 		}
+
+		/*
+		 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+		 * the BSSID - we've checked that already but may have accepted
+		 * the wildcard (ff:ff:ff:ff:ff:ff).
+		 *
+		 * It also says:
+		 *	The BSSID of the Data frame is determined as follows:
+		 *	a) If the STA is contained within an AP or is associated
+		 *	   with an AP, the BSSID is the address currently in use
+		 *	   by the STA contained in the AP.
+		 *
+		 * So we should not accept data frames with an address that's
+		 * multicast.
+		 *
+		 * Accepting it also opens a security problem because stations
+		 * could encrypt it with the GTK and inject traffic that way.
+		 */
+		if (ieee80211_is_data(hdr->frame_control) && multicast)
+			return false;
+
 		return true;
 	case NL80211_IFTYPE_WDS:
 		if (bssid || !ieee80211_is_data(hdr->frame_control))
@@ -658,7 +658,9 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 	}
 
 	if (plen != len) {
-		skb_pad(skb, plen - len);
+		rc = skb_pad(skb, plen - len);
+		if (rc)
+			goto out_node;
 		skb_put(skb, plen - len);
 	}
@ -529,20 +529,20 @@ errout:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb)
|
||||
static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
|
||||
{
|
||||
a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL);
|
||||
if (!a->act_cookie)
|
||||
return -ENOMEM;
|
||||
struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return NULL;
|
||||
|
||||
a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
|
||||
if (!a->act_cookie->data) {
|
||||
kfree(a->act_cookie);
|
||||
return -ENOMEM;
|
||||
c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
|
||||
if (!c->data) {
|
||||
kfree(c);
|
||||
return NULL;
|
||||
}
|
||||
a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]);
|
||||
c->len = nla_len(tb[TCA_ACT_COOKIE]);
|
||||
|
||||
return 0;
|
||||
return c;
|
||||
}
|
||||
|
||||
struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
|
||||
@ -551,6 +551,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
|
||||
{
|
||||
struct tc_action *a;
|
||||
struct tc_action_ops *a_o;
|
||||
struct tc_cookie *cookie = NULL;
|
||||
char act_name[IFNAMSIZ];
|
||||
struct nlattr *tb[TCA_ACT_MAX + 1];
|
||||
struct nlattr *kind;
|
||||
@ -566,6 +567,18 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
|
||||
goto err_out;
|
||||
if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
|
||||
goto err_out;
|
||||
if (tb[TCA_ACT_COOKIE]) {
|
||||
int cklen = nla_len(tb[TCA_ACT_COOKIE]);
|
||||
|
||||
if (cklen > TC_COOKIE_MAX_SIZE)
|
||||
goto err_out;
|
||||
|
||||
cookie = nla_memdup_cookie(tb);
|
||||
if (!cookie) {
|
||||
err = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = -EINVAL;
|
||||
if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
|
||||
@ -604,20 +617,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
    if (err < 0)
        goto err_mod;

    if (tb[TCA_ACT_COOKIE]) {
        int cklen = nla_len(tb[TCA_ACT_COOKIE]);

        if (cklen > TC_COOKIE_MAX_SIZE) {
            err = -EINVAL;
            tcf_hash_release(a, bind);
            goto err_mod;
        }

        if (nla_memdup_cookie(a, tb) < 0) {
            err = -ENOMEM;
            tcf_hash_release(a, bind);
            goto err_mod;
    if (name == NULL && tb[TCA_ACT_COOKIE]) {
        if (a->act_cookie) {
            kfree(a->act_cookie->data);
            kfree(a->act_cookie);
        }
        a->act_cookie = cookie;
    }

    /* module count goes up only when brand new policy is created
@ -632,6 +637,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
err_mod:
    module_put(a_o->owner);
err_out:
    if (cookie) {
        kfree(cookie->data);
        kfree(cookie);
    }
    return ERR_PTR(err);
}
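Read together, the three act_api.c hunks change the cookie's ownership model to allocate-early: the cookie is parsed and duplicated before any action state exists, ownership moves to the action only on the success path, and the shared err_out label frees only a cookie that is still detached. A condensed restatement of the flow (not new kernel code, just the hunks above in one place):

    cookie = nla_memdup_cookie(tb);       /* 1. duplicate up front    */
    if (!cookie) {
        err = -ENOMEM;
        goto err_out;                     /* nothing else to unwind   */
    }
    /* ... create or look up action 'a'; failures jump to err_out ... */
    if (a->act_cookie) {                  /* 2. drop any old cookie   */
        kfree(a->act_cookie->data);
        kfree(a->act_cookie);
    }
    a->act_cookie = cookie;               /* 3. ownership transferred */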
@ -46,7 +46,7 @@ static unsigned long key_gc_flags;
 * immediately unlinked.
 */
struct key_type key_type_dead = {
    .name = "dead",
    .name = ".dead",
};

/*
@ -273,7 +273,8 @@ error:
 * Create and join an anonymous session keyring or join a named session
 * keyring, creating it if necessary. A named session keyring must have Search
 * permission for it to be joined. Session keyrings without this permit will
 * be skipped over.
 * be skipped over. It is not permitted for userspace to create or join
 * keyrings whose names begin with a dot.
 *
 * If successful, the ID of the joined session keyring will be returned.
 */
@ -290,12 +291,16 @@ long keyctl_join_session_keyring(const char __user *_name)
            ret = PTR_ERR(name);
            goto error;
        }

        ret = -EPERM;
        if (name[0] == '.')
            goto error_name;
    }

    /* join the session */
    ret = join_session_keyring(name);
error_name:
    kfree(name);

error:
    return ret;
}
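From userspace, the new restriction is visible as an EPERM when joining a dot-prefixed session keyring, matching the now-reserved ".dead" style of internal key type names. A hedged example using libkeyutils (the keyring name below is arbitrary; build with -lkeyutils):

    #include <errno.h>
    #include <stdio.h>
    #include <keyutils.h>

    int main(void)
    {
        /* after this change, dot-prefixed names are kernel-reserved */
        key_serial_t id = keyctl_join_session_keyring(".example");

        if (id < 0 && errno == EPERM)
            printf("dot-prefixed session keyrings are rejected\n");
        return 0;
    }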
@ -1253,8 +1258,8 @@ error:
 * Read or set the default keyring in which request_key() will cache keys and
 * return the old setting.
 *
 * If a process keyring is specified then this will be created if it doesn't
 * yet exist. The old setting will be returned if successful.
 * If a thread or process keyring is specified then it will be created if it
 * doesn't yet exist. The old setting will be returned if successful.
 */
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
@ -1279,11 +1284,8 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)

    case KEY_REQKEY_DEFL_PROCESS_KEYRING:
        ret = install_process_keyring_to_cred(new);
        if (ret < 0) {
            if (ret != -EEXIST)
                goto error;
            ret = 0;
        }
        if (ret < 0)
            goto error;
        goto set;

    case KEY_REQKEY_DEFL_DEFAULT:
@ -128,13 +128,18 @@ error:
}

/*
 * Install a fresh thread keyring directly to new credentials. This keyring is
 * allowed to overrun the quota.
 * Install a thread keyring to the given credentials struct if it didn't have
 * one already. This is allowed to overrun the quota.
 *
 * Return: 0 if a thread keyring is now present; -errno on failure.
 */
int install_thread_keyring_to_cred(struct cred *new)
{
    struct key *keyring;

    if (new->thread_keyring)
        return 0;

    keyring = keyring_alloc("_tid", new->uid, new->gid, new,
                            KEY_POS_ALL | KEY_USR_VIEW,
                            KEY_ALLOC_QUOTA_OVERRUN,
@ -147,7 +152,9 @@ int install_thread_keyring_to_cred(struct cred *new)
}

/*
 * Install a fresh thread keyring, discarding the old one.
 * Install a thread keyring to the current task if it didn't have one already.
 *
 * Return: 0 if a thread keyring is now present; -errno on failure.
 */
static int install_thread_keyring(void)
{
@ -158,8 +165,6 @@ static int install_thread_keyring(void)
    if (!new)
        return -ENOMEM;

    BUG_ON(new->thread_keyring);

    ret = install_thread_keyring_to_cred(new);
    if (ret < 0) {
        abort_creds(new);
@ -170,17 +175,17 @@ static int install_thread_keyring(void)
}

/*
 * Install a process keyring directly to a credentials struct.
 * Install a process keyring to the given credentials struct if it didn't have
 * one already. This is allowed to overrun the quota.
 *
 * Returns -EEXIST if there was already a process keyring, 0 if one installed,
 * and other value on any other error
 * Return: 0 if a process keyring is now present; -errno on failure.
 */
int install_process_keyring_to_cred(struct cred *new)
{
    struct key *keyring;

    if (new->process_keyring)
        return -EEXIST;
        return 0;

    keyring = keyring_alloc("_pid", new->uid, new->gid, new,
                            KEY_POS_ALL | KEY_USR_VIEW,
@ -194,11 +199,9 @@ int install_process_keyring_to_cred(struct cred *new)
}

/*
 * Make sure a process keyring is installed for the current process. The
 * existing process keyring is not replaced.
 * Install a process keyring to the current task if it didn't have one already.
 *
 * Returns 0 if there is a process keyring by the end of this function, some
 * error otherwise.
 * Return: 0 if a process keyring is now present; -errno on failure.
 */
static int install_process_keyring(void)
{
@ -212,14 +215,18 @@ static int install_process_keyring(void)
    ret = install_process_keyring_to_cred(new);
    if (ret < 0) {
        abort_creds(new);
        return ret != -EEXIST ? ret : 0;
        return ret;
    }

    return commit_creds(new);
}

/*
 * Install a session keyring directly to a credentials struct.
 * Install the given keyring as the session keyring of the given credentials
 * struct, replacing the existing one if any. If the given keyring is NULL,
 * then install a new anonymous session keyring.
 *
 * Return: 0 on success; -errno on failure.
 */
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
@ -254,8 +261,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
}

/*
 * Install a session keyring, discarding the old one. If a keyring is not
 * supplied, an empty one is invented.
 * Install the given keyring as the session keyring of the current task,
 * replacing the existing one if any. If the given keyring is NULL, then
 * install a new anonymous session keyring.
 *
 * Return: 0 on success; -errno on failure.
 */
static int install_session_keyring(struct key *keyring)
{
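The rewritten thread and process helpers converge on one contract: return 0 whenever the keyring exists by the time the call returns, whether or not this call created it, which is why callers such as keyctl_set_reqkey_keyring() above can drop their -EEXIST special cases. A self-contained sketch of that idempotent-install contract (hypothetical names, not kernel code):

    #include <errno.h>
    #include <stdlib.h>

    struct cred_like { void *thread_keyring; };  /* stand-in for struct cred */

    static int install_once(struct cred_like *c)
    {
        if (c->thread_keyring)           /* already present: still success */
            return 0;
        c->thread_keyring = malloc(16);  /* stand-in for keyring_alloc()   */
        return c->thread_keyring ? 0 : -ENOMEM;
    }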
@ -282,7 +282,7 @@ static void test_arraymap_percpu(int task, void *data)
{
    unsigned int nr_cpus = bpf_num_possible_cpus();
    int key, next_key, fd, i;
    long values[nr_cpus];
    long long values[nr_cpus];

    fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
                        sizeof(values[0]), 2, 0);
@ -340,7 +340,7 @@ static void test_arraymap_percpu_many_keys(void)
     * allocator more than anything else
     */
    unsigned int nr_keys = 2000;
    long values[nr_cpus];
    long long values[nr_cpus];
    int key, fd, i;

    fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
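The two selftest fixes swap long for long long because per-CPU map values are exchanged with the kernel as one 8-byte slot per possible CPU; long is 8 bytes only on LP64, so the old buffers were under-sized on 32-bit builds. A hedged usage sketch (header names vary by tree; this assumes the selftests' bpf_util.h for bpf_num_possible_cpus() and tools/lib/bpf for bpf_map_lookup_elem(), with fd referring to a BPF_MAP_TYPE_PERCPU_ARRAY created with 8-byte values as in the hunks):

    /* Sum one value across all possible CPUs from a per-CPU array map. */
    static long long percpu_sum(int fd, int key)
    {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        long long values[nr_cpus];     /* 8 bytes per slot on every ABI */
        long long total = 0;
        unsigned int i;

        if (bpf_map_lookup_elem(fd, &key, values))
            return -1;                 /* lookup failed */
        for (i = 0; i < nr_cpus; i++)
            total += values[i];        /* kernel fills one slot per CPU */
        return total;
    }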
@ -75,7 +75,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
{
    int fd, val;

    fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
    fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
    if (fd < 0) {
        perror("socket packet");
        exit(1);
@ -95,6 +95,24 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
    return fd;
}

static void sock_fanout_set_cbpf(int fd)
{
    struct sock_filter bpf_filter[] = {
        BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 80),      /* ldb [80] */
        BPF_STMT(BPF_RET+BPF_A, 0),              /* ret A */
    };
    struct sock_fprog bpf_prog;

    bpf_prog.filter = bpf_filter;
    bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);

    if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &bpf_prog,
                   sizeof(bpf_prog))) {
        perror("fanout data cbpf");
        exit(1);
    }
}

static void sock_fanout_set_ebpf(int fd)
{
    const int len_off = __builtin_offsetof(struct __sk_buff, len);
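A note on ordering the test relies on: PACKET_FANOUT_DATA attaches the classic BPF program to the fanout group rather than to the socket, so the socket must already have joined a group via PACKET_FANOUT, which sock_fanout_open() does before test_datapath() calls the new helper. A hedged sketch of that sequence (the group id is arbitrary; the fanout argument packs the id in the low 16 bits and the type in the high 16):

    static void fanout_join_then_filter(int fd)
    {
        int val = (PACKET_FANOUT_CBPF << 16) | 4242;  /* type | group id */

        if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val))) {
            perror("packet fanout");      /* must precede FANOUT_DATA */
            exit(1);
        }
        sock_fanout_set_cbpf(fd);         /* helper added in the hunk above */
    }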
@ -270,7 +288,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
        exit(1);
    }
    if (type == PACKET_FANOUT_CBPF)
        sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
        sock_fanout_set_cbpf(fds[0]);
    else if (type == PACKET_FANOUT_EBPF)
        sock_fanout_set_ebpf(fds[0]);
@ -38,7 +38,7 @@
# define __maybe_unused __attribute__ ((__unused__))
#endif

static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
static __maybe_unused void pair_udp_setfilter(int fd)
{
    /* the filter below checks for all of the following conditions that
     * are based on the contents of create_payload()
@ -76,23 +76,16 @@ static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
    };
    struct sock_fprog bpf_prog;

    if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
        bpf_filter[5].code = 0x16;   /* RET A */

    bpf_prog.filter = bpf_filter;
    bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
    if (setsockopt(fd, lvl, optnum, &bpf_prog,

    if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
                   sizeof(bpf_prog))) {
        perror("setsockopt SO_ATTACH_FILTER");
        exit(1);
    }
}

static __maybe_unused void pair_udp_setfilter(int fd)
{
    sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
}

static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
{
    struct sockaddr_in saddr, daddr;