Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
We got slightly different patches removing a double word
in a comment in net/ipv4/raw.c - picked the version from net.
Simple conflict in drivers/net/ethernet/ibm/ibmvnic.c. Use cached
values instead of VNIC login response buffer (following what
commit 507ebe6444 ("ibmvnic: Fix use-after-free of VNIC login
response buffer") did).
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
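Most of the hunks below are from the tree-wide conversion of "/* fall through */" style comments to the fallthrough pseudo-keyword. In the kernel, fallthrough is a macro (include/linux/compiler_attributes.h) that expands to __attribute__((__fallthrough__)) where the compiler supports it, so the intent becomes a statement the compiler can check rather than a comment it ignores. A minimal standalone sketch of the same pattern; the switch, values, and decode() are invented for illustration, not taken from the kernel:

#include <stdio.h>

/* Kernel-style fallthrough: a checkable statement instead of a
 * comment, with a no-op fallback on compilers lacking the attribute. */
#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)
#endif

static int decode(int c)
{
	int flags = 0;

	switch (c) {
	case 2:
		flags |= 2;
		fallthrough;	/* deliberately also set the low bit */
	case 1:
		flags |= 1;
		break;
	default:
		break;
	}
	return flags;
}

int main(void)
{
	printf("%d %d\n", decode(1), decode(2));	/* prints "1 3" */
	return 0;
}

With -Wimplicit-fallthrough enabled, a case that falls through without the keyword now warns, which is the point of the conversion.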
@@ -681,7 +681,7 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 				data->values[i] = AUDIT_UID_UNSET;
 				break;
 			}
-			/* fall through - if set */
+			fallthrough;	/* if set */
 		default:
 			data->values[i] = f->val;
 		}

@@ -1794,7 +1794,7 @@ static bool cg_sockopt_is_valid_access(int off, int size,
 		return prog->expected_attach_type ==
 			BPF_CGROUP_GETSOCKOPT;
 	case offsetof(struct bpf_sockopt, optname):
-		/* fallthrough */
+		fallthrough;
 	case offsetof(struct bpf_sockopt, level):
 		if (size != size_default)
 			return false;

@@ -277,7 +277,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
 			break;
 		default:
 			bpf_warn_invalid_xdp_action(act);
-			/* fallthrough */
+			fallthrough;
 		case XDP_DROP:
 			xdp_return_frame(xdpf);
 			stats->drop++;

@@ -2037,7 +2037,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
 	case BPF_PROG_TYPE_EXT:
 		if (expected_attach_type)
 			return -EINVAL;
-		/* fallthrough */
+		fallthrough;
 	default:
 		return 0;
 	}

@@ -2644,7 +2644,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
 	u32 ulen = info->raw_tracepoint.tp_name_len;
 	size_t tp_len = strlen(tp_name);

-	if (ulen && !ubuf)
+	if (!ulen ^ !ubuf)
 		return -EINVAL;

 	info->raw_tracepoint.tp_name_len = tp_len + 1;
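The raw_tracepoint hunk above tightens the argument check: !ulen ^ !ubuf is true when exactly one of "length is zero" and "buffer is NULL" holds, so it rejects both inconsistent combinations, whereas the old test only caught a length with no buffer. A small standalone demo of the expression; check() and the constants are stand-ins, not the kernel function:

#include <stdio.h>

#define EINVAL 22	/* stand-in for the kernel's -EINVAL */

static int check(unsigned int ulen, const char *ubuf)
{
	/* exactly one of (ulen == 0) / (ubuf == NULL) -> inconsistent */
	if (!ulen ^ !ubuf)
		return -EINVAL;
	return 0;
}

int main(void)
{
	char buf[8];

	printf("%d\n", check(0, NULL));	/* 0: nothing requested */
	printf("%d\n", check(8, buf));	/* 0: consistent request */
	printf("%d\n", check(8, NULL));	/* -22: length but no buffer */
	printf("%d\n", check(0, buf));	/* -22: buffer but no length */
	return 0;
}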
@@ -5334,7 +5334,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 				off_reg == dst_reg ? dst : src);
 			return -EACCES;
 		}
-		/* fall-through */
+		fallthrough;
 	default:
 		break;
 	}

@@ -11208,7 +11208,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 	default:
 		if (!prog_extension)
 			return -EINVAL;
-		/* fallthrough */
+		fallthrough;
 	case BPF_MODIFY_RETURN:
 	case BPF_LSM_MAC:
 	case BPF_TRACE_FENTRY:

@@ -93,7 +93,7 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
 		break;
 	case _LINUX_CAPABILITY_VERSION_2:
 		warn_deprecated_v2();
-		/* fall through - v3 is otherwise equivalent to v2. */
+		fallthrough;	/* v3 is otherwise equivalent to v2 */
 	case _LINUX_CAPABILITY_VERSION_3:
 		*tocopy = _LINUX_CAPABILITY_U32S_3;
 		break;

@@ -255,11 +255,11 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
 		return -EFAULT;
 	switch (_NSIG_WORDS) {
 	case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 );
-		/* fall through */
+		fallthrough;
 	case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 );
-		/* fall through */
+		fallthrough;
 	case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 );
-		/* fall through */
+		fallthrough;
 	case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 );
 	}
 #else
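The get_compat_sigset() hunk keeps its descending switch, which stitches pairs of 32-bit compat words into 64-bit sigset words and falls through so that entering at case N also fills words N-1..1; only the comments become fallthrough;. The combining step is an OR of the low word with the high word shifted up by 32. A hedged standalone illustration of that step; v and word are stand-ins for the compat and native sigset storage:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* two 32-bit "compat" words, as if copied from userspace */
	uint32_t v[2] = { 0xdeadbeef, 0x00c0ffee };
	uint64_t word;

	/* same shape as: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32) */
	word = v[0] | ((uint64_t)v[1] << 32);

	printf("%#018llx\n", (unsigned long long)word);
	/* prints 0x00c0ffeedeadbeef */
	return 0;
}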
@@ -1046,14 +1046,14 @@ int gdb_serial_stub(struct kgdb_state *ks)
 				return DBG_PASS_EVENT;
 			}
 #endif
-			/* Fall through */
+			fallthrough;
 		case 'C': /* Exception passing */
 			tmp = gdb_cmd_exception_pass(ks);
 			if (tmp > 0)
 				goto default_handle;
 			if (tmp == 0)
 				break;
-			/* Fall through - on tmp < 0 */
+			fallthrough;	/* on tmp < 0 */
 		case 'c': /* Continue packet */
 		case 's': /* Single step packet */
 			if (kgdb_contthread && kgdb_contthread != current) {

@@ -1062,7 +1062,7 @@ int gdb_serial_stub(struct kgdb_state *ks)
 				break;
 			}
 			dbg_activate_sw_breakpoints();
-			/* Fall through - to default processing */
+			fallthrough;	/* to default processing */
 		default:
 default_handle:
 			error = kgdb_arch_handle_exception(ks->ex_vector,

@@ -173,11 +173,11 @@ int kdb_get_kbd_char(void)
 	case KT_LATIN:
 		if (isprint(keychar))
 			break;		/* printable characters */
-		/* fall through */
+		fallthrough;
 	case KT_SPEC:
 		if (keychar == K_ENTER)
 			break;
-		/* fall through */
+		fallthrough;
 	default:
 		return -1;	/* ignore unprintables */
 	}

@@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
 			*word = w8;
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	default:
 		diag = KDB_BADWIDTH;
 		kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);

@@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
 			*word = w8;
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	default:
 		diag = KDB_BADWIDTH;
 		kdb_printf("kdb_getword: bad width %ld\n", (long) size);

@@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
 			diag = kdb_putarea(addr, w8);
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	default:
 		diag = KDB_BADWIDTH;
 		kdb_printf("kdb_putword: bad width %ld\n", (long) size);

@@ -84,7 +84,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 			      gfp_t gfp)
 {
 	unsigned int order;
-	struct page *page;
+	struct page *page = NULL;
 	void *addr;
 	int ret = -ENOMEM;

@@ -10034,7 +10034,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 		case IF_SRC_KERNELADDR:
 		case IF_SRC_KERNEL:
 			kernel = 1;
-			/* fall through */
+			fallthrough;

 		case IF_SRC_FILEADDR:
 		case IF_SRC_FILE:
@@ -4,6 +4,7 @@ menu "GCOV-based kernel profiling"
 config GCOV_KERNEL
 	bool "Enable gcov-based kernel profiling"
 	depends on DEBUG_FS
+	depends on !CC_IS_GCC || GCC_VERSION < 100000
 	select CONSTRUCTORS if !UML
 	default n
 	help

@@ -173,7 +173,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)

 			__irq_wake_thread(desc, action);

-			/* Fall through - to add to randomness */
+			fallthrough;	/* to add to randomness */
 		case IRQ_HANDLED:
 			*flags |= action->flags;
 			break;

@@ -271,7 +271,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
 		cpumask_copy(desc->irq_common_data.affinity, mask);
-		/* fall through */
+		fallthrough;
 	case IRQ_SET_MASK_OK_NOCOPY:
 		irq_validate_effective_affinity(data);
 		irq_set_thread_affinity(desc);

@@ -868,7 +868,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 	case IRQ_SET_MASK_OK_DONE:
 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 		irqd_set(&desc->irq_data, flags);
-		/* fall through */
+		fallthrough;

 	case IRQ_SET_MASK_OK_NOCOPY:
 		flags = irqd_get_trigger_type(&desc->irq_data);

@@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 	unsigned int cpu, bit;
 	struct cpumap *cm;

+	/*
+	 * Not required in theory, but matrix_find_best_cpu() uses
+	 * for_each_cpu() which ignores the cpumask on UP.
+	 */
+	if (cpumask_empty(msk))
+		return -EINVAL;
+
 	cpu = matrix_find_best_cpu(m, msk);
 	if (cpu == UINT_MAX)
 		return -ENOSPC;

@@ -684,12 +684,12 @@ bool kallsyms_show_value(const struct cred *cred)
 	case 0:
 		if (kallsyms_for_perf())
 			return true;
-		/* fallthrough */
+		fallthrough;
 	case 1:
 		if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
 				     CAP_OPT_NOAUDIT) == 0)
 			return true;
-		/* fallthrough */
+		fallthrough;
 	default:
 		return false;
 	}
@@ -3756,7 +3756,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)

 skip_checks:
 	/* we'll do an OFF -> ON transition: */
-	this_cpu_write(hardirqs_enabled, 1);
+	__this_cpu_write(hardirqs_enabled, 1);
 	trace->hardirq_enable_ip = ip;
 	trace->hardirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_on_events);

@@ -3795,7 +3795,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 	/*
	 * We have done an ON -> OFF transition:
	 */
-	this_cpu_write(hardirqs_enabled, 0);
+	__this_cpu_write(hardirqs_enabled, 0);
 	trace->hardirq_disable_ip = ip;
 	trace->hardirq_disable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_off_events);

@@ -4977,6 +4977,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;

+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
 	if (unlikely(current->lockdep_recursion)) {
 		/* XXX allow trylock from NMI ?!? */
 		if (lockdep_nmi() && !trylock) {

@@ -5001,7 +5003,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	check_flags(flags);

 	current->lockdep_recursion++;
-	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
 	lockdep_recursion_finish();

@@ -5013,13 +5014,15 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;

+	trace_lock_release(lock, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);

 	current->lockdep_recursion++;
-	trace_lock_release(lock, ip);
 	if (__lock_release(lock, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();

@@ -5205,8 +5208,6 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 		hlock->holdtime_stamp = now;
 	}

-	trace_lock_acquired(lock, ip);
-
 	stats = get_lock_stats(hlock_class(hlock));
 	if (waittime) {
 		if (hlock->read)

@@ -5225,6 +5226,8 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;

+	trace_lock_acquired(lock, ip);
+
 	if (unlikely(!lock_stat || !debug_locks))
 		return;

@@ -5234,7 +5237,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion++;
-	trace_lock_contended(lock, ip);
 	__lock_contended(lock, ip);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);

@@ -5245,6 +5247,8 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;

+	trace_lock_contended(lock, ip);
+
 	if (unlikely(!lock_stat || !debug_locks))
 		return;

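A note on the lockdep hunks above: each trace_lock_*() call moves from inside the lockdep_recursion / lock_stat guarded region to the top of its function, with the old call site deleted further down, apparently so the tracepoint fires even when lockdep's own bookkeeping bails out early. A minimal user-space sketch of that "trace first, guard second" shape; every name here is invented for illustration:

#include <stdio.h>

/* Illustrative stand-in for a tracepoint. */
static void trace_lock_acquire(const char *name)
{
	printf("lock_acquire: %s\n", name);
}

static int lockdep_recursion;

static void lock_acquire(const char *name)
{
	/* Emit the event before the recursion guard, so callers that
	 * would be skipped by the guard are still visible to tracing. */
	trace_lock_acquire(name);

	if (lockdep_recursion)
		return;

	lockdep_recursion++;
	/* ... validation and dependency tracking would go here ... */
	lockdep_recursion--;
}

int main(void)
{
	lock_acquire("demo_mutex");
	return 0;
}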
@@ -659,7 +659,7 @@ static void power_down(void)
 		break;
 	case HIBERNATION_PLATFORM:
 		hibernation_platform_enter();
-		/* Fall through */
+		fallthrough;
 	case HIBERNATION_SHUTDOWN:
 		if (pm_power_off)
 			kernel_power_off();

@@ -119,7 +119,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 	 * and add, then see if the aggregate has changed.
	 */
 		plist_del(node, &c->list);
-		/* fall through */
+		fallthrough;
 	case PM_QOS_ADD_REQ:
 		plist_node_init(node, new_value);
 		plist_add(node, &c->list);

@@ -188,7 +188,7 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf,
 		break;
 	case PM_QOS_UPDATE_REQ:
 		pm_qos_flags_remove_req(pqf, req);
-		/* fall through */
+		fallthrough;
 	case PM_QOS_ADD_REQ:
 		req->flags = val;
 		INIT_LIST_HEAD(&req->node);

@@ -2320,7 +2320,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 			state = possible;
 			break;
 		}
-		/* Fall-through */
+		fallthrough;
 	case possible:
 		do_set_cpus_allowed(p, cpu_possible_mask);
 		state = fail;
@@ -54,17 +54,18 @@ __setup("hlt", cpu_idle_nopoll_setup);

 static noinline int __cpuidle cpu_idle_poll(void)
 {
-	rcu_idle_enter();
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
+	trace_cpu_idle(0, smp_processor_id());
 	stop_critical_timings();
+	rcu_idle_enter();
+	local_irq_enable();

 	while (!tif_need_resched() &&
-		(cpu_idle_force_poll || tick_check_broadcast_expired()))
+	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
 		cpu_relax();
-	start_critical_timings();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+
 	rcu_idle_exit();
+	start_critical_timings();
+	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

 	return 1;
 }

@@ -90,9 +91,14 @@ void __cpuidle default_idle_call(void)
 	if (current_clr_polling_and_test()) {
 		local_irq_enable();
 	} else {
+
+		trace_cpu_idle(1, smp_processor_id());
 		stop_critical_timings();
+		rcu_idle_enter();
 		arch_cpu_idle();
+		rcu_idle_exit();
 		start_critical_timings();
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	}
 }

@@ -158,7 +164,6 @@ static void cpuidle_idle_call(void)

 	if (cpuidle_not_available(drv, dev)) {
 		tick_nohz_idle_stop_tick();
-		rcu_idle_enter();

 		default_idle_call();
 		goto exit_idle;

@@ -178,21 +183,17 @@ static void cpuidle_idle_call(void)
 		u64 max_latency_ns;

 		if (idle_should_enter_s2idle()) {
-			rcu_idle_enter();
-
 			entered_state = call_cpuidle_s2idle(drv, dev);
 			if (entered_state > 0)
 				goto exit_idle;

-			rcu_idle_exit();
-
 			max_latency_ns = U64_MAX;
 		} else {
 			max_latency_ns = dev->forced_idle_latency_limit_ns;
 		}

 		tick_nohz_idle_stop_tick();
-		rcu_idle_enter();

 		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
 		call_cpuidle(drv, dev, next_state);

@@ -209,8 +210,6 @@ static void cpuidle_idle_call(void)
 	else
 		tick_nohz_idle_retain_tick();

-	rcu_idle_enter();
-
 	entered_state = call_cpuidle(drv, dev, next_state);
 	/*
	 * Give the governor an opportunity to reflect on the outcome

@@ -226,8 +225,6 @@ exit_idle:
	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
-
-	rcu_idle_exit();
 }

 /*
@@ -1219,13 +1219,13 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 	case sa_rootdomain:
 		if (!atomic_read(&d->rd->refcount))
 			free_rootdomain(&d->rd->rcu);
-		/* Fall through */
+		fallthrough;
 	case sa_sd:
 		free_percpu(d->sd);
-		/* Fall through */
+		fallthrough;
 	case sa_sd_storage:
 		__sdt_free(cpu_map);
-		/* Fall through */
+		fallthrough;
 	case sa_none:
 		break;
 	}
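__free_domain_allocs() above is the classic staged-teardown use of deliberate fallthrough: entering the switch at a later case frees everything allocated up to that stage, in reverse order of allocation. A minimal sketch of the idiom; the stages, struct layout, and free_stages() are invented for illustration:

#include <stdlib.h>

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)
#endif

enum s_alloc { sa_none, sa_stage1, sa_stage2 };

struct s_data { void *a, *b; };

/* Free in reverse order of allocation: the further setup got, the
 * later the case we enter at, and we fall through the earlier stages. */
static void free_stages(struct s_data *d, enum s_alloc what)
{
	switch (what) {
	case sa_stage2:
		free(d->b);
		fallthrough;
	case sa_stage1:
		free(d->a);
		fallthrough;
	case sa_none:
		break;
	}
}

int main(void)
{
	struct s_data d = { malloc(8), malloc(8) };
	free_stages(&d, sa_stage2);	/* frees b, then a */
	return 0;
}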
@@ -851,7 +851,7 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info,
	 */
 		if (!sid || sid == task_session(current))
 			break;
-		/* fall through */
+		fallthrough;
 	default:
 		return -EPERM;
 	}

@@ -1753,7 +1753,7 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)

 		if (who == RUSAGE_CHILDREN)
 			break;
-		/* fall through */
+		fallthrough;

 	case RUSAGE_SELF:
 		thread_group_cputime_adjusted(p, &tgutime, &tgstime);

@@ -204,8 +204,7 @@ static int max_extfrag_threshold = 1000;

 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
 static int bpf_stats_handler(struct ctl_table *table, int write,
-			     void __user *buffer, size_t *lenp,
-			     loff_t *ppos)
+			     void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct static_key *key = (struct static_key *)table->data;
 	static int saved_val;

@@ -377,7 +377,7 @@ static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
 	switch (state) {
 	case ODEBUG_STATE_ACTIVE:
 		WARN_ON(1);
-		/* fall through */
+		fallthrough;
 	default:
 		return false;
 	}

@@ -439,12 +439,12 @@ static struct pid *good_sigevent(sigevent_t * event)
 		rtn = pid_task(pid, PIDTYPE_PID);
 		if (!rtn || !same_thread_group(rtn, current))
 			return NULL;
-		/* FALLTHRU */
+		fallthrough;
 	case SIGEV_SIGNAL:
 	case SIGEV_THREAD:
 		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
 			return NULL;
-		/* FALLTHRU */
+		fallthrough;
 	case SIGEV_NONE:
 		return pid;
 	default:

@@ -381,7 +381,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
 	switch (mode) {
 	case TICK_BROADCAST_FORCE:
 		tick_broadcast_forced = 1;
-		/* fall through */
+		fallthrough;
 	case TICK_BROADCAST_ON:
 		cpumask_set_cpu(cpu, tick_broadcast_on);
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {

@@ -666,7 +666,7 @@ static bool timer_fixup_activate(void *addr, enum debug_obj_state state)

 	case ODEBUG_STATE_ACTIVE:
 		WARN_ON(1);
-		/* fall through */
+		fallthrough;
 	default:
 		return false;
 	}

@@ -745,7 +745,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 #endif
 	case BLKTRACESTART:
 		start = 1;
-		/* fall through */
+		fallthrough;
 	case BLKTRACESTOP:
 		ret = __blk_trace_startstop(q, start);
 		break;

@@ -499,7 +499,7 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
 				ptr++;
 				break;
 			}
-			/* fall through */
+			fallthrough;
 		default:
 			parse_error(pe, FILT_ERR_TOO_MANY_PREDS,
 				    next - str);

@@ -1273,7 +1273,7 @@ static int parse_pred(const char *str, void *data,
 	switch (op) {
 	case OP_NE:
 		pred->not = 1;
-		/* Fall through */
+		fallthrough;
 	case OP_GLOB:
 	case OP_EQ:
 		break;