[NETFILTER]: Fix/improve deadlock condition on module removal netfilter
So I've had a deadlock reported to me. I've found that the sequence of events goes like this:

1) Process A (modprobe) runs to remove ip_tables.ko.

2) Process B (iptables-restore) runs and calls setsockopt on a netfilter socket, increasing the ip_tables socket_ops use count.

3) Process A acquires a file lock on the file ip_tables.ko, calls remove_module in the kernel, which in turn executes the ip_tables module cleanup routine, which calls nf_unregister_sockopt.

4) nf_unregister_sockopt, seeing that the use count is non-zero, puts the calling process into uninterruptible sleep, expecting the process using the socket option code to wake it up when it exits the kernel.

5) The user of the socket option code (process B), in do_ipt_get_ctl, calls ipt_find_table_lock, which in this case calls request_module to load ip_tables_nat.ko.

6) request_module forks a copy of modprobe (process C) to load the module and blocks until modprobe exits.

7) Process C, forked by request_module, processes the dependencies of ip_tables_nat.ko, of which ip_tables.ko is one.

8) Process C attempts to lock the requested module and all its dependencies; it blocks when it attempts to lock ip_tables.ko (which was previously locked in step 3).

There's not really any great permanent solution to this that I can see, but I've developed a two-part solution that corrects the problem:

Part 1) Modifies the nf_sockopt registration code so that, instead of using a use counter internal to the nf_sockopt_ops structure, we store a pointer to the registering module (owner) and use it for module reference counting when nf_sockopt calls a module's set/get routine. This prevents the deadlock by preventing step 4 from happening.

Part 2) Enhances the modprobe utility so that by default it performs non-blocking remove operations (the same way rmmod does), and adds an option to explicitly request blocking operation. So if you select blocking operation in modprobe you can still cause the above deadlock, but only if you explicitly try (and since root can do any old stupid thing it would like... :) ).

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0fb9670137
commit 16fcec35e7
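To illustrate Part 1, here is a minimal sketch of the registration pattern the patch moves everything to: a module that exposes a netfilter sockopt interface sets .owner = THIS_MODULE, and the netfilter core pins that module with try_module_get()/module_put() around each set/get call instead of sleeping on an internal use counter. The example_sockopts instance, the EXAMPLE_* constants and do_example_get_ctl below are hypothetical placeholders, not part of the patch; the real registrations and the core-side changes follow in the diff.

/*
 * Sketch only: a hypothetical module registering a netfilter sockopt
 * range under the new owner-based scheme.  EXAMPLE_BASE_CTL,
 * EXAMPLE_GET_MAX and do_example_get_ctl are placeholders.
 */
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netfilter.h>

#define EXAMPLE_BASE_CTL	96
#define EXAMPLE_GET_MAX		(EXAMPLE_BASE_CTL + 1)

static int do_example_get_ctl(struct sock *sk, int cmd,
			      void __user *user, int *len)
{
	return 0;	/* placeholder get handler */
}

static struct nf_sockopt_ops example_sockopts = {
	.pf		= PF_INET,
	.get_optmin	= EXAMPLE_BASE_CTL,
	.get_optmax	= EXAMPLE_GET_MAX + 1,
	.get		= do_example_get_ctl,
	.owner		= THIS_MODULE,	/* the field this patch adds */
};

static int __init example_init(void)
{
	return nf_register_sockopt(&example_sockopts);
}

static void __exit example_exit(void)
{
	/* No longer sleeps waiting for in-flight callers: the module
	 * reference taken through .owner already prevents unload races. */
	nf_unregister_sockopt(&example_sockopts);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

If try_module_get() fails, the module is already being unloaded and the core simply returns -ENOPROTOOPT, so nf_unregister_sockopt() no longer needs the cleanup_task/schedule() loop removed below.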
@@ -88,9 +88,8 @@ struct nf_sockopt_ops
 	int (*compat_get)(struct sock *sk, int optval,
 			void __user *user, int *len);
 
-	/* Number of users inside set() or get(). */
-	unsigned int use;
-	struct task_struct *cleanup_task;
+	/* Use the module struct to lock set/get code in place */
+	struct module *owner;
 };
 
 /* Each queued (to userspace) skbuff has one of these. */
@@ -1513,6 +1513,7 @@ static struct nf_sockopt_ops ebt_sockopts =
 	.get_optmin = EBT_BASE_CTL,
 	.get_optmax = EBT_SO_GET_MAX + 1,
 	.get = do_ebt_get_ctl,
+	.owner = THIS_MODULE,
 };
 
 static int __init ebtables_init(void)
@@ -2339,6 +2339,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
 	.get_optmin = IP_VS_BASE_CTL,
 	.get_optmax = IP_VS_SO_GET_MAX+1,
 	.get = do_ip_vs_get_ctl,
+	.owner = THIS_MODULE,
 };
 
 
@@ -1161,6 +1161,7 @@ static struct nf_sockopt_ops arpt_sockopts = {
 	.get_optmin = ARPT_BASE_CTL,
 	.get_optmax = ARPT_SO_GET_MAX+1,
 	.get = do_arpt_get_ctl,
+	.owner = THIS_MODULE,
 };
 
 static int __init arp_tables_init(void)
@@ -2296,6 +2296,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
 #ifdef CONFIG_COMPAT
 	.compat_get = compat_do_ipt_get_ctl,
 #endif
+	.owner = THIS_MODULE,
 };
 
 static struct xt_match icmp_matchstruct __read_mostly = {
@@ -399,6 +399,7 @@ static struct nf_sockopt_ops so_getorigdst = {
 	.get_optmin = SO_ORIGINAL_DST,
 	.get_optmax = SO_ORIGINAL_DST+1,
 	.get = &getorigdst,
+	.owner = THIS_MODULE,
 };
 
 struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
@@ -1462,6 +1462,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
 	.get_optmin = IP6T_BASE_CTL,
 	.get_optmax = IP6T_SO_GET_MAX+1,
 	.get = do_ip6t_get_ctl,
+	.owner = THIS_MODULE,
 };
 
 static struct xt_match icmp6_matchstruct __read_mostly = {
@@ -55,18 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
 
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
 {
-	/* No point being interruptible: we're probably in cleanup_module() */
- restart:
 	mutex_lock(&nf_sockopt_mutex);
-	if (reg->use != 0) {
-		/* To be woken by nf_sockopt call... */
-		/* FIXME: Stuart Young's name appears gratuitously. */
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		reg->cleanup_task = current;
-		mutex_unlock(&nf_sockopt_mutex);
-		schedule();
-		goto restart;
-	}
 	list_del(&reg->list);
 	mutex_unlock(&nf_sockopt_mutex);
 }
@@ -86,10 +75,11 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
 	list_for_each(i, &nf_sockopts) {
 		ops = (struct nf_sockopt_ops *)i;
 		if (ops->pf == pf) {
+			if (!try_module_get(ops->owner))
+				goto out_nosup;
 			if (get) {
 				if (val >= ops->get_optmin
 				    && val < ops->get_optmax) {
-					ops->use++;
 					mutex_unlock(&nf_sockopt_mutex);
 					ret = ops->get(sk, val, opt, len);
 					goto out;
@@ -97,23 +87,20 @@ static int nf_sockopt(struct sock *sk, int pf, int val,
 			} else {
 				if (val >= ops->set_optmin
 				    && val < ops->set_optmax) {
-					ops->use++;
 					mutex_unlock(&nf_sockopt_mutex);
 					ret = ops->set(sk, val, opt, *len);
 					goto out;
 				}
 			}
+			module_put(ops->owner);
 		}
 	}
+ out_nosup:
 	mutex_unlock(&nf_sockopt_mutex);
 	return -ENOPROTOOPT;
 
  out:
-	mutex_lock(&nf_sockopt_mutex);
-	ops->use--;
-	if (ops->cleanup_task)
-		wake_up_process(ops->cleanup_task);
-	mutex_unlock(&nf_sockopt_mutex);
+	module_put(ops->owner);
 	return ret;
 }
 
@@ -144,10 +131,12 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
 	list_for_each(i, &nf_sockopts) {
 		ops = (struct nf_sockopt_ops *)i;
 		if (ops->pf == pf) {
+			if (!try_module_get(ops->owner))
+				goto out_nosup;
+
 			if (get) {
 				if (val >= ops->get_optmin
 				    && val < ops->get_optmax) {
-					ops->use++;
 					mutex_unlock(&nf_sockopt_mutex);
 					if (ops->compat_get)
 						ret = ops->compat_get(sk,
@@ -160,7 +149,6 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
 			} else {
 				if (val >= ops->set_optmin
 				    && val < ops->set_optmax) {
-					ops->use++;
 					mutex_unlock(&nf_sockopt_mutex);
 					if (ops->compat_set)
 						ret = ops->compat_set(sk,
@@ -171,17 +159,15 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
 					goto out;
 				}
 			}
+			module_put(ops->owner);
 		}
 	}
+ out_nosup:
 	mutex_unlock(&nf_sockopt_mutex);
 	return -ENOPROTOOPT;
 
  out:
-	mutex_lock(&nf_sockopt_mutex);
-	ops->use--;
-	if (ops->cleanup_task)
-		wake_up_process(ops->cleanup_task);
-	mutex_unlock(&nf_sockopt_mutex);
+	module_put(ops->owner);
 	return ret;
 }
 