// SPDX-License-Identifier: GPL-2.0-or-later
/* scm.c - Socket level control messages processing.
 *
 *		Author:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Alignment and value checking mods by Craig Metz
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/io_uring.h>

#include <linux/uaccess.h>

#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/compat.h>
#include <net/scm.h>
#include <net/cls_cgroup.h>
#include <net/af_unix.h>


/*
 *	Only allow a user to send credentials that they could set with
 *	setu(g)id.
 */

static __inline__ int scm_check_creds(struct ucred *creds)
{
	const struct cred *cred = current_cred();
	kuid_t uid = make_kuid(cred->user_ns, creds->uid);
	kgid_t gid = make_kgid(cred->user_ns, creds->gid);

	if (!uid_valid(uid) || !gid_valid(gid))
		return -EINVAL;

	if ((creds->pid == task_tgid_vnr(current) ||
	     ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
	    ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
	      uid_eq(uid, cred->suid)) || ns_capable(cred->user_ns, CAP_SETUID)) &&
	    ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||
	      gid_eq(gid, cred->sgid)) || ns_capable(cred->user_ns, CAP_SETGID))) {
		return 0;
	}
	return -EPERM;
}

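/*
 * scm_fp_copy - pull the descriptors out of one SCM_RIGHTS cmsg.
 *
 * Turns the fd array in the cmsg payload into struct file references in
 * *fplp, allocating the scm_fp_list on first use.  At most SCM_MAX_FD
 * descriptors are accepted in total, io_uring files are refused, and
 * AF_UNIX sockets are counted in count_unix for the unix GC logic.
 * Returns the number of descriptors taken from this cmsg, or a negative
 * errno.
 */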
static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
{
	int *fdp = (int *)CMSG_DATA(cmsg);
	struct scm_fp_list *fpl = *fplp;
	struct file **fpp;
	int i, num;

	num = (cmsg->cmsg_len - sizeof(struct cmsghdr)) / sizeof(int);

	if (num <= 0)
		return 0;

	if (num > SCM_MAX_FD)
		return -EINVAL;

	if (!fpl)
	{
		fpl = kmalloc(sizeof(struct scm_fp_list), GFP_KERNEL_ACCOUNT);
		if (!fpl)
			return -ENOMEM;
		*fplp = fpl;
		fpl->count = 0;
		fpl->count_unix = 0;
		fpl->max = SCM_MAX_FD;
		fpl->user = NULL;
	}
	fpp = &fpl->fp[fpl->count];

	if (fpl->count + num > fpl->max)
		return -EINVAL;

	/*
	 *	Verify the descriptors and increment the usage count.
	 */

	for (i = 0; i < num; i++)
	{
		int fd = fdp[i];
		struct file *file;

		if (fd < 0 || !(file = fget_raw(fd)))
			return -EBADF;
		/* don't allow io_uring files */
		if (io_is_uring_fops(file)) {
			fput(file);
			return -EINVAL;
		}
		if (unix_get_socket(file))
			fpl->count_unix++;

		*fpp++ = file;
		fpl->count++;
	}

	if (!fpl->user)
		fpl->user = get_uid(current_user());

	return num;
}

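/*
 * __scm_destroy - drop everything the cookie's file list still holds:
 * the reference on each file, the reference on the sending user, and the
 * list itself.  scm->fp is cleared first so the cookie cannot be torn
 * down twice.
 */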
void __scm_destroy(struct scm_cookie *scm)
{
	struct scm_fp_list *fpl = scm->fp;
	int i;

	if (fpl) {
		scm->fp = NULL;
		for (i = fpl->count - 1; i >= 0; i--)
			fput(fpl->fp[i]);
		free_uid(fpl->user);
		kfree(fpl);
	}
}
EXPORT_SYMBOL(__scm_destroy);

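/*
 * __scm_send - parse the ancillary data of a sendmsg() call into the
 * scm_cookie.  SCM_RIGHTS (AF_UNIX only) is handed to scm_fp_copy(),
 * SCM_CREDENTIALS is validated with scm_check_creds() and stored as
 * kernel pid/uid/gid.  Any malformed or unknown SOL_SOCKET cmsg makes
 * the whole send fail.
 *
 * For illustration only (userspace, not part of this file): a sender
 * typically builds the SCM_RIGHTS cmsg that this function parses roughly
 * like this, with the data iovec omitted for brevity:
 *
 *	char buf[CMSG_SPACE(sizeof(int))] = {};
 *	struct msghdr mh = { .msg_control = buf,
 *			     .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
 *
 *	c->cmsg_level = SOL_SOCKET;
 *	c->cmsg_type  = SCM_RIGHTS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(c), &fd, sizeof(int));
 *	sendmsg(sock, &mh, 0);
 */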
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
{
	const struct proto_ops *ops = READ_ONCE(sock->ops);
	struct cmsghdr *cmsg;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		err = -EINVAL;

		/* Verify that cmsg_len is at least sizeof(struct cmsghdr) */
		/* The first check was omitted in <= 2.2.5.  The reasoning was
		   that the parser checks cmsg_len in any case, so an
		   additional check would be duplicated work.
		   But if cmsg_level is not SOL_SOCKET, we do not check
		   for a too-short ancillary data object at all!  Oops.
		   OK, let's add it...
		 */
		if (!CMSG_OK(msg, cmsg))
			goto error;

		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;

		switch (cmsg->cmsg_type)
		{
		case SCM_RIGHTS:
			if (!ops || ops->family != PF_UNIX)
				goto error;
			err = scm_fp_copy(cmsg, &p->fp);
			if (err < 0)
				goto error;
			break;
		case SCM_CREDENTIALS:
		{
			struct ucred creds;
			kuid_t uid;
			kgid_t gid;
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
				goto error;
			memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred));
			err = scm_check_creds(&creds);
			if (err)
				goto error;

			p->creds.pid = creds.pid;
			if (!p->pid || pid_vnr(p->pid) != creds.pid) {
				struct pid *pid;
				err = -ESRCH;
				pid = find_get_pid(creds.pid);
				if (!pid)
					goto error;
				put_pid(p->pid);
				p->pid = pid;
			}

			err = -EINVAL;
			uid = make_kuid(current_user_ns(), creds.uid);
			gid = make_kgid(current_user_ns(), creds.gid);
			if (!uid_valid(uid) || !gid_valid(gid))
				goto error;

			p->creds.uid = uid;
			p->creds.gid = gid;
			break;
		}
		default:
			goto error;
		}
	}

	if (p->fp && !p->fp->count)
	{
		kfree(p->fp);
		p->fp = NULL;
	}
	return 0;

error:
	scm_destroy(p);
	return err;
}
EXPORT_SYMBOL(__scm_send);

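/*
 * put_cmsg - append one control message (level/type plus len bytes of
 * data) to the receive control buffer.  Handles the compat layout and
 * both user-space and kernel-space control buffers; when the buffer is
 * too small the data is truncated and MSG_CTRUNC is set, but 0 is still
 * returned.  On success msg_control(_user) is advanced and
 * msg_controllen shrunk so the next cmsg lands behind this one.
 * Returns 0 or -EFAULT.
 */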
int put_cmsg(struct msghdr *msg, int level, int type, int len, void *data)
{
	int cmlen = CMSG_LEN(len);

	if (msg->msg_flags & MSG_CMSG_COMPAT)
		return put_cmsg_compat(msg, level, type, len, data);

	if (!msg->msg_control || msg->msg_controllen < sizeof(struct cmsghdr)) {
		msg->msg_flags |= MSG_CTRUNC;
		return 0; /* XXX: return error? check spec. */
	}
	if (msg->msg_controllen < cmlen) {
		msg->msg_flags |= MSG_CTRUNC;
		cmlen = msg->msg_controllen;
	}

	if (msg->msg_control_is_user) {
		struct cmsghdr __user *cm = msg->msg_control_user;

		check_object_size(data, cmlen - sizeof(*cm), true);

		if (!user_write_access_begin(cm, cmlen))
			goto efault;

		unsafe_put_user(cmlen, &cm->cmsg_len, efault_end);
		unsafe_put_user(level, &cm->cmsg_level, efault_end);
		unsafe_put_user(type, &cm->cmsg_type, efault_end);
		unsafe_copy_to_user(CMSG_USER_DATA(cm), data,
				    cmlen - sizeof(*cm), efault_end);
		user_write_access_end();
	} else {
		struct cmsghdr *cm = msg->msg_control;

		cm->cmsg_level = level;
		cm->cmsg_type = type;
		cm->cmsg_len = cmlen;
		memcpy(CMSG_DATA(cm), data, cmlen - sizeof(*cm));
	}

	cmlen = min(CMSG_SPACE(len), msg->msg_controllen);
	if (msg->msg_control_is_user)
		msg->msg_control_user += cmlen;
	else
		msg->msg_control += cmlen;
	msg->msg_controllen -= cmlen;
	return 0;

efault_end:
	user_write_access_end();
efault:
	return -EFAULT;
}
EXPORT_SYMBOL(put_cmsg);

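/*
 * SO_TIMESTAMPING helpers: convert the kernel-internal timestamps into
 * the 64-bit (SO_TIMESTAMPING_NEW) or legacy (SO_TIMESTAMPING_OLD)
 * scm_timestamping layout and deliver them via put_cmsg().
 */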
void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
{
	struct scm_timestamping64 tss;
	int i;

	for (i = 0; i < ARRAY_SIZE(tss.ts); i++) {
		tss.ts[i].tv_sec = tss_internal->ts[i].tv_sec;
		tss.ts[i].tv_nsec = tss_internal->ts[i].tv_nsec;
	}

	put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_NEW, sizeof(tss), &tss);
}
EXPORT_SYMBOL(put_cmsg_scm_timestamping64);

void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
{
	struct scm_timestamping tss;
	int i;

	for (i = 0; i < ARRAY_SIZE(tss.ts); i++) {
		tss.ts[i].tv_sec = tss_internal->ts[i].tv_sec;
		tss.ts[i].tv_nsec = tss_internal->ts[i].tv_nsec;
	}

	put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_OLD, sizeof(tss), &tss);
}
EXPORT_SYMBOL(put_cmsg_scm_timestamping);

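/*
 * scm_max_fds - how many file descriptors still fit in the caller's
 * control buffer once the cmsghdr itself is accounted for.
 */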
static int scm_max_fds(struct msghdr *msg)
{
	if (msg->msg_controllen <= sizeof(struct cmsghdr))
		return 0;
	return (msg->msg_controllen - sizeof(struct cmsghdr)) / sizeof(int);
}

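/*
 * scm_detach_fds - hand the passed files over to the recvmsg() caller:
 * install up to scm_max_fds(msg) of them as new file descriptors
 * (honouring MSG_CMSG_CLOEXEC) and write a single SCM_RIGHTS cmsg with
 * the resulting fd array into the user control buffer.  MSG_CTRUNC is
 * set when not every file fits; whatever references remain are dropped
 * through __scm_destroy().
 */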
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
	struct cmsghdr __user *cm =
		(__force struct cmsghdr __user *)msg->msg_control_user;
	unsigned int o_flags = (msg->msg_flags & MSG_CMSG_CLOEXEC) ? O_CLOEXEC : 0;
	int fdmax = min_t(int, scm_max_fds(msg), scm->fp->count);
	int __user *cmsg_data = CMSG_USER_DATA(cm);
	int err = 0, i;

	/* no use for FD passing from kernel space callers */
	if (WARN_ON_ONCE(!msg->msg_control_is_user))
		return;

	if (msg->msg_flags & MSG_CMSG_COMPAT) {
		scm_detach_fds_compat(msg, scm);
		return;
	}

	for (i = 0; i < fdmax; i++) {
		err = scm_recv_one_fd(scm->fp->fp[i], cmsg_data + i, o_flags);
		if (err < 0)
			break;
	}

	if (i > 0) {
		int cmlen = CMSG_LEN(i * sizeof(int));

		err = put_user(SOL_SOCKET, &cm->cmsg_level);
		if (!err)
			err = put_user(SCM_RIGHTS, &cm->cmsg_type);
		if (!err)
			err = put_user(cmlen, &cm->cmsg_len);
		if (!err) {
			cmlen = CMSG_SPACE(i * sizeof(int));
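			/*
			 * CMSG_SPACE() may exceed CMSG_LEN() by the trailing
			 * alignment padding; clamp it to the space the caller
			 * actually supplied so msg_controllen cannot wrap
			 * around below zero.
			 */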
			if (msg->msg_controllen < cmlen)
				cmlen = msg->msg_controllen;
			msg->msg_control_user += cmlen;
			msg->msg_controllen -= cmlen;
		}
	}

	if (i < scm->fp->count || (scm->fp->count && fdmax <= 0))
		msg->msg_flags |= MSG_CTRUNC;

	/*
	 * All of the files that fit in the message have had their usage counts
	 * incremented, so we just free the list.
	 */
	__scm_destroy(scm);
}
EXPORT_SYMBOL(scm_detach_fds);

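/*
 * scm_fp_dup - duplicate an scm_fp_list, taking an extra reference on
 * every file and on the owning user.  The copy's max is clamped to its
 * current count.
 */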
struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
{
	struct scm_fp_list *new_fpl;
	int i;

	if (!fpl)
		return NULL;

	new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
			  GFP_KERNEL_ACCOUNT);
	if (new_fpl) {
		for (i = 0; i < fpl->count; i++)
			get_file(fpl->fp[i]);
		new_fpl->max = new_fpl->count;
		new_fpl->user = get_uid(fpl->user);
	}
	return new_fpl;
}
EXPORT_SYMBOL(scm_fp_dup);