Merge branch 'work.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull misc vfs updates from Al Viro:
 "All kinds of misc stuff, without any unifying topic, from various
  people. Neil's d_anon patch, several bugfixes, introduction of
  kvmalloc analogue of kmemdup_user(), extending bitfield.h to deal
  with fixed-endians, assorted cleanups all over the place..."

* 'work.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (28 commits)
  alpha: osf_sys.c: use timespec64 where appropriate
  alpha: osf_sys.c: fix put_tv32 regression
  jffs2: Fix use-after-free bug in jffs2_iget()'s error handling path
  dcache: delete unused d_hash_mask
  dcache: subtract d_hash_shift from 32 in advance
  fs/buffer.c: fold init_buffer() into init_page_buffers()
  fs: fold __inode_permission() into inode_permission()
  fs: add RWF_APPEND
  sctp: use vmemdup_user() rather than badly open-coding memdup_user()
  snd_ctl_elem_init_enum_names(): switch to vmemdup_user()
  replace_user_tlv(): switch to vmemdup_user()
  new primitive: vmemdup_user()
  memdup_user(): switch to GFP_USER
  eventfd: fold eventfd_ctx_get() into eventfd_ctx_fileget()
  eventfd: fold eventfd_ctx_read() into eventfd_read()
  eventfd: convert to use anon_inode_getfd()
  nfs4file: get rid of pointless include of btrfs.h
  uvc_v4l2: clean copyin/copyout up
  vme_user: don't use __copy_..._user()
  usx2y: don't bother with memdup_user() for 16-byte structure
  ...
commit 19e7b5f994
Documentation/filesystems/nfs/Exporting

@@ -56,13 +56,25 @@ a/ A dentry flag DCACHE_DISCONNECTED which is set on
    any dentry that might not be part of the proper prefix.
    This is set when anonymous dentries are created, and cleared when a
    dentry is noticed to be a child of a dentry which is in the proper
-   prefix.
+   prefix. If the refcount on a dentry with this flag set
+   becomes zero, the dentry is immediately discarded, rather than being
+   kept in the dcache. If a dentry that is not already in the dcache
+   is repeatedly accessed by filehandle (as NFSD might do), a new dentry
+   will be allocated for each access, and discarded at the end of
+   the access.
 
-b/ A per-superblock list "s_anon" of dentries which are the roots of
-   subtrees that are not in the proper prefix. These dentries, as
-   well as the proper prefix, need to be released at unmount time. As
-   these dentries will not be hashed, they are linked together on the
-   d_hash list_head.
+   Note that such a dentry can acquire children, name, ancestors, etc.
+   without losing DCACHE_DISCONNECTED - that flag is only cleared when
+   subtree is successfully reconnected to root. Until then dentries
+   in such subtree are retained only as long as there are references;
+   refcount reaching zero means immediate eviction, same as for unhashed
+   dentries. That guarantees that we won't need to hunt them down upon
+   umount.
+
+b/ A primitive for creation of secondary roots - d_obtain_root(inode).
+   Those do _not_ bear DCACHE_DISCONNECTED. They are placed on the
+   per-superblock list (->s_roots), so they can be located at umount
+   time for eviction purposes.
 
 c/ Helper routines to allocate anonymous dentries, and to help attach
    loose directory dentries at lookup time. They are:

@@ -77,7 +89,6 @@ c/ Helper routines to allocate anonymous dentries, and to help attach
     (such as an anonymous one created by d_obtain_alias), if appropriate.
     It returns NULL when the passed-in dentry is used, following the calling
     convention of ->lookup.
 
-
 Filesystem Issues
 -----------------
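A concrete (and hypothetical) illustration of the API this text documents: a filesystem that wants to be NFS-exportable implements fh_to_dentry and returns whatever d_obtain_alias() gives it, which is exactly where the DCACHE_DISCONNECTED dentries described above come from. This is a minimal sketch, not code from this commit; examplefs_iget() is an assumed lookup helper.

    #include <linux/exportfs.h>
    #include <linux/dcache.h>
    #include <linux/fs.h>

    /* Hypothetical inode lookup helper; a real filesystem has its own. */
    static struct inode *examplefs_iget(struct super_block *sb, u32 ino, u32 gen);

    static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
                                                 struct fid *fid,
                                                 int fh_len, int fh_type)
    {
            struct inode *inode;

            if (fh_len < 2)
                    return NULL;

            inode = examplefs_iget(sb, fid->i32.ino, fid->i32.gen);
            if (IS_ERR(inode))
                    return ERR_CAST(inode);

            /*
             * May return an anonymous DCACHE_DISCONNECTED dentry if the
             * inode is not reachable from the root yet; NFSD reconnects
             * it to the tree on demand.
             */
            return d_obtain_alias(inode);
    }

    static const struct export_operations examplefs_export_ops = {
            .fh_to_dentry   = examplefs_fh_to_dentry,
    };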
arch/alpha/kernel/osf_sys.c

@@ -950,22 +950,31 @@ struct itimerval32
 };
 
 static inline long
-get_tv32(struct timeval *o, struct timeval32 __user *i)
+get_tv32(struct timespec64 *o, struct timeval32 __user *i)
 {
        struct timeval32 tv;
        if (copy_from_user(&tv, i, sizeof(struct timeval32)))
                return -EFAULT;
        o->tv_sec = tv.tv_sec;
-       o->tv_usec = tv.tv_usec;
+       o->tv_nsec = tv.tv_usec * NSEC_PER_USEC;
        return 0;
 }
 
 static inline long
-put_tv32(struct timeval32 __user *o, struct timeval *i)
+put_tv32(struct timeval32 __user *o, struct timespec64 *i)
 {
        return copy_to_user(o, &(struct timeval32){
-                               .tv_sec = o->tv_sec,
-                               .tv_usec = o->tv_usec},
+                               .tv_sec = i->tv_sec,
+                               .tv_usec = i->tv_nsec / NSEC_PER_USEC},
                            sizeof(struct timeval32));
 }
 
+static inline long
+put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
+{
+       return copy_to_user(o, &(struct timeval32){
+                               .tv_sec = i->tv_sec,
+                               .tv_usec = i->tv_usec},
+                           sizeof(struct timeval32));
+}
+

@@ -1004,9 +1013,10 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
                struct timezone __user *, tz)
 {
        if (tv) {
-               struct timeval ktv;
-               do_gettimeofday(&ktv);
-               if (put_tv32(tv, &ktv))
+               struct timespec64 kts;
+
+               ktime_get_real_ts64(&kts);
+               if (put_tv32(tv, &kts))
                        return -EFAULT;
        }
        if (tz) {

@@ -1019,22 +1029,19 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
 SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
                struct timezone __user *, tz)
 {
-       struct timespec64 kts64;
-       struct timespec kts;
+       struct timespec64 kts;
        struct timezone ktz;
 
        if (tv) {
-               if (get_tv32((struct timeval *)&kts, tv))
+               if (get_tv32(&kts, tv))
                        return -EFAULT;
-               kts.tv_nsec *= 1000;
-               kts64 = timespec_to_timespec64(kts);
        }
        if (tz) {
                if (copy_from_user(&ktz, tz, sizeof(*tz)))
                        return -EFAULT;
        }
 
-       return do_sys_settimeofday64(tv ? &kts64 : NULL, tz ? &ktz : NULL);
+       return do_sys_settimeofday64(tv ? &kts : NULL, tz ? &ktz : NULL);
 }
 
 asmlinkage long sys_ni_posix_timers(void);

@@ -1083,22 +1090,16 @@ SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
 SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
                struct timeval32 __user *, tvs)
 {
-       struct timespec tv[2];
+       struct timespec64 tv[2];
 
        if (tvs) {
-               struct timeval ktvs[2];
-               if (get_tv32(&ktvs[0], &tvs[0]) ||
-                   get_tv32(&ktvs[1], &tvs[1]))
+               if (get_tv32(&tv[0], &tvs[0]) ||
+                   get_tv32(&tv[1], &tvs[1]))
                        return -EFAULT;
 
-               if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 ||
-                   ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000)
+               if (tv[0].tv_nsec < 0 || tv[0].tv_nsec >= 1000000000 ||
+                   tv[1].tv_nsec < 0 || tv[1].tv_nsec >= 1000000000)
                        return -EINVAL;
-
-               tv[0].tv_sec = ktvs[0].tv_sec;
-               tv[0].tv_nsec = 1000 * ktvs[0].tv_usec;
-               tv[1].tv_sec = ktvs[1].tv_sec;
-               tv[1].tv_nsec = 1000 * ktvs[1].tv_usec;
        }
 
        return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0);

@@ -1107,19 +1108,18 @@ SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
 SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timeval32 __user *, tvp)
 {
-       struct timespec end_time, *to = NULL;
+       struct timespec64 end_time, *to = NULL;
        if (tvp) {
-               struct timeval tv;
+               struct timespec64 tv;
                to = &end_time;
 
                if (get_tv32(&tv, tvp))
                        return -EFAULT;
 
-               if (tv.tv_sec < 0 || tv.tv_usec < 0)
+               if (tv.tv_sec < 0 || tv.tv_nsec < 0)
                        return -EINVAL;
 
-               if (poll_select_set_timeout(to, tv.tv_sec,
-                                           tv.tv_usec * NSEC_PER_USEC))
+               if (poll_select_set_timeout(to, tv.tv_sec, tv.tv_nsec))
                        return -EINVAL;
 
        }

@@ -1192,9 +1192,9 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
                return -EFAULT;
        if (!ur)
                return err;
-       if (put_tv32(&ur->ru_utime, &r.ru_utime))
+       if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
                return -EFAULT;
-       if (put_tv32(&ur->ru_stime, &r.ru_stime))
+       if (put_tv_to_tv32(&ur->ru_stime, &r.ru_stime))
                return -EFAULT;
        if (copy_to_user(&ur->ru_maxrss, &r.ru_maxrss,
                sizeof(struct rusage32) - offsetof(struct rusage32, ru_maxrss)))

@@ -1210,18 +1210,18 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep,
                struct timeval32 __user *, remain)
 {
-       struct timeval tmp;
+       struct timespec64 tmp;
        unsigned long ticks;
 
        if (get_tv32(&tmp, sleep))
                goto fault;
 
-       ticks = timeval_to_jiffies(&tmp);
+       ticks = timespec64_to_jiffies(&tmp);
 
        ticks = schedule_timeout_interruptible(ticks);
 
        if (remain) {
-               jiffies_to_timeval(ticks, &tmp);
+               jiffies_to_timespec64(ticks, &tmp);
                if (put_tv32(remain, &tmp))
                        goto fault;
        }

@@ -1280,7 +1280,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
        if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
            (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
                          offsetof(struct timex32, tick))) ||
-          (put_tv32(&txc_p->time, &txc.time)))
+          (put_tv_to_tv32(&txc_p->time, &txc.time)))
          return -EFAULT;
 
        return ret;
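All of the osf_sys.c changes reduce to one conversion: the 32-bit OSF/1 timeval carries microseconds, while the kernel-side timespec64 carries nanoseconds and a 64-bit (y2038-safe) seconds field. A standalone sketch of that scaling, with the struct layouts reproduced here only for illustration:

    /* Hedged sketch of the tv_usec <-> tv_nsec scaling used by the new
     * get_tv32()/put_tv32(); plain C, compiled standalone for clarity. */
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_USEC 1000L

    struct timeval32  { int32_t tv_sec; int32_t tv_usec; };
    struct timespec64 { int64_t tv_sec; long    tv_nsec; };

    static struct timespec64 tv32_to_ts64(struct timeval32 tv)
    {
            struct timespec64 ts = {
                    .tv_sec  = tv.tv_sec,
                    .tv_nsec = tv.tv_usec * NSEC_PER_USEC, /* usec -> nsec */
            };
            return ts;
    }

    static struct timeval32 ts64_to_tv32(struct timespec64 ts)
    {
            struct timeval32 tv = {
                    .tv_sec  = (int32_t)ts.tv_sec,  /* truncates past 2038 */
                    .tv_usec = (int32_t)(ts.tv_nsec / NSEC_PER_USEC),
            };
            return tv;
    }

    int main(void)
    {
            struct timeval32 in = { .tv_sec = 1, .tv_usec = 500000 };
            struct timespec64 ts = tv32_to_ts64(in);
            struct timeval32 out = ts64_to_tv32(ts);

            printf("%lld.%09ld -> %d.%06d\n", (long long)ts.tv_sec,
                   ts.tv_nsec, (int)out.tv_sec, (int)out.tv_usec);
            return 0;
    }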
drivers/gpu/drm/r128/r128_state.c

@@ -982,25 +982,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
 
        xbuf_size = count * sizeof(*x);
        ybuf_size = count * sizeof(*y);
-       x = kmalloc(xbuf_size, GFP_KERNEL);
-       if (x == NULL)
-               return -ENOMEM;
-       y = kmalloc(ybuf_size, GFP_KERNEL);
-       if (y == NULL) {
+       x = memdup_user(depth->x, xbuf_size);
+       if (IS_ERR(x))
+               return PTR_ERR(x);
+       y = memdup_user(depth->y, ybuf_size);
+       if (IS_ERR(y)) {
                kfree(x);
-               return -ENOMEM;
+               return PTR_ERR(y);
        }
-       if (copy_from_user(x, depth->x, xbuf_size)) {
-               kfree(x);
-               kfree(y);
-               return -EFAULT;
-       }
-       if (copy_from_user(y, depth->y, xbuf_size)) {
-               kfree(x);
-               kfree(y);
-               return -EFAULT;
-       }
 
        buffer_size = depth->n * sizeof(u32);
        buffer = memdup_user(depth->buffer, buffer_size);
        if (IS_ERR(buffer)) {
drivers/media/usb/uvc/uvc_v4l2.c

@@ -1284,36 +1284,30 @@ struct uvc_xu_control_mapping32 {
 static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
                        const struct uvc_xu_control_mapping32 __user *up)
 {
-       compat_caddr_t p;
+       struct uvc_xu_control_mapping32 *p = (void *)kp;
+       compat_caddr_t info;
+       u32 count;
 
-       if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
-           __copy_from_user(kp, up, offsetof(typeof(*up), menu_info)) ||
-           __get_user(kp->menu_count, &up->menu_count))
+       if (copy_from_user(p, up, sizeof(*p)))
                return -EFAULT;
 
+       count = p->menu_count;
+       info = p->menu_info;
+
        memset(kp->reserved, 0, sizeof(kp->reserved));
 
-       if (kp->menu_count == 0) {
-               kp->menu_info = NULL;
-               return 0;
-       }
-
-       if (__get_user(p, &up->menu_info))
-               return -EFAULT;
-       kp->menu_info = compat_ptr(p);
-
+       kp->menu_info = count ? compat_ptr(info) : NULL;
+       kp->menu_count = count;
        return 0;
 }
 
 static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
                        struct uvc_xu_control_mapping32 __user *up)
 {
-       if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
-           __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
-           __put_user(kp->menu_count, &up->menu_count))
+       if (copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
+           put_user(kp->menu_count, &up->menu_count))
                return -EFAULT;
 
-       if (__clear_user(up->reserved, sizeof(up->reserved)))
+       if (clear_user(up->reserved, sizeof(up->reserved)))
                return -EFAULT;
 
        return 0;

@@ -1330,31 +1324,26 @@ struct uvc_xu_control_query32 {
 static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
                        const struct uvc_xu_control_query32 __user *up)
 {
-       compat_caddr_t p;
+       struct uvc_xu_control_query32 v;
 
-       if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
-           __copy_from_user(kp, up, offsetof(typeof(*up), data)))
+       if (copy_from_user(&v, up, sizeof(v)))
                return -EFAULT;
 
-       if (kp->size == 0) {
-               kp->data = NULL;
-               return 0;
-       }
-
-       if (__get_user(p, &up->data))
-               return -EFAULT;
-       kp->data = compat_ptr(p);
-
+       *kp = (struct uvc_xu_control_query){
+               .unit = v.unit,
+               .selector = v.selector,
+               .query = v.query,
+               .size = v.size,
+               .data = v.size ? compat_ptr(v.data) : NULL
+       };
        return 0;
 }
 
 static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
                        struct uvc_xu_control_query32 __user *up)
 {
-       if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
-           __copy_to_user(up, kp, offsetof(typeof(*up), data)))
+       if (copy_to_user(up, kp, offsetof(typeof(*up), data)))
                return -EFAULT;
 
        return 0;
 }
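The uvc rewrite follows the usual compat-ioctl recipe: one bounded copy_from_user() of the whole 32-bit structure, then an explicit rebuild of the native structure, converting 32-bit user addresses with compat_ptr(). A hedged sketch of the pattern; struct examplefoo32/examplefoo and example_get_foo() are invented for illustration:

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    struct examplefoo32 {
            __u32           count;
            compat_caddr_t  data;           /* 32-bit user address */
    };

    struct examplefoo {
            u32             count;
            void __user     *data;          /* native user pointer */
    };

    static int example_get_foo(struct examplefoo *kp,
                               const struct examplefoo32 __user *up)
    {
            struct examplefoo32 v;

            /* One bounded copy replaces the old access_ok()/__get_user()
             * chains that this commit removes elsewhere. */
            if (copy_from_user(&v, up, sizeof(v)))
                    return -EFAULT;

            kp->count = v.count;
            kp->data = v.count ? compat_ptr(v.data) : NULL;
            return 0;
    }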
drivers/staging/lustre/lustre/llite/llite_internal.h

@@ -1296,15 +1296,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
        spin_lock_nested(&dentry->d_lock,
                         nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
        ll_d2d(dentry)->lld_invalid = 1;
-       /*
-        * We should be careful about dentries created by d_obtain_alias().
-        * These dentries are not put in the dentry tree, instead they are
-        * linked to sb->s_anon through dentry->d_hash.
-        * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
-        * If we unhashed such a dentry, unmount would not be able to find
-        * it and busy inodes would be reported.
-        */
-       if (d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
+       if (d_count(dentry) == 0)
                __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
 }
drivers/staging/vme/devices/vme_user.c

@@ -134,7 +134,7 @@ static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
        if (copied < 0)
                return (int)copied;
 
-       if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
+       if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
                return -EFAULT;
 
        return copied;

@@ -146,7 +146,7 @@ static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
        if (count > image[minor].size_buf)
                count = image[minor].size_buf;
 
-       if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
+       if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
                return -EFAULT;
 
        return vme_master_write(image[minor].resource, image[minor].kern_buf,

@@ -159,7 +159,7 @@ static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
        void *image_ptr;
 
        image_ptr = image[minor].kern_buf + *ppos;
-       if (__copy_to_user(buf, image_ptr, (unsigned long)count))
+       if (copy_to_user(buf, image_ptr, (unsigned long)count))
                return -EFAULT;
 
        return count;

@@ -171,7 +171,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
        void *image_ptr;
 
        image_ptr = image[minor].kern_buf + *ppos;
-       if (__copy_from_user(image_ptr, buf, (unsigned long)count))
+       if (copy_from_user(image_ptr, buf, (unsigned long)count))
                return -EFAULT;
 
        return count;
fs/buffer.c

@@ -53,13 +53,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
-void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
-{
-       bh->b_end_io = handler;
-       bh->b_private = private;
-}
-EXPORT_SYMBOL(init_buffer);
-
 inline void touch_buffer(struct buffer_head *bh)
 {
        trace_block_touch_buffer(bh);

@@ -922,7 +915,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
 
        do {
                if (!buffer_mapped(bh)) {
-                       init_buffer(bh, NULL, NULL);
+                       bh->b_end_io = NULL;
+                       bh->b_private = NULL;
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
fs/dcache.c

@@ -32,7 +32,6 @@
 #include <linux/swap.h>
 #include <linux/bootmem.h>
 #include <linux/fs_struct.h>
-#include <linux/hardirq.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rculist_bl.h>
 #include <linux/prefetch.h>

@@ -49,8 +48,8 @@
  *   - i_dentry, d_u.d_alias, d_inode of aliases
  * dcache_hash_bucket lock protects:
  *   - the dcache hash table
- * s_anon bl list spinlock protects:
- *   - the s_anon list (see __d_drop)
+ * s_roots bl list spinlock protects:
+ *   - the s_roots list (see __d_drop)
  * dentry->d_sb->s_dentry_lru_lock protects:
  *   - the dcache lru lists and counters
  * d_lock protects:

@@ -68,7 +67,7 @@
  *   dentry->d_lock
  *     dentry->d_sb->s_dentry_lru_lock
  *     dcache_hash_bucket lock
- *     s_anon lock
+ *     s_roots lock
  *
  * If there is an ancestor relationship:
  *   dentry->d_parent->...->d_parent->d_lock

@@ -104,14 +103,13 @@ EXPORT_SYMBOL(slash_name);
  * information, yet avoid using a prime hash-size or similar.
  */
 
-static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
 
 static struct hlist_bl_head *dentry_hashtable __read_mostly;
 
 static inline struct hlist_bl_head *d_hash(unsigned int hash)
 {
-       return dentry_hashtable + (hash >> (32 - d_hash_shift));
+       return dentry_hashtable + (hash >> d_hash_shift);
 }
 
 #define IN_LOOKUP_SHIFT 10

@@ -477,10 +475,10 @@ void __d_drop(struct dentry *dentry)
                /*
                 * Hashed dentries are normally on the dentry hashtable,
                 * with the exception of those newly allocated by
-                * d_obtain_alias, which are always IS_ROOT:
+                * d_obtain_root, which are always IS_ROOT:
                 */
                if (unlikely(IS_ROOT(dentry)))
-                       b = &dentry->d_sb->s_anon;
+                       b = &dentry->d_sb->s_roots;
                else
                        b = d_hash(dentry->d_name.hash);
 

@@ -1500,8 +1498,8 @@ void shrink_dcache_for_umount(struct super_block *sb)
        sb->s_root = NULL;
        do_one_tree(dentry);
 
-       while (!hlist_bl_empty(&sb->s_anon)) {
-               dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
+       while (!hlist_bl_empty(&sb->s_roots)) {
+               dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
                do_one_tree(dentry);
        }
 }

@@ -1964,9 +1962,11 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
        spin_lock(&tmp->d_lock);
        __d_set_inode_and_type(tmp, inode, add_flags);
        hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
-       hlist_bl_lock(&tmp->d_sb->s_anon);
-       hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
-       hlist_bl_unlock(&tmp->d_sb->s_anon);
+       if (!disconnected) {
+               hlist_bl_lock(&tmp->d_sb->s_roots);
+               hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_roots);
+               hlist_bl_unlock(&tmp->d_sb->s_roots);
+       }
        spin_unlock(&tmp->d_lock);
        spin_unlock(&inode->i_lock);
 

@@ -3585,9 +3585,10 @@ static void __init dcache_init_early(void)
                                        13,
                                        HASH_EARLY | HASH_ZERO,
                                        &d_hash_shift,
-                                       &d_hash_mask,
+                                       NULL,
                                        0,
                                        0);
+       d_hash_shift = 32 - d_hash_shift;
 }
 
 static void __init dcache_init(void)

@@ -3611,9 +3612,10 @@ static void __init dcache_init(void)
                                        13,
                                        HASH_ZERO,
                                        &d_hash_shift,
-                                       &d_hash_mask,
+                                       NULL,
                                        0,
                                        0);
+       d_hash_shift = 32 - d_hash_shift;
 }
 
 /* SLAB cache for __getname() consumers */
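The d_hash() change moves the "32 -" subtraction out of the lookup fast path: alloc_large_system_hash() still reports the bucket-count shift, and the init code now stores 32 minus that value once. A tiny standalone check of the equivalence; the values here are made up:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int d_hash_shift = 13;  /* example bucket-count shift */
            unsigned int precomputed = 32 - d_hash_shift; /* done at init now */

            /* Same bucket index either way; only where the subtraction
             * happens changes. */
            for (uint32_t hash = 0; hash < (1u << 24); hash += 4099)
                    assert((hash >> (32 - d_hash_shift)) == (hash >> precomputed));
            return 0;
    }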
fs/eventfd.c

@@ -79,25 +79,12 @@ static void eventfd_free(struct kref *kref)
        eventfd_free_ctx(ctx);
 }
 
-/**
- * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
- * @ctx: [in] Pointer to the eventfd context.
- *
- * Returns: In case of success, returns a pointer to the eventfd context.
- */
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
-{
-       kref_get(&ctx->kref);
-       return ctx;
-}
-EXPORT_SYMBOL_GPL(eventfd_ctx_get);
-
 /**
  * eventfd_ctx_put - Releases a reference to the internal eventfd context.
  * @ctx: [in] Pointer to eventfd context.
  *
  * The eventfd context reference must have been previously acquired either
- * with eventfd_ctx_get() or eventfd_ctx_fdget().
+ * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
  */
 void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {

@@ -207,36 +194,27 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
 }
 EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
 
-/**
- * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
- * @ctx: [in] Pointer to eventfd context.
- * @no_wait: [in] Different from zero if the operation should not block.
- * @cnt: [out] Pointer to the 64-bit counter value.
- *
- * Returns %0 if successful, or the following error codes:
- *
- * - -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
- * - -ERESTARTSYS : A signal interrupted the wait operation.
- *
- * If @no_wait is zero, the function might sleep until the eventfd internal
- * counter becomes greater than zero.
- */
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
+static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
+                           loff_t *ppos)
 {
+       struct eventfd_ctx *ctx = file->private_data;
        ssize_t res;
+       __u64 ucnt = 0;
        DECLARE_WAITQUEUE(wait, current);
 
+       if (count < sizeof(ucnt))
+               return -EINVAL;
+
        spin_lock_irq(&ctx->wqh.lock);
-       *cnt = 0;
        res = -EAGAIN;
        if (ctx->count > 0)
-               res = 0;
-       else if (!no_wait) {
+               res = sizeof(ucnt);
+       else if (!(file->f_flags & O_NONBLOCK)) {
                __add_wait_queue(&ctx->wqh, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ctx->count > 0) {
-                               res = 0;
+                               res = sizeof(ucnt);
                                break;
                        }
                        if (signal_pending(current)) {

@@ -250,32 +228,18 @@ ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
                __remove_wait_queue(&ctx->wqh, &wait);
                __set_current_state(TASK_RUNNING);
        }
-       if (likely(res == 0)) {
-               eventfd_ctx_do_read(ctx, cnt);
+       if (likely(res > 0)) {
+               eventfd_ctx_do_read(ctx, &ucnt);
                if (waitqueue_active(&ctx->wqh))
                        wake_up_locked_poll(&ctx->wqh, POLLOUT);
        }
        spin_unlock_irq(&ctx->wqh.lock);
 
+       if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
+               return -EFAULT;
+
        return res;
 }
-EXPORT_SYMBOL_GPL(eventfd_ctx_read);
-
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
-                           loff_t *ppos)
-{
-       struct eventfd_ctx *ctx = file->private_data;
-       ssize_t res;
-       __u64 cnt;
-
-       if (count < sizeof(cnt))
-               return -EINVAL;
-       res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
-       if (res < 0)
-               return res;
-
-       return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
-}
 
 static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
                             loff_t *ppos)

@@ -405,79 +369,44 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
  */
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
 {
+       struct eventfd_ctx *ctx;
+
        if (file->f_op != &eventfd_fops)
                return ERR_PTR(-EINVAL);
 
-       return eventfd_ctx_get(file->private_data);
+       ctx = file->private_data;
+       kref_get(&ctx->kref);
+       return ctx;
 }
 EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
 
-/**
- * eventfd_file_create - Creates an eventfd file pointer.
- * @count: Initial eventfd counter value.
- * @flags: Flags for the eventfd file.
- *
- * This function creates an eventfd file pointer, w/out installing it into
- * the fd table. This is useful when the eventfd file is used during the
- * initialization of data structures that require extra setup after the eventfd
- * creation. So the eventfd creation is split into the file pointer creation
- * phase, and the file descriptor installation phase.
- * In this way races with userspace closing the newly installed file descriptor
- * can be avoided.
- * Returns an eventfd file pointer, or a proper error pointer.
- */
-struct file *eventfd_file_create(unsigned int count, int flags)
+SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
 {
-       struct file *file;
        struct eventfd_ctx *ctx;
+       int fd;
 
        /* Check the EFD_* constants for consistency.  */
        BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
 
        if (flags & ~EFD_FLAGS_SET)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        kref_init(&ctx->kref);
        init_waitqueue_head(&ctx->wqh);
        ctx->count = count;
        ctx->flags = flags;
 
-       file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
-                                 O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
-       if (IS_ERR(file))
+       fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
+                             O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
+       if (fd < 0)
                eventfd_free_ctx(ctx);
 
-       return file;
-}
-
-SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
-{
-       int fd, error;
-       struct file *file;
-
-       error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
-       if (error < 0)
-               return error;
-       fd = error;
-
-       file = eventfd_file_create(count, flags);
-       if (IS_ERR(file)) {
-               error = PTR_ERR(file);
-               goto err_put_unused_fd;
-       }
-       fd_install(fd, file);
-
        return fd;
-
-err_put_unused_fd:
-       put_unused_fd(fd);
-
-       return error;
 }
 
 SYSCALL_DEFINE1(eventfd, unsigned int, count)
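None of the eventfd restructuring changes what userspace sees: read() of 8 bytes still returns and resets the counter, and with EFD_NONBLOCK an empty counter still yields EAGAIN. A small userspace program demonstrating the semantics the folded eventfd_read() implements:

    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <errno.h>

    int main(void)
    {
            int fd = eventfd(0, EFD_NONBLOCK);
            uint64_t cnt;

            if (fd < 0) {
                    perror("eventfd");
                    return 1;
            }

            /* Counter is zero: a non-blocking read fails with EAGAIN. */
            if (read(fd, &cnt, sizeof(cnt)) < 0 && errno == EAGAIN)
                    puts("empty counter -> EAGAIN, as expected");

            cnt = 3;
            write(fd, &cnt, sizeof(cnt));   /* add 3 to the counter */

            /* Reads return the accumulated value and reset it to zero
             * (non-semaphore mode). */
            if (read(fd, &cnt, sizeof(cnt)) == sizeof(cnt))
                    printf("read counter: %llu\n", (unsigned long long)cnt);

            close(fd);
            return 0;
    }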
fs/file.c

@@ -11,18 +11,13 @@
 #include <linux/export.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/time.h>
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/file.h>
 #include <linux/fdtable.h>
 #include <linux/bitops.h>
-#include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
-#include <linux/workqueue.h>
 
 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
fs/file_table.c

@@ -23,7 +23,6 @@
 #include <linux/sysctl.h>
 #include <linux/percpu_counter.h>
 #include <linux/percpu.h>
-#include <linux/hardirq.h>
 #include <linux/task_work.h>
 #include <linux/ima.h>
 #include <linux/swap.h>
fs/jffs2/fs.c

@@ -362,7 +362,6 @@ error_io:
        ret = -EIO;
 error:
        mutex_unlock(&f->sem);
-       jffs2_do_clear_inode(c, f);
        iget_failed(inode);
        return ERR_PTR(ret);
 }
fs/namei.c

@@ -390,50 +390,6 @@ static inline int do_inode_permission(struct inode *inode, int mask)
        return generic_permission(inode, mask);
 }
 
-/**
- * __inode_permission - Check for access rights to a given inode
- * @inode: Inode to check permission on
- * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
- *
- * Check for read/write/execute permissions on an inode.
- *
- * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
- *
- * This does not check for a read-only file system.  You probably want
- * inode_permission().
- */
-int __inode_permission(struct inode *inode, int mask)
-{
-       int retval;
-
-       if (unlikely(mask & MAY_WRITE)) {
-               /*
-                * Nobody gets write access to an immutable file.
-                */
-               if (IS_IMMUTABLE(inode))
-                       return -EPERM;
-
-               /*
-                * Updating mtime will likely cause i_uid and i_gid to be
-                * written back improperly if their true value is unknown
-                * to the vfs.
-                */
-               if (HAS_UNMAPPED_ID(inode))
-                       return -EACCES;
-       }
-
-       retval = do_inode_permission(inode, mask);
-       if (retval)
-               return retval;
-
-       retval = devcgroup_inode_permission(inode, mask);
-       if (retval)
-               return retval;
-
-       return security_inode_permission(inode, mask);
-}
-EXPORT_SYMBOL(__inode_permission);
-
 /**
  * sb_permission - Check superblock-level permissions
  * @sb: Superblock of inode to check permission on

@@ -472,7 +428,32 @@ int inode_permission(struct inode *inode, int mask)
        retval = sb_permission(inode->i_sb, inode, mask);
        if (retval)
                return retval;
-       return __inode_permission(inode, mask);
+
+       if (unlikely(mask & MAY_WRITE)) {
+               /*
+                * Nobody gets write access to an immutable file.
+                */
+               if (IS_IMMUTABLE(inode))
+                       return -EPERM;
+
+               /*
+                * Updating mtime will likely cause i_uid and i_gid to be
+                * written back improperly if their true value is unknown
+                * to the vfs.
+                */
+               if (HAS_UNMAPPED_ID(inode))
+                       return -EACCES;
+       }
+
+       retval = do_inode_permission(inode, mask);
+       if (retval)
+               return retval;
+
+       retval = devcgroup_inode_permission(inode, mask);
+       if (retval)
+               return retval;
+
+       return security_inode_permission(inode, mask);
 }
 EXPORT_SYMBOL(inode_permission);
fs/nfs/nfs4file.c

@@ -8,7 +8,6 @@
 #include <linux/file.h>
 #include <linux/falloc.h>
 #include <linux/nfs_fs.h>
-#include <uapi/linux/btrfs.h>  /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */
 #include "delegation.h"
 #include "internal.h"
 #include "iostat.h"
fs/super.c

@@ -225,7 +225,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
        if (s->s_user_ns != &init_user_ns)
                s->s_iflags |= SB_I_NODEV;
        INIT_HLIST_NODE(&s->s_instances);
-       INIT_HLIST_BL_HEAD(&s->s_anon);
+       INIT_HLIST_BL_HEAD(&s->s_roots);
        mutex_init(&s->s_sync_lock);
        INIT_LIST_HEAD(&s->s_inodes);
        spin_lock_init(&s->s_inode_list_lock);
include/linux/bitfield.h

@@ -16,6 +16,7 @@
 #define _LINUX_BITFIELD_H
 
 #include <linux/build_bug.h>
+#include <asm/byteorder.h>
 
 /*
  * Bitfield access macros

@@ -103,4 +104,49 @@
                (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
        })
 
+extern void __compiletime_warning("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+       if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+               __bad_mask();
+       return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+       return field / field_multiplier(field);
+}
+#define ____MAKE_OP(type,base,to,from)                                 \
+static __always_inline __##type type##_encode_bits(base v, base field) \
+{                                                                      \
+       if (__builtin_constant_p(v) && (v & ~field_multiplier(field)))  \
+               __field_overflow();                                     \
+       return to((v & field_mask(field)) * field_multiplier(field));   \
+}                                                                      \
+static __always_inline __##type type##_replace_bits(__##type old,      \
+                                       base val, base field)           \
+{                                                                      \
+       return (old & ~to(field)) | type##_encode_bits(val, field);     \
+}                                                                      \
+static __always_inline void type##p_replace_bits(__##type *p,          \
+                                       base val, base field)           \
+{                                                                      \
+       *p = (*p & ~to(field)) | type##_encode_bits(val, field);        \
+}                                                                      \
+static __always_inline base type##_get_bits(__##type v, base field)    \
+{                                                                      \
+       return (from(v) & field)/field_multiplier(field);               \
+}
+#define __MAKE_OP(size)                                                        \
+       ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
+       ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
+       ____MAKE_OP(u##size,u##size,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
 #endif
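What the new bitfield.h block buys: drivers can pack and extract fields of fixed-endian words (__le16/32/64, __be16/32/64) with the byte swap handled inside the helper, and a non-contiguous mask fails at build time via __bad_mask(). A hedged usage sketch; the EXAMPLE_* register layout is invented:

    #include <linux/bitfield.h>
    #include <linux/types.h>

    #define EXAMPLE_CHAN   0x000000ffU     /* bits 7:0, hypothetical */
    #define EXAMPLE_RATE   0x000fff00U     /* bits 19:8, hypothetical */

    static __le32 example_pack(u32 chan, u32 rate)
    {
            /* Encode into a little-endian register image; the helpers do
             * the cpu_to_le32() conversion internally. */
            return le32_encode_bits(chan, EXAMPLE_CHAN) |
                   le32_encode_bits(rate, EXAMPLE_RATE);
    }

    static u32 example_rate(__le32 reg)
    {
            /* Extract a field from a little-endian word in CPU order. */
            return le32_get_bits(reg, EXAMPLE_RATE);
    }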
include/linux/buffer_head.h

@@ -151,7 +151,6 @@ void buffer_check_dirty_writeback(struct page *page,
 
 void mark_buffer_dirty(struct buffer_head *bh);
 void mark_buffer_write_io_error(struct buffer_head *bh);
-void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
 void touch_buffer(struct buffer_head *bh);
 void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
include/linux/eventfd.h

@@ -26,18 +26,16 @@
 #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
 
+struct eventfd_ctx;
 struct file;
 
 #ifdef CONFIG_EVENTFD
 
-struct file *eventfd_file_create(unsigned int count, int flags);
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
 void eventfd_ctx_put(struct eventfd_ctx *ctx);
 struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                  __u64 *cnt);
 

@@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
  * Ugly ugly ugly error layer to support modules that uses eventfd but
  * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
  */
-static inline struct file *eventfd_file_create(unsigned int count, int flags)
-{
-       return ERR_PTR(-ENOSYS);
-}
 
 static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
 {

@@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
 
 }
 
-static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
-                                      __u64 *cnt)
-{
-       return -ENOSYS;
-}
-
 static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
                                                wait_queue_entry_t *wait, __u64 *cnt)
 {
include/linux/fs.h

@@ -1359,7 +1359,7 @@ struct super_block {
 
        const struct fscrypt_operations *s_cop;
 
-       struct hlist_bl_head    s_anon;         /* anonymous dentries for (nfs) exporting */
+       struct hlist_bl_head    s_roots;        /* alternate root dentries for NFS */
        struct list_head        s_mounts;       /* list of mounts; _not_ for fs use */
        struct block_device     *s_bdev;
        struct backing_dev_info *s_bdi;

@@ -2688,7 +2688,6 @@ extern sector_t bmap(struct inode *, sector_t);
 #endif
 extern int notify_change(struct dentry *, struct iattr *, struct inode **);
 extern int inode_permission(struct inode *, int);
-extern int __inode_permission(struct inode *, int);
 extern int generic_permission(struct inode *, int);
 extern int __check_sticky(struct inode *dir, struct inode *inode);
 

@@ -3228,6 +3227,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
                ki->ki_flags |= IOCB_DSYNC;
        if (flags & RWF_SYNC)
                ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+       if (flags & RWF_APPEND)
+               ki->ki_flags |= IOCB_APPEND;
        return 0;
 }
include/linux/string.h

@@ -11,6 +11,7 @@
 
 extern char *strndup_user(const char __user *, long);
 extern void *memdup_user(const void __user *, size_t);
+extern void *vmemdup_user(const void __user *, size_t);
 extern void *memdup_user_nul(const void __user *, size_t);
 
 /*
include/uapi/linux/fs.h

@@ -377,7 +377,11 @@ typedef int __bitwise __kernel_rwf_t;
 /* per-IO, return -EAGAIN if operation would block */
 #define RWF_NOWAIT     ((__force __kernel_rwf_t)0x00000008)
 
+/* per-IO O_APPEND */
+#define RWF_APPEND     ((__force __kernel_rwf_t)0x00000010)
+
 /* mask of flags supported by the kernel */
-#define RWF_SUPPORTED  (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT)
+#define RWF_SUPPORTED  (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+                        RWF_APPEND)
 
 #endif /* _UAPI_LINUX_FS_H */
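RWF_APPEND makes append-mode a per-IO decision instead of a property of the open file description. A userspace sketch using pwritev2(); it assumes a kernel with this patch and glibc headers that expose RWF_APPEND, and older kernels fail the call with EOPNOTSUPP:

    #define _GNU_SOURCE
    #include <sys/uio.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("demo.log", O_WRONLY | O_CREAT, 0644);
            char line[] = "appended without O_APPEND\n";
            struct iovec iov = { .iov_base = line, .iov_len = strlen(line) };
            ssize_t n;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* offset -1 plus RWF_APPEND: write lands atomically at EOF
             * even though the descriptor was not opened O_APPEND. */
            n = pwritev2(fd, &iov, 1, -1, RWF_APPEND);
            if (n < 0)
                    perror("pwritev2(RWF_APPEND)"); /* EOPNOTSUPP pre-4.16 */

            close(fd);
            return 0;
    }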
lib/usercopy.c

@@ -20,7 +20,7 @@ EXPORT_SYMBOL(_copy_from_user);
 #endif
 
 #ifndef INLINE_COPY_TO_USER
-unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        might_fault();
        if (likely(access_ok(VERIFY_WRITE, to, n))) {
mm/util.c

@@ -150,18 +150,14 @@ EXPORT_SYMBOL(kmemdup_nul);
  * @src: source address in user space
  * @len: number of bytes to copy
  *
- * Returns an ERR_PTR() on failure.
+ * Returns an ERR_PTR() on failure.  Result is physically
+ * contiguous, to be freed by kfree().
  */
 void *memdup_user(const void __user *src, size_t len)
 {
        void *p;
 
-       /*
-        * Always use GFP_KERNEL, since copy_from_user() can sleep and
-        * cause pagefault, which makes it pointless to use GFP_NOFS
-        * or GFP_ATOMIC.
-        */
-       p = kmalloc_track_caller(len, GFP_KERNEL);
+       p = kmalloc_track_caller(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);
 

@@ -174,6 +170,32 @@ void *memdup_user(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user);
 
+/**
+ * vmemdup_user - duplicate memory region from user space
+ *
+ * @src: source address in user space
+ * @len: number of bytes to copy
+ *
+ * Returns an ERR_PTR() on failure.  Result may be not
+ * physically contiguous.  Use kvfree() to free.
+ */
+void *vmemdup_user(const void __user *src, size_t len)
+{
+       void *p;
+
+       p = kvmalloc(len, GFP_USER);
+       if (!p)
+               return ERR_PTR(-ENOMEM);
+
+       if (copy_from_user(p, src, len)) {
+               kvfree(p);
+               return ERR_PTR(-EFAULT);
+       }
+
+       return p;
+}
+EXPORT_SYMBOL(vmemdup_user);
+
 /*
  * strndup_user - duplicate an existing string from user space
  * @s: The string to duplicate
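The practical rule the two helpers encode: memdup_user() returns a physically contiguous buffer to be freed with kfree(), while vmemdup_user() may fall back to vmalloc for larger, possibly user-controlled sizes and must be freed with kvfree(). A hedged caller-side sketch; the ioctl helper and size cap are hypothetical, the dup/free APIs are the real ones:

    #include <linux/string.h>
    #include <linux/mm.h>
    #include <linux/err.h>

    #define EXAMPLE_MAX_BLOB (1024 * 1024)  /* invented sanity cap */

    static int example_ioctl_blob(const void __user *ubuf, size_t len)
    {
            void *blob;
            int err = 0;

            if (len == 0 || len > EXAMPLE_MAX_BLOB)
                    return -EINVAL;

            /* May be kmalloc- or vmalloc-backed; kvfree() handles both. */
            blob = vmemdup_user(ubuf, len);
            if (IS_ERR(blob))
                    return PTR_ERR(blob);

            /* ... operate on blob ... */

            kvfree(blob);
            return err;
    }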
net/sctp/socket.c

@@ -968,13 +968,6 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
  * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
  * from userspace.
  *
- * We don't use copy_from_user() for optimization: we first do the
- * sanity checks (buffer size -fast- and access check-healthy
- * pointer); if all of those succeed, then we can alloc the memory
- * (expensive operation) needed to copy the data to kernel. Then we do
- * the copying without checking the user space area
- * (__copy_from_user()).
- *
  * On exit there is no need to do sockfd_put(), sys_setsockopt() does
  * it.
  *

@@ -1004,25 +997,15 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
 
-       /* Check the user passed a healthy pointer. */
-       if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
-               return -EFAULT;
-
-       /* Alloc space for the address array in kernel memory.  */
-       kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
-       if (unlikely(!kaddrs))
-               return -ENOMEM;
-
-       if (__copy_from_user(kaddrs, addrs, addrs_size)) {
-               kfree(kaddrs);
-               return -EFAULT;
-       }
+       kaddrs = vmemdup_user(addrs, addrs_size);
+       if (unlikely(IS_ERR(kaddrs)))
+               return PTR_ERR(kaddrs);
 
        /* Walk through the addrs buffer and count the number of addresses. */
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
                if (walk_size + sizeof(sa_family_t) > addrs_size) {
-                       kfree(kaddrs);
+                       kvfree(kaddrs);
                        return -EINVAL;
                }
 

@@ -1033,7 +1016,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
                 * causes the address buffer to overflow return EINVAL.
                 */
                if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
-                       kfree(kaddrs);
+                       kvfree(kaddrs);
                        return -EINVAL;
                }
                addrcnt++;

@@ -1063,7 +1046,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        }
 
 out:
-       kfree(kaddrs);
+       kvfree(kaddrs);
 
        return err;
 }

@@ -1321,13 +1304,6 @@ out_free:
  * land and invoking either sctp_connectx(). This is used for tunneling
  * the sctp_connectx() request through sctp_setsockopt() from userspace.
  *
- * We don't use copy_from_user() for optimization: we first do the
- * sanity checks (buffer size -fast- and access check-healthy
- * pointer); if all of those succeed, then we can alloc the memory
- * (expensive operation) needed to copy the data to kernel. Then we do
- * the copying without checking the user space area
- * (__copy_from_user()).
- *
  * On exit there is no need to do sockfd_put(), sys_setsockopt() does
  * it.
  *

@@ -1343,7 +1319,6 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
                                      sctp_assoc_t *assoc_id)
 {
        struct sockaddr *kaddrs;
-       gfp_t gfp = GFP_KERNEL;
        int err = 0;
 
        pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",

@@ -1352,24 +1327,12 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
 
-       /* Check the user passed a healthy pointer. */
-       if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
-               return -EFAULT;
-
-       /* Alloc space for the address array in kernel memory.  */
-       if (sk->sk_socket->file)
-               gfp = GFP_USER | __GFP_NOWARN;
-       kaddrs = kmalloc(addrs_size, gfp);
-       if (unlikely(!kaddrs))
-               return -ENOMEM;
-
-       if (__copy_from_user(kaddrs, addrs, addrs_size)) {
-               err = -EFAULT;
-       } else {
-               err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
-       }
-
-       kfree(kaddrs);
+       kaddrs = vmemdup_user(addrs, addrs_size);
+       if (unlikely(IS_ERR(kaddrs)))
+               return PTR_ERR(kaddrs);
+
+       err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
+       kvfree(kaddrs);
 
        return err;
 }
sound/core/control.c

@@ -25,6 +25,7 @@
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/sched/signal.h>
 #include <sound/core.h>
 #include <sound/minors.h>

@@ -1129,7 +1130,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
        if (size > 1024 * 128)  /* sane value */
                return -EINVAL;
 
-       container = memdup_user(buf, size);
+       container = vmemdup_user(buf, size);
        if (IS_ERR(container))
                return PTR_ERR(container);
 

@@ -1137,7 +1138,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
        if (!change)
                change = memcmp(ue->tlv_data, container, size) != 0;
        if (!change) {
-               kfree(container);
+               kvfree(container);
                return 0;
        }
 

@@ -1148,7 +1149,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
                mask = SNDRV_CTL_EVENT_MASK_INFO;
        }
 
-       kfree(ue->tlv_data);
+       kvfree(ue->tlv_data);
        ue->tlv_data = container;
        ue->tlv_data_size = size;
 

@@ -1197,7 +1198,7 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
        if (ue->info.value.enumerated.names_length > 64 * 1024)
                return -EINVAL;
 
-       names = memdup_user((const void __user *)user_ptrval,
+       names = vmemdup_user((const void __user *)user_ptrval,
                ue->info.value.enumerated.names_length);
        if (IS_ERR(names))
                return PTR_ERR(names);

@@ -1208,7 +1209,7 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
        for (i = 0; i < ue->info.value.enumerated.items; ++i) {
                name_len = strnlen(p, buf_len);
                if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
-                       kfree(names);
+                       kvfree(names);
                        return -EINVAL;
                }
                p += name_len + 1;

@@ -1225,8 +1226,8 @@ static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
 {
        struct user_element *ue = kcontrol->private_data;
 
-       kfree(ue->tlv_data);
-       kfree(ue->priv_data);
+       kvfree(ue->tlv_data);
+       kvfree(ue->priv_data);
        kfree(ue);
 }
sound/core/hwdep.c

@@ -233,8 +233,6 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
        /* check whether the dsp was already loaded */
        if (hw->dsp_loaded & (1 << info.index))
                return -EBUSY;
-       if (!access_ok(VERIFY_READ, info.image, info.length))
-               return -EFAULT;
        err = hw->ops.dsp_load(hw, &info);
        if (err < 0)
                return err;
sound/usb/usx2y/us122l.c

@@ -378,7 +378,7 @@ out:
 static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
                                  unsigned cmd, unsigned long arg)
 {
-       struct usb_stream_config *cfg;
+       struct usb_stream_config cfg;
        struct us122l *us122l = hw->private_data;
        struct usb_stream *s;
        unsigned min_period_frames;

@@ -388,24 +388,21 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
        if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS)
                return -ENOTTY;
 
-       cfg = memdup_user((void *)arg, sizeof(*cfg));
-       if (IS_ERR(cfg))
-               return PTR_ERR(cfg);
+       if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg)))
+               return -EFAULT;
+
+       if (cfg.version != USB_STREAM_INTERFACE_VERSION)
+               return -ENXIO;
 
-       if (cfg->version != USB_STREAM_INTERFACE_VERSION) {
-               err = -ENXIO;
-               goto free;
-       }
        high_speed = us122l->dev->speed == USB_SPEED_HIGH;
-       if ((cfg->sample_rate != 44100 && cfg->sample_rate != 48000 &&
+       if ((cfg.sample_rate != 44100 && cfg.sample_rate != 48000 &&
            (!high_speed ||
-             (cfg->sample_rate != 88200 && cfg->sample_rate != 96000))) ||
-           cfg->frame_size != 6 ||
-           cfg->period_frames > 0x3000) {
-               err = -EINVAL;
-               goto free;
-       }
-       switch (cfg->sample_rate) {
+             (cfg.sample_rate != 88200 && cfg.sample_rate != 96000))) ||
+           cfg.frame_size != 6 ||
+           cfg.period_frames > 0x3000)
+               return -EINVAL;
+
+       switch (cfg.sample_rate) {
        case 44100:
                min_period_frames = 48;
                break;

@@ -418,10 +415,8 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
        }
        if (!high_speed)
                min_period_frames <<= 1;
-       if (cfg->period_frames < min_period_frames) {
-               err = -EINVAL;
-               goto free;
-       }
+       if (cfg.period_frames < min_period_frames)
+               return -EINVAL;
 
        snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
 

@@ -430,24 +425,22 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
        if (!us122l->master)
                us122l->master = file;
        else if (us122l->master != file) {
-               if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
+               if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg))) {
                        err = -EIO;
                        goto unlock;
                }
                us122l->slave = file;
        }
-       if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
+       if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg)) ||
            s->state == usb_stream_xrun) {
                us122l_stop(us122l);
-               if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
+               if (!us122l_start(us122l, cfg.sample_rate, cfg.period_frames))
                        err = -EIO;
                else
                        err = 1;
        }
 unlock:
        mutex_unlock(&us122l->mutex);
-free:
-       kfree(cfg);
        wake_up_all(&us122l->sk.sleep);
        return err;
 }
sound/usb/usx2y/usX2Yhwdeps.c

@@ -198,24 +198,22 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
                                    struct snd_hwdep_dsp_image *dsp)
 {
        struct usX2Ydev *priv = hw->private_data;
-       int lret, err = -EINVAL;
+       struct usb_device* dev = priv->dev;
+       int lret, err;
+       char *buf;
 
        snd_printdd( "dsp_load %s\n", dsp->name);
 
-       if (access_ok(VERIFY_READ, dsp->image, dsp->length)) {
-               struct usb_device* dev = priv->dev;
-               char *buf;
-
-               buf = memdup_user(dsp->image, dsp->length);
-               if (IS_ERR(buf))
-                       return PTR_ERR(buf);
-
-               err = usb_set_interface(dev, 0, 1);
-               if (err)
-                       snd_printk(KERN_ERR "usb_set_interface error \n");
-               else
-                       err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
-               kfree(buf);
-       }
+       buf = memdup_user(dsp->image, dsp->length);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
+
+       err = usb_set_interface(dev, 0, 1);
+       if (err)
+               snd_printk(KERN_ERR "usb_set_interface error \n");
+       else
+               err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
+       kfree(buf);
        if (err)
                return err;
        if (dsp->index == 1) {