Mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 06:01:57 +00:00)

commit ecc4d6af97 — Merge branch 'slab/for-6.12/kmem_cache_args' into slab/for-next

Merge of the kmem_cache_create() refactoring by Christian Brauner. Note this includes a merge of the vfs.file tree that contains the prerequisite kmem_cache_create_rcu() work.
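To make the scope of the merge concrete, here is a minimal sketch of the calling convention it introduces. The struct and cache names are made up for illustration; the API shape (a struct kmem_cache_args for the less common parameters, plus the legacy five-argument form kept working through _Generic() dispatch) is taken from the include/linux/slab.h hunk below.

#include <linux/slab.h>

struct demo_obj {
    int id;
    void *payload;
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
    /* Optional parameters now travel in one struct ... */
    struct kmem_cache_args args = {
        .align = __alignof__(struct demo_obj),
    };

    /* ... and kmem_cache_create() becomes (name, object_size, args, flags). */
    demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
                    &args, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);

    /*
     * The legacy spelling still compiles and is routed to the old wrapper:
     * kmem_cache_create("demo_obj", sizeof(struct demo_obj),
     *                   __alignof__(struct demo_obj), SLAB_HWCACHE_ALIGN, NULL);
     */
    return demo_cachep ? 0 : -ENOMEM;
}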
drivers/net/tun.c

@@ -3451,6 +3451,12 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
    struct tun_file *tfile = file->private_data;
    int ret;

    if (on) {
        ret = file_f_owner_allocate(file);
        if (ret)
            goto out;
    }

    if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
        goto out;
drivers/tty/tty_io.c

@@ -2225,6 +2225,12 @@ static int __tty_fasync(int fd, struct file *filp, int on)
    if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync"))
        goto out;

    if (on) {
        retval = file_f_owner_allocate(filp);
        if (retval)
            goto out;
    }

    retval = fasync_helper(fd, filp, on, &tty->fasync);
    if (retval <= 0)
        goto out;
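Both driver hunks above follow the same recipe, which any ->fasync implementation that wants SIGIO delivery now needs: f_owner is no longer embedded in struct file, so it has to be allocated (a sleeping operation) before fasync_helper() arms signal delivery. A hedged sketch for a hypothetical character device; the demo_dev type and its fields are illustrative only:

static int demo_chr_fasync(int fd, struct file *file, int on)
{
    struct demo_dev *dev = file->private_data;  /* illustrative driver state */
    int ret;

    if (on) {
        /* May sleep; must happen before signals can ever be queued. */
        ret = file_f_owner_allocate(file);
        if (ret)
            return ret;
    }

    return fasync_helper(fd, file, on, &dev->fasync);
}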
fs/fcntl.c (162 changed lines)

@@ -33,6 +33,8 @@
#include <asm/siginfo.h>
#include <linux/uaccess.h>

#include "internal.h"

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned int arg)

@@ -87,22 +89,64 @@ static int setfl(int fd, struct file * filp, unsigned int arg)
    return error;
}

/*
 * Allocate an file->f_owner struct if it doesn't exist, handling racing
 * allocations correctly.
 */
int file_f_owner_allocate(struct file *file)
{
    struct fown_struct *f_owner;

    f_owner = file_f_owner(file);
    if (f_owner)
        return 0;

    f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL);
    if (!f_owner)
        return -ENOMEM;

    rwlock_init(&f_owner->lock);
    f_owner->file = file;
    /* If someone else raced us, drop our allocation. */
    if (unlikely(cmpxchg(&file->f_owner, NULL, f_owner)))
        kfree(f_owner);
    return 0;
}
EXPORT_SYMBOL(file_f_owner_allocate);

void file_f_owner_release(struct file *file)
{
    struct fown_struct *f_owner;

    f_owner = file_f_owner(file);
    if (f_owner) {
        put_pid(f_owner->pid);
        kfree(f_owner);
    }
}

static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
        int force)
{
    write_lock_irq(&filp->f_owner.lock);
    if (force || !filp->f_owner.pid) {
        put_pid(filp->f_owner.pid);
        filp->f_owner.pid = get_pid(pid);
        filp->f_owner.pid_type = type;
    struct fown_struct *f_owner;

    f_owner = file_f_owner(filp);
    if (WARN_ON_ONCE(!f_owner))
        return;

    write_lock_irq(&f_owner->lock);
    if (force || !f_owner->pid) {
        put_pid(f_owner->pid);
        f_owner->pid = get_pid(pid);
        f_owner->pid_type = type;

        if (pid) {
            const struct cred *cred = current_cred();
            filp->f_owner.uid = cred->uid;
            filp->f_owner.euid = cred->euid;
            f_owner->uid = cred->uid;
            f_owner->euid = cred->euid;
        }
    }
    write_unlock_irq(&filp->f_owner.lock);
    write_unlock_irq(&f_owner->lock);
}

void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,

@@ -119,6 +163,8 @@ int f_setown(struct file *filp, int who, int force)
    struct pid *pid = NULL;
    int ret = 0;

    might_sleep();

    type = PIDTYPE_TGID;
    if (who < 0) {
        /* avoid overflow below */

@@ -129,6 +175,10 @@ int f_setown(struct file *filp, int who, int force)
        who = -who;
    }

    ret = file_f_owner_allocate(filp);
    if (ret)
        return ret;

    rcu_read_lock();
    if (who) {
        pid = find_vpid(who);

@@ -152,16 +202,21 @@ void f_delown(struct file *filp)
pid_t f_getown(struct file *filp)
{
    pid_t pid = 0;
    struct fown_struct *f_owner;

    read_lock_irq(&filp->f_owner.lock);
    f_owner = file_f_owner(filp);
    if (!f_owner)
        return pid;

    read_lock_irq(&f_owner->lock);
    rcu_read_lock();
    if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
        pid = pid_vnr(filp->f_owner.pid);
        if (filp->f_owner.pid_type == PIDTYPE_PGID)
    if (pid_task(f_owner->pid, f_owner->pid_type)) {
        pid = pid_vnr(f_owner->pid);
        if (f_owner->pid_type == PIDTYPE_PGID)
            pid = -pid;
    }
    rcu_read_unlock();
    read_unlock_irq(&filp->f_owner.lock);
    read_unlock_irq(&f_owner->lock);
    return pid;
}

@@ -194,6 +249,10 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
        return -EINVAL;
    }

    ret = file_f_owner_allocate(filp);
    if (ret)
        return ret;

    rcu_read_lock();
    pid = find_vpid(owner.pid);
    if (owner.pid && !pid)

@@ -210,13 +269,20 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
    struct f_owner_ex __user *owner_p = (void __user *)arg;
    struct f_owner_ex owner = {};
    int ret = 0;
    struct fown_struct *f_owner;
    enum pid_type pid_type = PIDTYPE_PID;

    read_lock_irq(&filp->f_owner.lock);
    f_owner = file_f_owner(filp);
    if (f_owner) {
        read_lock_irq(&f_owner->lock);
        rcu_read_lock();
        if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
            owner.pid = pid_vnr(filp->f_owner.pid);
        if (pid_task(f_owner->pid, f_owner->pid_type))
            owner.pid = pid_vnr(f_owner->pid);
        rcu_read_unlock();
        switch (filp->f_owner.pid_type) {
        pid_type = f_owner->pid_type;
    }

    switch (pid_type) {
    case PIDTYPE_PID:
        owner.type = F_OWNER_TID;
        break;

@@ -234,7 +300,8 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
        ret = -EINVAL;
        break;
    }
    read_unlock_irq(&filp->f_owner.lock);
    if (f_owner)
        read_unlock_irq(&f_owner->lock);

    if (!ret) {
        ret = copy_to_user(owner_p, &owner, sizeof(owner));

@@ -248,14 +315,18 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
    struct user_namespace *user_ns = current_user_ns();
    struct fown_struct *f_owner;
    uid_t __user *dst = (void __user *)arg;
    uid_t src[2];
    uid_t src[2] = {0, 0};
    int err;

    read_lock_irq(&filp->f_owner.lock);
    src[0] = from_kuid(user_ns, filp->f_owner.uid);
    src[1] = from_kuid(user_ns, filp->f_owner.euid);
    read_unlock_irq(&filp->f_owner.lock);
    f_owner = file_f_owner(filp);
    if (f_owner) {
        read_lock_irq(&f_owner->lock);
        src[0] = from_kuid(user_ns, f_owner->uid);
        src[1] = from_kuid(user_ns, f_owner->euid);
        read_unlock_irq(&f_owner->lock);
    }

    err = put_user(src[0], &dst[0]);
    err |= put_user(src[1], &dst[1]);

@@ -343,6 +414,30 @@ static long f_dupfd_query(int fd, struct file *filp)
    return f.file == filp;
}

static int f_owner_sig(struct file *filp, int signum, bool setsig)
{
    int ret = 0;
    struct fown_struct *f_owner;

    might_sleep();

    if (setsig) {
        if (!valid_signal(signum))
            return -EINVAL;

        ret = file_f_owner_allocate(filp);
        if (ret)
            return ret;
    }

    f_owner = file_f_owner(filp);
    if (setsig)
        f_owner->signum = signum;
    else if (f_owner)
        ret = f_owner->signum;
    return ret;
}

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
        struct file *filp)
{

@@ -421,15 +516,10 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
        err = f_getowner_uids(filp, arg);
        break;
    case F_GETSIG:
        err = filp->f_owner.signum;
        err = f_owner_sig(filp, 0, false);
        break;
    case F_SETSIG:
        /* arg == 0 restores default behaviour. */
        if (!valid_signal(argi)) {
            break;
        }
        err = 0;
        filp->f_owner.signum = argi;
        err = f_owner_sig(filp, argi, true);
        break;
    case F_GETLEASE:
        err = fcntl_getlease(filp);

@@ -844,14 +934,19 @@ static void send_sigurg_to_task(struct task_struct *p,
    do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}

int send_sigurg(struct fown_struct *fown)
int send_sigurg(struct file *file)
{
    struct fown_struct *fown;
    struct task_struct *p;
    enum pid_type type;
    struct pid *pid;
    unsigned long flags;
    int ret = 0;

    fown = file_f_owner(file);
    if (!fown)
        return 0;

    read_lock_irqsave(&fown->lock, flags);

    type = fown->pid_type;

@@ -1027,13 +1122,16 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
    }
    read_lock_irqsave(&fa->fa_lock, flags);
    if (fa->fa_file) {
        fown = &fa->fa_file->f_owner;
        fown = file_f_owner(fa->fa_file);
        if (!fown)
            goto next;
        /* Don't send SIGURG to processes which have not set a
           queued signum: SIGURG has its own default signalling
           mechanism. */
        if (!(sig == SIGURG && fown->signum == 0))
            send_sigio(fown, fa->fa_fd, band);
    }
next:
    read_unlock_irqrestore(&fa->fa_lock, flags);
    fa = rcu_dereference(fa->fa_next);
}
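file_f_owner_allocate() above uses a classic lock-free publication pattern: allocate a candidate, try to install it with a single compare-and-swap against NULL, and throw the candidate away if another thread won the race. The same idea in a stand-alone user-space sketch (C11 atomics; the names are illustrative, this is not kernel code):

#include <stdatomic.h>
#include <stdlib.h>

struct owner {
    int signum;
};

static _Atomic(struct owner *) owner_ptr;   /* starts out NULL */

static int owner_allocate(void)
{
    struct owner *expected = NULL;
    struct owner *candidate;

    if (atomic_load(&owner_ptr))
        return 0;                           /* someone already published one */

    candidate = calloc(1, sizeof(*candidate));
    if (!candidate)
        return -1;

    /* Publish atomically; if we lost the race, free our copy. */
    if (!atomic_compare_exchange_strong(&owner_ptr, &expected, candidate))
        free(candidate);
    return 0;
}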
fs/file_table.c

@@ -155,7 +155,6 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
        return error;
    }

    rwlock_init(&f->f_owner.lock);
    spin_lock_init(&f->f_lock);
    mutex_init(&f->f_pos_lock);
    f->f_flags = flags;

@@ -425,7 +424,7 @@ static void __fput(struct file *file)
        cdev_put(inode->i_cdev);
    }
    fops_put(file->f_op);
    put_pid(file->f_owner.pid);
    file_f_owner_release(file);
    put_file_access(file);
    dput(dentry);
    if (unlikely(mode & FMODE_NEED_UNMOUNT))

@@ -512,9 +511,14 @@ EXPORT_SYMBOL(__fput_sync);

void __init files_init(void)
{
    filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
                SLAB_PANIC | SLAB_ACCOUNT, NULL);
    struct kmem_cache_args args = {
        .use_freeptr_offset = true,
        .freeptr_offset = offsetof(struct file, f_freeptr),
    };

    filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
                SLAB_HWCACHE_ALIGN | SLAB_PANIC |
                SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
    percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
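The files_init() change above is the first user of the new freeptr_offset machinery: "filp" is a SLAB_TYPESAFE_BY_RCU cache, so SLUB would normally append the freelist pointer after the object and grow it; instead struct file donates space via the f_freeptr union member (see the include/linux/fs.h hunk further down). A hedged sketch of the same arrangement for a hypothetical RCU-safe cache — only fields that need not survive a free/realloc cycle may share the union:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_rcu_obj {
    refcount_t ref;                 /* must stay valid across object reuse */
    union {
        struct list_head lru;       /* only meaningful while allocated */
        freeptr_t free;             /* donated to SLUB while on the freelist */
    };
};

static struct kmem_cache *demo_rcu_cachep;

static int __init demo_rcu_cache_init(void)
{
    struct kmem_cache_args args = {
        .use_freeptr_offset = true,
        .freeptr_offset = offsetof(struct demo_rcu_obj, free),
    };

    demo_rcu_cachep = kmem_cache_create("demo_rcu_obj",
                        sizeof(struct demo_rcu_obj),
                        &args, SLAB_TYPESAFE_BY_RCU);
    return demo_rcu_cachep ? 0 : -ENOMEM;
}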
fs/internal.h

@@ -337,3 +337,4 @@ static inline bool path_mounted(const struct path *path)
{
    return path->mnt->mnt_root == path->dentry;
}
void file_f_owner_release(struct file *file);
fs/locks.c

@@ -1451,7 +1451,7 @@ int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose)
    struct file *filp = fl->c.flc_file;

    f_delown(filp);
    filp->f_owner.signum = 0;
    file_f_owner(filp)->signum = 0;
    fasync_helper(0, fl->c.flc_file, 0, &fl->fl_fasync);
    if (fl->fl_fasync != NULL) {
        printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);

@@ -1783,6 +1783,10 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
    lease = *flp;
    trace_generic_add_lease(inode, lease);

    error = file_f_owner_allocate(filp);
    if (error)
        return error;

    /* Note that arg is never F_UNLCK here */
    ctx = locks_get_lock_context(inode, arg);
    if (!ctx)
fs/notify/dnotify/dnotify.c

@@ -110,7 +110,7 @@ static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask,
            prev = &dn->dn_next;
            continue;
        }
        fown = &dn->dn_filp->f_owner;
        fown = file_f_owner(dn->dn_filp);
        send_sigio(fown, dn->dn_fd, POLL_MSG);
        if (dn->dn_mask & FS_DN_MULTISHOT)
            prev = &dn->dn_next;

@@ -316,6 +316,10 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
        goto out_err;
    }

    error = file_f_owner_allocate(filp);
    if (error)
        goto out_err;

    /* set up the new_fsn_mark and new_dn_mark */
    new_fsn_mark = &new_dn_mark->fsn_mark;
    fsnotify_init_mark(new_fsn_mark, dnotify_group);
include/linux/fs.h

@@ -947,6 +947,7 @@ static inline unsigned imajor(const struct inode *inode)
}

struct fown_struct {
    struct file *file;      /* backpointer for security modules */
    rwlock_t lock;          /* protects pid, uid, euid fields */
    struct pid *pid;        /* pid or -pgrp where SIGIO should be sent */
    enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */

@@ -986,52 +987,65 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
        index < ra->start + ra->size);
}

/*
 * f_{lock,count,pos_lock} members can be highly contended and share
 * the same cacheline. f_{lock,mode} are very frequently used together
 * and so share the same cacheline as well. The read-mostly
 * f_{path,inode,op} are kept on a separate cacheline.
/**
 * struct file - Represents a file
 * @f_count: reference count
 * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
 * @f_mode: FMODE_* flags often used in hotpaths
 * @f_op: file operations
 * @f_mapping: Contents of a cacheable, mappable object.
 * @private_data: filesystem or driver specific data
 * @f_inode: cached inode
 * @f_flags: file flags
 * @f_iocb_flags: iocb flags
 * @f_cred: stashed credentials of creator/opener
 * @f_path: path of the file
 * @f_pos_lock: lock protecting file position
 * @f_pos: file position
 * @f_version: file version
 * @f_security: LSM security context of this file
 * @f_owner: file owner
 * @f_wb_err: writeback error
 * @f_sb_err: per sb writeback errors
 * @f_ep: link of all epoll hooks for this file
 * @f_task_work: task work entry point
 * @f_llist: work queue entrypoint
 * @f_ra: file's readahead state
 * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
 */
struct file {
    union {
        /* fput() uses task work when closing and freeing file (default). */
        struct callback_head f_task_work;
        /* fput() must use workqueue (most kernel threads). */
        struct llist_node f_llist;
        unsigned int f_iocb_flags;
    };

    /*
     * Protects f_ep, f_flags.
     * Must not be taken from IRQ context.
     */
    atomic_long_t f_count;
    spinlock_t f_lock;
    fmode_t f_mode;
    atomic_long_t f_count;
    const struct file_operations *f_op;
    struct address_space *f_mapping;
    void *private_data;
    struct inode *f_inode;
    unsigned int f_flags;
    unsigned int f_iocb_flags;
    const struct cred *f_cred;
    /* --- cacheline 1 boundary (64 bytes) --- */
    struct path f_path;
    struct mutex f_pos_lock;
    loff_t f_pos;
    unsigned int f_flags;
    struct fown_struct f_owner;
    const struct cred *f_cred;
    struct file_ra_state f_ra;
    struct path f_path;
    struct inode *f_inode;  /* cached value */
    const struct file_operations *f_op;

    u64 f_version;
    /* --- cacheline 2 boundary (128 bytes) --- */
#ifdef CONFIG_SECURITY
    void *f_security;
#endif
    /* needed for tty driver, and maybe others */
    void *private_data;

#ifdef CONFIG_EPOLL
    /* Used by fs/eventpoll.c to link all the hooks to this file */
    struct hlist_head *f_ep;
#endif /* #ifdef CONFIG_EPOLL */
    struct address_space *f_mapping;
    struct fown_struct *f_owner;
    errseq_t f_wb_err;
    errseq_t f_sb_err; /* for syncfs */
    errseq_t f_sb_err;
#ifdef CONFIG_EPOLL
    struct hlist_head *f_ep;
#endif
    union {
        struct callback_head f_task_work;
        struct llist_node f_llist;
        struct file_ra_state f_ra;
        freeptr_t f_freeptr;
    };
    /* --- cacheline 3 boundary (192 bytes) --- */
} __randomize_layout
  __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */

@@ -1076,6 +1090,12 @@ struct file_lease;
#define OFFT_OFFSET_MAX type_max(off_t)
#endif

int file_f_owner_allocate(struct file *file);
static inline struct fown_struct *file_f_owner(const struct file *file)
{
    return READ_ONCE(file->f_owner);
}

extern void send_sigio(struct fown_struct *fown, int fd, int band);

static inline struct inode *file_inode(const struct file *f)

@@ -1124,7 +1144,7 @@ extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force
extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
extern int send_sigurg(struct file *file);

/*
 * sb->s_flags. Note that these mirror the equivalent MS_* flags where
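Since f_owner is now a pointer that is legitimately NULL until someone sets up signal ownership, code that used to read filp->f_owner.<field> directly has to go through the file_f_owner() accessor added above and tolerate the unallocated case, in the style of f_owner_sig() in the fs/fcntl.c hunk. A minimal sketch (the function name is made up):

/* Return the F_SETSIG signal for @filp, or 0 if no owner state was ever set up. */
static int demo_get_signum(struct file *filp)
{
    struct fown_struct *f_owner = file_f_owner(filp);

    return f_owner ? f_owner->signum : 0;
}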
include/linux/slab.h

@@ -212,6 +212,12 @@ enum _slab_flag_bits {
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
#endif

/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *

@@ -234,14 +240,173 @@ struct mem_cgroup;
 */
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
/**
 * struct kmem_cache_args - Less common arguments for kmem_cache_create()
 *
 * Any uninitialized fields of the structure are interpreted as unused. The
 * exception is @freeptr_offset where %0 is a valid value, so
 * @use_freeptr_offset must be also set to %true in order to interpret the field
 * as used. For @useroffset %0 is also valid, but only with non-%0
 * @usersize.
 *
 * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
 * fields unused.
 */
struct kmem_cache_args {
    /**
     * @align: The required alignment for the objects.
     *
     * %0 means no specific alignment is requested.
     */
    unsigned int align;
    /**
     * @useroffset: Usercopy region offset.
     *
     * %0 is a valid offset, when @usersize is non-%0
     */
    unsigned int useroffset;
    /**
     * @usersize: Usercopy region size.
     *
     * %0 means no usercopy region is specified.
     */
    unsigned int usersize;
    /**
     * @freeptr_offset: Custom offset for the free pointer
     * in &SLAB_TYPESAFE_BY_RCU caches
     *
     * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
     * outside of the object. This might cause the object to grow in size.
     * Cache creators that have a reason to avoid this can specify a custom
     * free pointer offset in their struct where the free pointer will be
     * placed.
     *
     * Note that placing the free pointer inside the object requires the
     * caller to ensure that no fields are invalidated that are required to
     * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
     * details).
     *
     * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
     * is specified, %use_freeptr_offset must be set %true.
     *
     * Note that @ctor currently isn't supported with custom free pointers
     * as a @ctor requires an external free pointer.
     */
    unsigned int freeptr_offset;
    /**
     * @use_freeptr_offset: Whether a @freeptr_offset is used.
     */
    bool use_freeptr_offset;
    /**
     * @ctor: A constructor for the objects.
     *
     * The constructor is invoked for each object in a newly allocated slab
     * page. It is the cache user's responsibility to free object in the
     * same state as after calling the constructor, or deal appropriately
     * with any differences between a freshly constructed and a reallocated
     * object.
     *
     * %NULL means no constructor.
     */
    void (*ctor)(void *);
};

struct kmem_cache *__kmem_cache_create_args(const char *name,
                unsigned int object_size,
                struct kmem_cache_args *args,
                slab_flags_t flags);
static inline struct kmem_cache *
__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
                slab_flags_t flags, void (*ctor)(void *))
{
    struct kmem_cache_args kmem_args = {
        .align = align,
        .ctor = ctor,
    };

    return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/**
 * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
 * for copying to userspace.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects, or %NULL.
 *
 * This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY()
 * if whitelisting a single field is sufficient, or kmem_cache_create() with
 * the necessary parameters passed via the args parameter (see
 * &struct kmem_cache_args)
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
static inline struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
                unsigned int align, slab_flags_t flags,
            void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
            unsigned int size, unsigned int align,
            slab_flags_t flags,
            unsigned int useroffset, unsigned int usersize,
            void (*ctor)(void *));
                void (*ctor)(void *))
{
    struct kmem_cache_args kmem_args = {
        .align = align,
        .ctor = ctor,
        .useroffset = useroffset,
        .usersize = usersize,
    };

    return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/* If NULL is passed for @args, use this variant with default arguments. */
static inline struct kmem_cache *
__kmem_cache_default_args(const char *name, unsigned int size,
                struct kmem_cache_args *args,
                slab_flags_t flags)
{
    struct kmem_cache_args kmem_default_args = {};

    /* Make sure we don't get passed garbage. */
    if (WARN_ON_ONCE(args))
        return ERR_PTR(-EINVAL);

    return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
}

/**
 * kmem_cache_create - Create a kmem cache.
 * @__name: A string which is used in /proc/slabinfo to identify this cache.
 * @__object_size: The size of objects to be created in this cache.
 * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
 *          means defaults will be used for all the arguments.
 *
 * This is currently implemented as a macro using ``_Generic()`` to call
 * either the new variant of the function, or a legacy one.
 *
 * The new variant has 4 parameters:
 * ``kmem_cache_create(name, object_size, args, flags)``
 *
 * See __kmem_cache_create_args() which implements this.
 *
 * The legacy variant has 5 parameters:
 * ``kmem_cache_create(name, object_size, align, flags, ctor)``
 *
 * The align and ctor parameters map to the respective fields of
 * &struct kmem_cache_args
 *
 * Context: Cannot be called within a interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
#define kmem_cache_create(__name, __object_size, __args, ...) \
    _Generic((__args), \
        struct kmem_cache_args *: __kmem_cache_create_args, \
        void *: __kmem_cache_default_args, \
        default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)

void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

@@ -254,19 +419,22 @@ int kmem_cache_shrink(struct kmem_cache *s);
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) \
    kmem_cache_create(#__struct, sizeof(struct __struct), \
        __alignof__(struct __struct), (__flags), NULL)
    __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
        &(struct kmem_cache_args) { \
            .align = __alignof__(struct __struct), \
        }, (__flags))

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
    kmem_cache_create_usercopy(#__struct, \
        sizeof(struct __struct), \
        __alignof__(struct __struct), (__flags), \
        offsetof(struct __struct, __field), \
        sizeof_field(struct __struct, __field), NULL)
    __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
        &(struct kmem_cache_args) { \
            .align = __alignof__(struct __struct), \
            .useroffset = offsetof(struct __struct, __field), \
            .usersize = sizeof_field(struct __struct, __field), \
        }, (__flags))

/*
 * Common kmalloc functions provided by all allocators
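Per the kmem_cache_create() kerneldoc above, the macro picks an implementation from the type of its third argument via _Generic(): a struct kmem_cache_args pointer goes to __kmem_cache_create_args(), a plain NULL goes to __kmem_cache_default_args(), and anything else falls through to the legacy positional wrapper. A hedged illustration of the three spellings (cache names and sizes are made up):

static struct kmem_cache *c1, *c2, *c3;

static int __init demo_dispatch_init(void)
{
    struct kmem_cache_args args = { .align = 64 };

    /* New form: (name, object_size, &args, flags). */
    c1 = kmem_cache_create("demo_a", 128, &args, SLAB_HWCACHE_ALIGN);

    /* NULL args: every kmem_cache_args field takes its default. */
    c2 = kmem_cache_create("demo_b", 128, NULL, SLAB_HWCACHE_ALIGN);

    /* Legacy form: (name, object_size, align, flags, ctor) keeps compiling. */
    c3 = kmem_cache_create("demo_c", 128, 64, SLAB_HWCACHE_ALIGN, NULL);

    return (c1 && c2 && c3) ? 0 : -ENOMEM;
}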
io_uring/io_uring.c

@@ -3638,6 +3638,11 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,

static int __init io_uring_init(void)
{
    struct kmem_cache_args kmem_args = {
        .useroffset = offsetof(struct io_kiocb, cmd.data),
        .usersize = sizeof_field(struct io_kiocb, cmd.data),
    };

#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
    BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
    BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \

@@ -3722,12 +3727,9 @@ static int __init io_uring_init(void)
     * range, and HARDENED_USERCOPY will complain if we haven't
     * correctly annotated this range.
     */
    req_cachep = kmem_cache_create_usercopy("io_kiocb",
                sizeof(struct io_kiocb), 0,
                SLAB_HWCACHE_ALIGN | SLAB_PANIC |
                SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU,
                offsetof(struct io_kiocb, cmd.data),
                sizeof_field(struct io_kiocb, cmd.data), NULL);
    req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
                SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
                SLAB_TYPESAFE_BY_RCU);
    io_buf_cachep = KMEM_CACHE(io_buffer,
                SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
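The io_uring conversion above is the template for caches with a usercopy whitelist: the useroffset/usersize pair that used to be positional arguments of kmem_cache_create_usercopy() becomes two fields of kmem_cache_args. A hedged sketch for a hypothetical request object with a single user-visible buffer:

#include <linux/slab.h>

struct demo_req {
    struct list_head node;      /* kernel-internal, never copied to userspace */
    u8 user_buf[64];            /* the only region handed to copy_to_user() */
};

static struct kmem_cache *demo_req_cachep;

static int __init demo_req_cache_init(void)
{
    struct kmem_cache_args args = {
        .useroffset = offsetof(struct demo_req, user_buf),
        .usersize = sizeof_field(struct demo_req, user_buf),
    };

    demo_req_cachep = kmem_cache_create("demo_req", sizeof(struct demo_req),
                        &args, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
    return demo_req_cachep ? 0 : -ENOMEM;
}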
mm/slab.h

@@ -422,7 +422,9 @@ kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
            unsigned int size, struct kmem_cache_args *args,
            slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
mm/slab_common.c (137 changed lines)

@@ -215,32 +215,29 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
}

static struct kmem_cache *create_cache(const char *name,
            unsigned int object_size, unsigned int align,
            slab_flags_t flags, unsigned int useroffset,
            unsigned int usersize, void (*ctor)(void *),
            struct kmem_cache *root_cache)
            unsigned int object_size,
            struct kmem_cache_args *args,
            slab_flags_t flags)
{
    struct kmem_cache *s;
    int err;

    if (WARN_ON(useroffset + usersize > object_size))
        useroffset = usersize = 0;
    if (WARN_ON(args->useroffset + args->usersize > object_size))
        args->useroffset = args->usersize = 0;

    /* If a custom freelist pointer is requested make sure it's sane. */
    err = -EINVAL;
    if (args->use_freeptr_offset &&
        (args->freeptr_offset >= object_size ||
         !(flags & SLAB_TYPESAFE_BY_RCU) ||
         !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
        goto out;

    err = -ENOMEM;
    s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
    if (!s)
        goto out;

    s->name = name;
    s->size = s->object_size = object_size;
    s->align = align;
    s->ctor = ctor;
#ifdef CONFIG_HARDENED_USERCOPY
    s->useroffset = useroffset;
    s->usersize = usersize;
#endif

    err = __kmem_cache_create(s, flags);
    err = do_kmem_cache_create(s, name, object_size, args, flags);
    if (err)
        goto out_free_cache;

@@ -255,39 +252,24 @@ out:
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * __kmem_cache_create_args - Create a kmem cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 * @object_size: The size of objects to be created in this cache.
 * @args: Additional arguments for the cache creation (see
 *        &struct kmem_cache_args).
 * @flags: See %SLAB_* flags for an explanation of individual @flags.
 *
 * Cannot be called within a interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 * Not to be called directly, use the kmem_cache_create() wrapper with the same
 * parameters.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 * Context: Cannot be called within a interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
            unsigned int size, unsigned int align,
            slab_flags_t flags,
            unsigned int useroffset, unsigned int usersize,
            void (*ctor)(void *))
struct kmem_cache *__kmem_cache_create_args(const char *name,
            unsigned int object_size,
            struct kmem_cache_args *args,
            slab_flags_t flags)
{
    struct kmem_cache *s = NULL;
    const char *cache_name;

@@ -309,7 +291,7 @@ kmem_cache_create_usercopy(const char *name,

    mutex_lock(&slab_mutex);

    err = kmem_cache_sanity_check(name, size);
    err = kmem_cache_sanity_check(name, object_size);
    if (err) {
        goto out_unlock;
    }

@@ -330,12 +312,14 @@ kmem_cache_create_usercopy(const char *name,

    /* Fail closed on bad usersize of useroffset values. */
    if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
        WARN_ON(!usersize && useroffset) ||
        WARN_ON(size < usersize || size - usersize < useroffset))
        usersize = useroffset = 0;
        WARN_ON(!args->usersize && args->useroffset) ||
        WARN_ON(object_size < args->usersize ||
            object_size - args->usersize < args->useroffset))
        args->usersize = args->useroffset = 0;

    if (!usersize)
        s = __kmem_cache_alias(name, size, align, flags, ctor);
    if (!args->usersize)
        s = __kmem_cache_alias(name, object_size, args->align, flags,
                args->ctor);
    if (s)
        goto out_unlock;

@@ -345,9 +329,8 @@ kmem_cache_create_usercopy(const char *name,
        goto out_unlock;
    }

    s = create_cache(cache_name, size,
            calculate_alignment(flags, align, size),
            flags, useroffset, usersize, ctor, NULL);
    args->align = calculate_alignment(flags, args->align, object_size);
    s = create_cache(cache_name, object_size, args, flags);
    if (IS_ERR(s)) {
        err = PTR_ERR(s);
        kfree_const(cache_name);

@@ -369,41 +352,7 @@ out_unlock:
    }
    return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within a interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
        slab_flags_t flags, void (*ctor)(void *))
{
    return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
                ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
EXPORT_SYMBOL(__kmem_cache_create_args);

static struct kmem_cache *kmem_buckets_cache __ro_after_init;

@@ -689,9 +638,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
{
    int err;
    unsigned int align = ARCH_KMALLOC_MINALIGN;

    s->name = name;
    s->size = s->object_size = size;
    struct kmem_cache_args kmem_args = {};

    /*
     * kmalloc caches guarantee alignment of at least the largest

@@ -700,14 +647,14 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
     */
    if (flags & SLAB_KMALLOC)
        align = max(align, 1U << (ffs(size) - 1));
    s->align = calculate_alignment(flags, align, size);
    kmem_args.align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
    s->useroffset = useroffset;
    s->usersize = usersize;
    kmem_args.useroffset = useroffset;
    kmem_args.usersize = usersize;
#endif

    err = __kmem_cache_create(s, flags);
    err = do_kmem_cache_create(s, name, size, &kmem_args, flags);

    if (err)
        panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
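create_cache() above is also where the new freeptr_offset argument is validated. Read off the condition in the hunk, a request is rejected with -EINVAL when use_freeptr_offset is set but the offset lies beyond the object, the cache is not SLAB_TYPESAFE_BY_RCU, or the offset is not sizeof(freeptr_t)-aligned. A hedged sketch of one accepted and one rejected configuration (the rejected cache name is made up):

#include <linux/fs.h>
#include <linux/slab.h>

/* Accepted: RCU-safe cache with an aligned, in-object free pointer
 * (this is exactly what fs/file_table.c does for "filp"). */
struct kmem_cache_args ok = {
    .use_freeptr_offset = true,
    .freeptr_offset = offsetof(struct file, f_freeptr),
};

/* Rejected with -EINVAL: no SLAB_TYPESAFE_BY_RCU and an unaligned offset. */
struct kmem_cache_args bad = {
    .use_freeptr_offset = true,
    .freeptr_offset = 3,
};
/* kmem_cache_create("demo_bad", 128, &bad, SLAB_HWCACHE_ALIGN) fails here. */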
mm/slub.c (160 changed lines)

@@ -465,12 +465,6 @@ static struct workqueue_struct *flushwq;
 * Core slab cache functions
 *******************************************************************/

/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache

@@ -4019,6 +4013,8 @@ static void *__slab_alloc_node(struct kmem_cache *s,
/*
 * If the object has been wiped upon free, make sure it's fully initialized by
 * zeroing out freelist pointer.
 *
 * Note that we also wipe custom freelist pointers.
 */
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
                void *obj)

@@ -5282,7 +5278,7 @@ static void set_cpu_partial(struct kmem_cache *s)
 * calculate_sizes() determines the order and the distribution of data within
 * a slab object.
 */
static int calculate_sizes(struct kmem_cache *s)
static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
{
    slab_flags_t flags = s->flags;
    unsigned int size = s->object_size;

@@ -5323,7 +5319,8 @@ static int calculate_sizes(struct kmem_cache *s)
     */
    s->inuse = size;

    if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
    if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
        (flags & SLAB_POISON) || s->ctor ||
        ((flags & SLAB_RED_ZONE) &&
        (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
        /*

@@ -5344,6 +5341,8 @@ static int calculate_sizes(struct kmem_cache *s)
         */
        s->offset = size;
        size += sizeof(void *);
    } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
        s->offset = args->freeptr_offset;
    } else {
        /*
         * Store freelist pointer near middle of object to keep

@@ -5418,65 +5417,6 @@ static int calculate_sizes(struct kmem_cache *s)
    return !!oo_objects(s->oo);
}

static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
    s->flags = kmem_cache_flags(flags, s->name);
#ifdef CONFIG_SLAB_FREELIST_HARDENED
    s->random = get_random_long();
#endif

    if (!calculate_sizes(s))
        goto error;
    if (disable_higher_order_debug) {
        /*
         * Disable debugging flags that store metadata if the min slab
         * order increased.
         */
        if (get_order(s->size) > get_order(s->object_size)) {
            s->flags &= ~DEBUG_METADATA_FLAGS;
            s->offset = 0;
            if (!calculate_sizes(s))
                goto error;
        }
    }

#ifdef system_has_freelist_aba
    if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
        /* Enable fast mode */
        s->flags |= __CMPXCHG_DOUBLE;
    }
#endif

    /*
     * The larger the object size is, the more slabs we want on the partial
     * list to avoid pounding the page allocator excessively.
     */
    s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
    s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);

    set_cpu_partial(s);

#ifdef CONFIG_NUMA
    s->remote_node_defrag_ratio = 1000;
#endif

    /* Initialize the pre-computed randomized freelist if slab is up */
    if (slab_state >= UP) {
        if (init_cache_random_seq(s))
            goto error;
    }

    if (!init_kmem_cache_nodes(s))
        goto error;

    if (alloc_kmem_cache_cpus(s))
        return 0;

error:
    __kmem_cache_release(s);
    return -EINVAL;
}

static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
            const char *text)
{

@@ -6030,28 +5970,90 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
    return s;
}

int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
            unsigned int size, struct kmem_cache_args *args,
            slab_flags_t flags)
{
    int err;
    int err = -EINVAL;

    err = kmem_cache_open(s, flags);
    if (err)
        return err;
    s->name = name;
    s->size = s->object_size = size;

    s->flags = kmem_cache_flags(flags, s->name);
#ifdef CONFIG_SLAB_FREELIST_HARDENED
    s->random = get_random_long();
#endif
    s->align = args->align;
    s->ctor = args->ctor;
#ifdef CONFIG_HARDENED_USERCOPY
    s->useroffset = args->useroffset;
    s->usersize = args->usersize;
#endif

    if (!calculate_sizes(args, s))
        goto out;
    if (disable_higher_order_debug) {
        /*
         * Disable debugging flags that store metadata if the min slab
         * order increased.
         */
        if (get_order(s->size) > get_order(s->object_size)) {
            s->flags &= ~DEBUG_METADATA_FLAGS;
            s->offset = 0;
            if (!calculate_sizes(args, s))
                goto out;
        }
    }

#ifdef system_has_freelist_aba
    if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
        /* Enable fast mode */
        s->flags |= __CMPXCHG_DOUBLE;
    }
#endif

    /*
     * The larger the object size is, the more slabs we want on the partial
     * list to avoid pounding the page allocator excessively.
     */
    s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
    s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);

    set_cpu_partial(s);

#ifdef CONFIG_NUMA
    s->remote_node_defrag_ratio = 1000;
#endif

    /* Initialize the pre-computed randomized freelist if slab is up */
    if (slab_state >= UP) {
        if (init_cache_random_seq(s))
            goto out;
    }

    if (!init_kmem_cache_nodes(s))
        goto out;

    if (!alloc_kmem_cache_cpus(s))
        goto out;

    /* Mutex is not taken during early boot */
    if (slab_state <= UP)
        return 0;
    if (slab_state <= UP) {
        err = 0;
        goto out;
    }

    err = sysfs_slab_add(s);
    if (err) {
        __kmem_cache_release(s);
        return err;
    }
    if (err)
        goto out;

    if (s->flags & SLAB_STORE_USER)
        debugfs_slab_add(s);

    return 0;
out:
    if (err)
        __kmem_cache_release(s);
    return err;
}

#ifdef SLAB_SUPPORTS_SYSFS
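The calculate_sizes() changes above decide where the freelist pointer ends up. As a worked illustration (assuming 8-byte pointers; the numbers are illustrative, not measurements from the tree):

/*
 * 1. SLAB_TYPESAFE_BY_RCU without use_freeptr_offset (likewise SLAB_POISON,
 *    a ctor, or certain red-zone setups): the pointer must not overlap the
 *    object, so for a 256-byte object s->offset becomes 256 and the
 *    per-object footprint grows to 264 bytes before alignment.
 * 2. SLAB_TYPESAFE_BY_RCU with use_freeptr_offset: s->offset is taken from
 *    args->freeptr_offset, the object stays 256 bytes, and the donated field
 *    is simply overwritten while the object sits on the freelist.
 * 3. Everything else: the pointer is stored inside the object (near its
 *    middle), costing no extra space.
 */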
net/core/sock.c

@@ -3429,7 +3429,7 @@ static void sock_def_destruct(struct sock *sk)
void sk_send_sigurg(struct sock *sk)
{
    if (sk->sk_socket && sk->sk_socket->file)
        if (send_sigurg(&sk->sk_socket->file->f_owner))
        if (send_sigurg(sk->sk_socket->file))
            sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);
security/selinux/hooks.c

@@ -3950,7 +3950,7 @@ static int selinux_file_send_sigiotask(struct task_struct *tsk,
    struct file_security_struct *fsec;

    /* struct fown_struct is never outside the context of a struct file */
    file = container_of(fown, struct file, f_owner);
    file = fown->file;

    fsec = selinux_file(file);
security/smack/smack_lsm.c

@@ -1950,7 +1950,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
    /*
     * struct fown_struct is never outside the context of a struct file
     */
    file = container_of(fown, struct file, f_owner);
    file = fown->file;

    /* we don't log here as rc can be overriden */
    blob = smack_file(file);
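The two LSM hunks above depend on the new backpointer: with fown_struct allocated separately from struct file, container_of() can no longer recover the owning file, which is why struct fown_struct gained its ->file member (see the include/linux/fs.h hunk). In sketch form, the before/after for a send_sigiotask-style hook:

/* Before: fown was embedded in struct file, so layout arithmetic worked. */
file = container_of(fown, struct file, f_owner);

/* After: fown lives in its own allocation and carries an explicit backpointer,
 * set once in file_f_owner_allocate(). */
file = fown->file;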