mirror of https://github.com/torvalds/linux.git
slab allocators: Remove SLAB_DEBUG_INITIAL flag
I have never seen a use of SLAB_DEBUG_INITIAL. It is only supported by SLAB.

I think its purpose was to have a callback after an object has been freed, to verify that the state is the constructor state again. The callback is performed before each freeing of an object. It is much easier to check the object state manually before the free; that also places the check near the code that manipulates the object.

Also, the SLAB_DEBUG_INITIAL callback is only performed if the kernel was compiled with SLAB debugging on. If there were code in a constructor handling SLAB_DEBUG_INITIAL, it would have to be conditional on SLAB_DEBUG, otherwise it would just be dead code. But there is no such code in the kernel. SLAB_DEBUG_INITIAL is too problematic to make real use of, difficult to understand, and there are easier ways to accomplish the same effect (i.e. add debug code before the kfree).

There is a related flag, SLAB_CTOR_VERIFY, that is frequently checked to be clear in fs inode caches. Remove the pointless checks (they would be pointless even without the removal of SLAB_DEBUG_INITIAL) from the fs constructors.

This is the last slab flag that SLUB does not support. Remove the check for unimplemented flags from SLUB.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 4b1d89290b
commit 50953fe9e0
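Every constructor hunk below makes the same change: the old code masked the ctor flags with SLAB_CTOR_VERIFY before comparing against SLAB_CTOR_CONSTRUCTOR, while the new code simply tests the SLAB_CTOR_CONSTRUCTOR bit, since verify callbacks can no longer happen once SLAB_DEBUG_INITIAL is gone. A minimal sketch of the before/after shape, using a hypothetical foo filesystem cache (not code from this patch):

/* Hypothetical example of the pattern this patch applies throughout the
 * tree; foo_inode_info is a made-up struct, not a real kernel type. */
#include <linux/fs.h>
#include <linux/slab.h>

struct foo_inode_info {
	struct inode vfs_inode;
};

/* Before: skip initialization when the slab invoked the ctor only to
 * verify object state (SLAB_CTOR_VERIFY, driven by SLAB_DEBUG_INITIAL). */
static void foo_init_once_old(void *p, struct kmem_cache *cachep,
			      unsigned long flags)
{
	struct foo_inode_info *ei = p;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&ei->vfs_inode);
}

/* After: verify calls no longer exist, so only the constructor bit
 * needs to be tested. */
static void foo_init_once_new(void *p, struct kmem_cache *cachep,
			      unsigned long flags)
{
	struct foo_inode_info *ei = p;

	if (flags & SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&ei->vfs_inode);
}

The cache itself would be set up the usual way for this era, e.g. kmem_cache_create("foo_inode_cache", sizeof(struct foo_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, foo_init_once_old, NULL); only the flag test inside the constructor changes.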
@@ -71,8 +71,7 @@ spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct spufs_inode_info *ei = p;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&ei->vfs_inode);
 	}
 }
@@ -940,8 +940,7 @@ static void ltree_entry_ctor(void *obj, struct kmem_cache *cache,
 {
 	struct ltree_entry *le = obj;
 
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) !=
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		return;
 
 	le->users = 0;
@@ -232,8 +232,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -87,8 +87,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		init_MUTEX(&ei->i_link_lock);
 		init_MUTEX(&ei->i_ext_lock);
 		inode_init_once(&ei->vfs_inode);
@@ -453,8 +453,7 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
 {
 	struct afs_vnode *vnode = _vnode;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		memset(vnode, 0, sizeof(*vnode));
 		inode_init_once(&vnode->vfs_inode);
 		init_waitqueue_head(&vnode->update_waitq);
@@ -293,8 +293,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct befs_inode_info *bi = (struct befs_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&bi->vfs_inode);
 	}
 }
@@ -248,8 +248,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct bfs_inode_info *bi = foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&bi->vfs_inode);
 }
 
@@ -457,9 +457,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
 	struct block_device *bdev = &ei->bdev;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
-	{
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		memset(bdev, 0, sizeof(*bdev));
 		mutex_init(&bdev->bd_mutex);
 		sema_init(&bdev->bd_mount_sem, 1);
@@ -2953,8 +2953,7 @@ EXPORT_SYMBOL(free_buffer_head);
 static void
 init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		struct buffer_head * bh = (struct buffer_head *)data;
 
 		memset(bh, 0, sizeof(*bh));
@@ -701,8 +701,7 @@ cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct cifsInodeInfo *cifsi = inode;
 
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&cifsi->vfs_inode);
 		INIT_LIST_HEAD(&cifsi->lockList);
 	}
@@ -62,8 +62,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct coda_inode_info *ei = (struct coda_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -583,8 +583,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr;
 
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -72,8 +72,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct efs_inode_info *ei = (struct efs_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -160,8 +160,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		rwlock_init(&ei->i_meta_lock);
 #ifdef CONFIG_EXT2_FS_XATTR
 		init_rwsem(&ei->xattr_sem);
@@ -466,8 +466,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		INIT_LIST_HEAD(&ei->i_orphan);
 #ifdef CONFIG_EXT3_FS_XATTR
 		init_rwsem(&ei->xattr_sem);
@@ -517,8 +517,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		INIT_LIST_HEAD(&ei->i_orphan);
 #ifdef CONFIG_EXT4DEV_FS_XATTR
 		init_rwsem(&ei->xattr_sem);
@@ -40,8 +40,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct fat_cache *cache = (struct fat_cache *)foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		INIT_LIST_HEAD(&cache->cache_list);
 }
 
@@ -499,8 +499,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		spin_lock_init(&ei->cache_lru_lock);
 		ei->nr_caches = 0;
 		ei->cache_valid_id = FAT_CACHE_VALID + 1;
@@ -685,8 +685,7 @@ static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct inode * inode = foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(inode);
 }
 
@@ -27,8 +27,7 @@
 static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_inode *ip = foo;
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&ip->i_inode);
 		spin_lock_init(&ip->i_spin);
 		init_rwsem(&ip->i_rw_mutex);
@@ -39,8 +38,7 @@ static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned
 static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_glock *gl = foo;
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		INIT_HLIST_NODE(&gl->gl_list);
 		spin_lock_init(&gl->gl_spin);
 		INIT_LIST_HEAD(&gl->gl_holders);
@@ -434,7 +434,7 @@ static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flag
 {
 	struct hfs_inode_info *i = p;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&i->vfs_inode);
 }
 
@@ -470,7 +470,7 @@ static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long
 {
 	struct hfsplus_inode_info *i = p;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&i->vfs_inode);
 }
 
@@ -176,8 +176,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		mutex_init(&ei->i_mutex);
 		mutex_init(&ei->i_parent_mutex);
 		inode_init_once(&ei->vfs_inode);
@@ -556,8 +556,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -213,8 +213,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct inode * inode = (struct inode *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(inode);
 }
 
@@ -77,8 +77,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
 	struct iso_inode_info *ei = foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -47,8 +47,7 @@ static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned l
 {
 	struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		init_MUTEX(&ei->sem);
 		inode_init_once(&ei->vfs_inode);
 	}
@@ -184,8 +184,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct metapage *mp = (struct metapage *)foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		mp->lid = 0;
 		mp->lsn = 0;
 		mp->flag = 0;
@@ -752,8 +752,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
 	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
 		INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
 		init_rwsem(&jfs_ip->rdwrlock);
@@ -203,8 +203,7 @@ static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
 {
 	struct file_lock *lock = (struct file_lock *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (!(flags & SLAB_CTOR_CONSTRUCTOR))
 		return;
 
 	locks_init_lock(lock);
@@ -73,8 +73,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct minix_inode_info *ei = (struct minix_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -60,8 +60,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		mutex_init(&ei->open_mutex);
 		inode_init_once(&ei->vfs_inode);
 	}
@@ -1167,8 +1167,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&nfsi->vfs_inode);
 		spin_lock_init(&nfsi->req_lock);
 		INIT_LIST_HEAD(&nfsi->dirty);
@@ -3085,8 +3085,7 @@ static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep,
 {
 	ntfs_inode *ni = (ntfs_inode *)foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(VFS_I(ni));
 }
 
@@ -263,8 +263,7 @@ static void dlmfs_init_once(void *foo,
 	struct dlmfs_inode_private *ip =
 		(struct dlmfs_inode_private *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		ip->ip_dlm = NULL;
 		ip->ip_parent = NULL;
 
@@ -937,8 +937,7 @@ static void ocfs2_inode_init_once(void *data,
 {
 	struct ocfs2_inode_info *oi = data;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		oi->ip_flags = 0;
 		oi->ip_open_count = 0;
 		spin_lock_init(&oi->ip_lock);
@@ -419,8 +419,7 @@ static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned
 {
 	struct op_inode_info *oi = (struct op_inode_info *) data;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&oi->vfs_inode);
 }
 
@@ -109,8 +109,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct proc_inode *ei = (struct proc_inode *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -536,8 +536,7 @@ static void init_once(void *foo, struct kmem_cache * cachep,
 {
 	struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -511,8 +511,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
 	struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		INIT_LIST_HEAD(&ei->i_prealloc_list);
 		inode_init_once(&ei->vfs_inode);
 #ifdef CONFIG_REISERFS_FS_POSIX_ACL
@@ -570,8 +570,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -69,9 +69,8 @@ static void smb_destroy_inode(struct inode *inode)
 static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct smb_inode_info *ei = (struct smb_inode_info *) foo;
-	unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR;
 
-	if ((flags & flagmask) == SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -322,8 +322,7 @@ static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct sysv_inode_info *si = (struct sysv_inode_info *)p;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&si->vfs_inode);
 }
 
@@ -134,9 +134,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct udf_inode_info *ei = (struct udf_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
-	{
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		ei->i_ext.i_data = NULL;
 		inode_init_once(&ei->vfs_inode);
 	}
@@ -1237,8 +1237,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -360,8 +360,7 @@ xfs_fs_inode_init_once(
 	kmem_zone_t	*zonep,
 	unsigned long	flags)
 {
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
 }
 
@@ -21,7 +21,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
-#define SLAB_DEBUG_INITIAL	0x00000200UL	/* DEBUG: Call constructor (as verifier) */
 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
@@ -36,7 +35,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
 /* Flags passed to a constructor functions */
 #define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
 #define SLAB_CTOR_ATOMIC	0x002UL		/* Tell constructor it can't sleep */
-#define SLAB_CTOR_VERIFY	0x004UL		/* Tell constructor it's a verify call */
 
 /*
  * struct kmem_cache related prototypes
@@ -215,8 +215,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
 	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&p->vfs_inode);
 }
 
@@ -1425,8 +1425,7 @@ static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long fl
 {
 	struct sighand_struct *sighand = data;
 
-	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		spin_lock_init(&sighand->siglock);
 }
 
@@ -162,8 +162,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		struct anon_vma *anon_vma = data;
 
 		spin_lock_init(&anon_vma->lock);
@@ -2358,8 +2358,7 @@ static void init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
 		p->i_acl = NULL;
mm/slab.c | 20
@@ -116,8 +116,7 @@
 #include <asm/page.h>
 
 /*
- * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
- *		  SLAB_RED_ZONE & SLAB_POISON.
+ * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *		  0 for faster, smaller code (especially in the critical paths).
  *
  * STATS	- 1 to collect stats for /proc/slabinfo.
@@ -172,7 +171,7 @@
 
 /* Legal flag mask for kmem_cache_create(). */
 #if DEBUG
-# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
+# define CREATE_MASK	(SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_STORE_USER | \
@@ -2184,12 +2183,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 #if DEBUG
 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
-	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
-		/* No constructor, but inital state check requested */
-		printk(KERN_ERR "%s: No con, but init state check "
-		       "requested - %s\n", __FUNCTION__, name);
-		flags &= ~SLAB_DEBUG_INITIAL;
-	}
 #if FORCED_DEBUG
 	/*
	 * Enable redzoning and last user accounting, except for caches with
@@ -2895,15 +2888,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-	if (cachep->flags & SLAB_DEBUG_INITIAL) {
-		/*
-		 * Need to call the slab's constructor so the caller can
-		 * perform a verify of its state (debugging).  Called without
-		 * the cache-lock held.
-		 */
-		cachep->ctor(objp + obj_offset(cachep),
-			     cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
-	}
 	if (cachep->flags & SLAB_POISON && cachep->dtor) {
 		/* we want to cache poison the object,
		 * call the destruction callback
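The hunk above removes the only place SLAB_DEBUG_INITIAL ever took effect: on every free with the flag set, SLAB re-ran the constructor in verify mode (SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY). The commit message's suggested replacement is to put such a check next to the code that owns the object. A hedged sketch of what that could look like, reusing the hypothetical foo_inode_info cache from the sketch near the top of this page (foo_cachep and foo_free_inode are made up, not part of the patch):

/* Hypothetical call-site debug check in place of SLAB_DEBUG_INITIAL. */
static struct kmem_cache *foo_cachep;

static void foo_free_inode(struct foo_inode_info *ei)
{
	/* Verify the object looks "constructed" again before it goes back
	 * to the slab, instead of relying on a ctor verify callback.
	 * inode_init_once() left i_dentry as an empty list head. */
	WARN_ON(!list_empty(&ei->vfs_inode.i_dentry));
	kmem_cache_free(foo_cachep, ei);
}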
mm/slub.c | 10
@@ -97,9 +97,6 @@
  *
  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
  *
- * - SLAB_DEBUG_INITIAL is not supported but I have never seen a use of
- *   it.
- *
  * - Variable sizing of the per node arrays
  */
 
@@ -125,11 +122,6 @@
 
 #endif
 
-/*
- * Flags from the regular SLAB that SLUB does not support:
- */
-#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL)
-
 /*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -1748,8 +1740,6 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	s->flags = flags;
 	s->align = align;
 
-	BUG_ON(flags & SLUB_UNIMPLEMENTED);
-
 	/*
	 * The page->offset field is only 16 bit wide. This is an offset
	 * in units of words from the beginning of an object. If the slab
@@ -261,8 +261,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct socket_alloc *ei = (struct socket_alloc *)foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR))
-	    == SLAB_CTOR_CONSTRUCTOR)
+	if (flags & SLAB_CTOR_CONSTRUCTOR)
 		inode_init_once(&ei->vfs_inode);
 }
 
@@ -828,8 +828,7 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&rpci->vfs_inode);
 		rpci->private = NULL;
 		rpci->nreaders = 0;