ext4: move ext4_mb_init_group() function earlier in the mballoc.c

This moves the function around so that it can be called from
ext4_mb_load_buddy().

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Aneesh Kumar K.V 2009-09-09 23:47:46 -04:00 committed by Theodore Ts'o
parent 91ac6f4331
commit b6a758ec3a
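For context, the commit message says the helper is moved so it can be called from ext4_mb_load_buddy(). A minimal sketch of that intended call site is shown below; the wrapper name mb_load_buddy_sketch and the exact condition and error handling are assumptions for illustration, not part of this diff:

/*
 * Sketch only: hypothetical shape of the future call into the moved
 * helper from ext4_mb_load_buddy(). Not part of this commit.
 */
static int mb_load_buddy_sketch(struct super_block *sb, ext4_group_t group)
{
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        int ret;

        if (EXT4_MB_GRP_NEED_INIT(grp)) {
                /* build the bitmap and buddy pages before they are used */
                ret = ext4_mb_init_group(sb, group);
                if (ret)
                        return ret;
        }
        /* ... then pin the buddy-cache pages as ext4_mb_load_buddy() does ... */
        return 0;
}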

@@ -910,6 +910,97 @@ out:
	return err;
}

static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{
	int ret = 0;
	void *bitmap;
	int blocks_per_page;
	int block, pnum, poff;
	int num_grp_locked = 0;
	struct ext4_group_info *this_grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;
	struct page *page = NULL, *bitmap_page = NULL;

	mb_debug(1, "init group %u\n", group);
	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures we don't add group
	 * to this buddy cache via resize
	 */
	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group);
	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		ret = 0;
		goto err;
	}
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (page) {
		BUG_ON(page->mapping != inode->i_mapping);
		ret = ext4_mb_init_cache(page, NULL);
		if (ret) {
			unlock_page(page);
			goto err;
		}
		unlock_page(page);
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
	bitmap_page = page;
	bitmap = page_address(page) + (poff * sb->s_blocksize);

	/* init buddy cache */
	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (page == bitmap_page) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		unlock_page(page);
	} else if (page) {
		BUG_ON(page->mapping != inode->i_mapping);
		ret = ext4_mb_init_cache(page, bitmap);
		if (ret) {
			unlock_page(page);
			goto err;
		}
		unlock_page(page);
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
err:
	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
	if (bitmap_page)
		page_cache_release(bitmap_page);
	if (page)
		page_cache_release(page);
	return ret;
}

static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
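To make the index arithmetic in the added function concrete: the buddy-cache inode stores two consecutive block-sized slots per group (block bitmap first, buddy data second), so the group number determines both the page of the inode and the slot within that page. The worked example below assumes a 4 KiB PAGE_CACHE_SIZE and a 1 KiB s_blocksize (so blocks_per_page is 4); the numbers and the function name buddy_cache_index_example are illustrative only:

/* Illustrative arithmetic only; assumes 4 KiB pages and 1 KiB blocks. */
static void buddy_cache_index_example(void)
{
        int blocks_per_page = 4096 / 1024;      /* 4 block-sized slots per page  */
        int block = 5 * 2;                      /* group 5 -> bitmap in block 10 */
        int pnum = block / blocks_per_page;     /* 10 / 4 = 2: third page        */
        int poff = block % blocks_per_page;     /* 10 % 4 = 2: third slot        */

        /*
         * The buddy block is block + 1 = 11 -> pnum = 2, poff = 3: it lands in
         * the same page, which is why the code compares page against
         * bitmap_page before forcing a second ext4_mb_init_cache() call.
         */
        (void)pnum;
        (void)poff;
}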
@@ -1839,97 +1930,6 @@ void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
}

static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{
	int ret;
	void *bitmap;
	int blocks_per_page;
	int block, pnum, poff;
	int num_grp_locked = 0;
	struct ext4_group_info *this_grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;
	struct page *page = NULL, *bitmap_page = NULL;

	mb_debug(1, "init group %u\n", group);
	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures we don't add group
	 * to this buddy cache via resize
	 */
	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group);
	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		ret = 0;
		goto err;
	}
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (page) {
		BUG_ON(page->mapping != inode->i_mapping);
		ret = ext4_mb_init_cache(page, NULL);
		if (ret) {
			unlock_page(page);
			goto err;
		}
		unlock_page(page);
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
	bitmap_page = page;
	bitmap = page_address(page) + (poff * sb->s_blocksize);

	/* init buddy cache */
	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (page == bitmap_page) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		unlock_page(page);
	} else if (page) {
		BUG_ON(page->mapping != inode->i_mapping);
		ret = ext4_mb_init_cache(page, bitmap);
		if (ret) {
			unlock_page(page);
			goto err;
		}
		unlock_page(page);
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
err:
	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
	if (bitmap_page)
		page_cache_release(bitmap_page);
	if (page)
		page_cache_release(page);
	return ret;
}

static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
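Beyond the err: unwind, the concurrency shape of ext4_mb_init_group() is worth noting: it takes the per-group buddy-cache lock first and only then re-checks EXT4_MB_GRP_NEED_INIT(), so a group already initialized by a racing thread (or touched by resize) is left alone, and the lock is always dropped on the way out. A generic, self-contained sketch of that check-under-lock pattern follows; the names (cache_slot, slot_init_once) are hypothetical and not ext4 API:

#include <pthread.h>

struct cache_slot {
        pthread_mutex_t lock;
        int initialized;
};

/* Check-under-lock sketch mirroring the EXT4_MB_GRP_NEED_INIT re-check. */
static int slot_init_once(struct cache_slot *s,
                          int (*expensive_init)(struct cache_slot *))
{
        int ret = 0;

        pthread_mutex_lock(&s->lock);   /* like ext4_mb_get_buddy_cache_lock() */
        if (s->initialized)             /* somebody initialized it already     */
                goto out;
        ret = expensive_init(s);        /* only the winner does the real work  */
        if (ret == 0)
                s->initialized = 1;
out:
        pthread_mutex_unlock(&s->lock); /* like ext4_mb_put_buddy_cache_lock() */
        return ret;
}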