mirror of https://github.com/torvalds/linux.git
commit e984fd486f

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  Define/reserve new ext4 superblock fields
  When ext4_ext_insert_extent() fails to insert new blocks
  ext4: Extent overlap bugfix
  Remove unnecessary exported symbols.
  EXT4: Fix whitespace
@@ -30,15 +30,15 @@
 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
 {
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;

 }

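For readers unfamiliar with the kernel's do_div() idiom used in the hunk above: it divides a 64-bit value in place and returns the remainder, which is how a filesystem block number is split into a (group, offset) pair. A minimal userspace sketch of the same computation, with hypothetical geometry constants (the real values come from the superblock):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical geometry; in ext4 these come from the on-disk superblock. */
#define FIRST_DATA_BLOCK   1ULL
#define BLOCKS_PER_GROUP   32768ULL

int main(void)
{
        uint64_t blocknr = 100000;      /* filesystem-relative block number */

        /* do_div(n, base) divides n in place and returns n % base;
         * plain / and % express the same computation in userspace. */
        blocknr -= FIRST_DATA_BLOCK;
        uint64_t group  = blocknr / BLOCKS_PER_GROUP;
        uint32_t offset = (uint32_t)(blocknr % BLOCKS_PER_GROUP);

        printf("group %llu, offset %u\n", (unsigned long long)group, offset);
        return 0;
}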
@@ -374,7 +374,7 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
                                le32_to_cpu(ix[-1].ei_block));
                }
                BUG_ON(k && le32_to_cpu(ix->ei_block)
                                <= le32_to_cpu(ix[-1].ei_block));
                if (block < le32_to_cpu(ix->ei_block))
                        break;
                chix = ix;
@@ -423,8 +423,8 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext_pblock(path->p_ext),
                        le16_to_cpu(path->p_ext->ee_len));

 #ifdef CHECK_BINSEARCH
@@ -435,7 +435,7 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                        <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
@@ -577,7 +577,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
        curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

        BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
                        > le16_to_cpu(curp->p_hdr->eh_max));
        BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

        err = ext4_ext_dirty(handle, inode, curp);
@@ -621,12 +621,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
@@ -684,9 +684,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                while (path[depth].p_ext <=
                                EXT_MAX_EXTENT(path[depth].p_hdr)) {
                        ext_debug("move %d:%llu:%d in new leaf %llu\n",
                                        le32_to_cpu(path[depth].p_ext->ee_block),
                                        ext_pblock(path[depth].p_ext),
                                        le16_to_cpu(path[depth].p_ext->ee_len),
                                        newblock);
                        /*memmove(ex++, path[depth].p_ext++,
                                        sizeof(struct ext4_extent));
@@ -765,9 +765,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                                EXT_LAST_INDEX(path[i].p_hdr));
                while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
                        ext_debug("%d: move %d:%d in new index %llu\n", i,
                                        le32_to_cpu(path[i].p_idx->ei_block),
                                        idx_pblock(path[i].p_idx),
                                        newblock);
                        /*memmove(++fidx, path[i].p_idx++,
                                        sizeof(struct ext4_extent_idx));
                        neh->eh_entries++;
@@ -1127,6 +1127,55 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
        return 0;
 }

+/*
+ * check if a portion of the "newext" extent overlaps with an
+ * existing extent.
+ *
+ * If there is an overlap discovered, it updates the length of the newext
+ * such that there will be no overlap, and then returns 1.
+ * If there is no overlap found, it returns 0.
+ */
+unsigned int ext4_ext_check_overlap(struct inode *inode,
+                                    struct ext4_extent *newext,
+                                    struct ext4_ext_path *path)
+{
+        unsigned long b1, b2;
+        unsigned int depth, len1;
+        unsigned int ret = 0;
+
+        b1 = le32_to_cpu(newext->ee_block);
+        len1 = le16_to_cpu(newext->ee_len);
+        depth = ext_depth(inode);
+        if (!path[depth].p_ext)
+                goto out;
+        b2 = le32_to_cpu(path[depth].p_ext->ee_block);
+
+        /*
+         * get the next allocated block if the extent in the path
+         * is before the requested block(s)
+         */
+        if (b2 < b1) {
+                b2 = ext4_ext_next_allocated_block(path);
+                if (b2 == EXT_MAX_BLOCK)
+                        goto out;
+        }
+
+        /* check for wrap through zero */
+        if (b1 + len1 < b1) {
+                len1 = EXT_MAX_BLOCK - b1;
+                newext->ee_len = cpu_to_le16(len1);
+                ret = 1;
+        }
+
+        /* check for overlap */
+        if (b1 + len1 > b2) {
+                newext->ee_len = cpu_to_le16(b2 - b1);
+                ret = 1;
+        }
+out:
+        return ret;
+}
+
 /*
  * ext4_ext_insert_extent:
  * tries to merge requsted extent into the existing extent or
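The new helper clamps newext->ee_len so the pending insert can never collide with an already-mapped extent: one clamp handles arithmetic wrap past the maximum block number, the other shortens the request to stop just before the next allocated block. A minimal sketch of the same arithmetic in standalone C (hypothetical names and constants, not the kernel API):

#include <stdio.h>

#define MAX_BLOCK 0xffffffffUL  /* stand-in for EXT_MAX_BLOCK */

/* Clamp len so [start, start+len) neither wraps past MAX_BLOCK nor
 * reaches next_alloc, the first already-allocated block at or after
 * start. Returns 1 if len had to be shortened, 0 otherwise. */
static int check_overlap(unsigned long start, unsigned long *len,
                         unsigned long next_alloc)
{
        int ret = 0;

        if (start + *len < start) {             /* wrap through zero */
                *len = MAX_BLOCK - start;
                ret = 1;
        }
        if (start + *len > next_alloc) {        /* would hit existing extent */
                *len = next_alloc - start;
                ret = 1;
        }
        return ret;
}

int main(void)
{
        unsigned long len = 100;
        /* request 100 blocks at 1000, but block 1050 is already mapped */
        int clamped = check_overlap(1000, &len, 1050);
        printf("clamped=%d len=%lu\n", clamped, len);   /* clamped=1 len=50 */
        return 0;
}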
@@ -1212,12 +1261,12 @@ has_space:
        if (!nearex) {
                /* there is no extent in this leaf, create first one */
                ext_debug("first extent in the leaf: %d:%llu:%d\n",
                                le32_to_cpu(newext->ee_block),
                                ext_pblock(newext),
                                le16_to_cpu(newext->ee_len));
                path[depth].p_ext = EXT_FIRST_EXTENT(eh);
        } else if (le32_to_cpu(newext->ee_block)
                        > le32_to_cpu(nearex->ee_block)) {
                /* BUG_ON(newext->ee_block == nearex->ee_block); */
                if (nearex != EXT_LAST_EXTENT(eh)) {
                        len = EXT_MAX_EXTENT(eh) - nearex;
@@ -1225,9 +1274,9 @@ has_space:
                        len = len < 0 ? 0 : len;
                        ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
                                        "move %d from 0x%p to 0x%p\n",
                                        le32_to_cpu(newext->ee_block),
                                        ext_pblock(newext),
                                        le16_to_cpu(newext->ee_len),
                                        nearex, len, nearex + 1, nearex + 2);
                        memmove(nearex + 2, nearex + 1, len);
                }
@@ -1358,9 +1407,9 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
                        cbex.ec_start = 0;
                        cbex.ec_type = EXT4_EXT_CACHE_GAP;
                } else {
                        cbex.ec_block = le32_to_cpu(ex->ee_block);
                        cbex.ec_len = le16_to_cpu(ex->ee_len);
                        cbex.ec_start = ext_pblock(ex);
                        cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
                }

@@ -1431,16 +1480,16 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
                len = le32_to_cpu(ex->ee_block) - block;
                ext_debug("cache gap(before): %lu [%lu:%lu]",
                                (unsigned long) block,
                                (unsigned long) le32_to_cpu(ex->ee_block),
                                (unsigned long) le16_to_cpu(ex->ee_len));
        } else if (block >= le32_to_cpu(ex->ee_block)
                        + le16_to_cpu(ex->ee_len)) {
                lblock = le32_to_cpu(ex->ee_block)
                        + le16_to_cpu(ex->ee_len);
                len = ext4_ext_next_allocated_block(path);
                ext_debug("cache gap(after): [%lu:%lu] %lu",
                                (unsigned long) le32_to_cpu(ex->ee_block),
                                (unsigned long) le16_to_cpu(ex->ee_len),
                                (unsigned long) block);
                BUG_ON(len == lblock);
                len = len - lblock;
@@ -1468,9 +1517,9 @@ ext4_ext_in_cache(struct inode *inode, unsigned long block,
        BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
                        cex->ec_type != EXT4_EXT_CACHE_EXTENT);
        if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
                ex->ee_block = cpu_to_le32(cex->ec_block);
                ext4_ext_store_pblock(ex, cex->ec_start);
                ex->ee_len = cpu_to_le16(cex->ec_len);
                ext_debug("%lu cached by %lu:%lu:%llu\n",
                                (unsigned long) block,
                                (unsigned long) cex->ec_block,
@@ -1956,9 +2005,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                        /* we should allocate requested block */
                } else if (goal == EXT4_EXT_CACHE_EXTENT) {
                        /* block is already allocated */
                        newblock = iblock
                                   - le32_to_cpu(newex.ee_block)
                                   + ext_pblock(&newex);
                        /* number of remaining blocks in the extent */
                        allocated = le16_to_cpu(newex.ee_len) -
                                        (iblock - le32_to_cpu(newex.ee_block));
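The mapping in this hunk is plain offset arithmetic: a logical block inside a cached extent maps to the extent's physical start plus the offset into the extent, and "allocated" is how many blocks of the extent remain from that point. An illustrative standalone sketch with hypothetical values:

#include <stdio.h>

int main(void)
{
        /* Hypothetical cached extent: logical 100..199 -> physical 5000.. */
        unsigned long ee_block = 100;   /* first logical block of extent */
        unsigned long ee_start = 5000;  /* first physical block of extent */
        unsigned long ee_len   = 100;   /* blocks in the extent */

        unsigned long iblock = 130;     /* requested logical block */

        if (iblock >= ee_block && iblock < ee_block + ee_len) {
                unsigned long newblock  = iblock - ee_block + ee_start;
                unsigned long allocated = ee_len - (iblock - ee_block);
                /* prints: physical 5030, 70 blocks remain */
                printf("physical %lu, %lu blocks remain\n",
                       newblock, allocated);
        }
        return 0;
}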
@@ -1987,7 +2036,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,

        ex = path[depth].p_ext;
        if (ex) {
                unsigned long ee_block = le32_to_cpu(ex->ee_block);
                ext4_fsblk_t ee_start = ext_pblock(ex);
                unsigned short ee_len  = le16_to_cpu(ex->ee_len);

@@ -2000,7 +2049,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                if (ee_len > EXT_MAX_LEN)
                        goto out2;
                /* if found extent covers block, simply return it */
                if (iblock >= ee_block && iblock < ee_block + ee_len) {
                        newblock = iblock - ee_block + ee_start;
                        /* number of remaining blocks in the extent */
                        allocated = ee_len - (iblock - ee_block);
@@ -2031,7 +2080,15 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,

        /* allocate new block */
        goal = ext4_ext_find_goal(inode, path, iblock);
-       allocated = max_blocks;
+
+       /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
+       newex.ee_block = cpu_to_le32(iblock);
+       newex.ee_len = cpu_to_le16(max_blocks);
+       err = ext4_ext_check_overlap(inode, &newex, path);
+       if (err)
+               allocated = le16_to_cpu(newex.ee_len);
+       else
+               allocated = max_blocks;
        newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
        if (!newblock)
                goto out2;
@@ -2039,12 +2096,15 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                        goal, newblock, allocated);

        /* try to insert new extent into found leaf and return */
-       newex.ee_block = cpu_to_le32(iblock);
        ext4_ext_store_pblock(&newex, newblock);
        newex.ee_len = cpu_to_le16(allocated);
        err = ext4_ext_insert_extent(handle, inode, path, &newex);
-       if (err)
+       if (err) {
+               /* free data blocks we just allocated */
+               ext4_free_blocks(handle, inode, ext_pblock(&newex),
+                               le16_to_cpu(newex.ee_len));
                goto out2;
+       }

        if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
                EXT4_I(inode)->i_disksize = inode->i_size;
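The new failure path here undoes the block allocation when the tree insert fails, so the blocks are not left marked in use but unreachable. The shape is the familiar allocate/attempt/undo pattern; a hedged sketch with stand-in functions (not the kernel API):

#include <stdio.h>

/* Stand-ins for ext4_new_blocks / ext4_ext_insert_extent /
 * ext4_free_blocks; a real caller would pass a handle and inode. */
static long alloc_blocks(unsigned long n)
{
        printf("allocated %lu blocks\n", n);
        return 5000;                    /* pretend physical start */
}
static int insert_extent(long start, unsigned long n)
{
        (void)start; (void)n;
        return -1;                      /* pretend the insert failed */
}
static void free_blocks(long start, unsigned long n)
{
        printf("freed %lu blocks at %ld\n", n, start);
}

int main(void)
{
        unsigned long count = 8;
        long start = alloc_blocks(count);

        if (insert_extent(start, count) != 0) {
                /* insert failed: give the just-allocated blocks back */
                free_blocks(start, count);
                return 1;
        }
        return 0;
}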
@@ -2157,11 +2217,3 @@ int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)

        return needed;
 }
-
-EXPORT_SYMBOL(ext4_mark_inode_dirty);
-EXPORT_SYMBOL(ext4_ext_invalidate_cache);
-EXPORT_SYMBOL(ext4_ext_insert_extent);
-EXPORT_SYMBOL(ext4_ext_walk_space);
-EXPORT_SYMBOL(ext4_ext_find_goal);
-EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
-
@@ -255,8 +255,8 @@ static int verify_chain(Indirect *from, Indirect *to)
 *      @inode: inode in question (we are only interested in its superblock)
 *      @i_block: block number to be parsed
 *      @offsets: array to store the offsets in
 *      @boundary: set this non-zero if the referred-to block is likely to be
 *            followed (on disk) by an indirect block.
 *
 *      To store the locations of file's data ext4 uses a data structure common
 *      for UNIX filesystems - tree of pointers anchored in the inode, with
@@ -46,7 +46,7 @@
 */
 #define NAMEI_RA_CHUNKS  2
 #define NAMEI_RA_BLOCKS  4
 #define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
 #define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))

 static struct buffer_head *ext4_append(handle_t *handle,
@@ -241,7 +241,7 @@ static inline unsigned dx_node_limit (struct inode *dir)
 static void dx_show_index (char * label, struct dx_entry *entries)
 {
        int i, n = dx_get_count (entries);
        printk("%s index ", label);
        for (i = 0; i < n; i++) {
                printk("%x->%u ", i? dx_get_hash(entries + i) :
                                0, dx_get_block(entries + i));
@@ -1985,7 +1985,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,

        if (bd_claim(bdev, sb)) {
                printk(KERN_ERR
                        "EXT4: failed to claim external journal device.\n");
                blkdev_put(bdev);
                return NULL;
        }
@@ -32,9 +32,9 @@
 /*
  * Define EXT4_RESERVATION to reserve data blocks for expanding files
  */
 #define EXT4_DEFAULT_RESERVE_BLOCKS     8
 /*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
 #define EXT4_MAX_RESERVE_BLOCKS         1027
 #define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
 /*
  * Always enable hashed directories
@@ -204,12 +204,12 @@ struct ext4_group_desc

 /* Used to pass group descriptor data when online resize is done */
 struct ext4_new_group_input {
        __u32 group;            /* Group number for this data */
        __u64 block_bitmap;     /* Absolute block number of block bitmap */
        __u64 inode_bitmap;     /* Absolute block number of inode bitmap */
        __u64 inode_table;      /* Absolute block number of inode table start */
        __u32 blocks_count;     /* Total number of blocks in this group */
        __u16 reserved_blocks;  /* Number of reserved blocks in this group */
        __u16 unused;
 };

@@ -310,7 +310,7 @@ struct ext4_inode {
                        __u8    l_i_frag;       /* Fragment number */
                        __u8    l_i_fsize;      /* Fragment size */
                        __le16  l_i_file_acl_high;
                        __le16  l_i_uid_high;   /* these 2 fields */
                        __le16  l_i_gid_high;   /* were reserved2[0] */
                        __u32   l_i_reserved2;
                } linux2;
@@ -513,7 +513,14 @@ struct ext4_super_block {
 /*150*/        __le32  s_blocks_count_hi;      /* Blocks count */
        __le32  s_r_blocks_count_hi;    /* Reserved blocks count */
        __le32  s_free_blocks_count_hi; /* Free blocks count */
-       __u32   s_reserved[169];        /* Padding to the end of the block */
+       __u16   s_min_extra_isize;      /* All inodes have at least # bytes */
+       __u16   s_want_extra_isize;     /* New inodes should reserve # bytes */
+       __u32   s_flags;                /* Miscellaneous flags */
+       __u16   s_raid_stride;          /* RAID stride */
+       __u16   s_mmp_interval;         /* # seconds to wait in MMP checking */
+       __u64   s_mmp_block;            /* Block for multi-mount protection */
+       __u32   s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
+       __u32   s_reserved[163];        /* Padding to the end of the block */
 };

 #ifdef __KERNEL__
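The new fields carve exactly six words out of the reserved padding, leaving the on-disk superblock layout unchanged in size: the added fields take 2+2+4+2+2+8+4 = 24 bytes, and s_reserved shrinks from 169 to 163 32-bit words, also 24 bytes. A compile-time check of that arithmetic (standalone C11 sketch with stand-in typedefs, not the kernel headers):

#include <stdint.h>

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

/* Bytes occupied by the fields that replaced part of s_reserved[]. */
enum {
        NEW_FIELD_BYTES = sizeof(u16)   /* s_min_extra_isize */
                        + sizeof(u16)   /* s_want_extra_isize */
                        + sizeof(u32)   /* s_flags */
                        + sizeof(u16)   /* s_raid_stride */
                        + sizeof(u16)   /* s_mmp_interval */
                        + sizeof(u64)   /* s_mmp_block */
                        + sizeof(u32)   /* s_raid_stripe_width */
};

/* 169 reserved words before == new fields + 163 reserved words after. */
_Static_assert(169 * sizeof(u32) == NEW_FIELD_BYTES + 163 * sizeof(u32),
               "reserved padding must shrink by exactly the added bytes");

int main(void) { return 0; }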
@@ -780,9 +787,9 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
 * Ok, these declarations are also in <linux/kernel.h> but none of the
 * ext4 source programs needs to include it so they are duplicated here.
 */
 # define NORET_TYPE     /**/
 # define ATTRIB_NORET   __attribute__((noreturn))
 # define NORET_AND      noreturn,

 /* balloc.c */
 extern unsigned int ext4_block_group(struct super_block *sb,
@@ -151,8 +151,8 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
        ((struct ext4_extent_idx *) (((char *) (__hdr__)) +    \
                                     sizeof(struct ext4_extent_header)))
 #define EXT_HAS_FREE_INDEX(__path__) \
        (le16_to_cpu((__path__)->p_hdr->eh_entries) \
                                     < le16_to_cpu((__path__)->p_hdr->eh_max))
 #define EXT_LAST_EXTENT(__hdr__) \
        (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
 #define EXT_LAST_INDEX(__hdr__) \
@@ -190,6 +190,7 @@ ext4_ext_invalidate_cache(struct inode *inode)

 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *);
+extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
 extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *);
 extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *);
 extern struct ext4_ext_path * ext4_ext_find_extent(struct inode *, int, struct ext4_ext_path *);
@@ -41,14 +41,14 @@ struct ext4_reserve_window_node {

 struct ext4_block_alloc_info {
        /* information about reservation window */
        struct ext4_reserve_window_node rsv_window_node;
        /*
         * was i_next_alloc_block in ext4_inode_info
         * is the logical (file-relative) number of the
         * most-recently-allocated block in this file.
         * We use this for detecting linearly ascending allocation requests.
         */
        __u32 last_alloc_logical_block;
        /*
         * Was i_next_alloc_goal in ext4_inode_info
         * is the *physical* companion to i_next_alloc_block.
@@ -56,7 +56,7 @@ struct ext4_block_alloc_info {
         * allocated to this file. This give us the goal (target) for the next
         * allocation when we detect linearly ascending requests.
         */
        ext4_fsblk_t last_alloc_physical_block;
 };

 #define rsv_start rsv_window._rsv_start