Mirror of https://github.com/torvalds/linux.git
Miscellaneous ext4 bug fixes (all stable fodder)
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEK2m5VNv+CHkogTfJ8vlZVpUNgaMFAl5IrGgACgkQ8vlZVpUN
gaMi2Qf+NuXFQ6xpxjw90ZmkOXbsqbFVAIx3yjzW5WKIVyHrhG19P8NuNaCbUajP
WEUzrY/LMmcODd46zKcCR61jOygKCJ5NXyF8qTEFltiMuqugvwmTv49ofRxX+Zlf
YkavQGo+cPs5p1xzo+bKqhA29zhc4PApd3bGpjgUqDlfQxBmm2vNpkbXKj/Bk6CL
k9c1EodygpTO45Kb3Y/JvPcbTLOOu0hPnrCemtb1Vc9tm0j6I+g5RtQofUag+1BX
FOR5Z2EUwByb7F+TTxO1Jse0oFjcxH9J/VqeKi2Fbzucdq3NrRypiFj0XPFBRPFn
g7ONbaoIGiK+02pCNFuwbY0WmLQVfg==
=T3j1
-----END PGP SIGNATURE-----

Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
 "Miscellaneous ext4 bug fixes (all stable fodder)"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: improve explanation of a mount failure caused by a misconfigured kernel
  jbd2: do not clear the BH_Mapped flag when forgetting a metadata buffer
  jbd2: move the clearing of b_modified flag to the journal_unmap_buffer()
  ext4: add cond_resched() to ext4_protect_reserved_inode
  ext4: fix checksum errors with indexed dirs
  ext4: fix support for inode sizes > 1024 bytes
  ext4: simplify checking quota limits in ext4_statfs()
  ext4: don't assume that mmp_nodename/bdevname have NUL
commit 8a8b80967b
fs/ext4/block_validity.c
@@ -207,6 +207,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
 		return PTR_ERR(inode);
 	num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
 	while (i < num) {
+		cond_resched();
 		map.m_lblk = i;
 		map.m_len = num - i;
 		n = ext4_map_blocks(NULL, inode, &map, 0);
fs/ext4/dir.c
@@ -129,12 +129,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 		if (err != ERR_BAD_DX_DIR) {
 			return err;
 		}
-		/*
-		 * We don't set the inode dirty flag since it's not
-		 * critical that it get flushed back to the disk.
-		 */
-		ext4_clear_inode_flag(file_inode(file),
-				      EXT4_INODE_INDEX);
+		/* Can we just clear INDEX flag to ignore htree information? */
+		if (!ext4_has_metadata_csum(sb)) {
+			/*
+			 * We don't set the inode dirty flag since it's not
+			 * critical that it gets flushed back to the disk.
+			 */
+			ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+		}
 	}
 
 	if (ext4_has_inline_data(inode)) {
fs/ext4/ext4.h
@@ -2544,8 +2544,11 @@ void ext4_insert_dentry(struct inode *inode,
 			 struct ext4_filename *fname);
 static inline void ext4_update_dx_flag(struct inode *inode)
 {
-	if (!ext4_has_feature_dir_index(inode->i_sb))
+	if (!ext4_has_feature_dir_index(inode->i_sb)) {
+		/* ext4_iget() should have caught this... */
+		WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
 		ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+	}
 }
 static const unsigned char ext4_filetype_table[] = {
 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
fs/ext4/inode.c
@@ -4644,6 +4644,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
 		ret = -EFSCORRUPTED;
 		goto bad_inode;
 	}
+	/*
+	 * If dir_index is not enabled but there's dir with INDEX flag set,
+	 * we'd normally treat htree data as empty space. But with metadata
+	 * checksumming that corrupts checksums so forbid that.
+	 */
+	if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
+	    ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
+		ext4_error_inode(inode, function, line, 0,
+			 "iget: Dir with htree data on filesystem without dir_index feature.");
+		ret = -EFSCORRUPTED;
+		goto bad_inode;
+	}
 	ei->i_disksize = inode->i_size;
 #ifdef CONFIG_QUOTA
 	ei->i_reserved_quota = 0;
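The dir.c, ext4.h and inode.c hunks above (together with the ext4_add_entry hunk further down) enforce one rule: a directory may carry the INDEX flag only if the dir_index feature is enabled, or, when it is not, only if metadata checksums are off, because the htree blocks that would otherwise be skipped as empty space are covered by directory checksums. A minimal userspace sketch of that rule follows; the three boolean parameters are hypothetical stand-ins for ext4_has_feature_dir_index(), ext4_has_metadata_csum() and the per-inode EXT4_INODE_INDEX flag.

#include <stdbool.h>

/* Sketch only: decides whether an inode's INDEX flag is acceptable. */
static bool htree_flag_acceptable(bool has_dir_index,
				  bool has_metadata_csum,
				  bool inode_has_index)
{
	if (!inode_has_index)
		return true;		/* nothing to check */
	if (has_dir_index)
		return true;		/* htree data is expected */
	/*
	 * dir_index is off, so the htree blocks would be treated as empty
	 * directory space; that is only safe when no checksums cover them.
	 */
	return !has_metadata_csum;
}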
fs/ext4/mmp.c
@@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
 {
 	__ext4_warning(sb, function, line, "%s", msg);
 	__ext4_warning(sb, function, line,
-		       "MMP failure info: last update time: %llu, last update "
-		       "node: %s, last update device: %s",
-		       (long long unsigned int) le64_to_cpu(mmp->mmp_time),
-		       mmp->mmp_nodename, mmp->mmp_bdevname);
+		       "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
+		       (unsigned long long)le64_to_cpu(mmp->mmp_time),
+		       (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
+		       (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
 }
 
 /*
@@ -154,6 +154,7 @@ static int kmmpd(void *data)
 	mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
 				 EXT4_MMP_MIN_CHECK_INTERVAL);
 	mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+	BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
 	bdevname(bh->b_bdev, mmp->mmp_bdevname);
 
 	memcpy(mmp->mmp_nodename, init_utsname()->nodename,
@@ -379,7 +380,8 @@ skip:
 	/*
 	 * Start a kernel thread to update the MMP block periodically.
 	 */
-	EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
+	EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
+					     (int)sizeof(mmp->mmp_bdevname),
 					     bdevname(bh->b_bdev,
 						      mmp->mmp_bdevname));
 	if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
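All three mmp.c hunks make the same point: mmp_nodename and mmp_bdevname are fixed-size on-disk byte arrays that are not guaranteed to contain a terminating NUL, so they must not be fed to a plain "%s" conversion. The "%.*s" form prints at most the number of bytes given by the preceding int argument. A small self-contained C illustration of the idiom (the buffer and its contents are made up for the example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A fixed-size field filled to capacity, leaving no room for '\0'. */
	char nodename[8];
	memcpy(nodename, "node-one", sizeof(nodename));

	/*
	 * printf("%s", nodename) would keep reading past the array looking
	 * for a NUL byte; "%.*s" stops after at most sizeof(nodename) bytes.
	 */
	printf("last update node: %.*s\n", (int)sizeof(nodename), nodename);
	return 0;
}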
fs/ext4/namei.c
@@ -2213,6 +2213,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
 		retval = ext4_dx_add_entry(handle, &fname, dir, inode);
 		if (!retval || (retval != ERR_BAD_DX_DIR))
 			goto out;
+		/* Can we just ignore htree data? */
+		if (ext4_has_metadata_csum(sb)) {
+			EXT4_ERROR_INODE(dir,
+					 "Directory has corrupted htree index.");
+			retval = -EFSCORRUPTED;
+			goto out;
+		}
 		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
 		dx_fallback++;
 		ext4_mark_inode_dirty(handle, dir);
fs/ext4/super.c
@@ -3009,17 +3009,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
 		return 0;
 	}
 
-#ifndef CONFIG_QUOTA
-	if (ext4_has_feature_quota(sb) && !readonly) {
+#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
+	if (!readonly && (ext4_has_feature_quota(sb) ||
+			  ext4_has_feature_project(sb))) {
 		ext4_msg(sb, KERN_ERR,
-			 "Filesystem with quota feature cannot be mounted RDWR "
-			 "without CONFIG_QUOTA");
-		return 0;
-	}
-	if (ext4_has_feature_project(sb) && !readonly) {
-		ext4_msg(sb, KERN_ERR,
-			 "Filesystem with project quota feature cannot be mounted RDWR "
-			 "without CONFIG_QUOTA");
+			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
 		return 0;
 	}
 #endif /* CONFIG_QUOTA */
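The rewritten check folds the quota and project-quota cases into one test and now also insists on CONFIG_QFMT_V2, printing a message that names the missing config options instead of a generic complaint. A rough sketch of the same compile-time pattern, using hypothetical FEATURE_A/FEATURE_B macros in place of the real CONFIG_* symbols:

#include <stdio.h>

/* Hypothetical build options standing in for CONFIG_QUOTA / CONFIG_QFMT_V2. */
/* #define FEATURE_A */
/* #define FEATURE_B */

static int feature_set_ok(int readonly, int fs_uses_feature)
{
#if !defined(FEATURE_A) || !defined(FEATURE_B)
	if (!readonly && fs_uses_feature) {
		fprintf(stderr,
			"this binary was not built with FEATURE_A and FEATURE_B\n");
		return 0;
	}
#endif
	return 1;
}

int main(void)
{
	printf("read-write allowed: %d\n", feature_set_ok(0, 1));
	return 0;
}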
fs/ext4/super.c
@@ -3814,6 +3808,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	 */
 	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
 
+	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+	    blocksize > EXT4_MAX_BLOCK_SIZE) {
+		ext4_msg(sb, KERN_ERR,
+		       "Unsupported filesystem blocksize %d (%d log_block_size)",
+			 blocksize, le32_to_cpu(es->s_log_block_size));
+		goto failed_mount;
+	}
+
 	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
 		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
 		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
@@ -3831,6 +3834,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			ext4_msg(sb, KERN_ERR,
 			       "unsupported inode size: %d",
 			       sbi->s_inode_size);
+			ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
 			goto failed_mount;
 		}
 		/*
@@ -4033,14 +4037,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
 		goto failed_mount;
 
-	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
-	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
-	    blocksize > EXT4_MAX_BLOCK_SIZE) {
-		ext4_msg(sb, KERN_ERR,
-		       "Unsupported filesystem blocksize %d (%d log_block_size)",
-			 blocksize, le32_to_cpu(es->s_log_block_size));
-		goto failed_mount;
-	}
 	if (le32_to_cpu(es->s_log_block_size) >
 	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
 		ext4_msg(sb, KERN_ERR,
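These three ext4_fill_super hunks move the blocksize validation from its old spot (removed just above) to a point ahead of the s_rev_level branch that reads and checks s_inode_size, and they add the blocksize to the "unsupported inode size" error, so the inode-size check always runs against a blocksize that has already been validated. A simplified sketch of that ordering, with made-up field and limit names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_BLOCK_SIZE	1024U
#define MAX_BLOCK_SIZE	65536U
#define MIN_INODE_SIZE	128U

/* Simplified stand-ins for the on-disk superblock fields. */
struct sb_fields {
	uint32_t log_block_size;	/* blocksize == 1024 << log_block_size */
	uint16_t inode_size;
};

static bool check_super(const struct sb_fields *es)
{
	unsigned int blocksize;

	/* 1. Validate the blocksize first... */
	if (es->log_block_size > 6) {	/* 1024 << 6 == MAX_BLOCK_SIZE */
		fprintf(stderr, "unsupported log_block_size %u\n",
			es->log_block_size);
		return false;
	}
	blocksize = MIN_BLOCK_SIZE << es->log_block_size;

	/* 2. ...so later checks can safely compare against it. */
	if (es->inode_size < MIN_INODE_SIZE || es->inode_size > blocksize) {
		fprintf(stderr, "unsupported inode size: %u (blocksize: %u)\n",
			es->inode_size, blocksize);
		return false;
	}
	return true;
}

int main(void)
{
	struct sb_fields es = { .log_block_size = 2, .inode_size = 1024 };

	printf("superblock ok: %d\n", check_super(&es));	/* 4096-byte blocks */
	return 0;
}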
fs/ext4/super.c
@@ -5585,10 +5581,7 @@ static int ext4_statfs_project(struct super_block *sb,
 		return PTR_ERR(dquot);
 	spin_lock(&dquot->dq_dqb_lock);
 
-	limit = 0;
-	if (dquot->dq_dqb.dqb_bsoftlimit &&
-	    (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit))
-		limit = dquot->dq_dqb.dqb_bsoftlimit;
+	limit = dquot->dq_dqb.dqb_bsoftlimit;
 	if (dquot->dq_dqb.dqb_bhardlimit &&
 	    (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
 		limit = dquot->dq_dqb.dqb_bhardlimit;
@@ -5603,10 +5596,7 @@ static int ext4_statfs_project(struct super_block *sb,
 			 (buf->f_blocks - curblock) : 0;
 	}
 
-	limit = 0;
-	if (dquot->dq_dqb.dqb_isoftlimit &&
-	    (!limit || dquot->dq_dqb.dqb_isoftlimit < limit))
-		limit = dquot->dq_dqb.dqb_isoftlimit;
+	limit = dquot->dq_dqb.dqb_isoftlimit;
 	if (dquot->dq_dqb.dqb_ihardlimit &&
 	    (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
 		limit = dquot->dq_dqb.dqb_ihardlimit;
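Both ext4_statfs_project hunks replace the "start from 0, then take the smaller non-zero of soft and hard" dance with seeding limit from the soft limit directly; since a soft limit of 0 means "unset", the following hard-limit test still ends up with the smallest non-zero value. A tiny standalone sketch of the resulting rule:

#include <stdio.h>

/*
 * Effective limit: the soft limit unless a non-zero hard limit is smaller
 * (0 means "no limit" for either field).
 */
static unsigned long long effective_limit(unsigned long long soft,
					  unsigned long long hard)
{
	unsigned long long limit = soft;

	if (hard && (!limit || hard < limit))
		limit = hard;
	return limit;
}

int main(void)
{
	printf("%llu\n", effective_limit(0, 500));	/* 500: only hard set */
	printf("%llu\n", effective_limit(300, 500));	/* 300: soft is smaller */
	printf("%llu\n", effective_limit(800, 500));	/* 500: hard is smaller */
	return 0;
}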
fs/jbd2/commit.c
@@ -976,29 +976,33 @@ restart_loop:
 		 * it. */
 
 		/*
-		 * A buffer which has been freed while still being journaled by
-		 * a previous transaction.
-		 */
-		if (buffer_freed(bh)) {
+		 * A buffer which has been freed while still being journaled
+		 * by a previous transaction, refile the buffer to BJ_Forget of
+		 * the running transaction. If the just committed transaction
+		 * contains "add to orphan" operation, we can completely
+		 * invalidate the buffer now. We are rather through in that
+		 * since the buffer may be still accessible when blocksize <
+		 * pagesize and it is attached to the last partial page.
+		 */
+		if (buffer_freed(bh) && !jh->b_next_transaction) {
+			struct address_space *mapping;
+
+			clear_buffer_freed(bh);
+			clear_buffer_jbddirty(bh);
+
 			/*
-			 * If the running transaction is the one containing
-			 * "add to orphan" operation (b_next_transaction !=
-			 * NULL), we have to wait for that transaction to
-			 * commit before we can really get rid of the buffer.
-			 * So just clear b_modified to not confuse transaction
-			 * credit accounting and refile the buffer to
-			 * BJ_Forget of the running transaction. If the just
-			 * committed transaction contains "add to orphan"
-			 * operation, we can completely invalidate the buffer
-			 * now. We are rather through in that since the
-			 * buffer may be still accessible when blocksize <
-			 * pagesize and it is attached to the last partial
-			 * page.
+			 * Block device buffers need to stay mapped all the
+			 * time, so it is enough to clear buffer_jbddirty and
+			 * buffer_freed bits. For the file mapping buffers (i.e.
+			 * journalled data) we need to unmap buffer and clear
+			 * more bits. We also need to be careful about the check
+			 * because the data page mapping can get cleared under
+			 * out hands, which alse need not to clear more bits
+			 * because the page and buffers will be freed and can
+			 * never be reused once we are done with them.
 			 */
-			jh->b_modified = 0;
-			if (!jh->b_next_transaction) {
-				clear_buffer_freed(bh);
-				clear_buffer_jbddirty(bh);
+			mapping = READ_ONCE(bh->b_page->mapping);
+			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
 				clear_buffer_mapped(bh);
 				clear_buffer_new(bh);
 				clear_buffer_req(bh);
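The commit.c hunk snapshots bh->b_page->mapping once with READ_ONCE() and afterwards tests only that local copy: truncate can clear the page's mapping concurrently, and re-reading the pointer between the NULL check and the dereference could observe two different values. A minimal userspace sketch of the same load-once pattern using C11 atomics; the structures here are illustrative, not jbd2's:

#include <stdatomic.h>
#include <stdbool.h>

struct mapping {
	bool backs_block_device;
};

struct page {
	_Atomic(struct mapping *) mapping;	/* may be set to NULL concurrently */
};

/* Decide whether the buffer should be fully unmapped. */
static bool need_full_unmap(struct page *pg)
{
	/* Load the pointer exactly once; test and use only the local copy. */
	struct mapping *mapping = atomic_load_explicit(&pg->mapping,
						       memory_order_relaxed);

	return mapping && !mapping->backs_block_device;
}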
fs/jbd2/transaction.c
@@ -2329,14 +2329,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
 		return -EBUSY;
 	}
 	/*
-	 * OK, buffer won't be reachable after truncate. We just set
-	 * j_next_transaction to the running transaction (if there is
-	 * one) and mark buffer as freed so that commit code knows it
-	 * should clear dirty bits when it is done with the buffer.
+	 * OK, buffer won't be reachable after truncate. We just clear
+	 * b_modified to not confuse transaction credit accounting, and
+	 * set j_next_transaction to the running transaction (if there
+	 * is one) and mark buffer as freed so that commit code knows
+	 * it should clear dirty bits when it is done with the buffer.
 	 */
 	set_buffer_freed(bh);
 	if (journal->j_running_transaction && buffer_jbddirty(bh))
 		jh->b_next_transaction = journal->j_running_transaction;
+	jh->b_modified = 0;
 	spin_unlock(&journal->j_list_lock);
 	spin_unlock(&jh->b_state_lock);
 	write_unlock(&journal->j_state_lock);