Merge tag 'xfs-5.1-fixes-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:
 "Here are a few fixes for some corruption bugs and uninitialized
  variable problems. The few patches here have gone through a few days'
  worth of fstest runs with no new problems observed.

  Changes since last update:

   - Fix a bunch of static checker complaints about uninitialized
     variables and insufficient range checks.

   - Avoid a crash when incore extent map data are corrupt.

   - Disallow FITRIM when we haven't recovered the log and know the
     metadata are stale.

   - Fix a data corruption when doing unaligned overlapping dio writes"

* tag 'xfs-5.1-fixes-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: serialize unaligned dio writes against all other dio writes
  xfs: prohibit fstrim in norecovery mode
  xfs: always init bma in xfs_bmapi_write
  xfs: fix btree scrub checking with regards to root-in-inode
  xfs: dabtree scrub needs to range-check level
  xfs: don't trip over uninitialized buffer on extent read of corrupted inode
Linus Torvalds committed 2019-03-29 14:36:57 -07:00
commit c6503f12d1
5 changed files with 49 additions and 17 deletions

fs/xfs/libxfs/xfs_bmap.c

@@ -1191,7 +1191,10 @@ xfs_iread_extents(
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
-	ASSERT(level > 0);
+	if (unlikely(level == 0)) {
+		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+		return -EFSCORRUPTED;
+	}
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap)		/* i/o: mval size/count */
 {
+	struct xfs_bmalloca	bma = {
+		.tp = tp,
+		.ip = ip,
+		.total = total,
+	};
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
-	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
-	bma.tp = tp;
-	bma.ip = ip;
-	bma.total = total;
-	bma.datatype = 0;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
	n = 0;
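The bma change above swaps a "{ NULL }" initializer plus later field assignments for a designated initializer at the point of declaration. As a side note (not XFS code; the struct and field names below are made up for illustration), this is a minimal standalone sketch of the C rule it relies on: members not named in a designated initializer are zero-initialized, so nothing in the struct is left stale or uninitialized.

#include <stdio.h>

/* stand-in for struct xfs_bmalloca; names here are illustrative only */
struct alloc_args {
	void	*tp;
	void	*ip;
	int	total;
	int	datatype;	/* deliberately not named below */
	int	minleft;	/* deliberately not named below */
};

int main(void)
{
	/* members not named in a designated initializer are zeroed (C99/C11) */
	struct alloc_args args = {
		.total = 16,
	};

	/* prints: total=16 datatype=0 minleft=0 */
	printf("total=%d datatype=%d minleft=%d\n",
	       args.total, args.datatype, args.minleft);
	return 0;
}

That implicit zeroing is why the later bma.datatype = 0 assignment could be dropped along with the other ones.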

fs/xfs/scrub/btree.c

@@ -415,8 +415,17 @@ xchk_btree_check_owner(
	struct xfs_btree_cur	*cur = bs->cur;
	struct check_owner	*co;
 
-	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+	/*
+	 * In theory, xfs_btree_get_block should only give us a null buffer
+	 * pointer for the root of a root-in-inode btree type, but we need
+	 * to check defensively here in case the cursor state is also screwed
+	 * up.
+	 */
+	if (bp == NULL) {
+		if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
+			xchk_btree_set_corrupt(bs->sc, bs->cur, level);
		return 0;
+	}
 
	/*
	 * We want to cross-reference each btree block with the bnobt

fs/xfs/scrub/dabtree.c

@@ -574,6 +574,11 @@ xchk_da_btree(
		/* Drill another level deeper. */
		blkno = be32_to_cpu(key->before);
		level++;
+		if (level >= XFS_DA_NODE_MAXDEPTH) {
+			/* Too deep! */
+			xchk_da_set_corrupt(&ds, level - 1);
+			break;
+		}
		ds.tree_level--;
		error = xchk_da_btree_block(&ds, level, blkno);
		if (error)

fs/xfs/xfs_discard.c

@@ -161,6 +161,14 @@ xfs_ioc_trim(
		return -EPERM;
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
+
+	/*
+	 * We haven't recovered the log, so we cannot use our bnobt-guided
+	 * storage zapping commands.
+	 */
+	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+		return -EROFS;
+
	if (copy_from_user(&range, urange, sizeof(range)))
		return -EFAULT;
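For context, xfs_ioc_trim() is reached from userspace through the FITRIM ioctl (this is what fstrim(8) issues). A minimal sketch of such a caller, assuming the mount point is passed on the command line (the "/mnt" fallback is just a placeholder); with the change above it gets EROFS back when the filesystem was mounted with -o norecovery instead of trimming based on stale free-space metadata.

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	struct fstrim_range range = {
		.start	= 0,
		.len	= ULLONG_MAX,	/* trim the whole filesystem */
		.minlen	= 0,
	};
	int fd;

	fd = open(argc > 1 ? argv[1] : "/mnt", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FITRIM, &range) < 0) {
		/* a norecovery XFS mount now fails here with errno == EROFS */
		perror("FITRIM");
		return 1;
	}
	/* on success the kernel writes back the number of bytes trimmed */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}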

fs/xfs/xfs_file.c

@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
	count = iov_iter_count(from);
 
	/*
-	 * If we are doing unaligned IO, wait for all other IO to drain,
-	 * otherwise demote the lock if we had to take the exclusive lock
-	 * for other reasons in xfs_file_aio_write_checks.
+	 * If we are doing unaligned IO, we can't allow any other overlapping IO
+	 * in-flight at the same time or we risk data corruption. Wait for all
+	 * other IO to drain before we submit. If the IO is aligned, demote the
+	 * iolock if we had to take the exclusive lock in
+	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
-		/* If we are going to wait for other DIO to finish, bail */
-		if (iocb->ki_flags & IOCB_NOWAIT) {
-			if (atomic_read(&inode->i_dio_count))
-				return -EAGAIN;
-		} else {
-			inode_dio_wait(inode);
-		}
+		/* unaligned dio always waits, bail */
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+	/*
+	 * If unaligned, this is the only IO in-flight. If it has not yet
+	 * completed, wait on it before we release the iolock to prevent
+	 * subsequent overlapping IO.
+	 */
+	if (ret == -EIOCBQUEUED && unaligned_io)
+		inode_dio_wait(inode);
 out:
	xfs_iunlock(ip, iolock);
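For context on what "unaligned" means in the hunks above: the write is acceptable to O_DIRECT (sector-aligned) but not aligned to the filesystem block size, so two such writes can land in the same filesystem block even though their byte ranges do not overlap, and that is the overlap the comment warns can corrupt data. A hedged userspace sketch of the pattern, assuming a 512-byte logical sector and a 4096-byte filesystem block ("testfile" is just a placeholder path); the corruption case needs the two writes issued concurrently (two threads, AIO, io_uring), whereas here they run back to back only to show the layout.

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define SECTOR	512		/* assumed logical sector size */
#define FSBLOCK	4096		/* assumed filesystem block size */

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	void *buf;
	int fd;

	fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (posix_memalign(&buf, SECTOR, SECTOR)) {
		perror("posix_memalign");
		return 1;
	}
	memset(buf, 0xab, SECTOR);

	/*
	 * Both writes are sector-aligned, so O_DIRECT allows them, but both
	 * fall inside the same 4096-byte filesystem block, which makes them
	 * "unaligned" from the filesystem's point of view.
	 */
	if (pwrite(fd, buf, SECTOR, 0) != SECTOR ||
	    pwrite(fd, buf, SECTOR, FSBLOCK / 2) != SECTOR) {
		perror("pwrite");
		return 1;
	}

	free(buf);
	close(fd);
	return 0;
}

With the patch above, XFS serializes this kind of sub-block direct write against all other in-flight direct IO to the inode rather than allowing it to overlap.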