Merge branch 'xfs-4.10-misc-fixes-1' into for-next

Dave Chinner 2016-11-10 10:29:43 +11:00
commit 0fc204e2eb
12 changed files with 111 additions and 122 deletions
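
Most of the hunks in this merge replace the open-coded incore extent count, ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), with the new xfs_iext_count() helper added in xfs_inode_fork.c below. What follows is only a minimal standalone sketch of that pattern; rec_t, struct ifork and iext_count are simplified stand-ins, not the kernel's xfs_bmbt_rec_t, struct xfs_ifork or xfs_iext_count().

#include <assert.h>
#include <stdio.h>

/* stand-in for the in-core extent record (xfs_bmbt_rec_t in the kernel) */
typedef struct { unsigned long long l0, l1; } rec_t;

/* stand-in for the relevant part of struct xfs_ifork */
struct ifork {
	int	if_bytes;	/* size in bytes of the in-core extent array */
	rec_t	*if_extents;
};

/* helper: number of in-core extents, derived from if_bytes */
static int iext_count(struct ifork *ifp)
{
	return ifp->if_bytes / (unsigned int)sizeof(rec_t);
}

int main(void)
{
	rec_t recs[4] = { { 0, 0 } };
	struct ifork f = { .if_bytes = sizeof(recs), .if_extents = recs };

	/* before: every call site open-codes the division */
	assert(f.if_bytes / (unsigned int)sizeof(rec_t) == 4);
	/* after: call sites use the helper, as in the hunks below */
	assert(iext_count(&f) == 4);
	printf("%d extents\n", iext_count(&f));
	return 0;
}

The real helper, as added in the xfs_inode_fork.c hunk, takes a struct xfs_ifork * and returns xfs_extnum_t.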

fs/xfs/libxfs/xfs_bmap.c

@ -515,7 +515,7 @@ xfs_bmap_trace_exlist(
state |= BMAP_ATTRFORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
ASSERT(cnt == xfs_iext_count(ifp));
for (idx = 0; idx < cnt; idx++)
trace_xfs_extlist(ip, idx, whichfork, caller_ip);
}
@ -811,7 +811,7 @@ try_another_ag:
XFS_BTREE_LONG_PTRS);
arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
for (cnt = i = 0; i < nextents; i++) {
ep = xfs_iext_get_ext(ifp, i);
if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
@ -1296,7 +1296,7 @@ xfs_bmap_read_extents(
/*
* Here with bp and block set to the leftmost leaf node in the tree.
*/
room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
room = xfs_iext_count(ifp);
i = 0;
/*
* Loop over all leaf nodes. Copy information to the extent records.
@ -1361,7 +1361,7 @@ xfs_bmap_read_extents(
return error;
block = XFS_BUF_TO_BLOCK(bp);
}
ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
ASSERT(i == xfs_iext_count(ifp));
ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
return 0;
@ -1404,7 +1404,7 @@ xfs_bmap_search_multi_extents(
if (lastx > 0) {
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
}
if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
if (lastx < xfs_iext_count(ifp)) {
xfs_bmbt_get_all(ep, gotp);
*eofp = 0;
} else {
@ -1497,7 +1497,7 @@ xfs_bmap_first_unused(
(error = xfs_iread_extents(tp, ip, whichfork)))
return error;
lowest = *first_unused;
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
off = xfs_bmbt_get_startoff(ep);
@ -1582,7 +1582,7 @@ xfs_bmap_last_extent(
return error;
}
nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
if (nextents == 0) {
*is_empty = 1;
return 0;
@ -1735,7 +1735,7 @@ xfs_bmap_add_extent_delay_real(
&bma->ip->i_d.di_nextents);
ASSERT(bma->idx >= 0);
ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
ASSERT(bma->idx <= xfs_iext_count(ifp));
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!bma->cur ||
(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
@ -1794,7 +1794,7 @@ xfs_bmap_add_extent_delay_real(
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
if (bma->idx < xfs_iext_count(ifp) - 1) {
state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
@ -2300,7 +2300,7 @@ xfs_bmap_add_extent_unwritten_real(
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
ASSERT(*idx >= 0);
ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(!isnullstartblock(new->br_startblock));
XFS_STATS_INC(mp, xs_add_exlist);
@ -2356,7 +2356,7 @@ xfs_bmap_add_extent_unwritten_real(
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
if (*idx < xfs_iext_count(&ip->i_df) - 1) {
state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
if (isnullstartblock(RIGHT.br_startblock))
@ -2836,7 +2836,7 @@ xfs_bmap_add_extent_hole_delay(
* Check and set flags if the current (right) segment exists.
* If it doesn't exist, we're converting the hole at end-of-file.
*/
if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
if (*idx < xfs_iext_count(ifp)) {
state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
@ -2966,7 +2966,7 @@ xfs_bmap_add_extent_hole_real(
ifp = XFS_IFORK_PTR(bma->ip, whichfork);
ASSERT(bma->idx >= 0);
ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
ASSERT(bma->idx <= xfs_iext_count(ifp));
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!bma->cur ||
!(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
@ -2992,7 +2992,7 @@ xfs_bmap_add_extent_hole_real(
* Check and set flags if this segment has a current value.
* Not true if we're inserting into the "hole" at eof.
*/
if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
if (bma->idx < xfs_iext_count(ifp)) {
state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
if (isnullstartblock(right.br_startblock))
@ -4221,7 +4221,7 @@ xfs_bmapi_read(
break;
/* Else go on to the next record. */
if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
if (++lastx < xfs_iext_count(ifp))
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
else
eof = 1;
@ -4733,7 +4733,7 @@ xfs_bmapi_write(
/* Else go on to the next record. */
bma.prev = bma.got;
if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
if (++bma.idx < xfs_iext_count(ifp)) {
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
&bma.got);
} else
@ -4885,7 +4885,7 @@ xfs_bmap_del_extent_delay(
da_new = 0;
ASSERT(*idx >= 0);
ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(del->br_blockcount > 0);
ASSERT(got->br_startoff <= del->br_startoff);
ASSERT(got_endoff >= del_endoff);
@ -5016,7 +5016,7 @@ xfs_bmap_del_extent_cow(
got_endoff = got->br_startoff + got->br_blockcount;
ASSERT(*idx >= 0);
ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(del->br_blockcount > 0);
ASSERT(got->br_startoff <= del->br_startoff);
ASSERT(got_endoff >= del_endoff);
@ -5122,8 +5122,7 @@ xfs_bmap_del_extent(
state |= BMAP_COWFORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
(uint)sizeof(xfs_bmbt_rec_t)));
ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
ASSERT(del->br_blockcount > 0);
ep = xfs_iext_get_ext(ifp, *idx);
xfs_bmbt_get_all(ep, &got);
@ -5448,7 +5447,6 @@ __xfs_bunmapi(
int logflags; /* transaction logging flags */
xfs_extlen_t mod; /* rt extent offset */
xfs_mount_t *mp; /* mount structure */
xfs_extnum_t nextents; /* number of file extents */
xfs_bmbt_irec_t prev; /* previous extent record */
xfs_fileoff_t start; /* first file offset deleted */
int tmp_logflags; /* partial logging flags */
@ -5480,8 +5478,7 @@ __xfs_bunmapi(
if (!(ifp->if_flags & XFS_IFEXTENTS) &&
(error = xfs_iread_extents(tp, ip, whichfork)))
return error;
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
if (nextents == 0) {
if (xfs_iext_count(ifp) == 0) {
*rlen = 0;
return 0;
}
@ -5966,7 +5963,7 @@ xfs_bmse_shift_one(
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
total_extents = xfs_iext_count(ifp);
xfs_bmbt_get_all(gotp, &got);
@ -6143,7 +6140,7 @@ xfs_bmap_shift_extents(
* are collapsing out, so we cannot use the count of real extents here.
* Instead we have to calculate it from the incore fork.
*/
total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
total_extents = xfs_iext_count(ifp);
if (total_extents == 0) {
*done = 1;
goto del_cursor;
@ -6203,7 +6200,7 @@ xfs_bmap_shift_extents(
* count can change. Update the total and grab the next record.
*/
if (direction == SHIFT_LEFT) {
total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
total_extents = xfs_iext_count(ifp);
stop_extent = total_extents;
}

fs/xfs/libxfs/xfs_inode_fork.c

@ -775,6 +775,13 @@ xfs_idestroy_fork(
}
}
/* Count number of incore extents based on if_bytes */
xfs_extnum_t
xfs_iext_count(struct xfs_ifork *ifp)
{
return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
}
/*
* Convert in-core extents to on-disk form
*
@ -803,7 +810,7 @@ xfs_iextents_copy(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(ifp->if_bytes > 0);
nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nrecs = xfs_iext_count(ifp);
XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
ASSERT(nrecs > 0);
@ -941,7 +948,7 @@ xfs_iext_get_ext(
xfs_extnum_t idx) /* index of target extent */
{
ASSERT(idx >= 0);
ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
ASSERT(idx < xfs_iext_count(ifp));
if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
return ifp->if_u1.if_ext_irec->er_extbuf;
@ -1017,7 +1024,7 @@ xfs_iext_add(
int new_size; /* size of extents after adding */
xfs_extnum_t nextents; /* number of extents in file */
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
ASSERT((idx >= 0) && (idx <= nextents));
byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
new_size = ifp->if_bytes + byte_diff;
@ -1241,7 +1248,7 @@ xfs_iext_remove(
trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
ASSERT(ext_diff > 0);
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
if (new_size == 0) {
@ -1270,7 +1277,7 @@ xfs_iext_remove_inline(
ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
ASSERT(idx < XFS_INLINE_EXTS);
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
ASSERT(((nextents - ext_diff) > 0) &&
(nextents - ext_diff) < XFS_INLINE_EXTS);
@ -1309,7 +1316,7 @@ xfs_iext_remove_direct(
ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
new_size = ifp->if_bytes -
(ext_diff * sizeof(xfs_bmbt_rec_t));
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
if (new_size == 0) {
xfs_iext_destroy(ifp);
@ -1546,7 +1553,7 @@ xfs_iext_indirect_to_direct(
int size; /* size of file extents */
ASSERT(ifp->if_flags & XFS_IFEXTIREC);
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
ASSERT(nextents <= XFS_LINEAR_EXTS);
size = nextents * sizeof(xfs_bmbt_rec_t);
@ -1620,7 +1627,7 @@ xfs_iext_bno_to_ext(
xfs_extnum_t nextents; /* number of file extents */
xfs_fileoff_t startoff = 0; /* start offset of extent */
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
if (nextents == 0) {
*idxp = 0;
return NULL;
@ -1733,8 +1740,8 @@ xfs_iext_idx_to_irec(
ASSERT(ifp->if_flags & XFS_IFEXTIREC);
ASSERT(page_idx >= 0);
ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
ASSERT(page_idx <= xfs_iext_count(ifp));
ASSERT(page_idx < xfs_iext_count(ifp) || realloc);
nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
erp_idx = 0;
@ -1782,7 +1789,7 @@ xfs_iext_irec_init(
xfs_extnum_t nextents; /* number of extents in file */
ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
ASSERT(nextents <= XFS_LINEAR_EXTS);
erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
@ -1906,7 +1913,7 @@ xfs_iext_irec_compact(
ASSERT(ifp->if_flags & XFS_IFEXTIREC);
nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
if (nextents == 0) {
xfs_iext_destroy(ifp);

fs/xfs/libxfs/xfs_inode_fork.h

@ -152,6 +152,7 @@ void xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
struct xfs_bmbt_rec_host *
xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t);
xfs_extnum_t xfs_iext_count(struct xfs_ifork *);
void xfs_iext_insert(struct xfs_inode *, xfs_extnum_t, xfs_extnum_t,
struct xfs_bmbt_irec *, int);
void xfs_iext_add(struct xfs_ifork *, xfs_extnum_t, int);

fs/xfs/xfs_aops.c

@ -1360,6 +1360,26 @@ __xfs_get_blocks(
if (error)
goto out_unlock;
/*
* The only time we can ever safely find delalloc blocks on direct I/O
* is a dio write to post-eof speculative preallocation. All other
* scenarios are indicative of a problem or misuse (such as mixing
* direct and mapped I/O).
*
* The file may be unmapped by the time we get here so we cannot
* reliably fail the I/O based on mapping. Instead, fail the I/O if this
* is a read or a write within eof. Otherwise, carry on but warn as a
* precaution if the file happens to be mapped.
*/
if (direct && imap.br_startblock == DELAYSTARTBLOCK) {
if (!create || offset < i_size_read(VFS_I(ip))) {
WARN_ON_ONCE(1);
error = -EIO;
goto out_unlock;
}
WARN_ON_ONCE(mapping_mapped(VFS_I(ip)->i_mapping));
}
/* for DAX, we convert unwritten extents directly */
if (create &&
(!nimaps ||
@ -1444,8 +1464,6 @@ __xfs_get_blocks(
(new || ISUNWRITTEN(&imap))))
set_buffer_new(bh_result);
BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK);
return 0;
out_unlock:

fs/xfs/xfs_bmap_util.c

@ -359,9 +359,7 @@ xfs_bmap_count_blocks(
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
xfs_bmap_count_leaves(ifp, 0,
ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
count);
xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count);
return 0;
}
@ -426,7 +424,7 @@ xfs_getbmapx_fix_eof_hole(
ifp = XFS_IFORK_PTR(ip, whichfork);
if (!moretocome &&
xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
(lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
(lastx == xfs_iext_count(ifp) - 1))
out->bmv_oflags |= BMV_OF_LAST;
}
@ -1792,6 +1790,7 @@ xfs_swap_extent_forks(
struct xfs_ifork tempifp, *ifp, *tifp;
int aforkblks = 0;
int taforkblks = 0;
xfs_extnum_t nextents;
__uint64_t tmp;
int error;
@ -1877,14 +1876,13 @@ xfs_swap_extent_forks(
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
/* If the extents fit in the inode, fix the
* pointer. Otherwise it's already NULL or
* pointing to the extent.
/*
* If the extents fit in the inode, fix the pointer. Otherwise
* it's already NULL or pointing to the extent.
*/
if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
ifp->if_u1.if_extents =
ifp->if_u2.if_inline_ext;
}
nextents = xfs_iext_count(&ip->i_df);
if (nextents <= XFS_INLINE_EXTS)
ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
(*src_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
@ -1896,14 +1894,13 @@ xfs_swap_extent_forks(
switch (tip->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
/* If the extents fit in the inode, fix the
* pointer. Otherwise it's already NULL or
* pointing to the extent.
/*
* If the extents fit in the inode, fix the pointer. Otherwise
* it's already NULL or pointing to the extent.
*/
if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
tifp->if_u1.if_extents =
tifp->if_u2.if_inline_ext;
}
nextents = xfs_iext_count(&tip->i_df);
if (nextents <= XFS_INLINE_EXTS)
tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
(*target_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:

fs/xfs/xfs_icache.c

@ -123,7 +123,6 @@ __xfs_inode_free(
{
/* asserts to verify all state is correct here */
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!xfs_isiflocked(ip));
XFS_STATS_DEC(ip->i_mount, vn_active);
call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
@ -133,6 +132,8 @@ void
xfs_inode_free(
struct xfs_inode *ip)
{
ASSERT(!xfs_isiflocked(ip));
/*
* Because we use RCU freeing we need to ensure the inode always
* appears to be reclaimed with an invalid inode number when in the
@ -981,6 +982,7 @@ restart:
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_iunpin_wait(ip);
/* xfs_iflush_abort() drops the flush lock */
xfs_iflush_abort(ip, false);
goto reclaim;
}
@ -989,10 +991,10 @@ restart:
goto out_ifunlock;
xfs_iunpin_wait(ip);
}
if (xfs_iflags_test(ip, XFS_ISTALE))
goto reclaim;
if (xfs_inode_clean(ip))
if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
goto reclaim;
}
/*
* Never flush out dirty data during non-blocking reclaim, as it would
@ -1030,25 +1032,24 @@ restart:
xfs_buf_relse(bp);
}
xfs_iflock(ip);
reclaim:
ASSERT(!xfs_isiflocked(ip));
/*
* Because we use RCU freeing we need to ensure the inode always appears
* to be reclaimed with an invalid inode number when in the free state.
* We do this as early as possible under the ILOCK and flush lock so
* that xfs_iflush_cluster() can be guaranteed to detect races with us
* here. By doing this, we guarantee that once xfs_iflush_cluster has
* locked both the XFS_ILOCK and the flush lock that it will see either
* a valid, flushable inode that will serialise correctly against the
* locks below, or it will see a clean (and invalid) inode that it can
* skip.
* We do this as early as possible under the ILOCK so that
* xfs_iflush_cluster() can be guaranteed to detect races with us here.
* By doing this, we guarantee that once xfs_iflush_cluster has locked
* XFS_ILOCK that it will see either a valid, flushable inode that will
* serialise correctly, or it will see a clean (and invalid) inode that
* it can skip.
*/
spin_lock(&ip->i_flags_lock);
ip->i_flags = XFS_IRECLAIM;
ip->i_ino = 0;
spin_unlock(&ip->i_flags_lock);
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
@ -1580,10 +1581,15 @@ xfs_inode_free_cowblocks(
struct xfs_eofblocks *eofb = args;
bool need_iolock = true;
int match;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
if (!xfs_reflink_has_real_cow_blocks(ip)) {
/*
* Just clear the tag if we have an empty cow fork or none at all. It's
* possible the inode was fully unshared since it was originally tagged.
*/
if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
trace_xfs_inode_free_cowblocks_invalid(ip);
xfs_inode_clear_cowblocks_tag(ip);
return 0;

fs/xfs/xfs_inode.h

@ -246,6 +246,11 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
* Synchronize processes attempting to flush the in-core inode back to disk.
*/
static inline int xfs_isiflocked(struct xfs_inode *ip)
{
return xfs_iflags_test(ip, XFS_IFLOCK);
}
extern void __xfs_iflock(struct xfs_inode *ip);
static inline int xfs_iflock_nowait(struct xfs_inode *ip)
@ -261,16 +266,12 @@ static inline void xfs_iflock(struct xfs_inode *ip)
static inline void xfs_ifunlock(struct xfs_inode *ip)
{
ASSERT(xfs_isiflocked(ip));
xfs_iflags_clear(ip, XFS_IFLOCK);
smp_mb();
wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
}
static inline int xfs_isiflocked(struct xfs_inode *ip)
{
return xfs_iflags_test(ip, XFS_IFLOCK);
}
/*
* Flags for inode locking.
* Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield)

fs/xfs/xfs_inode_item.c

@ -164,7 +164,7 @@ xfs_inode_item_format_data_fork(
struct xfs_bmbt_rec *p;
ASSERT(ip->i_df.if_u1.if_extents != NULL);
ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0);
ASSERT(xfs_iext_count(&ip->i_df) > 0);
p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
@ -261,7 +261,7 @@ xfs_inode_item_format_attr_fork(
ip->i_afp->if_bytes > 0) {
struct xfs_bmbt_rec *p;
ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) ==
ASSERT(xfs_iext_count(ip->i_afp) ==
ip->i_d.di_anextents);
ASSERT(ip->i_afp->if_u1.if_extents != NULL);

fs/xfs/xfs_ioctl.c

@ -910,16 +910,14 @@ xfs_ioc_fsgetxattr(
if (attr) {
if (ip->i_afp) {
if (ip->i_afp->if_flags & XFS_IFEXTENTS)
fa.fsx_nextents = ip->i_afp->if_bytes /
sizeof(xfs_bmbt_rec_t);
fa.fsx_nextents = xfs_iext_count(ip->i_afp);
else
fa.fsx_nextents = ip->i_d.di_anextents;
} else
fa.fsx_nextents = 0;
} else {
if (ip->i_df.if_flags & XFS_IFEXTENTS)
fa.fsx_nextents = ip->i_df.if_bytes /
sizeof(xfs_bmbt_rec_t);
fa.fsx_nextents = xfs_iext_count(&ip->i_df);
else
fa.fsx_nextents = ip->i_d.di_nextents;
}

fs/xfs/xfs_qm.c

@ -1135,7 +1135,7 @@ xfs_qm_get_rtblks(
return error;
}
rtblks = 0;
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
nextents = xfs_iext_count(ifp);
for (idx = 0; idx < nextents; idx++)
rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
*O_rtblks = (xfs_qcnt_t)rtblks;

fs/xfs/xfs_reflink.c

@ -486,7 +486,7 @@ xfs_reflink_trim_irec_to_next_cow(
/* This is the extent before; try sliding up one. */
if (irec.br_startoff < offset_fsb) {
idx++;
if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
if (idx >= xfs_iext_count(ifp))
return 0;
gotp = xfs_iext_get_ext(ifp, idx);
xfs_bmbt_get_all(gotp, &irec);
@ -566,7 +566,7 @@ xfs_reflink_cancel_cow_blocks(
xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
}
if (++idx >= ifp->if_bytes / sizeof(struct xfs_bmbt_rec))
if (++idx >= xfs_iext_count(ifp))
break;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
}
@ -1697,37 +1697,3 @@ out:
trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
return error;
}
/*
* Does this inode have any real CoW reservations?
*/
bool
xfs_reflink_has_real_cow_blocks(
struct xfs_inode *ip)
{
struct xfs_bmbt_irec irec;
struct xfs_ifork *ifp;
struct xfs_bmbt_rec_host *gotp;
xfs_extnum_t idx;
if (!xfs_is_reflink_inode(ip))
return false;
/* Go find the old extent in the CoW fork. */
ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
gotp = xfs_iext_bno_to_ext(ifp, 0, &idx);
while (gotp) {
xfs_bmbt_get_all(gotp, &irec);
if (!isnullstartblock(irec.br_startblock))
return true;
/* Roll on... */
idx++;
if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
break;
gotp = xfs_iext_get_ext(ifp, idx);
}
return false;
}

fs/xfs/xfs_reflink.h

@ -50,6 +50,4 @@ extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t len);
extern bool xfs_reflink_has_real_cow_blocks(struct xfs_inode *ip);
#endif /* __XFS_REFLINK_H */