Merge branch 'xfs-misc-fixes-for-3.20-3' into for-next
commit 3fd1b0d158
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
@@ -91,16 +91,6 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 	return ptr;
 }
 
-void
-kmem_free(const void *ptr)
-{
-	if (!is_vmalloc_addr(ptr)) {
-		kfree(ptr);
-	} else {
-		vfree(ptr);
-	}
-}
-
 void *
 kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
 	     xfs_km_flags_t flags)
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
@@ -63,7 +63,10 @@ kmem_flags_convert(xfs_km_flags_t flags)
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
 extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
 extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
-extern void  kmem_free(const void *);
+static inline void  kmem_free(const void *ptr)
+{
+	kvfree(ptr);
+}
 
 
 extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
@@ -135,30 +135,22 @@ xfs_setfilesize_trans_alloc(
  */
 STATIC int
 xfs_setfilesize(
-	struct xfs_ioend	*ioend)
+	struct xfs_inode	*ip,
+	struct xfs_trans	*tp,
+	xfs_off_t		offset,
+	size_t			size)
 {
-	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-	struct xfs_trans	*tp = ioend->io_append_trans;
 	xfs_fsize_t		isize;
 
-	/*
-	 * The transaction may have been allocated in the I/O submission thread,
-	 * thus we need to mark ourselves as beeing in a transaction manually.
-	 * Similarly for freeze protection.
-	 */
-	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
-	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
-			   0, 1, _THIS_IP_);
-
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
+	isize = xfs_new_eof(ip, offset + size);
 	if (!isize) {
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		xfs_trans_cancel(tp, 0);
 		return 0;
 	}
 
-	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
+	trace_xfs_setfilesize(ip, offset, size);
 
 	ip->i_d.di_size = isize;
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -167,6 +159,25 @@ xfs_setfilesize(
 	return xfs_trans_commit(tp, 0);
 }
 
+STATIC int
+xfs_setfilesize_ioend(
+	struct xfs_ioend	*ioend)
+{
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	struct xfs_trans	*tp = ioend->io_append_trans;
+
+	/*
+	 * The transaction may have been allocated in the I/O submission thread,
+	 * thus we need to mark ourselves as being in a transaction manually.
+	 * Similarly for freeze protection.
+	 */
+	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
+	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+			   0, 1, _THIS_IP_);
+
+	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
+}
+
 /*
  * Schedule IO completion handling on the final put of an ioend.
  *
@@ -182,8 +193,7 @@ xfs_finish_ioend(
 
 	if (ioend->io_type == XFS_IO_UNWRITTEN)
 		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
-	else if (ioend->io_append_trans ||
-		 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
+	else if (ioend->io_append_trans)
 		queue_work(mp->m_data_workqueue, &ioend->io_work);
 	else
 		xfs_destroy_ioend(ioend);
@@ -215,22 +225,8 @@ xfs_end_io(
 	if (ioend->io_type == XFS_IO_UNWRITTEN) {
 		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
 						  ioend->io_size);
-	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
-		/*
-		 * For direct I/O we do not know if we need to allocate blocks
-		 * or not so we can't preallocate an append transaction as that
-		 * results in nested reservations and log space deadlocks. Hence
-		 * allocate the transaction here. While this is sub-optimal and
-		 * can block IO completion for some time, we're stuck with doing
-		 * it this way until we can pass the ioend to the direct IO
-		 * allocation callbacks and avoid nesting that way.
-		 */
-		error = xfs_setfilesize_trans_alloc(ioend);
-		if (error)
-			goto done;
-		error = xfs_setfilesize(ioend);
 	} else if (ioend->io_append_trans) {
-		error = xfs_setfilesize(ioend);
+		error = xfs_setfilesize_ioend(ioend);
 	} else {
 		ASSERT(!xfs_ioend_is_append(ioend));
 	}
@@ -241,17 +237,6 @@ done:
 		xfs_destroy_ioend(ioend);
 }
 
-/*
- * Call IO completion handling in caller context on the final put of an ioend.
- */
-STATIC void
-xfs_finish_ioend_sync(
-	struct xfs_ioend	*ioend)
-{
-	if (atomic_dec_and_test(&ioend->io_remaining))
-		xfs_end_io(&ioend->io_work);
-}
-
 /*
  * Allocate and initialise an IO completion structure.
  * We need to track unwritten extent write completion here initially.
@@ -273,7 +258,6 @@ xfs_alloc_ioend(
 	 * all the I/O from calling the completion routine too early.
 	 */
 	atomic_set(&ioend->io_remaining, 1);
-	ioend->io_isdirect = 0;
 	ioend->io_error = 0;
 	ioend->io_list = NULL;
 	ioend->io_type = type;
@@ -1459,11 +1443,7 @@ xfs_get_blocks_direct(
  *
  * If the private argument is non-NULL __xfs_get_blocks signals us that we
  * need to issue a transaction to convert the range from unwritten to written
- * extents.  In case this is regular synchronous I/O we just call xfs_end_io
- * to do this and we are done.  But in case this was a successful AIO
- * request this handler is called from interrupt context, from which we
- * can't start transactions.  In that case offload the I/O completion to
- * the workqueues we also use for buffered I/O completion.
+ * extents.
  */
 STATIC void
 xfs_end_io_direct_write(
@@ -1472,7 +1452,12 @@ xfs_end_io_direct_write(
 	ssize_t			size,
 	void			*private)
 {
-	struct xfs_ioend	*ioend = iocb->private;
+	struct inode		*inode = file_inode(iocb->ki_filp);
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return;
 
 	/*
 	 * While the generic direct I/O code updates the inode size, it does
@@ -1480,22 +1465,33 @@ xfs_end_io_direct_write(
 	 * end_io handler thinks the on-disk size is outside the in-core
 	 * size.  To prevent this just update it a little bit earlier here.
 	 */
-	if (offset + size > i_size_read(ioend->io_inode))
-		i_size_write(ioend->io_inode, offset + size);
+	if (offset + size > i_size_read(inode))
+		i_size_write(inode, offset + size);
 
 	/*
-	 * blockdev_direct_IO can return an error even after the I/O
-	 * completion handler was called.  Thus we need to protect
-	 * against double-freeing.
+	 * For direct I/O we do not know if we need to allocate blocks or not,
+	 * so we can't preallocate an append transaction, as that results in
+	 * nested reservations and log space deadlocks. Hence allocate the
+	 * transaction here. While this is sub-optimal and can block IO
+	 * completion for some time, we're stuck with doing it this way until
+	 * we can pass the ioend to the direct IO allocation callbacks and
+	 * avoid nesting that way.
 	 */
-	iocb->private = NULL;
-
-	ioend->io_offset = offset;
-	ioend->io_size = size;
-	if (private && size > 0)
-		ioend->io_type = XFS_IO_UNWRITTEN;
+	if (private && size > 0) {
+		xfs_iomap_write_unwritten(ip, offset, size);
+	} else if (offset + size > ip->i_d.di_size) {
+		struct xfs_trans	*tp;
+		int			error;
+
+		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
+		if (error) {
+			xfs_trans_cancel(tp, 0);
+			return;
+		}
 
-	xfs_finish_ioend_sync(ioend);
+		xfs_setfilesize(ip, tp, offset, size);
+	}
 }
 
 STATIC ssize_t
@@ -1507,39 +1503,16 @@ xfs_vm_direct_IO(
 {
 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
-	struct xfs_ioend	*ioend = NULL;
-	ssize_t			ret;
 
 	if (rw & WRITE) {
-		size_t size = iov_iter_count(iter);
-
-		/*
-		 * We cannot preallocate a size update transaction here as we
-		 * don't know whether allocation is necessary or not. Hence we
-		 * can only tell IO completion that one is necessary if we are
-		 * not doing unwritten extent conversion.
-		 */
-		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
-		if (offset + size > XFS_I(inode)->i_d.di_size)
-			ioend->io_isdirect = 1;
-
-		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+		return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
 					   offset, xfs_get_blocks_direct,
 					   xfs_end_io_direct_write, NULL,
 					   DIO_ASYNC_EXTEND);
-		if (ret != -EIOCBQUEUED && iocb->private)
-			goto out_destroy_ioend;
-	} else {
-		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
-					   offset, xfs_get_blocks_direct,
-					   NULL, NULL, 0);
 	}
-
-	return ret;
-
-out_destroy_ioend:
-	xfs_destroy_ioend(ioend);
-	return ret;
+	return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+				   offset, xfs_get_blocks_direct,
+				   NULL, NULL, 0);
 }
 
 /*
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
@@ -24,14 +24,12 @@ extern mempool_t *xfs_ioend_pool;
  * Types of I/O for bmap clustering and I/O completion tracking.
  */
 enum {
-	XFS_IO_DIRECT = 0,	/* special case for direct I/O ioends */
 	XFS_IO_DELALLOC,	/* covers delalloc region */
 	XFS_IO_UNWRITTEN,	/* covers allocated but uninitialized data */
 	XFS_IO_OVERWRITE,	/* covers already allocated extent */
 };
 
 #define XFS_IO_TYPES \
-	{ 0,			"" }, \
 	{ XFS_IO_DELALLOC,	"delalloc" }, \
 	{ XFS_IO_UNWRITTEN,	"unwritten" }, \
 	{ XFS_IO_OVERWRITE,	"overwrite" }
@@ -45,7 +43,6 @@ typedef struct xfs_ioend {
 	unsigned int		io_type;	/* delalloc / unwritten */
 	int			io_error;	/* I/O error code */
 	atomic_t		io_remaining;	/* hold count */
-	unsigned int		io_isdirect : 1;/* direct I/O */
 	struct inode		*io_inode;	/* file being written to */
 	struct buffer_head	*io_buffer_head;/* buffer linked list head */
 	struct buffer_head	*io_buffer_tail;/* buffer linked list tail */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
@@ -127,6 +127,42 @@ xfs_iozero(
 	return (-status);
 }
 
+int
+xfs_update_prealloc_flags(
+	struct xfs_inode	*ip,
+	enum xfs_prealloc_flags	flags)
+{
+	struct xfs_trans	*tp;
+	int			error;
+
+	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
+	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		return error;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
+		ip->i_d.di_mode &= ~S_ISUID;
+		if (ip->i_d.di_mode & S_IXGRP)
+			ip->i_d.di_mode &= ~S_ISGID;
+		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+	}
+
+	if (flags & XFS_PREALLOC_SET)
+		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+	if (flags & XFS_PREALLOC_CLEAR)
+		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	if (flags & XFS_PREALLOC_SYNC)
+		xfs_trans_set_sync(tp);
+	return xfs_trans_commit(tp, 0);
+}
+
 /*
  * Fsync operations on directories are much simpler than on regular files,
  * as there is no file data to flush, and thus also no need for explicit
@@ -784,8 +820,8 @@ xfs_file_fallocate(
 {
 	struct inode		*inode = file_inode(file);
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_trans	*tp;
 	long			error;
+	enum xfs_prealloc_flags	flags = 0;
 	loff_t			new_size = 0;
 
 	if (!S_ISREG(inode->i_mode))
@@ -822,6 +858,8 @@ xfs_file_fallocate(
 		if (error)
 			goto out_unlock;
 	} else {
+		flags |= XFS_PREALLOC_SET;
+
 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
 		    offset + len > i_size_read(inode)) {
 			new_size = offset + len;
@@ -839,28 +877,10 @@ xfs_file_fallocate(
 			goto out_unlock;
 	}
 
-	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
-	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp, 0);
-		goto out_unlock;
-	}
-
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-	ip->i_d.di_mode &= ~S_ISUID;
-	if (ip->i_d.di_mode & S_IXGRP)
-		ip->i_d.di_mode &= ~S_ISGID;
-
-	if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)))
-		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-
-	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
 	if (file->f_flags & O_DSYNC)
-		xfs_trans_set_sync(tp);
-	error = xfs_trans_commit(tp, 0);
+		flags |= XFS_PREALLOC_SYNC;
+
+	error = xfs_update_prealloc_flags(ip, flags);
 	if (error)
 		goto out_unlock;
 
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
@@ -377,6 +377,15 @@ int xfs_droplink(struct xfs_trans *, struct xfs_inode *);
 int		xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
 
 /* from xfs_file.c */
+enum xfs_prealloc_flags {
+	XFS_PREALLOC_SET	= (1 << 1),
+	XFS_PREALLOC_CLEAR	= (1 << 2),
+	XFS_PREALLOC_SYNC	= (1 << 3),
+	XFS_PREALLOC_INVISIBLE	= (1 << 4),
+};
+
+int	xfs_update_prealloc_flags(struct xfs_inode *,
+		enum xfs_prealloc_flags);
 int		xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
 int		xfs_iozero(struct xfs_inode *, loff_t, size_t);
 
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
@@ -606,11 +606,8 @@ xfs_ioc_space(
 	unsigned int		cmd,
 	xfs_flock64_t		*bf)
 {
-	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_trans	*tp;
 	struct iattr		iattr;
-	bool			setprealloc = false;
-	bool			clrprealloc = false;
+	enum xfs_prealloc_flags	flags = 0;
 	int			error;
 
 	/*
@@ -630,6 +627,11 @@ xfs_ioc_space(
 	if (!S_ISREG(inode->i_mode))
 		return -EINVAL;
 
+	if (filp->f_flags & O_DSYNC)
+		flags |= XFS_PREALLOC_SYNC;
+	if (ioflags & XFS_IO_INVIS)
+		flags |= XFS_PREALLOC_INVISIBLE;
+
 	error = mnt_want_write_file(filp);
 	if (error)
 		return error;
@@ -673,25 +675,23 @@ xfs_ioc_space(
 	}
 
 	if (bf->l_start < 0 ||
-	    bf->l_start > mp->m_super->s_maxbytes ||
+	    bf->l_start > inode->i_sb->s_maxbytes ||
 	    bf->l_start + bf->l_len < 0 ||
-	    bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
+	    bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
 		error = -EINVAL;
 		goto out_unlock;
 	}
 
 	switch (cmd) {
 	case XFS_IOC_ZERO_RANGE:
+		flags |= XFS_PREALLOC_SET;
 		error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
-		if (!error)
-			setprealloc = true;
 		break;
 	case XFS_IOC_RESVSP:
 	case XFS_IOC_RESVSP64:
+		flags |= XFS_PREALLOC_SET;
 		error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
 						XFS_BMAPI_PREALLOC);
-		if (!error)
-			setprealloc = true;
 		break;
 	case XFS_IOC_UNRESVSP:
 	case XFS_IOC_UNRESVSP64:
@@ -701,6 +701,7 @@ xfs_ioc_space(
 	case XFS_IOC_ALLOCSP64:
 	case XFS_IOC_FREESP:
 	case XFS_IOC_FREESP64:
+		flags |= XFS_PREALLOC_CLEAR;
 		if (bf->l_start > XFS_ISIZE(ip)) {
 			error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
 					bf->l_start - XFS_ISIZE(ip), 0);
@@ -712,8 +713,6 @@ xfs_ioc_space(
 		iattr.ia_size = bf->l_start;
 
 		error = xfs_setattr_size(ip, &iattr);
-		if (!error)
-			clrprealloc = true;
 		break;
 	default:
 		ASSERT(0);
@@ -723,32 +722,7 @@ xfs_ioc_space(
 	if (error)
 		goto out_unlock;
 
-	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
-	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
-	if (error) {
-		xfs_trans_cancel(tp, 0);
-		goto out_unlock;
-	}
-
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-	if (!(ioflags & XFS_IO_INVIS)) {
-		ip->i_d.di_mode &= ~S_ISUID;
-		if (ip->i_d.di_mode & S_IXGRP)
-			ip->i_d.di_mode &= ~S_ISGID;
-		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-	}
-
-	if (setprealloc)
-		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-	else if (clrprealloc)
-		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
-	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	if (filp->f_flags & O_DSYNC)
-		xfs_trans_set_sync(tp);
-	error = xfs_trans_commit(tp, 0);
+	error = xfs_update_prealloc_flags(ip, flags);
 
 out_unlock:
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
@@ -423,7 +423,7 @@ xfs_compat_attrmulti_by_handle(
 
 	ops = memdup_user(compat_ptr(am_hreq.ops), size);
 	if (IS_ERR(ops)) {
-		error = -PTR_ERR(ops);
+		error = PTR_ERR(ops);
 		goto out_dput;
 	}
 
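For orientation (not part of the commit): the xfs_file_fallocate() and xfs_ioc_space() paths changed above are reached from user space through fallocate(2) and the XFS_IOC_RESVSP family of ioctls. A minimal user-space sketch that exercises the preallocation path (after this series, the new xfs_update_prealloc_flags() helper) on an XFS filesystem:

/* Hypothetical test program, not from this commit: preallocates space
 * with fallocate(2); on XFS this takes the xfs_file_fallocate() path,
 * which now sets XFS_DIFLAG_PREALLOC via xfs_update_prealloc_flags(). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	int fd = open(path, O_CREAT | O_WRONLY, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Keep i_size unchanged; just reserve 16 MiB of blocks. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}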