// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_acl.h"
#include "xfs_quota.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_dir2.h"
#include "xfs_iomap.h"
#include "xfs_error.h"
#include "xfs_ioctl.h"
#include "xfs_xattr.h"

#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/iversion.h>
#include <linux/fiemap.h>

/*
 * Directories have different lock order w.r.t. mmap_lock compared to regular
 * files. This is due to readdir potentially triggering page faults on a user
 * buffer inside filldir(), and this happens with the ilock on the directory
 * held. For regular files, the lock order is the other way around - the
 * mmap_lock is taken during the page fault, and then we lock the ilock to do
 * block mapping. Hence we need a different class for the directory ilock so
 * that lockdep can tell them apart.
 */
static struct lock_class_key xfs_nondir_ilock_class;
static struct lock_class_key xfs_dir_ilock_class;

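/*
 * Callback for security_inode_init_security(): store each security xattr
 * supplied by the LSM as a SECURE-namespace attribute on the new inode.
 */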
static int
xfs_initxattrs(
	struct inode		*inode,
	const struct xattr	*xattr_array,
	void			*fs_info)
{
	const struct xattr	*xattr;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		struct xfs_da_args	args = {
			.dp		= ip,
			.attr_filter	= XFS_ATTR_SECURE,
			.name		= xattr->name,
			.namelen	= strlen(xattr->name),
			.value		= xattr->value,
			.valuelen	= xattr->value_len,
		};
		error = xfs_attr_change(&args);
		if (error < 0)
			break;
	}
	return error;
}

/*
 * Hook in SELinux. This is not quite correct yet, what we really need
 * here (as we do for default ACLs) is a mechanism by which creation of
 * these attrs can be journalled at inode creation time (along with the
 * inode, of course, such that log replay can't cause these to be lost).
 */
int
xfs_inode_init_security(
	struct inode	*inode,
	struct inode	*dir,
	const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					     &xfs_initxattrs, NULL);
}

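/*
 * Convert a dentry into an xfs_name without knowing the file type;
 * the type is left as XFS_DIR3_FT_UNKNOWN.
 */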
static void
xfs_dentry_to_name(
	struct xfs_name	*namep,
	struct dentry	*dentry)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
	namep->type = XFS_DIR3_FT_UNKNOWN;
}

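/*
 * Convert a dentry plus inode mode into an xfs_name, deriving the on-disk
 * file type from the mode. Returns -EFSCORRUPTED if the mode does not map
 * to a valid file type.
 */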
static int
xfs_dentry_mode_to_name(
	struct xfs_name	*namep,
	struct dentry	*dentry,
	int		mode)
{
	namep->name = dentry->d_name.name;
	namep->len = dentry->d_name.len;
	namep->type = xfs_mode_to_ftype(mode);

	if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
		return -EFSCORRUPTED;

	return 0;
}

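/*
 * Undo a partially completed create: remove the freshly allocated inode
 * from its parent directory again.
 */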
STATIC void
xfs_cleanup_inode(
	struct inode	*dir,
	struct inode	*inode,
	struct dentry	*dentry)
{
	struct xfs_name	teardown;

	/* Oh, the horror.
	 * If we can't add the ACL or we fail in
	 * xfs_inode_init_security we must back out.
	 * ENOSPC can hit here, among other things.
	 */
	xfs_dentry_to_name(&teardown, dentry);

	xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
}

/*
 * Check to see if we are likely to need an extended attribute to be added to
 * the inode we are about to allocate. This allows the attribute fork to be
 * created during the inode allocation, reducing the number of transactions we
 * need to do in this fast path.
 *
 * The security checks are optimistic, but not guaranteed. The two LSMs that
 * require xattrs to be added here (selinux and smack) are also the only two
 * LSMs that add a sb->s_security structure to the superblock. Hence if security
 * is enabled and sb->s_security is set, we have a pretty good idea that we are
 * going to be asked to add a security xattr immediately after allocating the
 * xfs inode and instantiating the VFS inode.
 */
static inline bool
xfs_create_need_xattr(
	struct inode	*dir,
	struct posix_acl *default_acl,
	struct posix_acl *acl)
{
	if (acl)
		return true;
	if (default_acl)
		return true;
#if IS_ENABLED(CONFIG_SECURITY)
	if (dir->i_sb->s_security)
		return true;
#endif
	return false;
}

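/*
 * Common create path shared by mknod, create, mkdir and tmpfile. Allocates
 * the inode, applies security attributes and POSIX ACLs, and instantiates
 * (or, for tmpfiles, d_tmpfile()s) the dentry.
 */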
STATIC int
xfs_generic_create(
	struct user_namespace	*mnt_userns,
	struct inode	*dir,
	struct dentry	*dentry,
	umode_t		mode,
	dev_t		rdev,
	struct file	*tmpfile)	/* unnamed file */
{
	struct inode	*inode;
	struct xfs_inode *ip = NULL;
	struct posix_acl *default_acl, *acl;
	struct xfs_name	name;
	int		error;

	/*
	 * Irix uses Missed'em'V split, but doesn't want to see
	 * the upper 5 bits of (14bit) major.
	 */
	if (S_ISCHR(mode) || S_ISBLK(mode)) {
		if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
			return -EINVAL;
	} else {
		rdev = 0;
	}

	error = posix_acl_create(dir, &mode, &default_acl, &acl);
	if (error)
		return error;

	/* Verify mode is valid also for tmpfile case */
	error = xfs_dentry_mode_to_name(&name, dentry, mode);
	if (unlikely(error))
		goto out_free_acl;

	if (!tmpfile) {
		error = xfs_create(mnt_userns, XFS_I(dir), &name, mode, rdev,
				xfs_create_need_xattr(dir, default_acl, acl),
				&ip);
	} else {
		error = xfs_create_tmpfile(mnt_userns, XFS_I(dir), mode, &ip);
	}
	if (unlikely(error))
		goto out_free_acl;

	inode = VFS_I(ip);

	error = xfs_inode_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

	if (default_acl) {
		error = __xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
		if (error)
			goto out_cleanup_inode;
	}
	if (acl) {
		error = __xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
		if (error)
			goto out_cleanup_inode;
	}

	xfs_setup_iops(ip);

	if (tmpfile) {
		/*
		 * The VFS requires that any inode fed to d_tmpfile must have
		 * nlink == 1 so that it can decrement the nlink in d_tmpfile.
		 * However, we created the temp file with nlink == 0 because
		 * we're not allowed to put an inode with nlink > 0 on the
		 * unlinked list. Therefore we have to set nlink to 1 so that
		 * d_tmpfile can immediately set it back to zero.
		 */
		set_nlink(inode, 1);
		d_tmpfile(tmpfile, inode);
	} else
		d_instantiate(dentry, inode);

	xfs_finish_inode_setup(ip);

 out_free_acl:
	posix_acl_release(default_acl);
	posix_acl_release(acl);
	return error;

 out_cleanup_inode:
	xfs_finish_inode_setup(ip);
	if (!tmpfile)
		xfs_cleanup_inode(dir, inode, dentry);
	xfs_irele(ip);
	goto out_free_acl;
}

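/*
 * Thin wrappers that route the VFS mknod, create and mkdir operations
 * through xfs_generic_create().
 */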
STATIC int
xfs_vn_mknod(
	struct mnt_idmap	*idmap,
	struct inode		*dir,
	struct dentry		*dentry,
	umode_t			mode,
	dev_t			rdev)
{
	return xfs_generic_create(mnt_idmap_owner(idmap), dir, dentry, mode,
				  rdev, NULL);
}

STATIC int
xfs_vn_create(
	struct mnt_idmap	*idmap,
	struct inode		*dir,
	struct dentry		*dentry,
	umode_t			mode,
	bool			flags)
{
	struct user_namespace	*mnt_userns = mnt_idmap_owner(idmap);

	return xfs_generic_create(mnt_userns, dir, dentry, mode, 0, NULL);
}

STATIC int
xfs_vn_mkdir(
	struct mnt_idmap	*idmap,
	struct inode		*dir,
	struct dentry		*dentry,
	umode_t			mode)
{
	return xfs_generic_create(mnt_idmap_owner(idmap), dir, dentry,
				  mode | S_IFDIR, 0, NULL);
}

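/*
 * Look up a name in a directory and splice the result into the dcache.
 * A missing entry results in a negative dentry rather than an error.
 */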
STATIC struct dentry *
xfs_vn_lookup(
	struct inode	*dir,
	struct dentry	*dentry,
	unsigned int	flags)
{
	struct inode	*inode;
	struct xfs_inode *cip;
	struct xfs_name	name;
	int		error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&name, dentry);
	error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
	if (likely(!error))
		inode = VFS_I(cip);
	else if (likely(error == -ENOENT))
		inode = NULL;
	else
		inode = ERR_PTR(error);
	return d_splice_alias(inode, dentry);
}

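/*
 * Case-insensitive lookup variant. If the match differs only in case,
 * the dentry is added under the on-disk name via d_add_ci().
 */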
STATIC struct dentry *
xfs_vn_ci_lookup(
	struct inode	*dir,
	struct dentry	*dentry,
	unsigned int	flags)
{
	struct xfs_inode *ip;
	struct xfs_name	xname;
	struct xfs_name ci_name;
	struct qstr	dname;
	int		error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	xfs_dentry_to_name(&xname, dentry);
	error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
	if (unlikely(error)) {
		if (unlikely(error != -ENOENT))
			return ERR_PTR(error);
		/*
		 * call d_add(dentry, NULL) here when d_drop_negative_children
		 * is called in xfs_vn_mknod (ie. allow negative dentries
		 * with CI filesystems).
		 */
		return NULL;
	}

	/* if exact match, just splice and exit */
	if (!ci_name.name)
		return d_splice_alias(VFS_I(ip), dentry);

	/* else case-insensitive match... */
	dname.name = ci_name.name;
	dname.len = ci_name.len;
	dentry = d_add_ci(dentry, VFS_I(ip), &dname);
	kmem_free(ci_name.name);
	return dentry;
}

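/*
 * Create a hard link: add a directory entry for the existing inode and
 * take an extra inode reference for the new dentry.
 */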
STATIC int
xfs_vn_link(
	struct dentry	*old_dentry,
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct inode	*inode = d_inode(old_dentry);
	struct xfs_name	name;
	int		error;

	error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
	if (unlikely(error))
		return error;

	error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
	if (unlikely(error))
		return error;

	ihold(inode);
	d_instantiate(dentry, inode);
	return 0;
}

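/*
 * Remove a directory entry. On case-insensitive filesystems the dentry is
 * invalidated rather than left negative (see the comment below).
 */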
STATIC int
xfs_vn_unlink(
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct xfs_name	name;
	int		error;

	xfs_dentry_to_name(&name, dentry);

	error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
	if (error)
		return error;

	/*
	 * With unlink, the VFS makes the dentry "negative": no inode,
	 * but still hashed. This is incompatible with case-insensitive
	 * mode, so invalidate (unhash) the dentry in CI-mode.
	 */
	if (xfs_has_asciici(XFS_M(dir->i_sb)))
		d_invalidate(dentry);
	return 0;
}

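/*
 * Create a symbolic link, then apply security attributes and instantiate
 * the dentry; on failure the new inode is torn down again.
 */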
STATIC int
xfs_vn_symlink(
	struct mnt_idmap	*idmap,
	struct inode		*dir,
	struct dentry		*dentry,
	const char		*symname)
{
	struct user_namespace	*mnt_userns = mnt_idmap_owner(idmap);
	struct inode	*inode;
	struct xfs_inode *cip = NULL;
	struct xfs_name	name;
	int		error;
	umode_t		mode;

	mode = S_IFLNK |
		(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
	error = xfs_dentry_mode_to_name(&name, dentry, mode);
	if (unlikely(error))
		goto out;

	error = xfs_symlink(mnt_userns, XFS_I(dir), &name, symname, mode, &cip);
	if (unlikely(error))
		goto out;

	inode = VFS_I(cip);

	error = xfs_inode_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

	xfs_setup_iops(cip);

	d_instantiate(dentry, inode);
	xfs_finish_inode_setup(cip);
	return 0;

 out_cleanup_inode:
	xfs_finish_inode_setup(cip);
	xfs_cleanup_inode(dir, inode, dentry);
	xfs_irele(cip);
 out:
	return error;
}

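/*
 * Rename, exchange or whiteout a directory entry. Only the
 * RENAME_NOREPLACE, RENAME_EXCHANGE and RENAME_WHITEOUT flags are
 * supported.
 */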
STATIC int
xfs_vn_rename(
	struct mnt_idmap	*idmap,
	struct inode	*odir,
	struct dentry	*odentry,
	struct inode	*ndir,
	struct dentry	*ndentry,
	unsigned int	flags)
{
	struct user_namespace	*mnt_userns = mnt_idmap_owner(idmap);
	struct inode	*new_inode = d_inode(ndentry);
	int		omode = 0;
	int		error;
	struct xfs_name	oname;
	struct xfs_name	nname;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	/* if we are exchanging files, we need to set i_mode of both files */
	if (flags & RENAME_EXCHANGE)
		omode = d_inode(ndentry)->i_mode;

	error = xfs_dentry_mode_to_name(&oname, odentry, omode);
	if (omode && unlikely(error))
		return error;

	error = xfs_dentry_mode_to_name(&nname, ndentry,
					d_inode(odentry)->i_mode);
	if (unlikely(error))
		return error;

	return xfs_rename(mnt_userns, XFS_I(odir), &oname,
			  XFS_I(d_inode(odentry)), XFS_I(ndir), &nname,
			  new_inode ? XFS_I(new_inode) : NULL, flags);
}

/*
 * careful here - this function can get called recursively, so
 * we need to be very careful about how much stack we use.
 * uio is kmalloced for this reason...
 */
STATIC const char *
xfs_vn_get_link(
	struct dentry		*dentry,
	struct inode		*inode,
	struct delayed_call	*done)
{
	char			*link;
	int			error = -ENOMEM;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	link = kmalloc(XFS_SYMLINK_MAXLEN+1, GFP_KERNEL);
	if (!link)
		goto out_err;

	error = xfs_readlink(XFS_I(d_inode(dentry)), link);
	if (unlikely(error))
		goto out_kfree;

	set_delayed_call(done, kfree_link, link);
	return link;

 out_kfree:
	kfree(link);
 out_err:
	return ERR_PTR(error);
}

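/*
 * Work out the preferred I/O size to report as st_blksize: the realtime
 * extent size for realtime files, otherwise the stripe width or allocsize
 * if "largeio" is in effect, falling back to PAGE_SIZE.
 */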
static uint32_t
xfs_stat_blksize(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * If the file blocks are being allocated from a realtime volume, then
	 * always return the realtime extent size.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_B(mp, xfs_get_extsz_hint(ip));

	/*
	 * Allow large block sizes to be reported to userspace programs if the
	 * "largeio" mount option is used.
	 *
	 * If compatibility mode is specified, simply return the basic unit of
	 * caching so that we don't get inefficient read/modify/write I/O from
	 * user apps. Otherwise....
	 *
	 * If the underlying volume is a stripe, then return the stripe width in
	 * bytes as the recommended I/O size. It is not a stripe and we've set a
	 * default buffered I/O size, return that, otherwise return the compat
	 * default.
	 */
	if (xfs_has_large_iosize(mp)) {
		if (mp->m_swidth)
			return XFS_FSB_TO_B(mp, mp->m_swidth);
		if (xfs_has_allocsize(mp))
			return 1U << mp->m_allocsize_log;
	}

	return PAGE_SIZE;
}

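/*
 * Fill in the kstat structure for the stat/statx family of system calls.
 */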
STATIC int
xfs_vn_getattr(
	struct mnt_idmap	*idmap,
	const struct path	*path,
	struct kstat		*stat,
	u32			request_mask,
	unsigned int		query_flags)
{
	struct user_namespace	*mnt_userns = mnt_idmap_owner(idmap);
statx: Add a system call to make enhanced file info available
Add a system call to make extended file information available, including
file creation and some attribute flags where available through the
underlying filesystem.
The getattr inode operation is altered to take two additional arguments: a
u32 request_mask and an unsigned int flags that indicate the
synchronisation mode. This change is propagated to the vfs_getattr*()
function.
Functions like vfs_stat() are now inline wrappers around new functions
vfs_statx() and vfs_statx_fd() to reduce stack usage.
========
OVERVIEW
========
The idea was initially proposed as a set of xattrs that could be retrieved
with getxattr(), but the general preference proved to be for a new syscall
with an extended stat structure.
A number of requests were gathered for features to be included. The
following have been included:
(1) Make the fields a consistent size on all arches and make them large.
(2) Spare space, request flags and information flags are provided for
future expansion.
(3) Better support for the y2038 problem [Arnd Bergmann] (tv_sec is an
__s64).
(4) Creation time: The SMB protocol carries the creation time, which could
be exported by Samba, which will in turn help CIFS make use of
FS-Cache as that can be used for coherency data (stx_btime).
This is also specified in NFSv4 as a recommended attribute and could
be exported by NFSD [Steve French].
(5) Lightweight stat: Ask for just those details of interest, and allow a
netfs (such as NFS) to approximate anything not of interest, possibly
without going to the server [Trond Myklebust, Ulrich Drepper, Andreas
Dilger] (AT_STATX_DONT_SYNC).
(6) Heavyweight stat: Force a netfs to go to the server, even if it thinks
its cached attributes are up to date [Trond Myklebust]
(AT_STATX_FORCE_SYNC).
And the following have been left out for future extension:
(7) Data version number: Could be used by userspace NFS servers [Aneesh
Kumar].
Can also be used to modify fill_post_wcc() in NFSD which retrieves
i_version directly, but has just called vfs_getattr(). It could get
it from the kstat struct if it used vfs_xgetattr() instead.
(There's disagreement on the exact semantics of a single field, since
not all filesystems do this the same way).
(8) BSD stat compatibility: Including more fields from the BSD stat such
as creation time (st_btime) and inode generation number (st_gen)
[Jeremy Allison, Bernd Schubert].
(9) Inode generation number: Useful for FUSE and userspace NFS servers
[Bernd Schubert].
(This was asked for but later deemed unnecessary with the
open-by-handle capability available and caused disagreement as to
whether it's a security hole or not).
(10) Extra coherency data may be useful in making backups [Andreas Dilger].
(No particular data were offered, but things like last backup
timestamp, the data version number and the DOS archive bit would come
into this category).
(11) Allow the filesystem to indicate what it can/cannot provide: A
filesystem can now say it doesn't support a standard stat feature if
that isn't available, so if, for instance, inode numbers or UIDs don't
exist or are fabricated locally...
(This requires a separate system call - I have an fsinfo() call idea
for this).
(12) Store a 16-byte volume ID in the superblock that can be returned in
struct xstat [Steve French].
(Deferred to fsinfo).
(13) Include granularity fields in the time data to indicate the
granularity of each of the times (NFSv4 time_delta) [Steve French].
(Deferred to fsinfo).
(14) FS_IOC_GETFLAGS value. These could be translated to BSD's st_flags.
Note that the Linux IOC flags are a mess and filesystems such as Ext4
define flags that aren't in linux/fs.h, so translation in the kernel
may be a necessity (or, possibly, we provide the filesystem type too).
(Some attributes are made available in stx_attributes, but the general
feeling was that the IOC flags were to ext[234]-specific and shouldn't
be exposed through statx this way).
(15) Mask of features available on file (eg: ACLs, seclabel) [Brad Boyer,
Michael Kerrisk].
(Deferred, probably to fsinfo. Finding out if there's an ACL or
seclabal might require extra filesystem operations).
(16) Femtosecond-resolution timestamps [Dave Chinner].
(A __reserved field has been left in the statx_timestamp struct for
this - if there proves to be a need).
(17) A set multiple attributes syscall to go with this.
===============
NEW SYSTEM CALL
===============
The new system call is:
int ret = statx(int dfd,
const char *filename,
unsigned int flags,
unsigned int mask,
struct statx *buffer);
The dfd, filename and flags parameters indicate the file to query, in a
similar way to fstatat(). There is no equivalent of lstat() as that can be
emulated with statx() by passing AT_SYMLINK_NOFOLLOW in flags. There is
also no equivalent of fstat() as that can be emulated by passing a NULL
filename to statx() with the fd of interest in dfd.
Whether or not statx() synchronises the attributes with the backing store
can be controlled by OR'ing a value into the flags argument (this typically
only affects network filesystems):
(1) AT_STATX_SYNC_AS_STAT tells statx() to behave as stat() does in this
respect.
(2) AT_STATX_FORCE_SYNC will require a network filesystem to synchronise
its attributes with the server - which might require data writeback to
occur to get the timestamps correct.
(3) AT_STATX_DONT_SYNC will suppress synchronisation with the server in a
network filesystem. The resulting values should be considered
approximate.
mask is a bitmask indicating the fields in struct statx that are of
interest to the caller. The user should set this to STATX_BASIC_STATS to
get the basic set returned by stat(). It should be noted that asking for
more information may entail extra I/O operations.
buffer points to the destination for the data. This must be 256 bytes in
size.
======================
MAIN ATTRIBUTES RECORD
======================
The following structures are defined in which to return the main attribute
set:
struct statx_timestamp {
__s64 tv_sec;
__s32 tv_nsec;
__s32 __reserved;
};
struct statx {
__u32 stx_mask;
__u32 stx_blksize;
__u64 stx_attributes;
__u32 stx_nlink;
__u32 stx_uid;
__u32 stx_gid;
__u16 stx_mode;
__u16 __spare0[1];
__u64 stx_ino;
__u64 stx_size;
__u64 stx_blocks;
__u64 __spare1[1];
struct statx_timestamp stx_atime;
struct statx_timestamp stx_btime;
struct statx_timestamp stx_ctime;
struct statx_timestamp stx_mtime;
__u32 stx_rdev_major;
__u32 stx_rdev_minor;
__u32 stx_dev_major;
__u32 stx_dev_minor;
__u64 __spare2[14];
};
The defined bits in request_mask and stx_mask are:
STATX_TYPE Want/got stx_mode & S_IFMT
STATX_MODE Want/got stx_mode & ~S_IFMT
STATX_NLINK Want/got stx_nlink
STATX_UID Want/got stx_uid
STATX_GID Want/got stx_gid
STATX_ATIME Want/got stx_atime{,_ns}
STATX_MTIME Want/got stx_mtime{,_ns}
STATX_CTIME Want/got stx_ctime{,_ns}
STATX_INO Want/got stx_ino
STATX_SIZE Want/got stx_size
STATX_BLOCKS Want/got stx_blocks
STATX_BASIC_STATS [The stuff in the normal stat struct]
STATX_BTIME Want/got stx_btime{,_ns}
STATX_ALL [All currently available stuff]
stx_btime is the file creation time, stx_mask is a bitmask indicating the
data provided and __spares*[] are where as-yet undefined fields can be
placed.
Time fields are structures with separate seconds and nanoseconds fields
plus a reserved field in case we want to add even finer resolution. Note
that times will be negative if before 1970; in such a case, the nanosecond
fields will also be negative if not zero.
The bits defined in the stx_attributes field convey information about a
file, how it is accessed, where it is and what it does. The following
attributes map to FS_*_FL flags and are the same numerical value:
STATX_ATTR_COMPRESSED File is compressed by the fs
STATX_ATTR_IMMUTABLE File is marked immutable
STATX_ATTR_APPEND File is append-only
STATX_ATTR_NODUMP File is not to be dumped
STATX_ATTR_ENCRYPTED File requires key to decrypt in fs
Within the kernel, the supported flags are listed by:
KSTAT_ATTR_FS_IOC_FLAGS
[Are any other IOC flags of sufficient general interest to be exposed
through this interface?]
New flags include:
STATX_ATTR_AUTOMOUNT Object is an automount trigger
These are for the use of GUI tools that might want to mark files specially,
depending on what they are.
Fields in struct statx come in a number of classes:

 (0) stx_dev_*, stx_blksize.

     These are local system information and are always available.

 (1) stx_mode, stx_nlinks, stx_uid, stx_gid, stx_[amc]time, stx_ino,
     stx_size, stx_blocks.

     These will be returned whether the caller asks for them or not.  The
     corresponding bits in stx_mask will be set to indicate whether they
     actually have valid values.

     If the caller didn't ask for them, then they may be approximated.  For
     example, NFS won't waste any time updating them from the server,
     unless as a byproduct of updating something requested.

     If the values don't actually exist for the underlying object (such as
     UID or GID on a DOS file), then the bit won't be set in the stx_mask,
     even if the caller asked for the value.  In such a case, the returned
     value will be a fabrication.

     Note that there are instances where the type might not be valid, for
     instance Windows reparse points.

 (2) stx_rdev_*.

     This will be set only if stx_mode indicates we're looking at a
     blockdev or a chardev, otherwise will be 0.

 (3) stx_btime.

     Similar to (1), except this will be set to 0 if it doesn't exist.
=======
TESTING
=======

The following test program can be used to test the statx system call:

	samples/statx/test-statx.c

Just compile and run, passing it paths to the files you want to examine.
The file is built automatically if CONFIG_SAMPLES is enabled.

Here's some example output.  Firstly, an NFS directory that crosses to
another FSID.  Note that the AUTOMOUNT attribute is set because transiting
this directory will cause d_automount to be invoked by the VFS.

	[root@andromeda ~]# /tmp/test-statx -A /warthog/data
	statx(/warthog/data) = 0
	results=7ff
	Size: 4096 Blocks: 8 IO Block: 1048576 directory
	Device: 00:26 Inode: 1703937 Links: 125
	Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
	Access: 2016-11-24 09:02:12.219699527+0000
	Modify: 2016-11-17 10:44:36.225653653+0000
	Change: 2016-11-17 10:44:36.225653653+0000
	Attributes: 0000000000001000 (-------- -------- -------- -------- -------- -------- ---m---- --------)

Secondly, the result of automounting on that directory.

	[root@andromeda ~]# /tmp/test-statx /warthog/data
	statx(/warthog/data) = 0
	results=7ff
	Size: 4096 Blocks: 8 IO Block: 1048576 directory
	Device: 00:27 Inode: 2 Links: 125
	Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
	Access: 2016-11-24 09:02:12.219699527+0000
	Modify: 2016-11-17 10:44:36.225653653+0000
	Change: 2016-11-17 10:44:36.225653653+0000
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

	struct inode		*inode = d_inode(path->dentry);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	vfsuid_t		vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
	vfsgid_t		vfsgid = i_gid_into_vfsgid(mnt_userns, inode);

	trace_xfs_getattr(ip);

	if (xfs_is_shutdown(mp))
		return -EIO;

	stat->size = XFS_ISIZE(ip);
	stat->dev = inode->i_sb->s_dev;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->ino = ip->i_ino;
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);

	if (xfs_has_v3inodes(mp)) {
		if (request_mask & STATX_BTIME) {
			stat->result_mask |= STATX_BTIME;
			stat->btime = ip->i_crtime;
		}
	}

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (ip->i_diflags & XFS_DIFLAG_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (ip->i_diflags & XFS_DIFLAG_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_NODUMP);

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		stat->blksize = BLKDEV_IOSIZE;
		stat->rdev = inode->i_rdev;
		break;
	case S_IFREG:
		if (request_mask & STATX_DIOALIGN) {
			struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
			struct block_device	*bdev = target->bt_bdev;

			stat->result_mask |= STATX_DIOALIGN;
			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
			stat->dio_offset_align = bdev_logical_block_size(bdev);
		}
		fallthrough;
	default:
		stat->blksize = xfs_stat_blksize(ip);
		stat->rdev = 0;
		break;
	}

	return 0;
}

static int
xfs_vn_change_ok(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct iattr		*iattr)
{
	struct xfs_mount	*mp = XFS_I(d_inode(dentry))->i_mount;

	if (xfs_is_readonly(mp))
		return -EROFS;

	if (xfs_is_shutdown(mp))
		return -EIO;

	return setattr_prepare(idmap, dentry, iattr);
}

/*
 * Set non-size attributes of an inode.
 *
 * Caution: The caller of this function is responsible for calling
 * setattr_prepare() or otherwise verifying the change is fine.
 */
static int
xfs_setattr_nonsize(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct xfs_inode	*ip,
	struct iattr		*iattr)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			mask = iattr->ia_valid;
	xfs_trans_t		*tp;
	int			error;
	kuid_t			uid = GLOBAL_ROOT_UID;
	kgid_t			gid = GLOBAL_ROOT_GID;
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
	struct xfs_dquot	*old_udqp = NULL, *old_gdqp = NULL;
	struct user_namespace	*mnt_userns = mnt_idmap_owner(idmap);

	ASSERT((mask & ATTR_SIZE) == 0);

	/*
	 * If disk quotas are on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
		uint	qflags = 0;

		if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
			uid = from_vfsuid(mnt_userns, i_user_ns(inode),
					  iattr->ia_vfsuid);
			qflags |= XFS_QMOPT_UQUOTA;
		} else {
			uid = inode->i_uid;
		}
		if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
			gid = from_vfsgid(mnt_userns, i_user_ns(inode),
					  iattr->ia_vfsgid);
			qflags |= XFS_QMOPT_GQUOTA;
		} else {
			gid = inode->i_gid;
		}

		/*
		 * We take a reference when we initialize udqp and gdqp,
		 * so it is important that we never blindly double trip on
		 * the same variable. See xfs_create() for an example.
		 */
		ASSERT(udqp == NULL);
		ASSERT(gdqp == NULL);
		error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_projid,
					   qflags, &udqp, &gdqp, NULL);
		if (error)
			return error;
	}

	error = xfs_trans_alloc_ichange(ip, udqp, gdqp, NULL,
			has_capability_noaudit(current, CAP_FOWNER), &tp);
	if (error)
		goto out_dqrele;

	/*
	 * Register quota modifications in the transaction. Must be the owner
	 * or privileged. These IDs could have changed since we last looked at
	 * them. But, we're assured that if the ownership did change while we
	 * didn't have the inode locked, inode's dquot(s) would have changed
	 * also.
	 */
	if (XFS_IS_UQUOTA_ON(mp) &&
	    i_uid_needs_update(mnt_userns, iattr, inode)) {
		ASSERT(udqp);
		old_udqp = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);
	}
	if (XFS_IS_GQUOTA_ON(mp) &&
	    i_gid_needs_update(mnt_userns, iattr, inode)) {
		ASSERT(xfs_has_pquotino(mp) || !XFS_IS_PQUOTA_ON(mp));
		ASSERT(gdqp);
		old_gdqp = xfs_qm_vop_chown(tp, ip, &ip->i_gdquot, gdqp);
	}

	setattr_copy(idmap, inode, iattr);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(mp, xs_ig_attrchg);

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(old_udqp);
	xfs_qm_dqrele(old_gdqp);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);

	if (error)
		return error;

	/*
	 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
	 *	     update. We could avoid this with linked transactions
	 *	     and passing down the transaction pointer all the way
	 *	     to attr_set. No previous user of the generic
	 *	     Posix ACL code seems to care about this issue either.
	 */
	if (mask & ATTR_MODE) {
		error = posix_acl_chmod(mnt_userns, dentry, inode->i_mode);
		if (error)
			return error;
	}

	return 0;

out_dqrele:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	return error;
}

/*
 * Truncate file.  Must have write permission and not be a directory.
 *
 * Caution: The caller of this function is responsible for calling
 * setattr_prepare() or otherwise verifying the change is fine.
 */
STATIC int
xfs_setattr_size(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct xfs_inode	*ip,
	struct iattr		*iattr)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		oldsize, newsize;
	struct xfs_trans	*tp;
	int			error;
	uint			lock_flags = 0;
	bool			did_zeroing = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
		ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0);

	oldsize = inode->i_size;
	newsize = iattr->ia_size;

	/*
	 * Short circuit the truncate case for zero length files.
	 */
	if (newsize == 0 && oldsize == 0 && ip->i_df.if_nextents == 0) {
		if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME)))
			return 0;

		/*
		 * Use the regular setattr path to update the timestamps.
		 */
		iattr->ia_valid &= ~ATTR_SIZE;
		return xfs_setattr_nonsize(idmap, dentry, ip, iattr);
	}

	/*
	 * Make sure that the dquots are attached to the inode.
	 */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * Wait for all direct I/O to complete.
	 */
	inode_dio_wait(inode);

	/*
	 * File data changes must be complete before we start the transaction to
	 * modify the inode. This needs to be done before joining the inode to
	 * the transaction because the inode cannot be unlocked once it is a
	 * part of the transaction.
	 *
	 * Start with zeroing any data beyond EOF that we may expose on file
	 * extension, or zeroing out the rest of the block on a downward
	 * truncate.
	 */
	if (newsize > oldsize) {
		trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
		error = xfs_zero_range(ip, oldsize, newsize - oldsize,
				&did_zeroing);
	} else {
		/*
		 * iomap won't detect a dirty page over an unwritten block (or a
		 * cow block over a hole) and subsequently skips zeroing the
		 * newly post-EOF portion of the page. Flush the new EOF to
		 * convert the block before the pagecache truncate.
		 */
		error = filemap_write_and_wait_range(inode->i_mapping, newsize,
						     newsize);
		if (error)
			return error;
		error = xfs_truncate_page(ip, newsize, &did_zeroing);
	}

	if (error)
		return error;

	/*
	 * We've already locked out new page faults, so now we can safely remove
	 * pages from the page cache knowing they won't get refaulted until we
	 * drop the XFS_MMAP_EXCL lock after the extent manipulations are
	 * complete. The truncate_setsize() call also cleans partial EOF page
	 * PTEs on extending truncates and hence ensures sub-page block size
	 * filesystems are correctly handled, too.
	 *
	 * We have to do all the page cache truncate work outside the
	 * transaction context as the "lock" order is page lock->log space
	 * reservation as defined by extent allocation in the writeback path.
	 * Hence a truncate can fail with ENOMEM from xfs_trans_alloc(), but
	 * having already truncated the in-memory version of the file (i.e. made
	 * user visible changes). There's not much we can do about this, except
	 * to hope that the caller sees ENOMEM and retries the truncate
	 * operation.
	 *
	 * And we update in-core i_size and truncate page cache beyond newsize
	 * before writing back the [i_disk_size, newsize] range, so we're
	 * guaranteed not to write stale data past the new EOF on truncate down.
	 */
	truncate_setsize(inode, newsize);

	/*
	 * We are going to log the inode size change in this transaction so
	 * any previous writes that are beyond the on disk EOF and the new
	 * EOF that have not been written out need to be written here.  If we
	 * do not write the data out, we expose ourselves to the null files
	 * problem. Note that this includes any block zeroing we did above;
	 * otherwise those blocks may not be zeroed after a crash.
	 */
	if (did_zeroing ||
	    (newsize > ip->i_disk_size && oldsize != ip->i_disk_size)) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						ip->i_disk_size, newsize - 1);
		if (error)
			return error;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;

	lock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Only change the c/mtime if we are changing the size or we are
	 * explicitly asked to change it.  This handles the semantic difference
	 * between truncate() and ftruncate() as implemented in the VFS.
	 *
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize &&
	    !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
		iattr->ia_ctime = iattr->ia_mtime =
			current_time(inode);
		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
	}

	/*
	 * The first thing we do is set the size to new_size permanently on
	 * disk.  This way we don't have to worry about anyone ever being able
	 * to look at the data being freed even in the face of a crash.
	 * What we're getting around here is the case where we free a block, it
	 * is allocated to another file, it is written to, and then we crash.
	 * If the new data gets written to the file but the log buffers
	 * containing the free and reallocation don't, then we'd end up with
	 * garbage in the blocks being freed.  As long as we make the new size
	 * permanent before actually freeing any blocks it doesn't matter if
	 * they get written to.
	 */
	ip->i_disk_size = newsize;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (newsize <= oldsize) {
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize);
		if (error)
			goto out_trans_cancel;

		/*
		 * Truncated "down", so we're removing references to old data
		 * here - if we delay flushing for a long time, we expose
		 * ourselves unduly to the notorious NULL files problem.  So,
		 * we mark this inode and flush it when the file is closed,
		 * and do not wait the usual (long) time for writeout.
		 */
		xfs_iflags_set(ip, XFS_ITRUNCATED);

		/* A truncate down always removes post-EOF blocks. */
		xfs_inode_clear_eofblocks_tag(ip);
	}

	ASSERT(!(iattr->ia_valid & (ATTR_UID | ATTR_GID)));
	setattr_copy(idmap, inode, iattr);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	XFS_STATS_INC(mp, xs_ig_attrchg);

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);
out_unlock:
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

int
xfs_vn_setattr_size(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct iattr		*iattr)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	int			error;

	trace_xfs_setattr(ip);

	error = xfs_vn_change_ok(idmap, dentry, iattr);
	if (error)
		return error;
	return xfs_setattr_size(idmap, dentry, ip, iattr);
}

STATIC int
xfs_vn_setattr(
	struct mnt_idmap	*idmap,
	struct dentry		*dentry,
	struct iattr		*iattr)
{
	struct inode		*inode = d_inode(dentry);
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;

	if (iattr->ia_valid & ATTR_SIZE) {
		uint	iolock;

		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
		iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;

		error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
		if (error) {
			xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
			return error;
		}

		error = xfs_vn_setattr_size(idmap, dentry, iattr);
		xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	} else {
		trace_xfs_setattr(ip);

		error = xfs_vn_change_ok(idmap, dentry, iattr);
		if (!error)
			error = xfs_setattr_nonsize(idmap, dentry, ip, iattr);
	}

	return error;
}

STATIC int
xfs_vn_update_time(
	struct inode		*inode,
	struct timespec64	*now,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			log_flags = XFS_ILOG_TIMESTAMP;
	struct xfs_trans	*tp;
	int			error;

	trace_xfs_update_time(ip);

	if (inode->i_sb->s_flags & SB_LAZYTIME) {
		if (!((flags & S_VERSION) &&
		      inode_maybe_inc_iversion(inode, false)))
			return generic_update_time(inode, now, flags);

		/* Capture the iversion update that just occurred */
		log_flags |= XFS_ILOG_CORE;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, log_flags);
	return xfs_trans_commit(tp);
}

STATIC int
xfs_vn_fiemap(
	struct inode		*inode,
	struct fiemap_extent_info *fieinfo,
	u64			start,
	u64			length)
{
	int			error;

	xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
		error = iomap_fiemap(inode, fieinfo, start, length,
				&xfs_xattr_iomap_ops);
	} else {
		error = iomap_fiemap(inode, fieinfo, start, length,
				&xfs_read_iomap_ops);
	}
	xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);

	return error;
}

STATIC int
xfs_vn_tmpfile(
	struct user_namespace	*mnt_userns,
	struct inode		*dir,
	struct file		*file,
	umode_t			mode)
{
	int err = xfs_generic_create(mnt_userns, dir, file->f_path.dentry, mode, 0, file);

	return finish_open_simple(file, err);
}

static const struct inode_operations xfs_inode_operations = {
	.get_inode_acl		= xfs_get_acl,
	.set_acl		= xfs_set_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.listxattr		= xfs_vn_listxattr,
	.fiemap			= xfs_vn_fiemap,
	.update_time		= xfs_vn_update_time,
	.fileattr_get		= xfs_fileattr_get,
	.fileattr_set		= xfs_fileattr_set,
};

static const struct inode_operations xfs_dir_inode_operations = {
	.create			= xfs_vn_create,
	.lookup			= xfs_vn_lookup,
	.link			= xfs_vn_link,
	.unlink			= xfs_vn_unlink,
	.symlink		= xfs_vn_symlink,
	.mkdir			= xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtle differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
	.rmdir			= xfs_vn_unlink,
	.mknod			= xfs_vn_mknod,
	.rename			= xfs_vn_rename,
	.get_inode_acl		= xfs_get_acl,
	.set_acl		= xfs_set_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.listxattr		= xfs_vn_listxattr,
	.update_time		= xfs_vn_update_time,
	.tmpfile		= xfs_vn_tmpfile,
	.fileattr_get		= xfs_fileattr_get,
	.fileattr_set		= xfs_fileattr_set,
};

static const struct inode_operations xfs_dir_ci_inode_operations = {
	.create			= xfs_vn_create,
	.lookup			= xfs_vn_ci_lookup,
	.link			= xfs_vn_link,
	.unlink			= xfs_vn_unlink,
	.symlink		= xfs_vn_symlink,
	.mkdir			= xfs_vn_mkdir,
	/*
	 * Yes, XFS uses the same method for rmdir and unlink.
	 *
	 * There are some subtle differences deeper in the code,
	 * but we use S_ISDIR to check for those.
	 */
	.rmdir			= xfs_vn_unlink,
	.mknod			= xfs_vn_mknod,
	.rename			= xfs_vn_rename,
	.get_inode_acl		= xfs_get_acl,
	.set_acl		= xfs_set_acl,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.listxattr		= xfs_vn_listxattr,
	.update_time		= xfs_vn_update_time,
	.tmpfile		= xfs_vn_tmpfile,
	.fileattr_get		= xfs_fileattr_get,
	.fileattr_set		= xfs_fileattr_set,
};

static const struct inode_operations xfs_symlink_inode_operations = {
	.get_link		= xfs_vn_get_link,
	.getattr		= xfs_vn_getattr,
	.setattr		= xfs_vn_setattr,
	.listxattr		= xfs_vn_listxattr,
	.update_time		= xfs_vn_update_time,
};

/* Figure out if this file actually supports DAX. */
static bool
xfs_inode_supports_dax(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	/* Only supported on regular files. */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/* Block size must match page size */
	if (mp->m_sb.sb_blocksize != PAGE_SIZE)
		return false;

	/* Device has to support DAX too. */
	return xfs_inode_buftarg(ip)->bt_daxdev != NULL;
}

static bool
xfs_inode_should_enable_dax(
	struct xfs_inode *ip)
{
	if (!IS_ENABLED(CONFIG_FS_DAX))
		return false;
	if (xfs_has_dax_never(ip->i_mount))
		return false;
	if (!xfs_inode_supports_dax(ip))
		return false;
	if (xfs_has_dax_always(ip->i_mount))
		return true;
	if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
		return true;
	return false;
}

void
xfs_diflags_to_iflags(
	struct xfs_inode	*ip,
	bool			init)
{
	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);
	unsigned int		flags = 0;

	ASSERT(!(IS_DAX(inode) && init));

	if (xflags & FS_XFLAG_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		flags |= S_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		flags |= S_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		flags |= S_NOATIME;
	if (init && xfs_inode_should_enable_dax(ip))
		flags |= S_DAX;

	/*
	 * S_DAX can only be set during inode initialization and is never set by
	 * the VFS, so we cannot mask off S_DAX in i_flags.
	 */
	inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC | S_NOATIME);
	inode->i_flags |= flags;
}

/*
 * Initialize the Linux inode.
 *
 * When reading existing inodes from disk this is called directly from xfs_iget,
 * when creating a new inode it is called from xfs_init_new_inode after setting
 * up the inode. These callers have different criteria for clearing XFS_INEW, so
 * leave it up to the caller to deal with unlocking the inode appropriately.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;
	gfp_t			gfp_mask;

	inode->i_ino = ip->i_ino;
	inode->i_state |= I_NEW;

	inode_sb_list_add(inode);
	/* make the inode look hashed for the writeback code */
	inode_fake_hash(inode);

	i_size_write(inode, ip->i_disk_size);
	xfs_diflags_to_iflags(ip, true);

	if (S_ISDIR(inode->i_mode)) {
		/*
		 * We set the i_rwsem class here to avoid potential races with
		 * lockdep_annotate_inode_mutex_key() reinitialising the lock
		 * after a filehandle lookup has already found the inode in
		 * cache before it has been unlocked via unlock_new_inode().
		 */
		lockdep_set_class(&inode->i_rwsem,
				  &inode->i_sb->s_type->i_mutex_dir_key);
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
	} else {
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
	}

	/*
	 * Ensure all page cache allocations are done from GFP_NOFS context to
	 * prevent direct reclaim recursion back into the filesystem and blowing
	 * stacks or deadlocking.
	 */
	gfp_mask = mapping_gfp_mask(inode->i_mapping);
	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!xfs_inode_has_attr_fork(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}
}

void
xfs_setup_iops(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		if (IS_DAX(inode))
			inode->i_mapping->a_ops = &xfs_dax_aops;
		else
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		if (xfs_has_asciici(XFS_M(inode->i_sb)))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}