/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef	__XFS_TRANS_H__
#define	__XFS_TRANS_H__

struct xfs_log_item;

/*
 * This is the structure written in the log at the head of
 * every transaction.  It identifies the type and id of the
 * transaction, and contains the number of items logged by
 * the transaction so we know how many to expect during recovery.
 *
 * Do not change the below structure without redoing the code in
 * xlog_recover_add_to_trans() and xlog_recover_add_to_cont_trans().
 */
typedef struct xfs_trans_header {
	uint		th_magic;		/* magic number */
	uint		th_type;		/* transaction type */
	__int32_t	th_tid;			/* transaction id (unused) */
	uint		th_num_items;		/* num items logged by trans */
} xfs_trans_header_t;

#define	XFS_TRANS_HEADER_MAGIC	0x5452414e	/* TRAN */

/*
 * Log item types.
 */
#define	XFS_LI_EFI		0x1236
#define	XFS_LI_EFD		0x1237
#define	XFS_LI_IUNLINK		0x1238
#define	XFS_LI_INODE		0x123b	/* aligned ino chunks, var-size ibufs */
#define	XFS_LI_BUF		0x123c	/* v2 bufs, variable sized inode bufs */
#define	XFS_LI_DQUOT		0x123d
#define	XFS_LI_QUOTAOFF		0x123e

#define XFS_LI_TYPE_DESC \
	{ XFS_LI_EFI,		"XFS_LI_EFI" }, \
	{ XFS_LI_EFD,		"XFS_LI_EFD" }, \
	{ XFS_LI_IUNLINK,	"XFS_LI_IUNLINK" }, \
	{ XFS_LI_INODE,		"XFS_LI_INODE" }, \
	{ XFS_LI_BUF,		"XFS_LI_BUF" }, \
	{ XFS_LI_DQUOT,		"XFS_LI_DQUOT" }, \
	{ XFS_LI_QUOTAOFF,	"XFS_LI_QUOTAOFF" }
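
/*
 * Illustrative sketch (not part of the original header): the value/name
 * pairs above exist so tracepoints can print symbolic log item type names.
 * A hypothetical trace format string could consume the table as:
 *
 *	__print_symbolic(__entry->type, XFS_LI_TYPE_DESC)
 *
 * which maps, e.g., 0x123b to "XFS_LI_INODE" in trace output.
 */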

/*
 * Transaction types.  Used to distinguish types of buffers.
 */
#define XFS_TRANS_SETATTR_NOT_SIZE	1
#define XFS_TRANS_SETATTR_SIZE		2
#define XFS_TRANS_INACTIVE		3
#define XFS_TRANS_CREATE		4
#define XFS_TRANS_CREATE_TRUNC		5
#define XFS_TRANS_TRUNCATE_FILE		6
#define XFS_TRANS_REMOVE		7
#define XFS_TRANS_LINK			8
#define XFS_TRANS_RENAME		9
#define XFS_TRANS_MKDIR			10
#define XFS_TRANS_RMDIR			11
#define XFS_TRANS_SYMLINK		12
#define XFS_TRANS_SET_DMATTRS		13
#define XFS_TRANS_GROWFS		14
#define XFS_TRANS_STRAT_WRITE		15
#define XFS_TRANS_DIOSTRAT		16
/* 17 was XFS_TRANS_WRITE_SYNC */
#define XFS_TRANS_WRITEID		18
#define XFS_TRANS_ADDAFORK		19
#define XFS_TRANS_ATTRINVAL		20
#define XFS_TRANS_ATRUNCATE		21
#define XFS_TRANS_ATTR_SET		22
#define XFS_TRANS_ATTR_RM		23
#define XFS_TRANS_ATTR_FLAG		24
#define XFS_TRANS_CLEAR_AGI_BUCKET	25
#define XFS_TRANS_QM_SBCHANGE		26
/*
 * Dummy entries since we use the transaction type to index into the
 * trans_type[] in xlog_recover_print_trans_head()
 */
#define XFS_TRANS_DUMMY1		27
#define XFS_TRANS_DUMMY2		28
#define XFS_TRANS_QM_QUOTAOFF		29
#define XFS_TRANS_QM_DQALLOC		30
#define XFS_TRANS_QM_SETQLIM		31
#define XFS_TRANS_QM_DQCLUSTER		32
#define XFS_TRANS_QM_QINOCREATE		33
#define XFS_TRANS_QM_QUOTAOFF_END	34
#define XFS_TRANS_SB_UNIT		35
#define XFS_TRANS_FSYNC_TS		36
#define XFS_TRANS_GROWFSRT_ALLOC	37
#define XFS_TRANS_GROWFSRT_ZERO		38
#define XFS_TRANS_GROWFSRT_FREE		39
#define XFS_TRANS_SWAPEXT		40
#define XFS_TRANS_SB_COUNT		41
#define XFS_TRANS_CHECKPOINT		42
#define XFS_TRANS_TYPE_MAX		42
/* new transaction types need to be reflected in xfs_logprint(8) */

#define XFS_TRANS_TYPES \
	{ XFS_TRANS_SETATTR_NOT_SIZE,	"SETATTR_NOT_SIZE" }, \
	{ XFS_TRANS_SETATTR_SIZE,	"SETATTR_SIZE" }, \
	{ XFS_TRANS_INACTIVE,		"INACTIVE" }, \
	{ XFS_TRANS_CREATE,		"CREATE" }, \
	{ XFS_TRANS_CREATE_TRUNC,	"CREATE_TRUNC" }, \
	{ XFS_TRANS_TRUNCATE_FILE,	"TRUNCATE_FILE" }, \
	{ XFS_TRANS_REMOVE,		"REMOVE" }, \
	{ XFS_TRANS_LINK,		"LINK" }, \
	{ XFS_TRANS_RENAME,		"RENAME" }, \
	{ XFS_TRANS_MKDIR,		"MKDIR" }, \
	{ XFS_TRANS_RMDIR,		"RMDIR" }, \
	{ XFS_TRANS_SYMLINK,		"SYMLINK" }, \
	{ XFS_TRANS_SET_DMATTRS,	"SET_DMATTRS" }, \
	{ XFS_TRANS_GROWFS,		"GROWFS" }, \
	{ XFS_TRANS_STRAT_WRITE,	"STRAT_WRITE" }, \
	{ XFS_TRANS_DIOSTRAT,		"DIOSTRAT" }, \
	{ XFS_TRANS_WRITEID,		"WRITEID" }, \
	{ XFS_TRANS_ADDAFORK,		"ADDAFORK" }, \
	{ XFS_TRANS_ATTRINVAL,		"ATTRINVAL" }, \
	{ XFS_TRANS_ATRUNCATE,		"ATRUNCATE" }, \
	{ XFS_TRANS_ATTR_SET,		"ATTR_SET" }, \
	{ XFS_TRANS_ATTR_RM,		"ATTR_RM" }, \
	{ XFS_TRANS_ATTR_FLAG,		"ATTR_FLAG" }, \
	{ XFS_TRANS_CLEAR_AGI_BUCKET,	"CLEAR_AGI_BUCKET" }, \
	{ XFS_TRANS_QM_SBCHANGE,	"QM_SBCHANGE" }, \
	{ XFS_TRANS_QM_QUOTAOFF,	"QM_QUOTAOFF" }, \
	{ XFS_TRANS_QM_DQALLOC,		"QM_DQALLOC" }, \
	{ XFS_TRANS_QM_SETQLIM,		"QM_SETQLIM" }, \
	{ XFS_TRANS_QM_DQCLUSTER,	"QM_DQCLUSTER" }, \
	{ XFS_TRANS_QM_QINOCREATE,	"QM_QINOCREATE" }, \
	{ XFS_TRANS_QM_QUOTAOFF_END,	"QM_QOFF_END" }, \
	{ XFS_TRANS_SB_UNIT,		"SB_UNIT" }, \
	{ XFS_TRANS_FSYNC_TS,		"FSYNC_TS" }, \
	{ XFS_TRANS_GROWFSRT_ALLOC,	"GROWFSRT_ALLOC" }, \
	{ XFS_TRANS_GROWFSRT_ZERO,	"GROWFSRT_ZERO" }, \
	{ XFS_TRANS_GROWFSRT_FREE,	"GROWFSRT_FREE" }, \
	{ XFS_TRANS_SWAPEXT,		"SWAPEXT" }, \
	{ XFS_TRANS_SB_COUNT,		"SB_COUNT" }, \
	{ XFS_TRANS_CHECKPOINT,		"CHECKPOINT" }, \
	{ XFS_TRANS_DUMMY1,		"DUMMY1" }, \
	{ XFS_TRANS_DUMMY2,		"DUMMY2" }, \
	{ XLOG_UNMOUNT_REC_TYPE,	"UNMOUNT" }
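
/*
 * Illustrative sketch (not part of the original header): like
 * XFS_LI_TYPE_DESC above, this table pairs each transaction type with a
 * short name so tracepoints can emit readable output, e.g. a hypothetical
 * trace event printing the type of a transaction pointer "tp":
 *
 *	__print_symbolic(tp->t_type, XFS_TRANS_TYPES)
 */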

/*
 * This structure is used to track log items associated with
 * a transaction.  It points to the log item and keeps some
 * flags to track the state of the log item.  It also tracks
 * the amount of space needed to log the item it describes
 * once we get to commit processing (see xfs_trans_commit()).
 */
struct xfs_log_item_desc {
	struct xfs_log_item	*lid_item;
	struct list_head	lid_trans;
	unsigned char		lid_flags;
};

#define XFS_LID_DIRTY		0x1

#define	XFS_TRANS_MAGIC		0x5452414E	/* 'TRAN' */
/*
 * Values for t_flags.
 */
#define	XFS_TRANS_DIRTY		0x01	/* something needs to be logged */
#define	XFS_TRANS_SB_DIRTY	0x02	/* superblock is modified */
#define	XFS_TRANS_PERM_LOG_RES	0x04	/* xact took a permanent log res */
#define	XFS_TRANS_SYNC		0x08	/* make commit synchronous */
#define XFS_TRANS_DQ_DIRTY	0x10	/* at least one dquot in trx dirty */
#define XFS_TRANS_RESERVE	0x20	/* OK to use reserved data blocks */
#define XFS_TRANS_FREEZE_PROT	0x40	/* Transaction has elevated writer
					   count in superblock */

/*
 * Values for call flags parameter.
 */
#define	XFS_TRANS_RELEASE_LOG_RES	0x4
#define	XFS_TRANS_ABORT			0x8
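
/*
 * Illustrative sketch (not part of the original header): these flags are
 * passed to xfs_trans_commit() and xfs_trans_cancel().  A caller holding a
 * permanent log reservation typically commits with
 *
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *
 * and, on an error path once the transaction has dirtied metadata,
 * cancels with
 *
 *	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
 */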

/*
 * Field values for xfs_trans_mod_sb.
 */
#define	XFS_TRANS_SB_ICOUNT		0x00000001
#define	XFS_TRANS_SB_IFREE		0x00000002
#define	XFS_TRANS_SB_FDBLOCKS		0x00000004
#define	XFS_TRANS_SB_RES_FDBLOCKS	0x00000008
#define	XFS_TRANS_SB_FREXTENTS		0x00000010
#define	XFS_TRANS_SB_RES_FREXTENTS	0x00000020
#define	XFS_TRANS_SB_DBLOCKS		0x00000040
#define	XFS_TRANS_SB_AGCOUNT		0x00000080
#define	XFS_TRANS_SB_IMAXPCT		0x00000100
#define	XFS_TRANS_SB_REXTSIZE		0x00000200
#define	XFS_TRANS_SB_RBMBLOCKS		0x00000400
#define	XFS_TRANS_SB_RBLOCKS		0x00000800
#define	XFS_TRANS_SB_REXTENTS		0x00001000
#define	XFS_TRANS_SB_REXTSLOG		0x00002000
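
/*
 * Illustrative sketch (not part of the original header): these field
 * values select which incore superblock counter a delta applies to.
 * For example, allocating a new chunk of 64 inodes might account:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, 64);
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 64);
 *
 * The deltas accumulate in the xfs_trans fields below (t_icount_delta,
 * t_ifree_delta, ...) and are applied when the transaction commits.
 */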

/*
 * Per-extent log reservation for the allocation btree changes
 * involved in freeing or allocating an extent.
 * 2 trees * (2 blocks/level * max depth - 1)
 */
#define	XFS_ALLOCFREE_LOG_COUNT(mp,nx) \
	((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1)))
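
/*
 * Worked example (illustrative, not part of the original header): with
 * XFS_AG_MAXLEVELS(mp) == 5, a single extent operation (nx == 1) reserves
 * 2 * (2 * 5 - 1) == 18 blocks -- covering changes to both the by-bno and
 * by-size free space btrees.
 */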

/*
 * Per-directory log reservation for any directory change.
 * dir blocks: (1 btree block per level + data block + free block)
 * bmap btree: (levels + 2) * max depth
 * v2 directory blocks can be fragmented below the dirblksize down to the fsb
 * size, so account for that in the DAENTER macros.
 */
#define	XFS_DIROP_LOG_COUNT(mp)	\
	(XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \
	 XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1)

#define	XFS_WRITE_LOG_RES(mp)		((mp)->m_reservations.tr_write)
#define	XFS_ITRUNCATE_LOG_RES(mp)	((mp)->m_reservations.tr_itruncate)
#define	XFS_RENAME_LOG_RES(mp)		((mp)->m_reservations.tr_rename)
#define	XFS_LINK_LOG_RES(mp)		((mp)->m_reservations.tr_link)
#define	XFS_REMOVE_LOG_RES(mp)		((mp)->m_reservations.tr_remove)
#define	XFS_SYMLINK_LOG_RES(mp)		((mp)->m_reservations.tr_symlink)
#define	XFS_CREATE_LOG_RES(mp)		((mp)->m_reservations.tr_create)
#define	XFS_MKDIR_LOG_RES(mp)		((mp)->m_reservations.tr_mkdir)
#define	XFS_IFREE_LOG_RES(mp)		((mp)->m_reservations.tr_ifree)
#define	XFS_ICHANGE_LOG_RES(mp)		((mp)->m_reservations.tr_ichange)
#define	XFS_GROWDATA_LOG_RES(mp)	((mp)->m_reservations.tr_growdata)
#define	XFS_GROWRTALLOC_LOG_RES(mp)	((mp)->m_reservations.tr_growrtalloc)
#define	XFS_GROWRTZERO_LOG_RES(mp)	((mp)->m_reservations.tr_growrtzero)
#define	XFS_GROWRTFREE_LOG_RES(mp)	((mp)->m_reservations.tr_growrtfree)
#define	XFS_SWRITE_LOG_RES(mp)		((mp)->m_reservations.tr_swrite)
/*
 * Logging the inode timestamps on an fsync -- same as SWRITE
 * as long as SWRITE logs the entire inode core
 */
#define XFS_FSYNC_TS_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
#define	XFS_WRITEID_LOG_RES(mp)		((mp)->m_reservations.tr_swrite)
#define	XFS_ADDAFORK_LOG_RES(mp)	((mp)->m_reservations.tr_addafork)
#define	XFS_ATTRINVAL_LOG_RES(mp)	((mp)->m_reservations.tr_attrinval)
#define	XFS_ATTRSETM_LOG_RES(mp)	((mp)->m_reservations.tr_attrsetm)
#define XFS_ATTRSETRT_LOG_RES(mp)	((mp)->m_reservations.tr_attrsetrt)
#define	XFS_ATTRRM_LOG_RES(mp)		((mp)->m_reservations.tr_attrrm)
#define	XFS_CLEAR_AGI_BUCKET_LOG_RES(mp)  ((mp)->m_reservations.tr_clearagi)
#define XFS_QM_SBCHANGE_LOG_RES(mp)	((mp)->m_reservations.tr_qm_sbchange)
#define XFS_QM_SETQLIM_LOG_RES(mp)	((mp)->m_reservations.tr_qm_setqlim)
#define XFS_QM_DQALLOC_LOG_RES(mp)	((mp)->m_reservations.tr_qm_dqalloc)
#define XFS_QM_QUOTAOFF_LOG_RES(mp)	((mp)->m_reservations.tr_qm_quotaoff)
#define XFS_QM_QUOTAOFF_END_LOG_RES(mp)	((mp)->m_reservations.tr_qm_equotaoff)
#define XFS_SB_LOG_RES(mp)		((mp)->m_reservations.tr_sb)

/*
 * Various log count values.
 */
#define	XFS_DEFAULT_LOG_COUNT		1
#define	XFS_DEFAULT_PERM_LOG_COUNT	2
#define	XFS_ITRUNCATE_LOG_COUNT		2
#define XFS_INACTIVE_LOG_COUNT		2
#define	XFS_CREATE_LOG_COUNT		2
#define	XFS_MKDIR_LOG_COUNT		3
#define	XFS_SYMLINK_LOG_COUNT		3
#define	XFS_REMOVE_LOG_COUNT		2
#define	XFS_LINK_LOG_COUNT		2
#define	XFS_RENAME_LOG_COUNT		2
#define	XFS_WRITE_LOG_COUNT		2
#define	XFS_ADDAFORK_LOG_COUNT		2
#define	XFS_ATTRINVAL_LOG_COUNT		1
#define	XFS_ATTRSET_LOG_COUNT		3
#define	XFS_ATTRRM_LOG_COUNT		3

/*
 * Here we centralize the specification of XFS meta-data buffer
 * reference count values.  This determines how hard the buffer
 * cache tries to hold onto the buffer.
 */
#define	XFS_AGF_REF		4
#define	XFS_AGI_REF		4
#define	XFS_AGFL_REF		3
#define	XFS_INO_BTREE_REF	3
#define	XFS_ALLOC_BTREE_REF	2
#define	XFS_BMAP_BTREE_REF	2
#define	XFS_DIR_BTREE_REF	2
#define	XFS_INO_REF		2
#define	XFS_ATTR_BTREE_REF	1
#define	XFS_DQUOT_REF		1

#ifdef __KERNEL__

struct xfs_buf;
struct xfs_buftarg;
struct xfs_efd_log_item;
struct xfs_efi_log_item;
struct xfs_inode;
struct xfs_item_ops;
struct xfs_log_iovec;
struct xfs_log_item_desc;
struct xfs_mount;
struct xfs_trans;
struct xfs_dquot_acct;
struct xfs_busy_extent;

typedef struct xfs_log_item {
	struct list_head		li_ail;		/* AIL pointers */
	xfs_lsn_t			li_lsn;		/* last on-disk lsn */
	struct xfs_log_item_desc	*li_desc;	/* ptr to current desc*/
	struct xfs_mount		*li_mountp;	/* ptr to fs mount */
	struct xfs_ail			*li_ailp;	/* ptr to AIL */
	uint				li_type;	/* item type */
	uint				li_flags;	/* misc flags */
	struct xfs_log_item		*li_bio_list;	/* buffer item list */
	void				(*li_cb)(struct xfs_buf *,
						 struct xfs_log_item *);
							/* buffer item iodone */
							/* callback func */
	const struct xfs_item_ops	*li_ops;	/* function list */

	/* delayed logging */
	struct list_head		li_cil;		/* CIL pointers */
	struct xfs_log_vec		*li_lv;		/* active log vector */
	xfs_lsn_t			li_seq;		/* CIL commit seq */
} xfs_log_item_t;

#define	XFS_LI_IN_AIL	0x1
#define XFS_LI_ABORTED	0x2

#define XFS_LI_FLAGS \
	{ XFS_LI_IN_AIL,	"IN_AIL" }, \
	{ XFS_LI_ABORTED,	"ABORTED" }

struct xfs_item_ops {
	uint (*iop_size)(xfs_log_item_t *);
	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
	void (*iop_pin)(xfs_log_item_t *);
	void (*iop_unpin)(xfs_log_item_t *, int remove);
	uint (*iop_push)(struct xfs_log_item *, struct list_head *);
	void (*iop_unlock)(xfs_log_item_t *);
	xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
};

#define IOP_SIZE(ip)		(*(ip)->li_ops->iop_size)(ip)
#define IOP_FORMAT(ip,vp)	(*(ip)->li_ops->iop_format)(ip, vp)
#define IOP_PIN(ip)		(*(ip)->li_ops->iop_pin)(ip)
#define IOP_UNPIN(ip, remove)	(*(ip)->li_ops->iop_unpin)(ip, remove)
#define IOP_PUSH(ip, list)	(*(ip)->li_ops->iop_push)(ip, list)
#define IOP_UNLOCK(ip)		(*(ip)->li_ops->iop_unlock)(ip)
#define IOP_COMMITTED(ip, lsn)	(*(ip)->li_ops->iop_committed)(ip, lsn)
#define IOP_COMMITTING(ip, lsn) (*(ip)->li_ops->iop_committing)(ip, lsn)
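
/*
 * Illustrative sketch (not part of the original header): the IOP_*
 * wrappers dispatch through the item's ops vector.  A commit path might
 * size, format and pin an item's log vectors roughly like this:
 *
 *	uint nvecs = IOP_SIZE(lip);
 *	... allocate nvecs iovecs ...
 *	IOP_FORMAT(lip, vecp);
 *	IOP_PIN(lip);
 *
 * where "lip" is a hypothetical xfs_log_item_t pointer and "vecp" a
 * hypothetical struct xfs_log_iovec array sized from IOP_SIZE().
 */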

/*
 * Return values for the IOP_PUSH() routines.
 */
#define XFS_ITEM_SUCCESS	0
#define XFS_ITEM_PINNED		1
#define XFS_ITEM_LOCKED		2
#define XFS_ITEM_FLUSHING	3

/*
 * This is the type of function which can be given to xfs_trans_callback()
 * to be called upon the transaction's commit to disk.
 */
typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);

/*
 * This is the structure maintained for every active transaction.
 */
typedef struct xfs_trans {
	unsigned int		t_magic;	/* magic number */
	xfs_log_callback_t	t_logcb;	/* log callback struct */
	unsigned int		t_type;		/* transaction type */
	unsigned int		t_log_res;	/* amt of log space resvd */
	unsigned int		t_log_count;	/* count for perm log res */
	unsigned int		t_blk_res;	/* # of blocks resvd */
	unsigned int		t_blk_res_used;	/* # of resvd blocks used */
	unsigned int		t_rtx_res;	/* # of rt extents resvd */
	unsigned int		t_rtx_res_used;	/* # of resvd rt extents used */
	struct xlog_ticket	*t_ticket;	/* log mgr ticket */
	xfs_lsn_t		t_lsn;		/* log seq num of start of
						 * transaction. */
	xfs_lsn_t		t_commit_lsn;	/* log seq num of end of
						 * transaction. */
	struct xfs_mount	*t_mountp;	/* ptr to fs mount struct */
	struct xfs_dquot_acct	*t_dqinfo;	/* acctg info for dquots */
	unsigned int		t_flags;	/* misc flags */
	int64_t			t_icount_delta;	/* superblock icount change */
	int64_t			t_ifree_delta;	/* superblock ifree change */
	int64_t			t_fdblocks_delta; /* superblock fdblocks chg */
	int64_t			t_res_fdblocks_delta; /* on-disk only chg */
	int64_t			t_frextents_delta;/* superblock freextents chg*/
	int64_t			t_res_frextents_delta; /* on-disk only chg */
#if defined(DEBUG) || defined(XFS_WARN)
	int64_t			t_ag_freeblks_delta; /* debugging counter */
	int64_t			t_ag_flist_delta; /* debugging counter */
	int64_t			t_ag_btree_delta; /* debugging counter */
#endif
	int64_t			t_dblocks_delta;/* superblock dblocks change */
	int64_t			t_agcount_delta;/* superblock agcount change */
	int64_t			t_imaxpct_delta;/* superblock imaxpct change */
	int64_t			t_rextsize_delta;/* superblock rextsize chg */
	int64_t			t_rbmblocks_delta;/* superblock rbmblocks chg */
	int64_t			t_rblocks_delta;/* superblock rblocks change */
	int64_t			t_rextents_delta;/* superblocks rextents chg */
	int64_t			t_rextslog_delta;/* superblocks rextslog chg */
	struct list_head	t_items;	/* log item descriptors */
	xfs_trans_header_t	t_header;	/* header for in-log trans */
	struct list_head	t_busy;		/* list of busy extents */
	unsigned long		t_pflags;	/* saved process flags state */
} xfs_trans_t;

/*
 * XFS transaction mechanism exported interfaces that are
 * actually macros.
 */
#define	xfs_trans_get_log_res(tp)	((tp)->t_log_res)
#define	xfs_trans_get_log_count(tp)	((tp)->t_log_count)
#define	xfs_trans_get_block_res(tp)	((tp)->t_blk_res)
#define	xfs_trans_set_sync(tp)		((tp)->t_flags |= XFS_TRANS_SYNC)
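
/*
 * Illustrative sketch (not part of the original header): callers that
 * need the commit to reach stable storage before returning mark the
 * transaction synchronous before committing, e.g.:
 *
 *	xfs_trans_set_sync(tp);
 *	error = xfs_trans_commit(tp, 0);
 */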

#if defined(DEBUG) || defined(XFS_WARN)
#define	xfs_trans_agblocks_delta(tp, d)	((tp)->t_ag_freeblks_delta += (int64_t)d)
#define	xfs_trans_agflist_delta(tp, d)	((tp)->t_ag_flist_delta += (int64_t)d)
#define	xfs_trans_agbtree_delta(tp, d)	((tp)->t_ag_btree_delta += (int64_t)d)
#else
#define	xfs_trans_agblocks_delta(tp, d)
#define	xfs_trans_agflist_delta(tp, d)
#define	xfs_trans_agbtree_delta(tp, d)
#endif

/*
 * XFS transaction mechanism exported interfaces.
 */
xfs_trans_t	*xfs_trans_alloc(struct xfs_mount *, uint);
xfs_trans_t	*_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t);
xfs_trans_t	*xfs_trans_dup(xfs_trans_t *);
int		xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
				  uint, uint);
void		xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);

struct xfs_buf	*xfs_trans_get_buf_map(struct xfs_trans *tp,
				       struct xfs_buftarg *target,
				       struct xfs_buf_map *map, int nmaps,
				       uint flags);

static inline struct xfs_buf *
xfs_trans_get_buf(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	int			numblks,
	uint			flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_trans_get_buf_map(tp, target, &map, 1, flags);
}

int		xfs_trans_read_buf_map(struct xfs_mount *mp,
				       struct xfs_trans *tp,
				       struct xfs_buftarg *target,
				       struct xfs_buf_map *map, int nmaps,
				       xfs_buf_flags_t flags,
				       struct xfs_buf **bpp,
				       const struct xfs_buf_ops *ops);

static inline int
xfs_trans_read_buf(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	int			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_trans_read_buf_map(mp, tp, target, &map, 1,
				      flags, bpp, ops);
}
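
/*
 * Illustrative sketch (not part of the original header): both inline
 * wrappers above build a single-extent xfs_buf_map on the stack so common
 * callers need not construct one by hand.  A hypothetical caller reading
 * one block within a transaction might do:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno,
 *				   numblks, 0, &bp, ops);
 *
 * where blkno, numblks and ops are placeholders supplied by the caller.
 */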

struct xfs_buf	*xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int);

void		xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_ordered_buf(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void		xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
void		xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
void		xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
void		xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
void		xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
struct xfs_efi_log_item	*xfs_trans_get_efi(xfs_trans_t *, uint);
void		xfs_efi_release(struct xfs_efi_log_item *, uint);
void		xfs_trans_log_efi_extent(xfs_trans_t *,
					 struct xfs_efi_log_item *,
					 xfs_fsblock_t,
					 xfs_extlen_t);
struct xfs_efd_log_item	*xfs_trans_get_efd(xfs_trans_t *,
					   struct xfs_efi_log_item *,
					   uint);
void		xfs_trans_log_efd_extent(xfs_trans_t *,
					 struct xfs_efd_log_item *,
					 xfs_fsblock_t,
					 xfs_extlen_t);
int		xfs_trans_commit(xfs_trans_t *, uint flags);
void		xfs_trans_cancel(xfs_trans_t *, int);
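
/*
 * Illustrative sketch (not part of the original header): a typical
 * transaction lifecycle built from the interfaces above.  The reservation
 * arguments shown are placeholders, not real reservation values:
 *
 *	xfs_trans_t	*tp;
 *	int		error;
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
 *	error = xfs_trans_reserve(tp, blocks, logspace, rtextents,
 *				  flags, logcount);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 *	... join and log items (xfs_trans_ijoin(), xfs_trans_log_buf(), ...)
 *	error = xfs_trans_commit(tp, 0);
 */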

int		xfs_trans_ail_init(struct xfs_mount *);
void		xfs_trans_ail_destroy(struct xfs_mount *);

extern kmem_zone_t	*xfs_trans_zone;
extern kmem_zone_t	*xfs_log_item_desc_zone;

#endif	/* __KERNEL__ */

void		xfs_trans_init(struct xfs_mount *);
int		xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);

#endif	/* __XFS_TRANS_H__ */