mirror of
https://github.com/torvalds/linux.git
synced 2024-11-10 14:11:52 +00:00
- Most attention this cycle went to optimizing blk-mq request-based DM
(dm-mq) that is used exclussively by DM multipath. - A stable fix for dm-mq that eliminates excessive context switching offers the biggest performance improvement (for both IOPs and throughput). - But more work is needed, during the next cycle, to reduce spinlock contention in DM multipath on large NUMA systems. - A stable fix for a NULL pointer seen when DM stats is enabled on a DM multipath device that must requeue an IO due to path failure. - A stable fix for DM snapshot to disallow the COW and origin devices from being identical. This amounts to graceful failure in the face of userspace error because these devices shouldn't ever be identical. - Stable fixes for DM cache and DM thin provisioning to address crashes seen if/when their respective metadata device experiences failures that cause the transition to 'fail_io' mode. - The DM cache 'mq' policy is now an alias for the 'smq' policy. The 'smq' policy proved to be consistently better than 'mq'. As such 'mq', with all its complex user-facing tunables, has been eliminated. - Improve DM thin provisioning to consistently return -ENOSPC once the thin-pool's data volume is out of space. - Improve DM core to properly handle error propagation if bio_integrity_clone() fails in clone_bio(). - Other small cleanups and improvements to DM core. -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQEcBAABAgAGBQJW6WRhAAoJEMUj8QotnQNaXiIH/3UBJ0w/YUrAOcTU9Q58FQoo JophkKbjZ7o9IUNdDmfv9vQSFAJvDOJ8ve2Sb5OXdW0mUWxM+8M+6ioQU4MtI9oN uBZ7MDYU995jzE89d8sYFO9lKrNCmmPKuBiIzoAGNVh1VPx8YK1PvTOfaGEk5VHg Ay5JYGn14PUuV9tOP4euvpuc4XrJn5lqWtnTeMZPLtytcO3LWDIFGDoPoUqoRLI3 yzBO08xzR/xTNiW4+f59U7AJE+80CAONld0EDPRhrbd9kl3d1EcyCULisBQzuVd3 VSL0t77x4tPLWFR7Z1Fsq1FamuwSAUYL1FLLusT6G+5LUXdv0TAm+kXiUDA1Tbc= =1SLF -----END PGP SIGNATURE----- Merge tag 'dm-4.6-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm Pull device mapper updates from Mike Snitzer: - Most attention this cycle went to optimizing blk-mq request-based DM (dm-mq) that is used exclussively by DM multipath: - A stable fix for dm-mq that eliminates excessive context switching offers the biggest performance improvement (for both IOPs and throughput). - But more work is needed, during the next cycle, to reduce spinlock contention in DM multipath on large NUMA systems. - A stable fix for a NULL pointer seen when DM stats is enabled on a DM multipath device that must requeue an IO due to path failure. - A stable fix for DM snapshot to disallow the COW and origin devices from being identical. This amounts to graceful failure in the face of userspace error because these devices shouldn't ever be identical. - Stable fixes for DM cache and DM thin provisioning to address crashes seen if/when their respective metadata device experiences failures that cause the transition to 'fail_io' mode. - The DM cache 'mq' policy is now an alias for the 'smq' policy. The 'smq' policy proved to be consistently better than 'mq'. As such 'mq', with all its complex user-facing tunables, has been eliminated. - Improve DM thin provisioning to consistently return -ENOSPC once the thin-pool's data volume is out of space. - Improve DM core to properly handle error propagation if bio_integrity_clone() fails in clone_bio(). - Other small cleanups and improvements to DM core. 
* tag 'dm-4.6-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (41 commits) dm: fix rq_end_stats() NULL pointer in dm_requeue_original_request() dm thin: consistently return -ENOSPC if pool has run out of data space dm cache: bump the target version dm cache: make sure every metadata function checks fail_io dm: add missing newline between DM_DEBUG_BLOCK_STACK_TRACING and DM_BUFIO dm cache policy smq: clarify that mq registration failure was for 'mq' dm: return error if bio_integrity_clone() fails in clone_bio() dm thin metadata: don't issue prefetches if a transaction abort has failed dm snapshot: disallow the COW and origin devices from being identical dm cache: make the 'mq' policy an alias for 'smq' dm: drop unnecessary assignment of md->queue dm: reorder 'struct mapped_device' members to fix alignment and holes dm: remove dummy definition of 'struct dm_table' dm: add 'dm_numa_node' module parameter dm thin metadata: remove needless newline from subtree_dec() DMERR message dm mpath: cleanup reinstate_path() et al based on code review dm mpath: remove __pgpath_busy forward declaration, rename to pgpath_busy dm mpath: switch from 'unsigned' to 'bool' for flags where appropriate dm round robin: use percpu 'repeat_count' and 'current_path' dm path selector: remove 'repeat_count' return from .select_path hook ...
This commit is contained in:
commit
6968e6f832
@ -28,51 +28,16 @@ Overview of supplied cache replacement policies
|
||||
multiqueue (mq)
|
||||
---------------
|
||||
|
||||
This policy has been deprecated in favor of the smq policy (see below).
|
||||
This policy is now an alias for smq (see below).
|
||||
|
||||
The multiqueue policy has three sets of 16 queues: one set for entries
|
||||
waiting for the cache and another two for those in the cache (a set for
|
||||
clean entries and a set for dirty entries).
|
||||
The following tunables are accepted, but have no effect:
|
||||
|
||||
Cache entries in the queues are aged based on logical time. Entry into
|
||||
the cache is based on variable thresholds and queue selection is based
|
||||
on hit count on entry. The policy aims to take different cache miss
|
||||
costs into account and to adjust to varying load patterns automatically.
|
||||
|
||||
Message and constructor argument pairs are:
|
||||
'sequential_threshold <#nr_sequential_ios>'
|
||||
'random_threshold <#nr_random_ios>'
|
||||
'read_promote_adjustment <value>'
|
||||
'write_promote_adjustment <value>'
|
||||
'discard_promote_adjustment <value>'
|
||||
|
||||
The sequential threshold indicates the number of contiguous I/Os
|
||||
required before a stream is treated as sequential. Once a stream is
|
||||
considered sequential it will bypass the cache. The random threshold
|
||||
is the number of intervening non-contiguous I/Os that must be seen
|
||||
before the stream is treated as random again.
|
||||
|
||||
The sequential and random thresholds default to 512 and 4 respectively.
|
||||
|
||||
Large, sequential I/Os are probably better left on the origin device
|
||||
since spindles tend to have good sequential I/O bandwidth. The
|
||||
io_tracker counts contiguous I/Os to try to spot when the I/O is in one
|
||||
of these sequential modes. But there are use-cases for wanting to
|
||||
promote sequential blocks to the cache (e.g. fast application startup).
|
||||
If sequential threshold is set to 0 the sequential I/O detection is
|
||||
disabled and sequential I/O will no longer implicitly bypass the cache.
|
||||
Setting the random threshold to 0 does _not_ disable the random I/O
|
||||
stream detection.
|
||||
|
||||
Internally the mq policy determines a promotion threshold. If the hit
|
||||
count of a block not in the cache goes above this threshold it gets
|
||||
promoted to the cache. The read, write and discard promote adjustment
|
||||
tunables allow you to tweak the promotion threshold by adding a small
|
||||
value based on the io type. They default to 4, 8 and 1 respectively.
|
||||
If you're trying to quickly warm a new cache device you may wish to
|
||||
reduce these to encourage promotion. Remember to switch them back to
|
||||
their defaults after the cache fills though.
|
||||
|
||||
Stochastic multiqueue (smq)
|
||||
---------------------------
|
||||
|
||||
|
@ -2198,7 +2198,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
|
||||
if (q->mq_ops) {
|
||||
if (blk_queue_io_stat(q))
|
||||
blk_account_io_start(rq, true);
|
||||
blk_mq_insert_request(rq, false, true, true);
|
||||
blk_mq_insert_request(rq, false, true, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -249,6 +249,7 @@ config DM_DEBUG_BLOCK_STACK_TRACING
|
||||
block manager locking used by thin provisioning and caching.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config DM_BIO_PRISON
|
||||
tristate
|
||||
depends on BLK_DEV_DM
|
||||
@ -304,16 +305,6 @@ config DM_CACHE
|
||||
algorithms used to select which blocks are promoted, demoted,
|
||||
cleaned etc. It supports writeback and writethrough modes.
|
||||
|
||||
config DM_CACHE_MQ
|
||||
tristate "MQ Cache Policy (EXPERIMENTAL)"
|
||||
depends on DM_CACHE
|
||||
default y
|
||||
---help---
|
||||
A cache policy that uses a multiqueue ordered by recent hit
|
||||
count to select which blocks should be promoted and demoted.
|
||||
This is meant to be a general purpose policy. It prioritises
|
||||
reads over writes.
|
||||
|
||||
config DM_CACHE_SMQ
|
||||
tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)"
|
||||
depends on DM_CACHE
|
||||
|
@ -12,7 +12,6 @@ dm-log-userspace-y \
|
||||
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
|
||||
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
|
||||
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
|
||||
dm-cache-mq-y += dm-cache-policy-mq.o
|
||||
dm-cache-smq-y += dm-cache-policy-smq.o
|
||||
dm-cache-cleaner-y += dm-cache-policy-cleaner.o
|
||||
dm-era-y += dm-era-target.o
|
||||
@ -55,7 +54,6 @@ obj-$(CONFIG_DM_RAID) += dm-raid.o
|
||||
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
|
||||
obj-$(CONFIG_DM_VERITY) += dm-verity.o
|
||||
obj-$(CONFIG_DM_CACHE) += dm-cache.o
|
||||
obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
|
||||
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
|
||||
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
|
||||
obj-$(CONFIG_DM_ERA) += dm-era.o
|
||||
|
@ -867,19 +867,40 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define WRITE_LOCK(cmd) \
|
||||
if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
|
||||
#define WRITE_LOCK(cmd) \
|
||||
down_write(&cmd->root_lock); \
|
||||
if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
|
||||
up_write(&cmd->root_lock); \
|
||||
return -EINVAL; \
|
||||
down_write(&cmd->root_lock)
|
||||
}
|
||||
|
||||
#define WRITE_LOCK_VOID(cmd) \
|
||||
if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
|
||||
down_write(&cmd->root_lock); \
|
||||
if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
|
||||
up_write(&cmd->root_lock); \
|
||||
return; \
|
||||
down_write(&cmd->root_lock)
|
||||
}
|
||||
|
||||
#define WRITE_UNLOCK(cmd) \
|
||||
up_write(&cmd->root_lock)
|
||||
|
||||
#define READ_LOCK(cmd) \
|
||||
down_read(&cmd->root_lock); \
|
||||
if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
|
||||
up_read(&cmd->root_lock); \
|
||||
return -EINVAL; \
|
||||
}
|
||||
|
||||
#define READ_LOCK_VOID(cmd) \
|
||||
down_read(&cmd->root_lock); \
|
||||
if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
|
||||
up_read(&cmd->root_lock); \
|
||||
return; \
|
||||
}
|
||||
|
||||
#define READ_UNLOCK(cmd) \
|
||||
up_read(&cmd->root_lock)
|
||||
|
||||
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
|
||||
{
|
||||
int r;
|
||||
@ -1015,22 +1036,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
|
||||
{
|
||||
int r;
|
||||
|
||||
down_read(&cmd->root_lock);
|
||||
READ_LOCK(cmd);
|
||||
r = __load_discards(cmd, fn, context);
|
||||
up_read(&cmd->root_lock);
|
||||
READ_UNLOCK(cmd);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
|
||||
int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
|
||||
{
|
||||
dm_cblock_t r;
|
||||
READ_LOCK(cmd);
|
||||
*result = cmd->cache_blocks;
|
||||
READ_UNLOCK(cmd);
|
||||
|
||||
down_read(&cmd->root_lock);
|
||||
r = cmd->cache_blocks;
|
||||
up_read(&cmd->root_lock);
|
||||
|
||||
return r;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
|
||||
@ -1188,9 +1207,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
|
||||
{
|
||||
int r;
|
||||
|
||||
down_read(&cmd->root_lock);
|
||||
READ_LOCK(cmd);
|
||||
r = __load_mappings(cmd, policy, fn, context);
|
||||
up_read(&cmd->root_lock);
|
||||
READ_UNLOCK(cmd);
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -1215,18 +1234,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
|
||||
|
||||
void dm_cache_dump(struct dm_cache_metadata *cmd)
|
||||
{
|
||||
down_read(&cmd->root_lock);
|
||||
READ_LOCK_VOID(cmd);
|
||||
__dump_mappings(cmd);
|
||||
up_read(&cmd->root_lock);
|
||||
READ_UNLOCK(cmd);
|
||||
}
|
||||
|
||||
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
|
||||
{
|
||||
int r;
|
||||
|
||||
down_read(&cmd->root_lock);
|
||||
READ_LOCK(cmd);
|
||||
r = cmd->changed;
|
||||
up_read(&cmd->root_lock);
|
||||
READ_UNLOCK(cmd);
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -1276,9 +1295,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
|
||||
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
|
||||
struct dm_cache_statistics *stats)
|
||||
{
|
||||
down_read(&cmd->root_lock);
|
||||
READ_LOCK_VOID(cmd);
|
||||
*stats = cmd->stats;
|
||||
up_read(&cmd->root_lock);
|
||||
READ_UNLOCK(cmd);
|
||||
}
|
||||
|
||||
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
|
||||
@ -1312,9 +1331,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
|
||||
{
|
||||
int r = -EINVAL;
|
||||
|
||||
down_read(&cmd->root_lock);
|
||||
READ_LOCK(cmd);
|
||||
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
|
||||
up_read(&cmd->root_lock);
|
||||
READ_UNLOCK(cmd);
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -1324,9 +1343,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
|
||||
{
|
||||
int r = -EINVAL;
|
||||
|
||||
down_read(&cmd->root_lock);
|
||||
READ_LOCK(cmd);
|
||||
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
|
||||
up_read(&cmd->root_lock);
|
||||
READ_UNLOCK(cmd);
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -1417,7 +1436,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
|
||||
|
||||
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
|
||||
{
|
||||
return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
|
||||
int r;
|
||||
|
||||
READ_LOCK(cmd);
|
||||
r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
|
||||
READ_UNLOCK(cmd);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
|
||||
@ -1440,10 +1465,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
|
||||
struct dm_block *sblock;
|
||||
struct cache_disk_superblock *disk_super;
|
||||
|
||||
/*
|
||||
* We ignore fail_io for this function.
|
||||
*/
|
||||
down_write(&cmd->root_lock);
|
||||
WRITE_LOCK(cmd);
|
||||
set_bit(NEEDS_CHECK, &cmd->flags);
|
||||
|
||||
r = superblock_lock(cmd, &sblock);
|
||||
@ -1458,19 +1480,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
|
||||
dm_bm_unlock(sblock);
|
||||
|
||||
out:
|
||||
up_write(&cmd->root_lock);
|
||||
WRITE_UNLOCK(cmd);
|
||||
return r;
|
||||
}
|
||||
|
||||
bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
|
||||
int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
|
||||
{
|
||||
bool needs_check;
|
||||
READ_LOCK(cmd);
|
||||
*result = !!test_bit(NEEDS_CHECK, &cmd->flags);
|
||||
READ_UNLOCK(cmd);
|
||||
|
||||
down_read(&cmd->root_lock);
|
||||
needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
|
||||
up_read(&cmd->root_lock);
|
||||
|
||||
return needs_check;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
|
||||
|
@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
|
||||
* origin blocks to map to.
|
||||
*/
|
||||
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
|
||||
dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
|
||||
int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
|
||||
|
||||
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
|
||||
sector_t discard_block_size,
|
||||
@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
|
||||
*/
|
||||
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
|
||||
|
||||
bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
|
||||
int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
|
||||
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
|
||||
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
|
||||
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1567,8 +1567,48 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
|
||||
spin_unlock_irqrestore(&mq->lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* smq has no config values, but the old mq policy did. To avoid breaking
|
||||
* software we continue to accept these configurables for the mq policy,
|
||||
* but they have no effect.
|
||||
*/
|
||||
static int mq_set_config_value(struct dm_cache_policy *p,
|
||||
const char *key, const char *value)
|
||||
{
|
||||
unsigned long tmp;
|
||||
|
||||
if (kstrtoul(value, 10, &tmp))
|
||||
return -EINVAL;
|
||||
|
||||
if (!strcasecmp(key, "random_threshold") ||
|
||||
!strcasecmp(key, "sequential_threshold") ||
|
||||
!strcasecmp(key, "discard_promote_adjustment") ||
|
||||
!strcasecmp(key, "read_promote_adjustment") ||
|
||||
!strcasecmp(key, "write_promote_adjustment")) {
|
||||
DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
|
||||
unsigned maxlen, ssize_t *sz_ptr)
|
||||
{
|
||||
ssize_t sz = *sz_ptr;
|
||||
|
||||
DMEMIT("10 random_threshold 0 "
|
||||
"sequential_threshold 0 "
|
||||
"discard_promote_adjustment 0 "
|
||||
"read_promote_adjustment 0 "
|
||||
"write_promote_adjustment 0 ");
|
||||
|
||||
*sz_ptr = sz;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Init the policy plugin interface function pointers. */
|
||||
static void init_policy_functions(struct smq_policy *mq)
|
||||
static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
|
||||
{
|
||||
mq->policy.destroy = smq_destroy;
|
||||
mq->policy.map = smq_map;
|
||||
@ -1583,6 +1623,11 @@ static void init_policy_functions(struct smq_policy *mq)
|
||||
mq->policy.force_mapping = smq_force_mapping;
|
||||
mq->policy.residency = smq_residency;
|
||||
mq->policy.tick = smq_tick;
|
||||
|
||||
if (mimic_mq) {
|
||||
mq->policy.set_config_value = mq_set_config_value;
|
||||
mq->policy.emit_config_values = mq_emit_config_values;
|
||||
}
|
||||
}
|
||||
|
||||
static bool too_many_hotspot_blocks(sector_t origin_size,
|
||||
@ -1606,9 +1651,10 @@ static void calc_hotspot_params(sector_t origin_size,
|
||||
*hotspot_block_size /= 2u;
|
||||
}
|
||||
|
||||
static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
|
||||
sector_t origin_size,
|
||||
sector_t cache_block_size)
|
||||
static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
|
||||
sector_t origin_size,
|
||||
sector_t cache_block_size,
|
||||
bool mimic_mq)
|
||||
{
|
||||
unsigned i;
|
||||
unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
|
||||
@ -1618,7 +1664,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
|
||||
if (!mq)
|
||||
return NULL;
|
||||
|
||||
init_policy_functions(mq);
|
||||
init_policy_functions(mq, mimic_mq);
|
||||
mq->cache_size = cache_size;
|
||||
mq->cache_block_size = cache_block_size;
|
||||
|
||||
@ -1706,19 +1752,41 @@ bad_pool_init:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
|
||||
sector_t origin_size,
|
||||
sector_t cache_block_size)
|
||||
{
|
||||
return __smq_create(cache_size, origin_size, cache_block_size, false);
|
||||
}
|
||||
|
||||
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
|
||||
sector_t origin_size,
|
||||
sector_t cache_block_size)
|
||||
{
|
||||
return __smq_create(cache_size, origin_size, cache_block_size, true);
|
||||
}
|
||||
|
||||
/*----------------------------------------------------------------*/
|
||||
|
||||
static struct dm_cache_policy_type smq_policy_type = {
|
||||
.name = "smq",
|
||||
.version = {1, 0, 0},
|
||||
.version = {1, 5, 0},
|
||||
.hint_size = 4,
|
||||
.owner = THIS_MODULE,
|
||||
.create = smq_create
|
||||
};
|
||||
|
||||
static struct dm_cache_policy_type mq_policy_type = {
|
||||
.name = "mq",
|
||||
.version = {1, 5, 0},
|
||||
.hint_size = 4,
|
||||
.owner = THIS_MODULE,
|
||||
.create = mq_create,
|
||||
};
|
||||
|
||||
static struct dm_cache_policy_type default_policy_type = {
|
||||
.name = "default",
|
||||
.version = {1, 4, 0},
|
||||
.version = {1, 5, 0},
|
||||
.hint_size = 4,
|
||||
.owner = THIS_MODULE,
|
||||
.create = smq_create,
|
||||
@ -1735,9 +1803,17 @@ static int __init smq_init(void)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
r = dm_cache_policy_register(&mq_policy_type);
|
||||
if (r) {
|
||||
DMERR("register failed (as mq) %d", r);
|
||||
dm_cache_policy_unregister(&smq_policy_type);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
r = dm_cache_policy_register(&default_policy_type);
|
||||
if (r) {
|
||||
DMERR("register failed (as default) %d", r);
|
||||
dm_cache_policy_unregister(&mq_policy_type);
|
||||
dm_cache_policy_unregister(&smq_policy_type);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -1748,6 +1824,7 @@ static int __init smq_init(void)
|
||||
static void __exit smq_exit(void)
|
||||
{
|
||||
dm_cache_policy_unregister(&smq_policy_type);
|
||||
dm_cache_policy_unregister(&mq_policy_type);
|
||||
dm_cache_policy_unregister(&default_policy_type);
|
||||
}
|
||||
|
||||
@ -1759,3 +1836,4 @@ MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("smq cache policy");
|
||||
|
||||
MODULE_ALIAS("dm-cache-default");
|
||||
MODULE_ALIAS("dm-cache-mq");
|
||||
|
@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
|
||||
|
||||
static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
|
||||
{
|
||||
bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
|
||||
bool needs_check;
|
||||
enum cache_metadata_mode old_mode = get_cache_mode(cache);
|
||||
|
||||
if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
|
||||
DMERR("unable to read needs_check flag, setting failure mode");
|
||||
new_mode = CM_FAIL;
|
||||
}
|
||||
|
||||
if (new_mode == CM_WRITE && needs_check) {
|
||||
DMERR("%s: unable to switch cache to write mode until repaired.",
|
||||
cache_device_name(cache));
|
||||
@ -2771,7 +2776,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
|
||||
ti->split_discard_bios = false;
|
||||
|
||||
cache->features = ca->features;
|
||||
ti->per_bio_data_size = get_per_bio_data_size(cache);
|
||||
ti->per_io_data_size = get_per_bio_data_size(cache);
|
||||
|
||||
cache->callbacks.congested_fn = cache_is_congested;
|
||||
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
|
||||
@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
|
||||
char buf[BDEVNAME_SIZE];
|
||||
struct cache *cache = ti->private;
|
||||
dm_cblock_t residency;
|
||||
bool needs_check;
|
||||
|
||||
switch (type) {
|
||||
case STATUSTYPE_INFO:
|
||||
@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
|
||||
else
|
||||
DMEMIT("rw ");
|
||||
|
||||
if (dm_cache_metadata_needs_check(cache->cmd))
|
||||
r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
|
||||
|
||||
if (r || needs_check)
|
||||
DMEMIT("needs_check ");
|
||||
else
|
||||
DMEMIT("- ");
|
||||
@ -3806,7 +3814,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
|
||||
|
||||
static struct target_type cache_target = {
|
||||
.name = "cache",
|
||||
.version = {1, 8, 0},
|
||||
.version = {1, 9, 0},
|
||||
.module = THIS_MODULE,
|
||||
.ctr = cache_ctr,
|
||||
.dtr = cache_dtr,
|
||||
|
@ -1788,7 +1788,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
goto bad;
|
||||
}
|
||||
|
||||
cc->per_bio_data_size = ti->per_bio_data_size =
|
||||
cc->per_bio_data_size = ti->per_io_data_size =
|
||||
ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
|
||||
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
|
||||
ARCH_KMALLOC_MINALIGN);
|
||||
|
@ -204,7 +204,7 @@ out:
|
||||
|
||||
ti->num_flush_bios = 1;
|
||||
ti->num_discard_bios = 1;
|
||||
ti->per_bio_data_size = sizeof(struct dm_delay_info);
|
||||
ti->per_io_data_size = sizeof(struct dm_delay_info);
|
||||
ti->private = dc;
|
||||
return 0;
|
||||
|
||||
|
@ -220,7 +220,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
|
||||
ti->num_flush_bios = 1;
|
||||
ti->num_discard_bios = 1;
|
||||
ti->per_bio_data_size = sizeof(struct per_bio_data);
|
||||
ti->per_io_data_size = sizeof(struct per_bio_data);
|
||||
ti->private = fc;
|
||||
return 0;
|
||||
|
||||
|
@ -1291,7 +1291,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
|
||||
|
||||
immutable_target_type = dm_get_immutable_target_type(md);
|
||||
if (immutable_target_type &&
|
||||
(immutable_target_type != dm_table_get_immutable_target_type(t))) {
|
||||
(immutable_target_type != dm_table_get_immutable_target_type(t)) &&
|
||||
!dm_table_get_wildcard_target(t)) {
|
||||
DMWARN("can't replace immutable target type %s",
|
||||
immutable_target_type->name);
|
||||
r = -EINVAL;
|
||||
@ -1303,7 +1304,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
|
||||
dm_set_md_type(md, dm_table_get_type(t));
|
||||
|
||||
/* setup md->queue to reflect md's type (may block) */
|
||||
r = dm_setup_md_queue(md);
|
||||
r = dm_setup_md_queue(md, t);
|
||||
if (r) {
|
||||
DMWARN("unable to set up device queue for new table.");
|
||||
goto err_unlock_md_type;
|
||||
|
@ -475,7 +475,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
ti->flush_supported = true;
|
||||
ti->num_discard_bios = 1;
|
||||
ti->discards_supported = true;
|
||||
ti->per_bio_data_size = sizeof(struct per_bio_data);
|
||||
ti->per_io_data_size = sizeof(struct per_bio_data);
|
||||
ti->private = lc;
|
||||
return 0;
|
||||
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <linux/delay.h>
|
||||
#include <scsi/scsi_dh.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/blk-mq.h>
|
||||
|
||||
#define DM_MSG_PREFIX "multipath"
|
||||
#define DM_PG_INIT_DELAY_MSECS 2000
|
||||
@ -33,11 +34,12 @@ struct pgpath {
|
||||
struct list_head list;
|
||||
|
||||
struct priority_group *pg; /* Owning PG */
|
||||
unsigned is_active; /* Path status */
|
||||
unsigned fail_count; /* Cumulative failure count */
|
||||
|
||||
struct dm_path path;
|
||||
struct delayed_work activate_path;
|
||||
|
||||
bool is_active:1; /* Path status */
|
||||
};
|
||||
|
||||
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
|
||||
@ -53,10 +55,10 @@ struct priority_group {
|
||||
struct path_selector ps;
|
||||
|
||||
unsigned pg_num; /* Reference number */
|
||||
unsigned bypassed; /* Temporarily bypass this PG? */
|
||||
|
||||
unsigned nr_pgpaths; /* Number of paths in PG */
|
||||
struct list_head pgpaths;
|
||||
|
||||
bool bypassed:1; /* Temporarily bypass this PG? */
|
||||
};
|
||||
|
||||
/* Multipath context */
|
||||
@ -74,21 +76,20 @@ struct multipath {
|
||||
|
||||
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
|
||||
|
||||
unsigned pg_init_required; /* pg_init needs calling? */
|
||||
unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
|
||||
unsigned pg_init_delay_retry; /* Delay pg_init retry? */
|
||||
|
||||
unsigned nr_valid_paths; /* Total number of usable paths */
|
||||
struct pgpath *current_pgpath;
|
||||
struct priority_group *current_pg;
|
||||
struct priority_group *next_pg; /* Switch to this PG if set */
|
||||
unsigned repeat_count; /* I/Os left before calling PS again */
|
||||
|
||||
unsigned queue_io:1; /* Must we queue all I/O? */
|
||||
unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
|
||||
unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
|
||||
unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
|
||||
unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
|
||||
bool queue_io:1; /* Must we queue all I/O? */
|
||||
bool queue_if_no_path:1; /* Queue I/O if last path fails? */
|
||||
bool saved_queue_if_no_path:1; /* Saved state during suspension */
|
||||
bool retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
|
||||
bool pg_init_disabled:1; /* pg_init is not currently allowed */
|
||||
bool pg_init_required:1; /* pg_init needs calling? */
|
||||
bool pg_init_delay_retry:1; /* Delay pg_init retry? */
|
||||
|
||||
unsigned pg_init_retries; /* Number of times to retry pg_init */
|
||||
unsigned pg_init_count; /* Number of times pg_init called */
|
||||
@ -120,7 +121,6 @@ static struct kmem_cache *_mpio_cache;
|
||||
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
|
||||
static void trigger_event(struct work_struct *work);
|
||||
static void activate_path(struct work_struct *work);
|
||||
static int __pgpath_busy(struct pgpath *pgpath);
|
||||
|
||||
|
||||
/*-----------------------------------------------
|
||||
@ -132,7 +132,7 @@ static struct pgpath *alloc_pgpath(void)
|
||||
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
|
||||
|
||||
if (pgpath) {
|
||||
pgpath->is_active = 1;
|
||||
pgpath->is_active = true;
|
||||
INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
|
||||
}
|
||||
|
||||
@ -181,25 +181,31 @@ static void free_priority_group(struct priority_group *pg,
|
||||
kfree(pg);
|
||||
}
|
||||
|
||||
static struct multipath *alloc_multipath(struct dm_target *ti)
|
||||
static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
|
||||
{
|
||||
struct multipath *m;
|
||||
unsigned min_ios = dm_get_reserved_rq_based_ios();
|
||||
|
||||
m = kzalloc(sizeof(*m), GFP_KERNEL);
|
||||
if (m) {
|
||||
INIT_LIST_HEAD(&m->priority_groups);
|
||||
spin_lock_init(&m->lock);
|
||||
m->queue_io = 1;
|
||||
m->queue_io = true;
|
||||
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
|
||||
INIT_WORK(&m->trigger_event, trigger_event);
|
||||
init_waitqueue_head(&m->pg_init_wait);
|
||||
mutex_init(&m->work_mutex);
|
||||
m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
|
||||
if (!m->mpio_pool) {
|
||||
kfree(m);
|
||||
return NULL;
|
||||
|
||||
m->mpio_pool = NULL;
|
||||
if (!use_blk_mq) {
|
||||
unsigned min_ios = dm_get_reserved_rq_based_ios();
|
||||
|
||||
m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
|
||||
if (!m->mpio_pool) {
|
||||
kfree(m);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
m->ti = ti;
|
||||
ti->private = m;
|
||||
}
|
||||
@ -222,26 +228,41 @@ static void free_multipath(struct multipath *m)
|
||||
kfree(m);
|
||||
}
|
||||
|
||||
static int set_mapinfo(struct multipath *m, union map_info *info)
|
||||
static struct dm_mpath_io *get_mpio(union map_info *info)
|
||||
{
|
||||
return info->ptr;
|
||||
}
|
||||
|
||||
static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
|
||||
{
|
||||
struct dm_mpath_io *mpio;
|
||||
|
||||
if (!m->mpio_pool) {
|
||||
/* Use blk-mq pdu memory requested via per_io_data_size */
|
||||
mpio = get_mpio(info);
|
||||
memset(mpio, 0, sizeof(*mpio));
|
||||
return mpio;
|
||||
}
|
||||
|
||||
mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
|
||||
if (!mpio)
|
||||
return -ENOMEM;
|
||||
return NULL;
|
||||
|
||||
memset(mpio, 0, sizeof(*mpio));
|
||||
info->ptr = mpio;
|
||||
|
||||
return 0;
|
||||
return mpio;
|
||||
}
|
||||
|
||||
static void clear_mapinfo(struct multipath *m, union map_info *info)
|
||||
static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
|
||||
{
|
||||
struct dm_mpath_io *mpio = info->ptr;
|
||||
/* Only needed for non blk-mq (.request_fn) multipath */
|
||||
if (m->mpio_pool) {
|
||||
struct dm_mpath_io *mpio = info->ptr;
|
||||
|
||||
info->ptr = NULL;
|
||||
mempool_free(mpio, m->mpio_pool);
|
||||
info->ptr = NULL;
|
||||
mempool_free(mpio, m->mpio_pool);
|
||||
}
|
||||
}
|
||||
|
||||
/*-----------------------------------------------
|
||||
@ -257,7 +278,7 @@ static int __pg_init_all_paths(struct multipath *m)
|
||||
return 0;
|
||||
|
||||
m->pg_init_count++;
|
||||
m->pg_init_required = 0;
|
||||
m->pg_init_required = false;
|
||||
|
||||
/* Check here to reset pg_init_required */
|
||||
if (!m->current_pg)
|
||||
@ -283,11 +304,11 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
|
||||
|
||||
/* Must we initialise the PG first, and queue I/O till it's ready? */
|
||||
if (m->hw_handler_name) {
|
||||
m->pg_init_required = 1;
|
||||
m->queue_io = 1;
|
||||
m->pg_init_required = true;
|
||||
m->queue_io = true;
|
||||
} else {
|
||||
m->pg_init_required = 0;
|
||||
m->queue_io = 0;
|
||||
m->pg_init_required = false;
|
||||
m->queue_io = false;
|
||||
}
|
||||
|
||||
m->pg_init_count = 0;
|
||||
@ -298,7 +319,7 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
|
||||
{
|
||||
struct dm_path *path;
|
||||
|
||||
path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
|
||||
path = pg->ps.type->select_path(&pg->ps, nr_bytes);
|
||||
if (!path)
|
||||
return -ENXIO;
|
||||
|
||||
@ -313,10 +334,10 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
|
||||
static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
|
||||
{
|
||||
struct priority_group *pg;
|
||||
unsigned bypassed = 1;
|
||||
bool bypassed = true;
|
||||
|
||||
if (!m->nr_valid_paths) {
|
||||
m->queue_io = 0;
|
||||
m->queue_io = false;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
@ -344,7 +365,7 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
|
||||
continue;
|
||||
if (!__choose_path_in_pg(m, pg, nr_bytes)) {
|
||||
if (!bypassed)
|
||||
m->pg_init_delay_retry = 1;
|
||||
m->pg_init_delay_retry = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -380,7 +401,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
|
||||
union map_info *map_context,
|
||||
struct request *rq, struct request **__clone)
|
||||
{
|
||||
struct multipath *m = (struct multipath *) ti->private;
|
||||
struct multipath *m = ti->private;
|
||||
int r = DM_MAPIO_REQUEUE;
|
||||
size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
|
||||
struct pgpath *pgpath;
|
||||
@ -390,8 +411,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
|
||||
spin_lock_irq(&m->lock);
|
||||
|
||||
/* Do we need to select a new pgpath? */
|
||||
if (!m->current_pgpath ||
|
||||
(!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
|
||||
if (!m->current_pgpath || !m->queue_io)
|
||||
__choose_pgpath(m, nr_bytes);
|
||||
|
||||
pgpath = m->current_pgpath;
|
||||
@ -405,11 +425,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (set_mapinfo(m, map_context) < 0)
|
||||
mpio = set_mpio(m, map_context);
|
||||
if (!mpio)
|
||||
/* ENOMEM, requeue */
|
||||
goto out_unlock;
|
||||
|
||||
mpio = map_context->ptr;
|
||||
mpio->pgpath = pgpath;
|
||||
mpio->nr_bytes = nr_bytes;
|
||||
|
||||
@ -418,17 +438,24 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
|
||||
spin_unlock_irq(&m->lock);
|
||||
|
||||
if (clone) {
|
||||
/* Old request-based interface: allocated clone is passed in */
|
||||
/*
|
||||
* Old request-based interface: allocated clone is passed in.
|
||||
* Used by: .request_fn stacked on .request_fn path(s).
|
||||
*/
|
||||
clone->q = bdev_get_queue(bdev);
|
||||
clone->rq_disk = bdev->bd_disk;
|
||||
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
|
||||
} else {
|
||||
/* blk-mq request-based interface */
|
||||
*__clone = blk_get_request(bdev_get_queue(bdev),
|
||||
rq_data_dir(rq), GFP_ATOMIC);
|
||||
/*
|
||||
* blk-mq request-based interface; used by both:
|
||||
* .request_fn stacked on blk-mq path(s) and
|
||||
* blk-mq stacked on blk-mq path(s).
|
||||
*/
|
||||
*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
|
||||
rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
|
||||
if (IS_ERR(*__clone)) {
|
||||
/* ENOMEM, requeue */
|
||||
clear_mapinfo(m, map_context);
|
||||
clear_request_fn_mpio(m, map_context);
|
||||
return r;
|
||||
}
|
||||
(*__clone)->bio = (*__clone)->biotail = NULL;
|
||||
@ -463,14 +490,14 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
|
||||
|
||||
static void multipath_release_clone(struct request *clone)
|
||||
{
|
||||
blk_put_request(clone);
|
||||
blk_mq_free_request(clone);
|
||||
}
|
||||
|
||||
/*
|
||||
* If we run out of usable paths, should we queue I/O or error it?
|
||||
*/
|
||||
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
|
||||
unsigned save_old_value)
|
||||
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
|
||||
bool save_old_value)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
@ -776,12 +803,12 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
|
||||
argc--;
|
||||
|
||||
if (!strcasecmp(arg_name, "queue_if_no_path")) {
|
||||
r = queue_if_no_path(m, 1, 0);
|
||||
r = queue_if_no_path(m, true, false);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
|
||||
m->retain_attached_hw_handler = 1;
|
||||
m->retain_attached_hw_handler = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -820,11 +847,12 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
|
||||
struct dm_arg_set as;
|
||||
unsigned pg_count = 0;
|
||||
unsigned next_pg_num;
|
||||
bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
|
||||
|
||||
as.argc = argc;
|
||||
as.argv = argv;
|
||||
|
||||
m = alloc_multipath(ti);
|
||||
m = alloc_multipath(ti, use_blk_mq);
|
||||
if (!m) {
|
||||
ti->error = "can't allocate multipath";
|
||||
return -EINVAL;
|
||||
@ -880,6 +908,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
|
||||
ti->num_flush_bios = 1;
|
||||
ti->num_discard_bios = 1;
|
||||
ti->num_write_same_bios = 1;
|
||||
if (use_blk_mq)
|
||||
ti->per_io_data_size = sizeof(struct dm_mpath_io);
|
||||
|
||||
return 0;
|
||||
|
||||
@ -917,7 +947,7 @@ static void flush_multipath_work(struct multipath *m)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
m->pg_init_disabled = 1;
|
||||
m->pg_init_disabled = true;
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
|
||||
flush_workqueue(kmpath_handlerd);
|
||||
@ -926,7 +956,7 @@ static void flush_multipath_work(struct multipath *m)
|
||||
flush_work(&m->trigger_event);
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
m->pg_init_disabled = 0;
|
||||
m->pg_init_disabled = false;
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
}
|
||||
|
||||
@ -954,7 +984,7 @@ static int fail_path(struct pgpath *pgpath)
|
||||
DMWARN("Failing path %s.", pgpath->path.dev->name);
|
||||
|
||||
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
|
||||
pgpath->is_active = 0;
|
||||
pgpath->is_active = false;
|
||||
pgpath->fail_count++;
|
||||
|
||||
m->nr_valid_paths--;
|
||||
@ -987,18 +1017,13 @@ static int reinstate_path(struct pgpath *pgpath)
|
||||
if (pgpath->is_active)
|
||||
goto out;
|
||||
|
||||
if (!pgpath->pg->ps.type->reinstate_path) {
|
||||
DMWARN("Reinstate path not supported by path selector %s",
|
||||
pgpath->pg->ps.type->name);
|
||||
r = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
DMWARN("Reinstating path %s.", pgpath->path.dev->name);
|
||||
|
||||
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
|
||||
if (r)
|
||||
goto out;
|
||||
|
||||
pgpath->is_active = 1;
|
||||
pgpath->is_active = true;
|
||||
|
||||
if (!m->nr_valid_paths++) {
|
||||
m->current_pgpath = NULL;
|
||||
@ -1045,7 +1070,7 @@ static int action_dev(struct multipath *m, struct dm_dev *dev,
|
||||
* Temporarily try to avoid having to use the specified PG
|
||||
*/
|
||||
static void bypass_pg(struct multipath *m, struct priority_group *pg,
|
||||
int bypassed)
|
||||
bool bypassed)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
@ -1078,7 +1103,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
list_for_each_entry(pg, &m->priority_groups, list) {
|
||||
pg->bypassed = 0;
|
||||
pg->bypassed = false;
|
||||
if (--pgnum)
|
||||
continue;
|
||||
|
||||
@ -1096,7 +1121,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
|
||||
* Set/clear bypassed status of a PG.
|
||||
* PGs are numbered upwards from 1 in the order they were declared.
|
||||
*/
|
||||
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
|
||||
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
|
||||
{
|
||||
struct priority_group *pg;
|
||||
unsigned pgnum;
|
||||
@ -1120,17 +1145,17 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
|
||||
/*
|
||||
* Should we retry pg_init immediately?
|
||||
*/
|
||||
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
|
||||
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
|
||||
{
|
||||
unsigned long flags;
|
||||
int limit_reached = 0;
|
||||
bool limit_reached = false;
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
|
||||
if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
|
||||
m->pg_init_required = 1;
|
||||
m->pg_init_required = true;
|
||||
else
|
||||
limit_reached = 1;
|
||||
limit_reached = true;
|
||||
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
|
||||
@ -1143,7 +1168,7 @@ static void pg_init_done(void *data, int errors)
|
||||
struct priority_group *pg = pgpath->pg;
|
||||
struct multipath *m = pg->m;
|
||||
unsigned long flags;
|
||||
unsigned delay_retry = 0;
|
||||
bool delay_retry = false;
|
||||
|
||||
/* device or driver problems */
|
||||
switch (errors) {
|
||||
@ -1166,7 +1191,7 @@ static void pg_init_done(void *data, int errors)
|
||||
* Probably doing something like FW upgrade on the
|
||||
* controller so try the other pg.
|
||||
*/
|
||||
bypass_pg(m, pg, 1);
|
||||
bypass_pg(m, pg, true);
|
||||
break;
|
||||
case SCSI_DH_RETRY:
|
||||
/* Wait before retrying. */
|
||||
@ -1177,6 +1202,7 @@ static void pg_init_done(void *data, int errors)
|
||||
fail_path(pgpath);
|
||||
errors = 0;
|
||||
break;
|
||||
case SCSI_DH_DEV_OFFLINED:
|
||||
default:
|
||||
/*
|
||||
* We probably do not want to fail the path for a device
|
||||
@ -1194,7 +1220,7 @@ static void pg_init_done(void *data, int errors)
|
||||
m->current_pg = NULL;
|
||||
}
|
||||
} else if (!m->pg_init_required)
|
||||
pg->bypassed = 0;
|
||||
pg->bypassed = false;
|
||||
|
||||
if (--m->pg_init_in_progress)
|
||||
/* Activations of other paths are still on going */
|
||||
@ -1205,7 +1231,7 @@ static void pg_init_done(void *data, int errors)
|
||||
if (__pg_init_all_paths(m))
|
||||
goto out;
|
||||
}
|
||||
m->queue_io = 0;
|
||||
m->queue_io = false;
|
||||
|
||||
/*
|
||||
* Wake up any thread waiting to suspend.
|
||||
@ -1291,21 +1317,21 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
|
||||
int error, union map_info *map_context)
|
||||
{
|
||||
struct multipath *m = ti->private;
|
||||
struct dm_mpath_io *mpio = map_context->ptr;
|
||||
struct dm_mpath_io *mpio = get_mpio(map_context);
|
||||
struct pgpath *pgpath;
|
||||
struct path_selector *ps;
|
||||
int r;
|
||||
|
||||
BUG_ON(!mpio);
|
||||
|
||||
r = do_end_io(m, clone, error, mpio);
|
||||
r = do_end_io(m, clone, error, mpio);
|
||||
pgpath = mpio->pgpath;
|
||||
if (pgpath) {
|
||||
ps = &pgpath->pg->ps;
|
||||
if (ps->type->end_io)
|
||||
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
|
||||
}
|
||||
clear_mapinfo(m, map_context);
|
||||
clear_request_fn_mpio(m, map_context);
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -1318,9 +1344,9 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
|
||||
*/
|
||||
static void multipath_presuspend(struct dm_target *ti)
|
||||
{
|
||||
struct multipath *m = (struct multipath *) ti->private;
|
||||
struct multipath *m = ti->private;
|
||||
|
||||
queue_if_no_path(m, 0, 1);
|
||||
queue_if_no_path(m, false, true);
|
||||
}
|
||||
|
||||
static void multipath_postsuspend(struct dm_target *ti)
|
||||
@ -1337,7 +1363,7 @@ static void multipath_postsuspend(struct dm_target *ti)
|
||||
*/
|
||||
static void multipath_resume(struct dm_target *ti)
|
||||
{
|
||||
struct multipath *m = (struct multipath *) ti->private;
|
||||
struct multipath *m = ti->private;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
@ -1366,7 +1392,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
|
||||
{
|
||||
int sz = 0;
|
||||
unsigned long flags;
|
||||
struct multipath *m = (struct multipath *) ti->private;
|
||||
struct multipath *m = ti->private;
|
||||
struct priority_group *pg;
|
||||
struct pgpath *p;
|
||||
unsigned pg_num;
|
||||
@ -1474,7 +1500,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
|
||||
{
|
||||
int r = -EINVAL;
|
||||
struct dm_dev *dev;
|
||||
struct multipath *m = (struct multipath *) ti->private;
|
||||
struct multipath *m = ti->private;
|
||||
action_fn action;
|
||||
|
||||
mutex_lock(&m->work_mutex);
|
||||
@ -1486,10 +1512,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
|
||||
|
||||
if (argc == 1) {
|
||||
if (!strcasecmp(argv[0], "queue_if_no_path")) {
|
||||
r = queue_if_no_path(m, 1, 0);
|
||||
r = queue_if_no_path(m, true, false);
|
||||
goto out;
|
||||
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
|
||||
r = queue_if_no_path(m, 0, 0);
|
||||
r = queue_if_no_path(m, false, false);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
@ -1500,10 +1526,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
|
||||
}
|
||||
|
||||
if (!strcasecmp(argv[0], "disable_group")) {
|
||||
r = bypass_pg_num(m, argv[1], 1);
|
||||
r = bypass_pg_num(m, argv[1], true);
|
||||
goto out;
|
||||
} else if (!strcasecmp(argv[0], "enable_group")) {
|
||||
r = bypass_pg_num(m, argv[1], 0);
|
||||
r = bypass_pg_num(m, argv[1], false);
|
||||
goto out;
|
||||
} else if (!strcasecmp(argv[0], "switch_group")) {
|
||||
r = switch_pg_num(m, argv[1]);
|
||||
@ -1604,7 +1630,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __pgpath_busy(struct pgpath *pgpath)
|
||||
static int pgpath_busy(struct pgpath *pgpath)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
|
||||
|
||||
@ -1621,7 +1647,7 @@ static int __pgpath_busy(struct pgpath *pgpath)
|
||||
*/
|
||||
static int multipath_busy(struct dm_target *ti)
|
||||
{
|
||||
int busy = 0, has_active = 0;
|
||||
bool busy = false, has_active = false;
|
||||
struct multipath *m = ti->private;
|
||||
struct priority_group *pg;
|
||||
struct pgpath *pgpath;
|
||||
@ -1632,7 +1658,7 @@ static int multipath_busy(struct dm_target *ti)
|
||||
/* pg_init in progress or no paths available */
|
||||
if (m->pg_init_in_progress ||
|
||||
(!m->nr_valid_paths && m->queue_if_no_path)) {
|
||||
busy = 1;
|
||||
busy = true;
|
||||
goto out;
|
||||
}
|
||||
/* Guess which priority_group will be used at next mapping time */
|
||||
@ -1654,13 +1680,12 @@ static int multipath_busy(struct dm_target *ti)
|
||||
* If there is one non-busy active path at least, the path selector
|
||||
* will be able to select it. So we consider such a pg as not busy.
|
||||
*/
|
||||
busy = 1;
|
||||
busy = true;
|
||||
list_for_each_entry(pgpath, &pg->pgpaths, list)
|
||||
if (pgpath->is_active) {
|
||||
has_active = 1;
|
||||
|
||||
if (!__pgpath_busy(pgpath)) {
|
||||
busy = 0;
|
||||
has_active = true;
|
||||
if (!pgpath_busy(pgpath)) {
|
||||
busy = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1671,7 +1696,7 @@ static int multipath_busy(struct dm_target *ti)
|
||||
* the current_pg will be changed at next mapping time.
|
||||
* We need to try mapping to determine it.
|
||||
*/
|
||||
busy = 0;
|
||||
busy = false;
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
@ -1684,7 +1709,8 @@ out:
|
||||
*---------------------------------------------------------------*/
|
||||
static struct target_type multipath_target = {
|
||||
.name = "multipath",
|
||||
.version = {1, 10, 0},
|
||||
.version = {1, 11, 0},
|
||||
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
|
||||
.module = THIS_MODULE,
|
||||
.ctr = multipath_ctr,
|
||||
.dtr = multipath_dtr,
|
||||
|
@ -50,13 +50,8 @@ struct path_selector_type {
|
||||
/*
|
||||
* Chooses a path for this io, if no paths are available then
|
||||
* NULL will be returned.
|
||||
*
|
||||
* repeat_count is the number of times to use the path before
|
||||
* calling the function again. 0 means don't call it again unless
|
||||
* the path fails.
|
||||
*/
|
||||
struct dm_path *(*select_path) (struct path_selector *ps,
|
||||
unsigned *repeat_count,
|
||||
size_t nr_bytes);
|
||||
|
||||
/*
|
||||
|
@ -23,12 +23,13 @@
|
||||
#include <linux/atomic.h>
|
||||
|
||||
#define DM_MSG_PREFIX "multipath queue-length"
|
||||
#define QL_MIN_IO 128
|
||||
#define QL_VERSION "0.1.0"
|
||||
#define QL_MIN_IO 1
|
||||
#define QL_VERSION "0.2.0"
|
||||
|
||||
struct selector {
|
||||
struct list_head valid_paths;
|
||||
struct list_head failed_paths;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct path_info {
|
||||
@ -45,6 +46,7 @@ static struct selector *alloc_selector(void)
|
||||
if (s) {
|
||||
INIT_LIST_HEAD(&s->valid_paths);
|
||||
INIT_LIST_HEAD(&s->failed_paths);
|
||||
spin_lock_init(&s->lock);
|
||||
}
|
||||
|
||||
return s;
|
||||
@ -113,6 +115,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
|
||||
struct path_info *pi;
|
||||
unsigned repeat_count = QL_MIN_IO;
|
||||
char dummy;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* Arguments: [<repeat_count>]
|
||||
@ -129,6 +132,11 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (repeat_count > 1) {
|
||||
DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
|
||||
repeat_count = 1;
|
||||
}
|
||||
|
||||
/* Allocate the path information structure */
|
||||
pi = kmalloc(sizeof(*pi), GFP_KERNEL);
|
||||
if (!pi) {
|
||||
@ -142,7 +150,9 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
|
||||
|
||||
path->pscontext = pi;
|
||||
|
||||
spin_lock_irqsave(&s->lock, flags);
|
||||
list_add_tail(&pi->list, &s->valid_paths);
|
||||
spin_unlock_irqrestore(&s->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -151,16 +161,22 @@ static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
|
||||
{
|
||||
struct selector *s = ps->context;
|
||||
struct path_info *pi = path->pscontext;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&s->lock, flags);
|
||||
list_move(&pi->list, &s->failed_paths);
|
||||
spin_unlock_irqrestore(&s->lock, flags);
|
||||
}
|
||||
|
||||
static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
|
||||
{
|
||||
struct selector *s = ps->context;
|
||||
struct path_info *pi = path->pscontext;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&s->lock, flags);
|
||||
list_move_tail(&pi->list, &s->valid_paths);
|
||||
spin_unlock_irqrestore(&s->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -168,14 +184,16 @@ static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
|
||||
/*
|
||||
* Select a path having the minimum number of in-flight I/Os
|
||||
*/
|
||||
static struct dm_path *ql_select_path(struct path_selector *ps,
|
||||
unsigned *repeat_count, size_t nr_bytes)
|
||||
static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
|
||||
{
|
||||
struct selector *s = ps->context;
|
||||
struct path_info *pi = NULL, *best = NULL;
|
||||
struct dm_path *ret = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&s->lock, flags);
|
||||
if (list_empty(&s->valid_paths))
|
||||
return NULL;
|
||||
goto out;
|
||||
|
||||
/* Change preferred (first in list) path to evenly balance. */
|
||||
list_move_tail(s->valid_paths.next, &s->valid_paths);
|
||||
@ -190,11 +208,12 @@ static struct dm_path *ql_select_path(struct path_selector *ps,
|
||||
}
|
||||
|
||||
if (!best)
|
||||
return NULL;
|
||||
goto out;
|
||||
|
||||
*repeat_count = best->repeat_count;
|
||||
|
||||
return best->path;
|
||||
ret = best->path;
|
||||
out:
|
||||
spin_unlock_irqrestore(&s->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ql_start_io(struct path_selector *ps, struct dm_path *path,
|
||||
|
@ -1121,7 +1121,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||
|
||||
ti->num_flush_bios = 1;
|
||||
ti->num_discard_bios = 1;
|
||||
ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
|
||||
ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
|
||||
ti->discard_zeroes_data_unsupported = true;
|
||||
|
||||
ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
|
||||
|
@ -17,6 +17,8 @@
|
||||
#include <linux/module.h>
|
||||
|
||||
#define DM_MSG_PREFIX "multipath round-robin"
|
||||
#define RR_MIN_IO 1000
|
||||
#define RR_VERSION "1.1.0"
|
||||
|
||||
/*-----------------------------------------------------------------
|
||||
* Path-handling code, paths are held in lists
|
||||
@ -41,23 +43,48 @@ static void free_paths(struct list_head *paths)
|
||||
* Round-robin selector
|
||||
*---------------------------------------------------------------*/
|
||||
|
||||
#define RR_MIN_IO 1000
|
||||
|
||||
struct selector {
|
||||
struct list_head valid_paths;
|
||||
struct list_head invalid_paths;
|
||||
spinlock_t lock;
|
||||
struct dm_path * __percpu *current_path;
|
||||
struct percpu_counter repeat_count;
|
||||
};
|
||||
|
||||
static void set_percpu_current_path(struct selector *s, struct dm_path *path)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
*per_cpu_ptr(s->current_path, cpu) = path;
|
||||
}
|
||||
|
||||
static struct selector *alloc_selector(void)
|
||||
{
|
||||
struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
|
||||
|
||||
if (s) {
|
||||
INIT_LIST_HEAD(&s->valid_paths);
|
||||
INIT_LIST_HEAD(&s->invalid_paths);
|
||||
}
|
||||
if (!s)
|
||||
return NULL;
|
||||
|
||||
INIT_LIST_HEAD(&s->valid_paths);
|
||||
INIT_LIST_HEAD(&s->invalid_paths);
|
||||
spin_lock_init(&s->lock);
|
||||
|
||||
s->current_path = alloc_percpu(struct dm_path *);
|
||||
if (!s->current_path)
|
||||
goto out_current_path;
|
||||
set_percpu_current_path(s, NULL);
|
||||
|
||||
if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL))
|
||||
goto out_repeat_count;
|
||||
|
||||
return s;
|
||||
|
||||
out_repeat_count:
|
||||
free_percpu(s->current_path);
|
||||
out_current_path:
|
||||
kfree(s);
|
||||
return NULL;;
|
||||
}
|
||||
|
||||
static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
|
||||
@@ -74,10 +101,12 @@ static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
 
 static void rr_destroy(struct path_selector *ps)
 {
-	struct selector *s = (struct selector *) ps->context;
+	struct selector *s = ps->context;
 
 	free_paths(&s->valid_paths);
 	free_paths(&s->invalid_paths);
+	free_percpu(s->current_path);
+	percpu_counter_destroy(&s->repeat_count);
 	kfree(s);
 	ps->context = NULL;
 }
@@ -111,10 +140,11 @@ static int rr_status(struct path_selector *ps, struct dm_path *path,
 static int rr_add_path(struct path_selector *ps, struct dm_path *path,
 		       int argc, char **argv, char **error)
 {
-	struct selector *s = (struct selector *) ps->context;
+	struct selector *s = ps->context;
 	struct path_info *pi;
 	unsigned repeat_count = RR_MIN_IO;
 	char dummy;
+	unsigned long flags;
 
 	if (argc > 1) {
 		*error = "round-robin ps: incorrect number of arguments";
@@ -139,42 +169,65 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
 
 	path->pscontext = pi;
 
+	spin_lock_irqsave(&s->lock, flags);
 	list_add_tail(&pi->list, &s->valid_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 
 	return 0;
 }
 
 static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
 {
-	struct selector *s = (struct selector *) ps->context;
+	unsigned long flags;
+	struct selector *s = ps->context;
 	struct path_info *pi = p->pscontext;
 
+	spin_lock_irqsave(&s->lock, flags);
+	if (p == *this_cpu_ptr(s->current_path))
+		set_percpu_current_path(s, NULL);
+
 	list_move(&pi->list, &s->invalid_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 }
 
 static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p)
 {
-	struct selector *s = (struct selector *) ps->context;
+	unsigned long flags;
+	struct selector *s = ps->context;
 	struct path_info *pi = p->pscontext;
 
+	spin_lock_irqsave(&s->lock, flags);
 	list_move(&pi->list, &s->valid_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 
 	return 0;
 }
 
-static struct dm_path *rr_select_path(struct path_selector *ps,
-				      unsigned *repeat_count, size_t nr_bytes)
+static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
 {
-	struct selector *s = (struct selector *) ps->context;
+	unsigned long flags;
+	struct selector *s = ps->context;
 	struct path_info *pi = NULL;
+	struct dm_path *current_path = NULL;
+
+	current_path = *this_cpu_ptr(s->current_path);
+	if (current_path) {
+		percpu_counter_dec(&s->repeat_count);
+		if (percpu_counter_read_positive(&s->repeat_count) > 0)
+			return current_path;
+	}
 
+	spin_lock_irqsave(&s->lock, flags);
 	if (!list_empty(&s->valid_paths)) {
 		pi = list_entry(s->valid_paths.next, struct path_info, list);
 		list_move_tail(&pi->list, &s->valid_paths);
-		*repeat_count = pi->repeat_count;
+		percpu_counter_set(&s->repeat_count, pi->repeat_count);
+		set_percpu_current_path(s, pi->path);
+		current_path = pi->path;
 	}
+	spin_unlock_irqrestore(&s->lock, flags);
 
-	return pi ? pi->path : NULL;
+	return current_path;
 }
 
 static struct path_selector_type rr_ps = {
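The rr_select_path() rewrite above avoids the selector spinlock on most I/Os: each CPU keeps a cached current path, a percpu counter is decremented lock-free until the repeat budget runs out, and only then is the lock taken to rotate the valid-path list. A rough user-space model of that scheme, using a thread-local cache and a C11 atomic in place of the kernel's percpu machinery (all names and constants below are illustrative, not from the patch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NPATHS     3
#define REPEAT_IO  1000

static const char *paths[NPATHS] = { "sda", "sdb", "sdc" };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned next_path;                      /* protected by 'lock' */
static atomic_long repeat_count;                /* shared, approximate budget */
static _Thread_local const char *current_path;  /* per-thread cached choice */

/* Fast path: reuse the cached path while the shared budget lasts;
 * slow path: take the lock, advance round robin, reset the budget. */
static const char *select_path(void)
{
	if (current_path && atomic_fetch_sub(&repeat_count, 1) > 1)
		return current_path;

	pthread_mutex_lock(&lock);
	current_path = paths[next_path];
	next_path = (next_path + 1) % NPATHS;
	atomic_store(&repeat_count, REPEAT_IO);
	pthread_mutex_unlock(&lock);

	return current_path;
}

int main(void)
{
	const char *prev = NULL;

	for (int i = 0; i < 3 * REPEAT_IO + 3; i++) {
		const char *p = select_path();

		if (p != prev) {   /* print only when the selection rotates */
			printf("io %4d -> %s\n", i, p);
			prev = p;
		}
	}
	return 0;
}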
@@ -198,7 +251,7 @@ static int __init dm_rr_init(void)
 	if (r < 0)
 		DMERR("register failed %d", r);
 
-	DMINFO("version 1.0.0 loaded");
+	DMINFO("version " RR_VERSION " loaded");
 
 	return r;
 }
@@ -19,11 +19,12 @@
 #define ST_MAX_RELATIVE_THROUGHPUT	100
 #define ST_MAX_RELATIVE_THROUGHPUT_SHIFT	7
 #define ST_MAX_INFLIGHT_SIZE	((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT)
-#define ST_VERSION	"0.2.0"
+#define ST_VERSION	"0.3.0"
 
 struct selector {
 	struct list_head valid_paths;
 	struct list_head failed_paths;
+	spinlock_t lock;
 };
 
 struct path_info {
@@ -41,6 +42,7 @@ static struct selector *alloc_selector(void)
 	if (s) {
 		INIT_LIST_HEAD(&s->valid_paths);
 		INIT_LIST_HEAD(&s->failed_paths);
+		spin_lock_init(&s->lock);
 	}
 
 	return s;
@@ -111,6 +113,7 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
 	unsigned repeat_count = ST_MIN_IO;
 	unsigned relative_throughput = 1;
 	char dummy;
+	unsigned long flags;
 
 	/*
	 * Arguments: [<repeat_count> [<relative_throughput>]]
@@ -134,6 +137,11 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
 		return -EINVAL;
 	}
 
+	if (repeat_count > 1) {
+		DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
+		repeat_count = 1;
+	}
+
 	if ((argc == 2) &&
 	    (sscanf(argv[1], "%u%c", &relative_throughput, &dummy) != 1 ||
 	     relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) {
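The hunk above keeps accepting the old repeat_count argument but clamps anything above 1 and emits a rate-limited warning. A small user-space sketch of that "accept, warn, clamp" argument handling (argument layout and messages are illustrative only):

#include <stdio.h>
#include <stdlib.h>

/* Parse an optional repeat_count argument; values above 1 are still
 * accepted for compatibility but are clamped with a warning, mirroring
 * the DMWARN_LIMIT() + clamp in the hunk above. */
static unsigned parse_repeat_count(int argc, char **argv)
{
	unsigned repeat_count = 1;   /* default */
	char dummy;

	if (argc >= 1 && sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1) {
		fprintf(stderr, "invalid repeat_count '%s'\n", argv[0]);
		exit(EXIT_FAILURE);
	}

	if (repeat_count > 1) {
		fprintf(stderr, "repeat_count > 1 is deprecated, using 1 instead\n");
		repeat_count = 1;
	}

	return repeat_count;
}

int main(int argc, char **argv)
{
	printf("repeat_count = %u\n", parse_repeat_count(argc - 1, argv + 1));
	return 0;
}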
@@ -155,7 +163,9 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
 
 	path->pscontext = pi;
 
+	spin_lock_irqsave(&s->lock, flags);
 	list_add_tail(&pi->list, &s->valid_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 
 	return 0;
 }
@@ -164,16 +174,22 @@ static void st_fail_path(struct path_selector *ps, struct dm_path *path)
 {
 	struct selector *s = ps->context;
 	struct path_info *pi = path->pscontext;
+	unsigned long flags;
 
+	spin_lock_irqsave(&s->lock, flags);
 	list_move(&pi->list, &s->failed_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 }
 
 static int st_reinstate_path(struct path_selector *ps, struct dm_path *path)
 {
 	struct selector *s = ps->context;
 	struct path_info *pi = path->pscontext;
+	unsigned long flags;
 
+	spin_lock_irqsave(&s->lock, flags);
 	list_move_tail(&pi->list, &s->valid_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 
 	return 0;
 }
@@ -255,14 +271,16 @@ static int st_compare_load(struct path_info *pi1, struct path_info *pi2,
 	return pi2->relative_throughput - pi1->relative_throughput;
 }
 
-static struct dm_path *st_select_path(struct path_selector *ps,
-				      unsigned *repeat_count, size_t nr_bytes)
+static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
 {
 	struct selector *s = ps->context;
 	struct path_info *pi = NULL, *best = NULL;
+	struct dm_path *ret = NULL;
+	unsigned long flags;
 
+	spin_lock_irqsave(&s->lock, flags);
 	if (list_empty(&s->valid_paths))
-		return NULL;
+		goto out;
 
 	/* Change preferred (first in list) path to evenly balance. */
 	list_move_tail(s->valid_paths.next, &s->valid_paths);
@@ -272,11 +290,12 @@ static struct dm_path *st_select_path(struct path_selector *ps,
 			best = pi;
 
 	if (!best)
-		return NULL;
+		goto out;
 
-	*repeat_count = best->repeat_count;
-
-	return best->path;
+	ret = best->path;
+out:
+	spin_unlock_irqrestore(&s->lock, flags);
+	return ret;
 }
 
 static int st_start_io(struct path_selector *ps, struct dm_path *path,
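st_select_path() above now scans the valid list under the selector lock and relies on a load comparison to pick the least-loaded path. A simplified, stand-alone sketch of that weighted comparison, throughput-weighted in the spirit of dm-service-time but not the exact kernel arithmetic (struct layout and values are invented):

#include <stddef.h>
#include <stdio.h>

struct path_info {
	const char *name;
	size_t in_flight_size;        /* bytes currently queued to this path */
	unsigned relative_throughput; /* user-supplied weight, 0..100 */
};

/* Negative: pi1 is the better (less loaded) choice; positive: pi2 is. */
static long compare_load(const struct path_info *pi1, const struct path_info *pi2)
{
	/* A zero-throughput path only wins against another zero-throughput path. */
	if (!pi1->relative_throughput || !pi2->relative_throughput)
		return (long)pi2->relative_throughput - (long)pi1->relative_throughput;

	/* Compare size1/t1 vs size2/t2 without dividing: size1*t2 vs size2*t1. */
	return (long)(pi1->in_flight_size * pi2->relative_throughput) -
	       (long)(pi2->in_flight_size * pi1->relative_throughput);
}

int main(void)
{
	struct path_info a = { "pathA", 8192, 100 };
	struct path_info b = { "pathB", 2048, 10 };

	printf("prefer %s\n", compare_load(&a, &b) < 0 ? a.name : b.name);
	return 0;
}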
@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	int i;
 	int r = -EINVAL;
 	char *origin_path, *cow_path;
+	dev_t origin_dev, cow_dev;
 	unsigned args_used, num_flush_bios = 1;
 	fmode_t origin_mode = FMODE_READ;
 
@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ti->error = "Cannot get origin device";
 		goto bad_origin;
 	}
+	origin_dev = s->origin->bdev->bd_dev;
 
 	cow_path = argv[0];
 	argv++;
 	argc--;
 
+	cow_dev = dm_get_dev_t(cow_path);
+	if (cow_dev && cow_dev == origin_dev) {
+		ti->error = "COW device cannot be the same as origin device";
+		r = -EINVAL;
+		goto bad_cow;
+	}
+
 	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
 	if (r) {
 		ti->error = "Cannot get COW device";
@@ -1201,7 +1210,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	ti->private = s;
 	ti->num_flush_bios = num_flush_bios;
-	ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
+	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
 
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
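The snapshot_ctr() change above rejects a table whose COW and origin paths resolve to the same underlying device, by comparing their device numbers before opening the COW device. A user-space analogue of that check, resolving block-device nodes with stat() instead of the kernel's dm_get_dev_t() (the helper name and usage message are illustrative):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

/* Resolve a block-device node to its dev_t, or 0 on failure --
 * a user-space stand-in for dm_get_dev_t() in the hunk above. */
static dev_t get_dev_t(const char *path)
{
	struct stat st;

	if (stat(path, &st) || !S_ISBLK(st.st_mode))
		return 0;
	return st.st_rdev;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <origin-dev> <cow-dev>\n", argv[0]);
		return 1;
	}

	dev_t origin = get_dev_t(argv[1]);
	dev_t cow = get_dev_t(argv[2]);

	if (cow && cow == origin) {
		fprintf(stderr, "COW device cannot be the same as origin device\n");
		return 1;
	}

	printf("origin %u:%u, cow %u:%u -- ok\n",
	       major(origin), minor(origin), major(cow), minor(cow));
	return 0;
}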
@@ -364,6 +364,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
 	return 0;
 }
 
+/*
+ * Convert the path to a device
+ */
+dev_t dm_get_dev_t(const char *path)
+{
+	dev_t uninitialized_var(dev);
+	struct block_device *bdev;
+
+	bdev = lookup_bdev(path);
+	if (IS_ERR(bdev))
+		dev = name_to_dev_t(path);
+	else {
+		dev = bdev->bd_dev;
+		bdput(bdev);
+	}
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(dm_get_dev_t);
+
 /*
  * Add a device to the list, or just increment the usage count if
  * it's already present.
@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 		  struct dm_dev **result)
 {
 	int r;
-	dev_t uninitialized_var(dev);
+	dev_t dev;
 	struct dm_dev_internal *dd;
 	struct dm_table *t = ti->table;
-	struct block_device *bdev;
 
 	BUG_ON(!t);
 
-	/* convert the path to a device */
-	bdev = lookup_bdev(path);
-	if (IS_ERR(bdev)) {
-		dev = name_to_dev_t(path);
-		if (!dev)
-			return -ENODEV;
-	} else {
-		dev = bdev->bd_dev;
-		bdput(bdev);
-	}
+	dev = dm_get_dev_t(path);
+	if (!dev)
+		return -ENODEV;
 
 	dd = find_device(&t->devices, dev);
 	if (!dd) {
@@ -920,6 +932,30 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 	return t->immutable_target_type;
 }
 
+struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
+{
+	/* Immutable target is implicitly a singleton */
+	if (t->num_targets > 1 ||
+	    !dm_target_is_immutable(t->targets[0].type))
+		return NULL;
+
+	return t->targets;
+}
+
+struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
+{
+	struct dm_target *uninitialized_var(ti);
+	unsigned i = 0;
+
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+		if (dm_target_is_wildcard(ti->type))
+			return ti;
+	}
+
+	return NULL;
+}
+
 bool dm_table_request_based(struct dm_table *t)
 {
 	return __table_type_request_based(dm_table_get_type(t));
@@ -933,7 +969,7 @@ bool dm_table_mq_request_based(struct dm_table *t)
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
 	unsigned type = dm_table_get_type(t);
-	unsigned per_bio_data_size = 0;
+	unsigned per_io_data_size = 0;
 	struct dm_target *tgt;
 	unsigned i;
 
@@ -945,10 +981,10 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 	if (type == DM_TYPE_BIO_BASED)
 		for (i = 0; i < t->num_targets; i++) {
 			tgt = t->targets + i;
-			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
+			per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
 		}
 
-	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
+	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
 	if (!t->mempools)
 		return -ENOMEM;
 
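dm_table_get_immutable_target() and dm_table_get_wildcard_target() above are small lookup helpers: the immutable target only counts when it is the table's sole entry, and the wildcard lookup is a linear scan for a feature bit. A stand-alone sketch of those two lookups over a flattened table (type names and flag values here are stand-ins, not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

#define TARGET_IMMUTABLE 0x04
#define TARGET_WILDCARD  0x08

struct target {
	const char *name;
	unsigned features;
};

struct table {
	struct target *targets;
	unsigned num_targets;
};

/* First target carrying the wildcard feature bit, or NULL. */
static struct target *get_wildcard_target(struct table *t)
{
	for (unsigned i = 0; i < t->num_targets; i++)
		if (t->targets[i].features & TARGET_WILDCARD)
			return &t->targets[i];
	return NULL;
}

/* An immutable target is implicitly a singleton: it only counts if it
 * is the table's only target. */
static struct target *get_immutable_target(struct table *t)
{
	if (t->num_targets != 1 || !(t->targets[0].features & TARGET_IMMUTABLE))
		return NULL;
	return &t->targets[0];
}

int main(void)
{
	struct target tgts[] = {
		{ "linear", 0 },
		{ "error",  TARGET_WILDCARD },
	};
	struct table t = { tgts, 2 };
	struct target *w = get_wildcard_target(&t);
	struct target *im = get_immutable_target(&t);

	printf("wildcard: %s, immutable: %s\n",
	       w ? w->name : "none", im ? im->name : "none");
	return 0;
}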
@@ -150,7 +150,8 @@ static void io_err_release_clone_rq(struct request *clone)
 
 static struct target_type error_target = {
 	.name = "error",
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
+	.features = DM_TARGET_WILDCARD,
 	.ctr  = io_err_ctr,
 	.dtr  = io_err_dtr,
 	.map  = io_err_map,
@@ -344,7 +344,7 @@ static void subtree_dec(void *context, const void *value)
 	memcpy(&root_le, value, sizeof(root_le));
 	root = le64_to_cpu(root_le);
 	if (dm_btree_del(info, root))
-		DMERR("btree delete failed\n");
+		DMERR("btree delete failed");
 }
 
 static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
@@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
 
 void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
 {
-	dm_tm_issue_prefetches(pmd->tm);
+	down_read(&pmd->root_lock);
+	if (!pmd->fail_io)
+		dm_tm_issue_prefetches(pmd->tm);
+	up_read(&pmd->root_lock);
 }
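dm_pool_issue_prefetches() above now checks the metadata's fail_io flag under the root lock before touching the transaction manager, so nothing is issued after the metadata device has failed. A user-space sketch of that "check the failure flag under a read lock, then skip the work" shape, with a pthread rwlock standing in for the kernel rw_semaphore (names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool_metadata {
	pthread_rwlock_t root_lock;
	bool fail_io;          /* set once the metadata device has failed */
};

static void issue_prefetches(struct pool_metadata *pmd)
{
	/* Take the lock in read mode and skip the work entirely once the
	 * metadata has gone into fail_io mode, as the hunk above does. */
	pthread_rwlock_rdlock(&pmd->root_lock);
	if (!pmd->fail_io)
		printf("issuing metadata prefetches\n");
	else
		printf("fail_io set, skipping prefetches\n");
	pthread_rwlock_unlock(&pmd->root_lock);
}

int main(void)
{
	struct pool_metadata pmd = { PTHREAD_RWLOCK_INITIALIZER, false };

	issue_prefetches(&pmd);
	pmd.fail_io = true;    /* simulate a metadata failure */
	issue_prefetches(&pmd);
	return 0;
}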
@@ -235,6 +235,7 @@ struct pool {
 	struct pool_features pf;
 	bool low_water_triggered:1;	/* A dm event has been sent */
 	bool suspended:1;
+	bool out_of_data_space:1;
 
 	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
@@ -461,9 +462,16 @@ static void cell_error_with_code(struct pool *pool,
 	dm_bio_prison_free_cell(pool->prison, cell);
 }
 
+static int get_pool_io_error_code(struct pool *pool)
+{
+	return pool->out_of_data_space ? -ENOSPC : -EIO;
+}
+
 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-	cell_error_with_code(pool, cell, -EIO);
+	int error = get_pool_io_error_code(pool);
+
+	cell_error_with_code(pool, cell, error);
 }
 
 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -622,7 +630,9 @@ static void error_retry_list_with_code(struct pool *pool, int error)
 
 static void error_retry_list(struct pool *pool)
 {
-	return error_retry_list_with_code(pool, -EIO);
+	int error = get_pool_io_error_code(pool);
+
+	return error_retry_list_with_code(pool, error);
 }
 
 /*
@@ -2419,6 +2429,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		 */
 		if (old_mode != new_mode)
 			notify_of_pool_mode_change_to_oods(pool);
+		pool->out_of_data_space = true;
 		pool->process_bio = process_bio_read_only;
 		pool->process_discard = process_discard_bio;
 		pool->process_cell = process_cell_read_only;
@@ -2432,6 +2443,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	case PM_WRITE:
 		if (old_mode != new_mode)
 			notify_of_pool_mode_change(pool, "write");
+		pool->out_of_data_space = false;
 		pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
 		dm_pool_metadata_read_write(pool->pmd);
 		pool->process_bio = process_bio;
@@ -2832,6 +2844,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	INIT_LIST_HEAD(&pool->active_thins);
 	pool->low_water_triggered = false;
 	pool->suspended = true;
+	pool->out_of_data_space = false;
 
 	pool->shared_read_ds = dm_deferred_set_create();
 	if (!pool->shared_read_ds) {
@@ -3886,7 +3899,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 17, 0},
+	.version = {1, 18, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4037,7 +4050,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	ti->num_flush_bios = 1;
 	ti->flush_supported = true;
-	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
+	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
 
 	/* In case the pool supports discards, pass them on. */
 	ti->discard_zeroes_data_unsupported = true;
@@ -4260,7 +4273,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 17, 0},
+	.version = {1, 18, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
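The thin-pool changes above track an out_of_data_space flag and route all I/O failures through get_pool_io_error_code(), so a pool that has run out of data space consistently reports -ENOSPC rather than a generic -EIO. A tiny user-space illustration of that decision (the struct is a stand-in, not the kernel's struct pool):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct pool {
	bool out_of_data_space;
};

/* Same decision as get_pool_io_error_code() above: once the pool has run
 * out of data space, failing I/O reports -ENOSPC instead of -EIO. */
static int pool_io_error_code(const struct pool *pool)
{
	return pool->out_of_data_space ? -ENOSPC : -EIO;
}

int main(void)
{
	struct pool p = { .out_of_data_space = false };

	printf("normal failure:       %s\n", strerror(-pool_io_error_code(&p)));
	p.out_of_data_space = true;
	printf("out-of-space failure: %s\n", strerror(-pool_io_error_code(&p)));
	return 0;
}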
@@ -812,7 +812,7 @@ int verity_fec_ctr(struct dm_verity *v)
 	}
 
 	/* Reserve space for our per-bio data */
-	ti->per_bio_data_size += sizeof(struct dm_verity_fec_io);
+	ti->per_io_data_size += sizeof(struct dm_verity_fec_io);
 
 	return 0;
 }
@@ -354,7 +354,7 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
 					size_t len))
 {
 	unsigned todo = 1 << v->data_dev_block_bits;
-	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
 	do {
 		int r;
@@ -460,7 +460,7 @@ static int verity_verify_io(struct dm_verity_io *io)
 static void verity_finish_io(struct dm_verity_io *io, int error)
 {
 	struct dm_verity *v = io->v;
-	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
 	bio->bi_end_io = io->orig_bi_end_io;
 	bio->bi_error = error;
@@ -574,7 +574,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 	if (bio_data_dir(bio) == WRITE)
 		return -EIO;
 
-	io = dm_per_bio_data(bio, ti->per_bio_data_size);
+	io = dm_per_bio_data(bio, ti->per_io_data_size);
 	io->v = v;
 	io->orig_bi_end_io = bio->bi_end_io;
 	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
@@ -1036,15 +1036,15 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto bad;
 	}
 
-	ti->per_bio_data_size = sizeof(struct dm_verity_io) +
+	ti->per_io_data_size = sizeof(struct dm_verity_io) +
 				v->shash_descsize + v->digest_size * 2;
 
 	r = verity_fec_ctr(v);
 	if (r)
 		goto bad;
 
-	ti->per_bio_data_size = roundup(ti->per_bio_data_size,
-					__alignof__(struct dm_verity_io));
+	ti->per_io_data_size = roundup(ti->per_io_data_size,
+				       __alignof__(struct dm_verity_io));
 
 	return 0;
 
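verity_ctr() above sizes the renamed per_io_data_size field and then rounds it up to the alignment of struct dm_verity_io so the per-I/O area stays properly aligned. A short user-space illustration of that rounding step (the struct here is a made-up stand-in, and this helper assumes a power-of-two alignment):

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

struct verity_io_like {
	long block;
	unsigned char digest[32];
};

/* Round 'size' up to the next multiple of 'align' (align must be a power
 * of two) -- what the roundup(..., __alignof__(...)) in the hunk above does. */
static size_t roundup_pow2(size_t size, size_t align)
{
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	size_t per_io = sizeof(struct verity_io_like) + 13; /* odd extra bytes */
	size_t align = alignof(struct verity_io_like);

	printf("raw %zu, aligned to %zu -> %zu\n",
	       per_io, align, roundup_pow2(per_io, align));
	return 0;
}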
|
drivers/md/dm.c (600): file diff suppressed because it is too large
@@ -73,6 +73,8 @@ int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 unsigned dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
+struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
+struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 bool dm_table_mq_request_based(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
@@ -84,7 +86,7 @@ void dm_set_md_type(struct mapped_device *md, unsigned type);
 unsigned dm_get_md_type(struct mapped_device *md);
 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 
-int dm_setup_md_queue(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
 
 /*
  * To check the return value from dm_table_find_target().
@@ -124,6 +124,8 @@ struct dm_dev {
 	char name[16];
 };
 
+dev_t dm_get_dev_t(const char *path);
+
 /*
  * Constructors should call these functions to ensure destination devices
  * are opened/closed correctly.
@@ -189,6 +191,13 @@ struct target_type {
 #define DM_TARGET_IMMUTABLE		0x00000004
 #define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)
 
+/*
+ * Indicates that a target may replace any target; even immutable targets.
+ * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
+ */
+#define DM_TARGET_WILDCARD		0x00000008
+#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)
+
 /*
  * Some targets need to be sent the same WRITE bio severals times so
  * that they can send copies of it to different devices. This function
@@ -231,10 +240,10 @@ struct dm_target {
 	unsigned num_write_same_bios;
 
 	/*
-	 * The minimum number of extra bytes allocated in each bio for the
-	 * target to use. dm_per_bio_data returns the data location.
+	 * The minimum number of extra bytes allocated in each io for the
+	 * target to use.
 	 */
-	unsigned per_bio_data_size;
+	unsigned per_io_data_size;
 
 	/*
	 * If defined, this function is called to find out how many