// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

static struct workqueue_struct *dm_mpath_wq;

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0

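/*
 * Seconds to keep queueing I/O while no path is usable before the nopath
 * timer fires and fails the queued I/O; 0 (the default) disables the
 * timeout.  See enable_nopath_timeout() below.
 */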
static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned int fail_count;	/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned int pg_num;		/* Reference number */
	unsigned int nr_pgpaths;	/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned int nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;

	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
	u64 start_time_ns;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);

/*
 *-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------
 */
#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
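
/*
 * Test an MPATHF_* flag locklessly first; only if it appears set is the
 * test repeated under m->lock for an authoritative answer.  This keeps the
 * common (flag clear) fast path lock-free.
 */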
static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
	bool r = test_bit(MPATHF_bit, &m->flags);

	if (r) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		r = test_bit(MPATHF_bit, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return r;
}

/*
 *-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------
 */
static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;

	pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}
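
/*
 * Multipath allocation is split in two stages: alloc_multipath() sets up
 * everything that does not depend on the table's queue_mode, and
 * alloc_multipath_stage2() finishes initialization once the queue_mode
 * (request-based vs bio-based) is known from the feature arguments.
 */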
static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		atomic_set(&m->nr_valid_paths, 0);
		INIT_WORK(&m->trigger_event, trigger_event);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;

		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
	}

	return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	/*
	 * Init fields that are only used when a scsi_dh is attached
	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
	 */
	set_bit(MPATHF_QUEUE_IO, &m->flags);
	atomic_set(&m->pg_init_in_progress, 0);
	atomic_set(&m->pg_init_count, 0);
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	init_waitqueue_head(&m->pg_init_wait);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mutex_destroy(&m->work_mutex);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}
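
/*
 * For bio-based multipath the per-bio-data carries a struct dm_mpath_io
 * immediately followed by a struct dm_bio_details, so a bio can be restored
 * and resubmitted if it has to be requeued.
 */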
static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	void *bio_details = mpio + 1;

	return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	mpio->start_time_ns = 0;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}

/*
 *-----------------------------------------------
 * Path selection
 *-----------------------------------------------
 */
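
/*
 * Kick off pg_init (hardware handler activation) on every active path in
 * the current priority group.  Returns the number of pg_init activations
 * now in flight; 0 means nothing was started (pg_init already in progress,
 * disabled, or no current PG).  Caller must hold m->lock.
 */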
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	lockdep_assert_held(&m->lock);

	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}
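
/*
 * Select a path for an I/O of nr_bytes.  Preference order: an explicitly
 * requested next_pg, then the current PG, then a two-pass scan of all
 * priority groups (first skipping bypassed PGs, then retrying only those,
 * with PG_INIT_DELAY_RETRY set so controllers are not hammered).
 */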
static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned int bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed) {
					spin_lock_irqsave(&m->lock, flags);
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
					spin_unlock_irqrestore(&m->lock, flags);
				}
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
		      dm_table_device_name((m)->ti->table),		\
		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
		      dm_noflush_suspending((m)->ti))

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 */
static bool __must_push_back(struct multipath *m)
{
	return dm_noflush_suspending(m->ti);
}

static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&m->lock, flags);
	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (must_push_back_rq(m))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		pg_init_all_paths(m);
		return DM_MAPIO_DELAY_REQUEUE;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
				     BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		if (blk_queue_dying(q)) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_DELAY_REQUEUE;
		}

		/*
		 * blk-mq's SCHED_RESTART can cover this requeue, so we
		 * needn't deal with it by DELAY_REQUEUE. More importantly,
		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
		 * get the queue busy feedback (via BLK_STS_RESOURCE),
		 * otherwise I/O merging can suffer.
		 */
		return DM_MAPIO_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}
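
/*
 * Free a clone allocated by multipath_clone_and_map().  When called while
 * still inside the map method (map_context != NULL), first undo the path
 * selector accounting via its end_io hook.
 */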
static void multipath_release_clone(struct request *clone,
				    union map_info *map_context)
{
	if (unlikely(map_context)) {
		/*
		 * non-NULL map_context means caller is still map
		 * method; must undo multipath_clone_and_map()
		 */
		struct dm_mpath_io *mpio = get_mpio(map_context);
		struct pgpath *pgpath = mpio->pgpath;

		if (pgpath && pgpath->pg->ps.type->end_io)
			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
						    &pgpath->path,
						    mpio->nr_bytes,
						    clone->io_start_time_ns);
	}

	blk_mq_free_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */

static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	/* Queue for the daemon to resubmit */
	bio_list_add(&m->queued_bios, bio);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	__multipath_queue_bio(m, bio);
	spin_unlock_irqrestore(&m->lock, flags);
}
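
/*
 * Pick a path for a bio.  Returns a pgpath to map to, NULL when there is no
 * usable path, or ERR_PTR(-EAGAIN) when the bio has been queued for later
 * resubmission (no path yet, or pg_init still outstanding).
 */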
2017-12-11 16:02:29 +00:00
|
|
|
static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
|
2016-05-19 20:15:14 +00:00
|
|
|
{
|
|
|
|
struct pgpath *pgpath;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/* Do we need to select a new pgpath? */
|
2017-10-24 10:22:48 +00:00
|
|
|
pgpath = READ_ONCE(m->current_pgpath);
|
2020-06-15 15:31:04 +00:00
|
|
|
if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
|
2017-12-11 16:02:29 +00:00
|
|
|
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
|
2016-05-19 20:15:14 +00:00
|
|
|
|
2020-06-11 00:19:56 +00:00
|
|
|
if (!pgpath) {
|
2016-05-19 20:15:14 +00:00
|
|
|
spin_lock_irqsave(&m->lock, flags);
|
2020-06-11 00:19:56 +00:00
|
|
|
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
|
|
|
|
__multipath_queue_bio(m, bio);
|
|
|
|
pgpath = ERR_PTR(-EAGAIN);
|
|
|
|
}
|
2016-05-19 20:15:14 +00:00
|
|
|
spin_unlock_irqrestore(&m->lock, flags);
|
2017-12-10 20:37:21 +00:00
|
|
|
|
2020-06-15 15:31:04 +00:00
|
|
|
} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
|
|
|
|
mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
|
2020-06-11 00:03:19 +00:00
|
|
|
multipath_queue_bio(m, bio);
|
2020-06-11 00:19:56 +00:00
|
|
|
pg_init_all_paths(m);
|
2017-12-11 16:02:29 +00:00
|
|
|
return ERR_PTR(-EAGAIN);
|
2016-05-19 20:15:14 +00:00
|
|
|
}
|
|
|
|
|
2017-12-11 16:02:29 +00:00
|
|
|
return pgpath;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int __multipath_map_bio(struct multipath *m, struct bio *bio,
			       struct dm_mpath_io *mpio)
{
	struct pgpath *pgpath = __map_bio(m, bio);

	if (IS_ERR(pgpath))
		return DM_MAPIO_SUBMITTED;

	if (!pgpath) {
		if (__must_push_back(m))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;

	if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
		mpio->start_time_ns = ktime_get_ns();

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      mpio->nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio);
	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}
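
/*
 * Worker that drains m->queued_bios: each bio is restored from its saved
 * dm_bio_details and pushed through __multipath_map_bio() again, then
 * resubmitted, requeued, or completed with an error depending on the result.
 */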
static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);

		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
		r = __multipath_map_bio(m, bio, mpio);
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			submit_bio_noacct(bio);
			break;
		case DM_MAPIO_SUBMITTED:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
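/*
 * queue_if_no_path() updates MPATHF_QUEUE_IF_NO_PATH (optionally saving the
 * previous value so it can be restored after suspend); when queueing is
 * switched off it also kicks the request queue and the queued-bios worker so
 * anything already held is dispatched or failed.
 */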
static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
			    bool save_old_value, const char *caller)
{
	unsigned long flags;
	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
	const char *dm_dev_name = dm_table_device_name(m->ti->table);

	DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
		dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);

	spin_lock_irqsave(&m->lock, flags);

	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);

	if (save_old_value) {
		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
			      dm_dev_name);
		} else
			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
	} else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
		/* due to "fail_if_no_path" message, need to honor it. */
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}
	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);

	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
		dm_dev_name, __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
		dm_noflush_suspending(m->ti));

	spin_unlock_irqrestore(&m->lock, flags);

	if (!f_queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}

/*
 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 * process any queued I/O.
 */
static void queue_if_no_path_timeout_work(struct timer_list *t)
{
	struct multipath *m = from_timer(m, t, nopath_timer);

	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
	       dm_table_device_name(m->ti->table));
	queue_if_no_path(m, false, false, __func__);
}

/*
 * Enable the queue_if_no_path timeout if necessary.
 * Called with m->lock held.
 */
static void enable_nopath_timeout(struct multipath *m)
{
	unsigned long queue_if_no_path_timeout =
		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;

	lockdep_assert_held(&m->lock);

	if (queue_if_no_path_timeout > 0 &&
	    atomic_read(&m->nr_valid_paths) == 0 &&
	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		mod_timer(&m->nopath_timer,
			  jiffies + queue_if_no_path_timeout);
	}
}

static void disable_nopath_timeout(struct multipath *m)
{
	del_timer_sync(&m->nopath_timer);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*
 *---------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------
 */
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned int ps_argc;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}
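
/*
 * Attach (or adopt) a SCSI device handler for one path device.  When
 * MPATHF_RETAIN_ATTACHED_HW_HANDLER is set and a handler is already
 * attached, the table's hw_handler_name is rewritten to match it instead of
 * forcing a different handler onto the device.
 */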
static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
			 const char **attached_handler_name, char **error)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int r;

	if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
retain:
		if (*attached_handler_name) {
			/*
			 * Clear any hw_handler_params associated with a
			 * handler that isn't already attached.
			 */
			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
				kfree(m->hw_handler_params);
				m->hw_handler_params = NULL;
			}

			/*
			 * Reset hw_handler_name to match the attached handler
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = *attached_handler_name;
			*attached_handler_name = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			DMINFO("retaining handler on device %pg", bdev);
			goto retain;
		}
		if (r < 0) {
			*error = "error attaching hardware handler";
			return r;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				*error = "unable to set hardware handler parameters";
				return r;
			}
		}
	}

	return 0;
}
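
/*
 * Parse one path argument set: open the underlying device, wire up any SCSI
 * device handler, and register the path with the priority group's selector.
 */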
|
|
|
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q;
	const char *attached_handler_name = NULL;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	q = bdev_get_queue(p->path.dev->bdev);
	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
	if (attached_handler_name || m->hw_handler_name) {
		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
		kfree(attached_handler_name);
		if (r) {
			dm_put_device(ti, p->path.dev);
			goto bad;
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;
bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

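/*
 * Parse one priority-group clause: path selector, number of paths,
 * number of per-path selector args, followed by each path description.
 */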
static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static const struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned int i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

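/*
 * Parse the optional hardware-handler name and parameters from the table
 * line.  Only meaningful for request-based maps; bio-based maps consume
 * and ignore any handler arguments.
 */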
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned int hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

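/*
 * Parse the optional feature arguments: queue_if_no_path,
 * retain_attached_hw_handler, pg_init_retries, pg_init_delay_msecs
 * and queue_mode.
 */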
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned int argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false, __func__);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq") ||
				 !strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

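/*
 * Target constructor.  An illustrative table line (example only, not
 * taken from this file) for two round-robin priority groups of two
 * paths each, with no features and no hardware handler, would be:
 *
 *   0 0 2 1 round-robin 0 2 1 8:16 1 8:32 1 round-robin 0 2 1 8:48 1 8:64 1
 *
 * i.e. <#feature args> <#hw handler args> <#groups> <initial group>
 * followed by one priority-group clause per group.
 */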
static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* target arguments */
	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned int pg_count = 0;
	unsigned int next_pg_num;
	unsigned long flags;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	spin_lock_irqsave(&m->lock, flags);
	enable_nopath_timeout(m);
	spin_unlock_irqrestore(&m->lock, flags);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

bad:
	free_multipath(m);
	return r;
}

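/*
 * Sleep until every outstanding pg_init (path activation) has completed.
 */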
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	finish_wait(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	if (m->hw_handler_name) {
		unsigned long flags;

		if (!atomic_read(&m->pg_init_in_progress))
			goto skip;

		spin_lock_irqsave(&m->lock, flags);
		if (atomic_read(&m->pg_init_in_progress) &&
		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
			spin_unlock_irqrestore(&m->lock, flags);

			flush_workqueue(kmpath_handlerd);
			multipath_wait_for_pg_init_completion(m);

			spin_lock_irqsave(&m->lock, flags);
			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}
skip:
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		flush_work(&m->process_queued_bios);
	flush_work(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	disable_nopath_timeout(m);
	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("%s: Failing path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

	queue_work(dm_mpath_wq, &m->trigger_event);

	enable_nopath_timeout(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;
	unsigned int nr_valid_paths;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	DMWARN("%s: Reinstating path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	if (pgpath->is_active)
		disable_nopath_timeout(m);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned int pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to %s", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned int pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

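/*
 * Completion callback for scsi_dh_activate(): translate the SCSI
 * device-handler status into path failure, PG bypassing or a (possibly
 * delayed) pg_init retry, then release any I/O queued behind pg_init.
 */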
static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = true;
		fallthrough;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still ongoing */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	process_queued_io_list(m);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

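/*
 * Start handler activation for a usable path, or complete immediately as
 * SCSI_DH_DEV_OFFLINED when the path is failed or its queue is dying.
 */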
static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}

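/*
 * Request-based end-of-I/O handling: on a path error, fail the path and
 * ask DM core to requeue the original request (with a delay when the
 * error was BLK_STS_RESOURCE); if no valid paths remain and queueing is
 * not allowed, complete with the original error.
 */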
static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    blk_status_t error, union map_info *map_context)
{
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath = mpio->pgpath;
	int r = DM_ENDIO_DONE;

	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	if (error && blk_path_error(error)) {
		struct multipath *m = ti->private;

		if (error == BLK_STS_RESOURCE)
			r = DM_ENDIO_DELAY_REQUEUE;
		else
			r = DM_ENDIO_REQUEUE;

		if (pgpath)
			fail_path(pgpath);

		if (!atomic_read(&m->nr_valid_paths) &&
		    !must_push_back_rq(m)) {
			if (error == BLK_STS_IOERR)
				dm_report_EIO(m);
			/* complete with the original error */
			r = DM_ENDIO_DONE;
		}
	}

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 clone->io_start_time_ns);
	}

	return r;
}

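/*
 * Bio-based counterpart of multipath_end_io(): on a path error, fail the
 * path and either requeue the bio through the multipath workqueue or,
 * when no path is available and queue_if_no_path is not set, complete it
 * with an error.
 */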
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
				blk_status_t *error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath = mpio->pgpath;
	unsigned long flags;
	int r = DM_ENDIO_DONE;

	if (!*error || !blk_path_error(*error))
		goto done;

	if (pgpath)
		fail_path(pgpath);

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (__must_push_back(m)) {
				r = DM_ENDIO_REQUEUE;
			} else {
				dm_report_EIO(m);
				*error = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&m->lock, flags);
			goto done;
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	multipath_queue_bio(m, clone);
	r = DM_ENDIO_INCOMPLETE;
done:
	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 (mpio->start_time_ns ?:
					  dm_start_time_ns_from_clone(clone)));
	}

	return r;
}

/*
 * Suspend with flush can't complete until all the I/O is processed
 * so if the last path fails we must error any remaining I/O.
 * - Note that if the freeze_bdev fails while suspending, the
 *   queue_if_no_path state is lost - userspace should reset it.
 * Otherwise, during noflush suspend, queue_if_no_path will not change.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
		queue_if_no_path(m, false, true, __func__);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}

	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
		dm_table_device_name(m->ti->table), __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));

	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned int status_flags, char *result, unsigned int maxlen)
{
	int sz = 0, pg_counter, pgpath_counter;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned int pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch (m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			default:
				WARN_ON_ONCE(true);
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_IMA:
		sz = 0; /*reset the result pointer*/

		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);

		pg_counter = 0;
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */
			DMEMIT(",pg_state_%d=%c", pg_counter, state);
			DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
			DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);

			pgpath_counter = 0;
			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
				       pg_counter, pgpath_counter, p->path.dev->name,
				       pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
				       pg_counter, pgpath_counter, p->fail_count);
				if (pg->ps.type->status) {
					DMEMIT(",path_selector_status_%d_%d=",
					       pg_counter, pgpath_counter);
					sz += pg->ps.type->status(&pg->ps, &p->path,
								  type, result + sz,
								  maxlen - sz);
				}
				pgpath_counter++;
			}
			pg_counter++;
		}
		DMEMIT(";");
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

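/*
 * Handle "dmsetup message" commands: queue_if_no_path, fail_if_no_path,
 * disable_group <#>, enable_group <#>, switch_group <#>,
 * reinstate_path <dev> and fail_path <dev>.
 * Example usage (illustrative only): dmsetup message mpathX 0 fail_path 8:32
 */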
2023-01-25 20:14:58 +00:00
|
|
|
static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
|
|
|
|
char *result, unsigned int maxlen)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2009-12-10 23:52:21 +00:00
|
|
|
int r = -EINVAL;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct dm_dev *dev;
|
2016-02-03 02:53:15 +00:00
|
|
|
struct multipath *m = ti->private;
|
2005-04-16 22:20:36 +00:00
|
|
|
action_fn action;
|
2020-01-13 22:41:27 +00:00
|
|
|
unsigned long flags;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-12-10 23:52:21 +00:00
|
|
|
mutex_lock(&m->work_mutex);
|
|
|
|
|
2009-12-10 23:52:27 +00:00
|
|
|
if (dm_suspended(ti)) {
|
|
|
|
r = -EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (argc == 1) {
|
2011-08-02 11:32:04 +00:00
|
|
|
if (!strcasecmp(argv[0], "queue_if_no_path")) {
|
2020-05-29 19:59:13 +00:00
|
|
|
r = queue_if_no_path(m, true, false, __func__);
|
2020-01-13 22:41:27 +00:00
|
|
|
spin_lock_irqsave(&m->lock, flags);
|
|
|
|
enable_nopath_timeout(m);
|
|
|
|
spin_unlock_irqrestore(&m->lock, flags);
|
2009-12-10 23:52:21 +00:00
|
|
|
goto out;
|
2011-08-02 11:32:04 +00:00
|
|
|
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
|
2020-05-29 19:59:13 +00:00
|
|
|
r = queue_if_no_path(m, false, false, __func__);
|
2020-01-13 22:41:27 +00:00
|
|
|
disable_nopath_timeout(m);
|
2009-12-10 23:52:21 +00:00
|
|
|
goto out;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2009-12-10 23:52:21 +00:00
|
|
|
if (argc != 2) {
|
2014-01-29 16:52:45 +00:00
|
|
|
DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
|
2009-12-10 23:52:21 +00:00
|
|
|
goto out;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-08-02 11:32:04 +00:00
|
|
|
if (!strcasecmp(argv[0], "disable_group")) {
|
2016-02-10 18:02:21 +00:00
|
|
|
r = bypass_pg_num(m, argv[1], true);
|
2009-12-10 23:52:21 +00:00
|
|
|
goto out;
|
2011-08-02 11:32:04 +00:00
|
|
|
} else if (!strcasecmp(argv[0], "enable_group")) {
|
2016-02-10 18:02:21 +00:00
|
|
|
r = bypass_pg_num(m, argv[1], false);
|
2009-12-10 23:52:21 +00:00
|
|
|
goto out;
|
2011-08-02 11:32:04 +00:00
|
|
|
} else if (!strcasecmp(argv[0], "switch_group")) {
|
2009-12-10 23:52:21 +00:00
|
|
|
r = switch_pg_num(m, argv[1]);
|
|
|
|
goto out;
|
2011-08-02 11:32:04 +00:00
|
|
|
} else if (!strcasecmp(argv[0], "reinstate_path"))
|
2005-04-16 22:20:36 +00:00
|
|
|
action = reinstate_path;
|
2011-08-02 11:32:04 +00:00
|
|
|
else if (!strcasecmp(argv[0], "fail_path"))
|
2005-04-16 22:20:36 +00:00
|
|
|
action = fail_path;
|
2009-12-10 23:52:21 +00:00
|
|
|
else {
|
2014-01-29 16:52:45 +00:00
|
|
|
DMWARN("Unrecognised multipath message received: %s", argv[0]);
|
2009-12-10 23:52:21 +00:00
|
|
|
goto out;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-03-06 02:32:27 +00:00
|
|
|
r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (r) {
|
2006-06-26 07:27:35 +00:00
|
|
|
DMWARN("message: error getting device %s",
|
2005-04-16 22:20:36 +00:00
|
|
|
argv[1]);
|
2009-12-10 23:52:21 +00:00
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
r = action_dev(m, dev, action);
|
|
|
|
|
|
|
|
dm_put_device(ti, dev);
|
|
|
|
|
2009-12-10 23:52:21 +00:00
|
|
|
out:
|
|
|
|
mutex_unlock(&m->work_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
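/*
 * Pick the block device that ioctls should be forwarded to: the current
 * pgpath's underlying device, unless I/O is still being queued for
 * pg_init.  Returns 0 on success, 1 when the target and device sizes
 * differ (so the ioctl must not be passed straight through), -ENOTCONN
 * while a path may still become usable (pg_init pending or
 * queue_if_no_path set), or -EIO when no path is available.
 */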
2015-10-15 12:10:50 +00:00
|
|
|
static int multipath_prepare_ioctl(struct dm_target *ti,
|
2018-04-03 20:54:10 +00:00
|
|
|
struct block_device **bdev)
|
2006-10-03 08:15:20 +00:00
|
|
|
{
|
2012-06-02 23:29:58 +00:00
|
|
|
struct multipath *m = ti->private;
|
2020-06-11 01:25:39 +00:00
|
|
|
struct pgpath *pgpath;
|
2020-06-10 19:02:37 +00:00
|
|
|
unsigned long flags;
|
2012-06-02 23:29:58 +00:00
|
|
|
int r;
|
|
|
|
|
2020-06-11 01:25:39 +00:00
|
|
|
pgpath = READ_ONCE(m->current_pgpath);
|
2020-06-15 15:31:04 +00:00
|
|
|
if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
|
2020-06-11 01:25:39 +00:00
|
|
|
pgpath = choose_pgpath(m, 0);
|
2006-10-03 08:15:20 +00:00
|
|
|
|
2020-06-11 01:25:39 +00:00
|
|
|
if (pgpath) {
|
2020-06-15 15:31:04 +00:00
|
|
|
if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
|
2020-06-11 01:25:39 +00:00
|
|
|
*bdev = pgpath->path.dev->bdev;
|
2015-11-17 09:36:56 +00:00
|
|
|
r = 0;
|
|
|
|
} else {
|
|
|
|
/* pg_init has not started or completed */
|
|
|
|
r = -ENOTCONN;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* No path is available */
|
2020-06-10 20:07:57 +00:00
|
|
|
r = -EIO;
|
|
|
|
spin_lock_irqsave(&m->lock, flags);
|
2016-03-17 20:32:10 +00:00
|
|
|
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
|
2015-11-17 09:36:56 +00:00
|
|
|
r = -ENOTCONN;
|
2020-06-10 20:07:57 +00:00
|
|
|
spin_unlock_irqrestore(&m->lock, flags);
|
2006-10-03 08:15:22 +00:00
|
|
|
}
|
2006-10-03 08:15:20 +00:00
|
|
|
|
2015-11-17 09:39:26 +00:00
|
|
|
if (r == -ENOTCONN) {
|
2017-10-24 10:22:48 +00:00
|
|
|
if (!READ_ONCE(m->current_pg)) {
|
2014-02-28 14:33:45 +00:00
|
|
|
/* Path status changed, redo selection */
|
2016-03-17 22:38:17 +00:00
|
|
|
(void) choose_pgpath(m, 0);
|
2014-02-28 14:33:45 +00:00
|
|
|
}
|
2020-06-10 19:02:37 +00:00
|
|
|
spin_lock_irqsave(&m->lock, flags);
|
2016-03-17 20:32:10 +00:00
|
|
|
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
|
2020-06-10 19:02:37 +00:00
|
|
|
(void) __pg_init_all_paths(m);
|
|
|
|
spin_unlock_irqrestore(&m->lock, flags);
|
2014-05-26 12:45:39 +00:00
|
|
|
dm_table_run_md_queue_async(m->ti->table);
|
2016-09-14 14:47:03 +00:00
|
|
|
process_queued_io_list(m);
|
2014-02-28 14:33:45 +00:00
|
|
|
}
|
2012-06-02 23:29:58 +00:00
|
|
|
|
2015-10-15 12:10:50 +00:00
|
|
|
/*
|
|
|
|
* Only pass ioctls through if the device sizes match exactly.
|
|
|
|
*/
|
2021-10-18 10:11:05 +00:00
|
|
|
if (!r && ti->len != bdev_nr_sectors((*bdev)))
|
2015-10-15 12:10:50 +00:00
|
|
|
return 1;
|
|
|
|
return r;
|
2006-10-03 08:15:20 +00:00
|
|
|
}
|
|
|
|
|
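/*
 * Call @fn for every underlying path device in every priority group;
 * stop and return the first non-zero result.
 */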
2009-06-22 09:12:33 +00:00
|
|
|
static int multipath_iterate_devices(struct dm_target *ti,
|
|
|
|
iterate_devices_callout_fn fn, void *data)
|
|
|
|
{
|
|
|
|
struct multipath *m = ti->private;
|
|
|
|
struct priority_group *pg;
|
|
|
|
struct pgpath *p;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
list_for_each_entry(pg, &m->priority_groups, list) {
|
|
|
|
list_for_each_entry(p, &pg->pgpaths, list) {
|
2009-07-23 19:30:42 +00:00
|
|
|
ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
|
2009-06-22 09:12:33 +00:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
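/*
 * A path counts as busy when the low-level driver of its underlying
 * request queue reports busy via blk_lld_busy().
 */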
2016-02-12 02:42:28 +00:00
|
|
|
static int pgpath_busy(struct pgpath *pgpath)
|
2009-06-22 09:12:37 +00:00
|
|
|
{
|
|
|
|
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
|
|
|
|
|
2015-02-23 21:36:41 +00:00
|
|
|
return blk_lld_busy(q);
|
2009-06-22 09:12:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We return "busy", only when we can map I/Os but underlying devices
|
|
|
|
* are busy (so even if we map I/Os now, the I/Os will wait on
|
|
|
|
* the underlying queue).
|
|
|
|
* In other words, if we want to kill I/Os or queue them inside us
|
|
|
|
* due to map unavailability, we don't return "busy". Otherwise,
|
|
|
|
* dm core won't give us the I/Os and we can't do what we want.
|
|
|
|
*/
|
|
|
|
static int multipath_busy(struct dm_target *ti)
|
|
|
|
{
|
2016-02-10 18:02:21 +00:00
|
|
|
bool busy = false, has_active = false;
|
2009-06-22 09:12:37 +00:00
|
|
|
struct multipath *m = ti->private;
|
2016-03-17 22:38:17 +00:00
|
|
|
struct priority_group *pg, *next_pg;
|
2009-06-22 09:12:37 +00:00
|
|
|
struct pgpath *pgpath;
|
|
|
|
|
2016-09-09 23:26:19 +00:00
|
|
|
/* pg_init in progress */
|
|
|
|
if (atomic_read(&m->pg_init_in_progress))
|
2016-03-17 22:38:17 +00:00
|
|
|
return true;
|
|
|
|
|
2016-09-09 23:26:19 +00:00
|
|
|
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
|
2020-06-10 20:07:57 +00:00
|
|
|
if (!atomic_read(&m->nr_valid_paths)) {
|
|
|
|
unsigned long flags;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-06-10 20:07:57 +00:00
|
|
|
spin_lock_irqsave(&m->lock, flags);
|
|
|
|
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
|
|
|
|
spin_unlock_irqrestore(&m->lock, flags);
|
|
|
|
return (m->queue_mode != DM_TYPE_REQUEST_BASED);
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&m->lock, flags);
|
|
|
|
}
|
2016-09-09 23:26:19 +00:00
|
|
|
|
2009-06-22 09:12:37 +00:00
|
|
|
/* Guess which priority_group will be used at next mapping time */
|
2017-10-24 10:22:48 +00:00
|
|
|
pg = READ_ONCE(m->current_pg);
|
|
|
|
next_pg = READ_ONCE(m->next_pg);
|
|
|
|
if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
|
2016-03-17 22:38:17 +00:00
|
|
|
pg = next_pg;
|
|
|
|
|
|
|
|
if (!pg) {
|
2009-06-22 09:12:37 +00:00
|
|
|
/*
|
|
|
|
* We don't know which pg will be used at next mapping time.
|
2016-03-17 22:38:17 +00:00
|
|
|
 * We don't call choose_pgpath() here to avoid triggering
|
2009-06-22 09:12:37 +00:00
|
|
|
* pg_init just by busy checking.
|
|
|
|
* So we don't know whether underlying devices we will be using
|
|
|
|
* at next mapping time are busy or not. Just try mapping.
|
|
|
|
*/
|
2016-03-17 22:38:17 +00:00
|
|
|
return busy;
|
|
|
|
}
|
2009-06-22 09:12:37 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
 * If there is at least one non-busy active path, the path selector
|
|
|
|
 * will be able to select it, so we consider such a pg as not busy.
|
|
|
|
*/
|
2016-02-10 18:02:21 +00:00
|
|
|
busy = true;
|
2016-03-17 22:38:17 +00:00
|
|
|
list_for_each_entry(pgpath, &pg->pgpaths, list) {
|
2009-06-22 09:12:37 +00:00
|
|
|
if (pgpath->is_active) {
|
2016-02-10 18:02:21 +00:00
|
|
|
has_active = true;
|
2016-02-12 02:42:28 +00:00
|
|
|
if (!pgpath_busy(pgpath)) {
|
2016-02-10 18:02:21 +00:00
|
|
|
busy = false;
|
2009-06-22 09:12:37 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-03-17 22:38:17 +00:00
|
|
|
}
|
2009-06-22 09:12:37 +00:00
|
|
|
|
2016-03-17 22:38:17 +00:00
|
|
|
if (!has_active) {
|
2009-06-22 09:12:37 +00:00
|
|
|
/*
|
|
|
|
* No active path in this pg, so this pg won't be used and
|
|
|
|
* the current_pg will be changed at next mapping time.
|
|
|
|
* We need to try mapping to determine it.
|
|
|
|
*/
|
2016-02-10 18:02:21 +00:00
|
|
|
busy = false;
|
2016-03-17 22:38:17 +00:00
|
|
|
}
|
2009-06-22 09:12:37 +00:00
|
|
|
|
|
|
|
return busy;
|
|
|
|
}
|
|
|
|
|
2023-01-26 14:48:30 +00:00
|
|
|
/*
|
|
|
|
*---------------------------------------------------------------
|
2005-04-16 22:20:36 +00:00
|
|
|
* Module setup
|
2023-01-26 14:48:30 +00:00
|
|
|
*---------------------------------------------------------------
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
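/*
 * Target callbacks.  Both request-based (clone_and_map_rq/rq_end_io) and
 * bio-based (map/end_io) entry points are provided; DM core invokes the
 * set that matches the table's queue mode.
 */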
static struct target_type multipath_target = {
|
|
|
|
.name = "multipath",
|
2020-02-27 19:25:31 +00:00
|
|
|
.version = {1, 14, 0},
|
2018-03-14 14:33:06 +00:00
|
|
|
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
|
|
|
|
DM_TARGET_PASSES_INTEGRITY,
|
2005-04-16 22:20:36 +00:00
|
|
|
.module = THIS_MODULE,
|
|
|
|
.ctr = multipath_ctr,
|
|
|
|
.dtr = multipath_dtr,
|
2014-12-18 02:08:12 +00:00
|
|
|
.clone_and_map_rq = multipath_clone_and_map,
|
|
|
|
.release_clone_rq = multipath_release_clone,
|
2009-06-22 09:12:37 +00:00
|
|
|
.rq_end_io = multipath_end_io,
|
2016-05-19 20:15:14 +00:00
|
|
|
.map = multipath_map_bio,
|
|
|
|
.end_io = multipath_end_io_bio,
|
|
|
|
.presuspend = multipath_presuspend,
|
|
|
|
.postsuspend = multipath_postsuspend,
|
|
|
|
.resume = multipath_resume,
|
|
|
|
.status = multipath_status,
|
|
|
|
.message = multipath_message,
|
|
|
|
.prepare_ioctl = multipath_prepare_ioctl,
|
|
|
|
.iterate_devices = multipath_iterate_devices,
|
|
|
|
.busy = multipath_busy,
|
|
|
|
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
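/*
 * Allocate the workqueues used by the target, then register the target;
 * on failure, unwind in reverse order.
 */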
static int __init dm_multipath_init(void)
|
|
|
|
{
|
2022-04-20 05:12:26 +00:00
|
|
|
int r = -ENOMEM;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-01-13 19:59:57 +00:00
|
|
|
kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
|
2005-05-05 23:16:07 +00:00
|
|
|
if (!kmultipathd) {
|
2007-07-12 16:27:01 +00:00
|
|
|
DMERR("failed to create workqueue kmpathd");
|
2015-01-11 11:45:23 +00:00
|
|
|
goto bad_alloc_kmultipathd;
|
2005-05-05 23:16:07 +00:00
|
|
|
}
|
|
|
|
|
2008-05-01 21:50:22 +00:00
|
|
|
/*
|
|
|
|
* A separate workqueue is used to handle the device handlers
|
|
|
|
* to avoid overloading existing workqueue. Overloading the
|
|
|
|
* old workqueue would also create a bottleneck in the
|
|
|
|
* path of the storage hardware device activation.
|
|
|
|
*/
|
2011-01-13 19:59:57 +00:00
|
|
|
kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
|
|
|
|
WQ_MEM_RECLAIM);
|
2008-05-01 21:50:22 +00:00
|
|
|
if (!kmpath_handlerd) {
|
|
|
|
DMERR("failed to create workqueue kmpath_handlerd");
|
2015-01-11 11:45:23 +00:00
|
|
|
goto bad_alloc_kmpath_handlerd;
|
2008-05-01 21:50:22 +00:00
|
|
|
}
|
|
|
|
|
2022-04-20 05:12:26 +00:00
|
|
|
dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
|
|
|
|
if (!dm_mpath_wq) {
|
|
|
|
DMERR("failed to create workqueue dm_mpath_wq");
|
|
|
|
goto bad_alloc_dm_mpath_wq;
|
|
|
|
}
|
|
|
|
|
2017-11-24 17:43:50 +00:00
|
|
|
r = dm_register_target(&multipath_target);
|
2023-03-18 13:16:33 +00:00
|
|
|
if (r < 0)
|
2017-11-24 17:43:50 +00:00
|
|
|
goto bad_register_target;
|
|
|
|
|
2015-01-11 11:45:23 +00:00
|
|
|
return 0;
|
|
|
|
|
2017-11-24 17:43:50 +00:00
|
|
|
bad_register_target:
|
2022-04-20 05:12:26 +00:00
|
|
|
destroy_workqueue(dm_mpath_wq);
|
|
|
|
bad_alloc_dm_mpath_wq:
|
2017-11-24 17:43:50 +00:00
|
|
|
destroy_workqueue(kmpath_handlerd);
|
2015-01-11 11:45:23 +00:00
|
|
|
bad_alloc_kmpath_handlerd:
|
|
|
|
destroy_workqueue(kmultipathd);
|
|
|
|
bad_alloc_kmultipathd:
|
2005-04-16 22:20:36 +00:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit dm_multipath_exit(void)
|
|
|
|
{
|
2022-04-20 05:12:26 +00:00
|
|
|
destroy_workqueue(dm_mpath_wq);
|
2008-05-01 21:50:22 +00:00
|
|
|
destroy_workqueue(kmpath_handlerd);
|
2005-05-05 23:16:07 +00:00
|
|
|
destroy_workqueue(kmultipathd);
|
|
|
|
|
2009-01-06 03:04:58 +00:00
|
|
|
dm_unregister_target(&multipath_target);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
module_init(dm_multipath_init);
|
|
|
|
module_exit(dm_multipath_exit);
|
|
|
|
|
2023-02-06 22:58:05 +00:00
|
|
|
module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
|
2020-01-13 22:41:27 +00:00
|
|
|
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
|
|
|
|
|
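/*
 * The timeout is also writable at runtime (mode 0644), typically via
 * /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 * (the exact path depends on the module name used by the build).
 */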
2005-04-16 22:20:36 +00:00
|
|
|
MODULE_DESCRIPTION(DM_NAME " multipath target");
|
2024-02-07 20:51:24 +00:00
|
|
|
MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
|
2005-04-16 22:20:36 +00:00
|
|
|
MODULE_LICENSE("GPL");
|