Commit e83068a5fa

Allow a user to specify an optional feature 'queue_mode <mode>', where <mode> may be "bio", "rq" or "mq", corresponding to bio-based, request_fn rq-based, and blk-mq rq-based respectively. If the queue_mode feature isn't specified, the default for the "multipath" target is still "rq", but if dm_mod.use_blk_mq is set to Y it defaults to "mq".

This new queue_mode feature lets each multipath device have its own queue_mode (before this feature, all multipath devices effectively had to share the same queue_mode). The commit also goes a long way toward eliminating the awkward (ab)use of DM_TYPE_*, the associated filter_md_type(), and other relatively fragile, hard-to-maintain code.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
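For illustration only (the device size, device numbers, and path-selector arguments below are placeholders, not part of the commit), a multipath table that selects the new mode explicitly might look like:

    dmsetup create mpath0 --table \
      '0 2097152 multipath 2 queue_mode mq 0 1 1 round-robin 0 1 1 8:16 1'

where "2 queue_mode mq" is the optional feature block described above; omitting it falls back to the defaults the commit message spells out.
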
/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_RQ_INTERNAL_H
#define DM_RQ_INTERNAL_H

#include <linux/bio.h>
#include <linux/kthread.h>

#include "dm-stats.h"

struct mapped_device;

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	int error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
};
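
/*
 * Illustrative sketch, not part of the original header: in blk-mq ("mq")
 * mode the per-request dm_rq_target_io can be carved out of the request's
 * PDU rather than allocated separately.  The helper name below is made up
 * for illustration and would need <linux/blk-mq.h> for blk_mq_rq_to_pdu().
 */
#if 0	/* example only */
static struct dm_rq_target_io *example_tio_from_mq_request(struct request *rq)
{
	/* the PDU was sized to hold a dm_rq_target_io, so this is the tio */
	return blk_mq_rq_to_pdu(rq);
}
#endif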

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
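
/*
 * Illustrative sketch, not part of the original header: because 'clone' is
 * the last member, the bioset's front_pad can be sized so that every bio it
 * hands out is embedded in a dm_rq_clone_bio_info, and the info can then be
 * recovered from the clone bio with container_of().  The function names are
 * made up for illustration; bioset_create(), offsetof() and container_of()
 * are the stock kernel interfaces.
 */
#if 0	/* example only */
static struct bio_set *example_create_clone_bioset(unsigned int pool_size)
{
	/* everything in front of 'clone' becomes the bio's front padding */
	return bioset_create(pool_size,
			     offsetof(struct dm_rq_clone_bio_info, clone));
}

static struct dm_rq_clone_bio_info *example_info_from_clone(struct bio *clone)
{
	return container_of(clone, struct dm_rq_clone_bio_info, clone);
}
#endif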

bool dm_use_blk_mq_default(void);
bool dm_use_blk_mq(struct mapped_device *md);

int dm_old_init_request_queue(struct mapped_device *md);
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md);
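
/*
 * Illustrative sketch, not part of the original header: a caller picks one
 * of the two init paths above depending on the chosen queue mode.  The real
 * decision is driven by the table/device type; this made-up helper collapses
 * it to the dm_use_blk_mq() query for brevity.
 */
#if 0	/* example only */
static int example_init_request_queue(struct mapped_device *md,
				      struct dm_table *t)
{
	if (dm_use_blk_mq(md))
		return dm_mq_init_request_queue(md, t);	/* "mq" */
	return dm_old_init_request_queue(md);		/* legacy "rq" */
}
#endif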

void dm_start_queue(struct request_queue *q);
void dm_stop_queue(struct request_queue *q);

unsigned dm_get_reserved_rq_based_ios(void);

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count);

#endif