linux/drivers/md/dm-stripe.c

/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15

struct stripe {
        struct dm_dev *dev;
        sector_t physical_start;

        atomic_t error_count;
};

struct stripe_c {
        uint32_t stripes;
        int stripes_shift;

        /* The size of this target / num. stripes */
        sector_t stripe_width;

        uint32_t chunk_size;
        int chunk_size_shift;

        /* Needed for handling events */
        struct dm_target *ti;

        /* Work struct used for triggering events */
        struct work_struct trigger_event;

        struct stripe stripe[0];
};

/*
 * An event is triggered whenever a drive
 * drops out of a stripe volume.
 */
static void trigger_event(struct work_struct *work)
{
        struct stripe_c *sc = container_of(work, struct stripe_c,
                                           trigger_event);

        dm_table_event(sc->ti->table);
}

static inline struct stripe_c *alloc_context(unsigned int stripes)
{
        size_t len;

        if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
                             stripes))
                return NULL;

        len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);

        return kmalloc(len, GFP_KERNEL);
}

/*
 * Parse a single <dev> <sector> pair
 */
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
                      unsigned int stripe, char **argv)
{
        unsigned long long start;
        char dummy;

        if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
                return -EINVAL;

        if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                          &sc->stripe[stripe].dev))
                return -ENXIO;

        sc->stripe[stripe].physical_start = start;

        return 0;
}

/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size> [<dev_path> <offset>]+
 */
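/*
 * Example table line (purely illustrative; the device names, start,
 * length and chunk size below are assumed, not taken from this file):
 *
 *   0 2097152 striped 2 256 /dev/sdb 0 /dev/sdc 0
 *
 * maps a 1 GiB target across two devices in 256-sector (128 KiB)
 * chunks, with each stripe starting at offset 0 on its device.
 */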
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct stripe_c *sc;
        sector_t width;
        uint32_t stripes;
        uint32_t chunk_size;
        int r;
        unsigned int i;

        if (argc < 2) {
                ti->error = "Not enough arguments";
                return -EINVAL;
        }

        if (kstrtouint(argv[0], 10, &stripes) || !stripes) {
                ti->error = "Invalid stripe count";
                return -EINVAL;
        }

        if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) {
                ti->error = "Invalid chunk_size";
                return -EINVAL;
        }

        width = ti->len;
        if (sector_div(width, chunk_size)) {
                ti->error = "Target length not divisible by chunk size";
                return -EINVAL;
        }

        if (sector_div(width, stripes)) {
                ti->error = "Target length not divisible by number of stripes";
                return -EINVAL;
        }

        /*
         * Do we have enough arguments for that many stripes ?
         */
        if (argc != (2 + 2 * stripes)) {
                ti->error = "Not enough destinations specified";
                return -EINVAL;
        }

        sc = alloc_context(stripes);
        if (!sc) {
                ti->error = "Memory allocation for striped context failed";
                return -ENOMEM;
        }

        INIT_WORK(&sc->trigger_event, trigger_event);

        /* Set pointer to dm target; used in trigger_event */
        sc->ti = ti;
        sc->stripes = stripes;
        sc->stripe_width = width;

        if (stripes & (stripes - 1))
                sc->stripes_shift = -1;
        else
                sc->stripes_shift = __ffs(stripes);

        r = dm_set_target_max_io_len(ti, chunk_size);
        if (r) {
                /* free the context allocated above before bailing out */
                kfree(sc);
                return r;
        }

        ti->num_flush_requests = stripes;
        ti->num_discard_requests = stripes;
        ti->num_write_same_requests = stripes;

        sc->chunk_size = chunk_size;
        if (chunk_size & (chunk_size - 1))
                sc->chunk_size_shift = -1;
        else
                sc->chunk_size_shift = __ffs(chunk_size);

        /*
         * Get the stripe destinations.
         */
        for (i = 0; i < stripes; i++) {
                argv += 2;

                r = get_stripe(ti, sc, i, argv);
                if (r < 0) {
                        ti->error = "Couldn't parse stripe destination";
                        while (i--)
                                dm_put_device(ti, sc->stripe[i].dev);
                        kfree(sc);
                        return r;
                }
                atomic_set(&(sc->stripe[i].error_count), 0);
        }

        ti->private = sc;

        return 0;
}

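/*
 * Note on the shift values set up in stripe_ctr() (illustrative values,
 * assumed here): a power-of-two chunk_size such as 256 sectors yields
 * chunk_size_shift = __ffs(256) = 8, so stripe_map_sector() can use
 * masks and shifts; a non-power-of-two value (e.g. 24) leaves the
 * shift at -1 and forces the slower sector_div() path.
 */
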
static void stripe_dtr(struct dm_target *ti)
{
        unsigned int i;
        struct stripe_c *sc = (struct stripe_c *) ti->private;

        for (i = 0; i < sc->stripes; i++)
                dm_put_device(ti, sc->stripe[i].dev);

        flush_work(&sc->trigger_event);
        kfree(sc);
}

static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
                              uint32_t *stripe, sector_t *result)
{
        sector_t chunk = dm_target_offset(sc->ti, sector);
        sector_t chunk_offset;

        if (sc->chunk_size_shift < 0)
                chunk_offset = sector_div(chunk, sc->chunk_size);
        else {
                chunk_offset = chunk & (sc->chunk_size - 1);
                chunk >>= sc->chunk_size_shift;
        }

        if (sc->stripes_shift < 0)
                *stripe = sector_div(chunk, sc->stripes);
        else {
                *stripe = chunk & (sc->stripes - 1);
                chunk >>= sc->stripes_shift;
        }

        if (sc->chunk_size_shift < 0)
                chunk *= sc->chunk_size;
        else
                chunk <<= sc->chunk_size_shift;

        *result = chunk + chunk_offset;
}

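/*
 * Worked example for stripe_map_sector() (values assumed, not from a
 * real table): with chunk_size = 8 sectors and 2 stripes, target-relative
 * sector 19 is chunk 2, offset 3.  Chunk 2 lands on stripe (2 & 1) = 0
 * as that stripe's local chunk (2 >> 1) = 1, so *result becomes
 * (1 << 3) + 3 = 11; the caller then adds the stripe's physical_start.
 */
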
static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
                                    uint32_t target_stripe, sector_t *result)
{
        uint32_t stripe;

        stripe_map_sector(sc, sector, &stripe, result);
        if (stripe == target_stripe)
                return;

        /* round down */
        sector = *result;
        if (sc->chunk_size_shift < 0)
                *result -= sector_div(sector, sc->chunk_size);
        else
                *result = sector & ~(sector_t)(sc->chunk_size - 1);

        if (target_stripe < stripe)
                *result += sc->chunk_size;              /* next chunk */
}

static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
                            uint32_t target_stripe)
{
        sector_t begin, end;

        stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
        stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
                                target_stripe, &end);
        if (begin < end) {
                bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
                bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
                bio->bi_size = to_bytes(end - begin);
                return DM_MAPIO_REMAPPED;
        } else {
                /* The range doesn't map to the target stripe */
                bio_endio(bio, 0);
                return DM_MAPIO_SUBMITTED;
        }
}

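/*
 * Illustrative example for stripe_map_range() (geometry assumed): with
 * chunk_size = 8 sectors and 2 stripes, a discard of target sectors
 * [0, 40) covers chunks 0-4.  The clone issued to stripe 0 is remapped
 * to [0, 24) on its device (logical chunks 0, 2 and 4) and the clone
 * for stripe 1 to [0, 16) (logical chunks 1 and 3), before each
 * stripe's physical_start is added.
 */
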
static int stripe_map(struct dm_target *ti, struct bio *bio)
{
        struct stripe_c *sc = ti->private;
        uint32_t stripe;
        unsigned target_request_nr;

        if (bio->bi_rw & REQ_FLUSH) {
                target_request_nr = dm_bio_get_target_request_nr(bio);
                BUG_ON(target_request_nr >= sc->stripes);
                bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
                return DM_MAPIO_REMAPPED;
        }
        if (unlikely(bio->bi_rw & REQ_DISCARD) ||
            unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
                target_request_nr = dm_bio_get_target_request_nr(bio);
                BUG_ON(target_request_nr >= sc->stripes);
                return stripe_map_range(sc, bio, target_request_nr);
        }

        stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);

        bio->bi_sector += sc->stripe[stripe].physical_start;
        bio->bi_bdev = sc->stripe[stripe].dev->bdev;

        return DM_MAPIO_REMAPPED;
}

/*
 * Stripe status:
 *
 * INFO
 * #stripes [stripe_name <stripe_name>] [group word count]
 * [error count 'A|D' <error count 'A|D'>]
 *
 * TABLE
 * #stripes [stripe chunk size]
 * [stripe_name physical_start <stripe_name physical_start>]
 *
 */
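/*
 * Example output (device names assumed for illustration): a healthy
 * two-way stripe with 256-sector chunks might report
 *
 *   INFO:  "2 8:16 8:32 1 AA"
 *   TABLE: "2 256 8:16 0 8:32 0"
 *
 * where 'A' marks a stripe with a zero error_count and 'D' one that
 * has recorded I/O errors.
 */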
static void stripe_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
{
        struct stripe_c *sc = (struct stripe_c *) ti->private;
        char buffer[sc->stripes + 1];
        unsigned int sz = 0;
        unsigned int i;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%d ", sc->stripes);
                for (i = 0; i < sc->stripes; i++) {
                        DMEMIT("%s ", sc->stripe[i].dev->name);
                        buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
                                'D' : 'A';
                }
                buffer[i] = '\0';
                DMEMIT("1 %s", buffer);
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%d %llu", sc->stripes,
                       (unsigned long long)sc->chunk_size);
                for (i = 0; i < sc->stripes; i++)
                        DMEMIT(" %s %llu", sc->stripe[i].dev->name,
                               (unsigned long long)sc->stripe[i].physical_start);
                break;
        }
}

static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
{
        unsigned i;
        char major_minor[16];
        struct stripe_c *sc = ti->private;

        if (!error)
                return 0; /* I/O complete */

        if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
                return error;

        if (error == -EOPNOTSUPP)
                return error;

        memset(major_minor, 0, sizeof(major_minor));
        sprintf(major_minor, "%d:%d",
                MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
                MINOR(disk_devt(bio->bi_bdev->bd_disk)));

        /*
         * Test to see which stripe drive triggered the event
         * and increment error count for all stripes on that device.
         * If the error count for a given device exceeds the threshold
         * value we will no longer trigger any further events.
         */
        for (i = 0; i < sc->stripes; i++)
                if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
                        atomic_inc(&(sc->stripe[i].error_count));
                        if (atomic_read(&(sc->stripe[i].error_count)) <
                            DM_IO_ERROR_THRESHOLD)
                                schedule_work(&sc->trigger_event);
                }

        return error;
}

static int stripe_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct stripe_c *sc = ti->private;
        int ret = 0;
        unsigned i = 0;

        do {
                ret = fn(ti, sc->stripe[i].dev,
                         sc->stripe[i].physical_start,
                         sc->stripe_width, data);
        } while (!ret && ++i < sc->stripes);

        return ret;
}

static void stripe_io_hints(struct dm_target *ti,
                            struct queue_limits *limits)
{
        struct stripe_c *sc = ti->private;
        unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;

        blk_limits_io_min(limits, chunk_size);
        blk_limits_io_opt(limits, chunk_size * sc->stripes);
}

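/*
 * Example (assumed geometry): 256-sector (128 KiB) chunks across four
 * stripes advertise an io_min of 128 KiB and an io_opt of 512 KiB,
 * i.e. one full stripe width.
 */
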
static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                        struct bio_vec *biovec, int max_size)
{
        struct stripe_c *sc = ti->private;
        sector_t bvm_sector = bvm->bi_sector;
        uint32_t stripe;
        struct request_queue *q;

        stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector);

        q = bdev_get_queue(sc->stripe[stripe].dev->bdev);
        if (!q->merge_bvec_fn)
                return max_size;

        bvm->bi_bdev = sc->stripe[stripe].dev->bdev;
        bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector;

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static struct target_type stripe_target = {
        .name   = "striped",
        .version = {1, 5, 1},
        .module = THIS_MODULE,
        .ctr    = stripe_ctr,
        .dtr    = stripe_dtr,
        .map    = stripe_map,
        .end_io = stripe_end_io,
        .status = stripe_status,
        .iterate_devices = stripe_iterate_devices,
        .io_hints = stripe_io_hints,
        .merge  = stripe_merge,
};

int __init dm_stripe_init(void)
{
        int r;

        r = dm_register_target(&stripe_target);
        if (r < 0) {
                DMWARN("target registration failed");
                return r;
        }

        return r;
}

void dm_stripe_exit(void)
{
        dm_unregister_target(&stripe_target);
}