Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md

Pull MD updates from Shaohua Li:

 - a raid5 writeback cache feature.

   The goal is to aggregate writes to make full stripe writes and reduce
   read-modify-write. It's helpful for workloads which do sequential
   writes followed by fsync, for example. This feature is experimental
   and off by default right now.

 - FAILFAST support.

   This fails IOs to broken raid disks quickly, so it can improve latency.
   It's mainly for DASD storage, but some patches help normal raid array
   too.

 - support bad blocks for raid arrays with external metadata

 - AVX2 instruction support for raid6 parity calculation

 - normalize MD info output

 - add missing blktrace

 - other bug fixes

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md: (66 commits)
  md: separate flags for superblock changes
  md: MD_RECOVERY_NEEDED is set for mddev->recovery
  md: takeover should clear unrelated bits
  md/r5cache: after recovery, increase journal seq by 10000
  md/raid5-cache: fix crc in rewrite_data_only_stripes()
  md/raid5-cache: no recovery is required when create super-block
  md: fix refcount problem on mddev when stopping array.
  md/r5cache: do r5c_update_log_state after log recovery
  md/raid5-cache: adjust the write position of the empty block if no data blocks
  md/r5cache: run_no_space_stripes() when R5C_LOG_CRITICAL == 0
  md/raid5: limit request size according to implementation limits
  md/raid5-cache: do not need to set STRIPE_PREREAD_ACTIVE repeatedly
  md/raid5-cache: remove the unnecessary next_cp_seq field from the r5l_log
  md/raid5-cache: release the stripe_head at the appropriate location
  md/raid5-cache: use ring add to prevent overflow
  md/raid5-cache: remove unnecessary function parameters
  raid5-cache: don't set STRIPE_R5C_PARTIAL_STRIPE flag while load stripe into cache
  raid5-cache: add another check conditon before replaying one stripe
  md/r5cache: enable IRQs on error path
  md/r5cache: handle alloc_page failure
  ...
This commit is contained in:
Linus Torvalds 2016-12-14 10:58:17 -08:00
commit 2a4c32edd3
16 changed files with 3438 additions and 1271 deletions

View File

@ -27,6 +27,7 @@
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/buffer_head.h> #include <linux/buffer_head.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h" #include "md.h"
#include "bitmap.h" #include "bitmap.h"
@ -208,11 +209,13 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{ {
struct md_rdev *rdev = NULL; struct md_rdev *rdev;
struct block_device *bdev; struct block_device *bdev;
struct mddev *mddev = bitmap->mddev; struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage; struct bitmap_storage *store = &bitmap->storage;
restart:
rdev = NULL;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE; int size = PAGE_SIZE;
loff_t offset = mddev->bitmap_info.offset; loff_t offset = mddev->bitmap_info.offset;
@ -268,8 +271,8 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
page); page);
} }
if (wait) if (wait && md_super_wait(mddev) < 0)
md_super_wait(mddev); goto restart;
return 0; return 0;
bad_alignment: bad_alignment:
@ -405,10 +408,10 @@ static int read_page(struct file *file, unsigned long index,
ret = -EIO; ret = -EIO;
out: out:
if (ret) if (ret)
printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n", pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
(int)PAGE_SIZE, (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT, (unsigned long long)index << PAGE_SHIFT,
ret); ret);
return ret; return ret;
} }
@ -416,6 +419,28 @@ out:
* bitmap file superblock operations * bitmap file superblock operations
*/ */
/*
* bitmap_wait_writes() should be called before writing any bitmap
* blocks, to ensure previous writes, particularly from
* bitmap_daemon_work(), have completed.
*/
static void bitmap_wait_writes(struct bitmap *bitmap)
{
if (bitmap->storage.file)
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes)==0);
else
/* Note that we ignore the return value. The writes
* might have failed, but that would just mean that
* some bits which should be cleared haven't been,
* which is safe. The relevant bitmap blocks will
* probably get written again, but there is no great
* loss if they aren't.
*/
md_super_wait(bitmap->mddev);
}
/* update the event counter and sync the superblock to disk */ /* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap) void bitmap_update_sb(struct bitmap *bitmap)
{ {
@ -455,24 +480,24 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page) if (!bitmap || !bitmap->storage.sb_page)
return; return;
sb = kmap_atomic(bitmap->storage.sb_page); sb = kmap_atomic(bitmap->storage.sb_page);
printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); pr_debug(" version: %d\n", le32_to_cpu(sb->version));
printk(KERN_DEBUG " uuid: %08x.%08x.%08x.%08x\n", pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
*(__u32 *)(sb->uuid+0), *(__u32 *)(sb->uuid+0),
*(__u32 *)(sb->uuid+4), *(__u32 *)(sb->uuid+4),
*(__u32 *)(sb->uuid+8), *(__u32 *)(sb->uuid+8),
*(__u32 *)(sb->uuid+12)); *(__u32 *)(sb->uuid+12));
printk(KERN_DEBUG " events: %llu\n", pr_debug(" events: %llu\n",
(unsigned long long) le64_to_cpu(sb->events)); (unsigned long long) le64_to_cpu(sb->events));
printk(KERN_DEBUG "events cleared: %llu\n", pr_debug("events cleared: %llu\n",
(unsigned long long) le64_to_cpu(sb->events_cleared)); (unsigned long long) le64_to_cpu(sb->events_cleared));
printk(KERN_DEBUG " state: %08x\n", le32_to_cpu(sb->state)); pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
printk(KERN_DEBUG " chunksize: %d B\n", le32_to_cpu(sb->chunksize)); pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
printk(KERN_DEBUG " daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
printk(KERN_DEBUG " sync size: %llu KB\n", pr_debug(" sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2); (unsigned long long)le64_to_cpu(sb->sync_size)/2);
printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
kunmap_atomic(sb); kunmap_atomic(sb);
} }
@ -506,14 +531,14 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
BUG_ON(!chunksize); BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) { if (!is_power_of_2(chunksize)) {
kunmap_atomic(sb); kunmap_atomic(sb);
printk(KERN_ERR "bitmap chunksize not a power of 2\n"); pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL; return -EINVAL;
} }
sb->chunksize = cpu_to_le32(chunksize); sb->chunksize = cpu_to_le32(chunksize);
daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep; daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) { if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n"); pr_debug("Choosing daemon_sleep default (5 sec)\n");
daemon_sleep = 5 * HZ; daemon_sleep = 5 * HZ;
} }
sb->daemon_sleep = cpu_to_le32(daemon_sleep); sb->daemon_sleep = cpu_to_le32(daemon_sleep);
@ -584,7 +609,7 @@ re_read:
/* to 4k blocks */ /* to 4k blocks */
bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
bitmap->cluster_slot, offset); bitmap->cluster_slot, offset);
} }
@ -634,7 +659,7 @@ re_read:
else if (write_behind > COUNTER_MAX) else if (write_behind > COUNTER_MAX)
reason = "write-behind limit out of range (0 - 16383)"; reason = "write-behind limit out of range (0 - 16383)";
if (reason) { if (reason) {
printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n", pr_warn("%s: invalid bitmap file superblock: %s\n",
bmname(bitmap), reason); bmname(bitmap), reason);
goto out; goto out;
} }
@ -648,18 +673,15 @@ re_read:
* bitmap's UUID and event counter to the mddev's * bitmap's UUID and event counter to the mddev's
*/ */
if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) { if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
printk(KERN_INFO pr_warn("%s: bitmap superblock UUID mismatch\n",
"%s: bitmap superblock UUID mismatch\n", bmname(bitmap));
bmname(bitmap));
goto out; goto out;
} }
events = le64_to_cpu(sb->events); events = le64_to_cpu(sb->events);
if (!nodes && (events < bitmap->mddev->events)) { if (!nodes && (events < bitmap->mddev->events)) {
printk(KERN_INFO pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
"%s: bitmap file is out of date (%llu < %llu) " bmname(bitmap), events,
"-- forcing full recovery\n", (unsigned long long) bitmap->mddev->events);
bmname(bitmap), events,
(unsigned long long) bitmap->mddev->events);
set_bit(BITMAP_STALE, &bitmap->flags); set_bit(BITMAP_STALE, &bitmap->flags);
} }
} }
@ -679,8 +701,8 @@ out:
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
err = md_setup_cluster(bitmap->mddev, nodes); err = md_setup_cluster(bitmap->mddev, nodes);
if (err) { if (err) {
pr_err("%s: Could not setup cluster service (%d)\n", pr_warn("%s: Could not setup cluster service (%d)\n",
bmname(bitmap), err); bmname(bitmap), err);
goto out_no_sb; goto out_no_sb;
} }
bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev); bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
@ -847,15 +869,13 @@ static void bitmap_file_kick(struct bitmap *bitmap)
ptr = file_path(bitmap->storage.file, ptr = file_path(bitmap->storage.file,
path, PAGE_SIZE); path, PAGE_SIZE);
printk(KERN_ALERT pr_warn("%s: kicking failed bitmap file %s from array!\n",
"%s: kicking failed bitmap file %s from array!\n", bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
kfree(path); kfree(path);
} else } else
printk(KERN_ALERT pr_warn("%s: disabling internal bitmap due to errors\n",
"%s: disabling internal bitmap due to errors\n", bmname(bitmap));
bmname(bitmap));
} }
} }
@ -983,6 +1003,7 @@ void bitmap_unplug(struct bitmap *bitmap)
{ {
unsigned long i; unsigned long i;
int dirty, need_write; int dirty, need_write;
int writing = 0;
if (!bitmap || !bitmap->storage.filemap || if (!bitmap || !bitmap->storage.filemap ||
test_bit(BITMAP_STALE, &bitmap->flags)) test_bit(BITMAP_STALE, &bitmap->flags))
@ -997,15 +1018,19 @@ void bitmap_unplug(struct bitmap *bitmap)
need_write = test_and_clear_page_attr(bitmap, i, need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE); BITMAP_PAGE_NEEDWRITE);
if (dirty || need_write) { if (dirty || need_write) {
if (!writing) {
bitmap_wait_writes(bitmap);
if (bitmap->mddev->queue)
blk_add_trace_msg(bitmap->mddev->queue,
"md bitmap_unplug");
}
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
write_page(bitmap, bitmap->storage.filemap[i], 0); write_page(bitmap, bitmap->storage.filemap[i], 0);
writing = 1;
} }
} }
if (bitmap->storage.file) if (writing)
wait_event(bitmap->write_wait, bitmap_wait_writes(bitmap);
atomic_read(&bitmap->pending_writes)==0);
else
md_super_wait(bitmap->mddev);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap); bitmap_file_kick(bitmap);
@ -1056,14 +1081,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
outofdate = test_bit(BITMAP_STALE, &bitmap->flags); outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
if (outofdate) if (outofdate)
printk(KERN_INFO "%s: bitmap file is out of date, doing full " pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
"recovery\n", bmname(bitmap));
if (file && i_size_read(file->f_mapping->host) < store->bytes) { if (file && i_size_read(file->f_mapping->host) < store->bytes) {
printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n", pr_warn("%s: bitmap file too short %lu < %lu\n",
bmname(bitmap), bmname(bitmap),
(unsigned long) i_size_read(file->f_mapping->host), (unsigned long) i_size_read(file->f_mapping->host),
store->bytes); store->bytes);
goto err; goto err;
} }
@ -1137,16 +1161,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
offset = 0; offset = 0;
} }
printk(KERN_INFO "%s: bitmap initialized from disk: " pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
"read %lu pages, set %lu of %lu bits\n", bmname(bitmap), store->file_pages,
bmname(bitmap), store->file_pages, bit_cnt, chunks);
bit_cnt, chunks);
return 0; return 0;
err: err:
printk(KERN_INFO "%s: bitmap initialisation failed: %d\n", pr_warn("%s: bitmap initialisation failed: %d\n",
bmname(bitmap), ret); bmname(bitmap), ret);
return ret; return ret;
} }
@ -1225,6 +1248,10 @@ void bitmap_daemon_work(struct mddev *mddev)
} }
bitmap->allclean = 1; bitmap->allclean = 1;
if (bitmap->mddev->queue)
blk_add_trace_msg(bitmap->mddev->queue,
"md bitmap_daemon_work");
/* Any file-page which is PENDING now needs to be written. /* Any file-page which is PENDING now needs to be written.
* So set NEEDWRITE now, then after we make any last-minute changes * So set NEEDWRITE now, then after we make any last-minute changes
* we will write it. * we will write it.
@ -1289,6 +1316,7 @@ void bitmap_daemon_work(struct mddev *mddev)
} }
spin_unlock_irq(&counts->lock); spin_unlock_irq(&counts->lock);
bitmap_wait_writes(bitmap);
/* Now start writeout on any page in NEEDWRITE that isn't DIRTY. /* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
* DIRTY pages need to be written by bitmap_unplug so it can wait * DIRTY pages need to be written by bitmap_unplug so it can wait
* for them. * for them.
@ -1595,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
atomic_read(&bitmap->mddev->recovery_active) == 0); atomic_read(&bitmap->mddev->recovery_active) == 0);
bitmap->mddev->curr_resync_completed = sector; bitmap->mddev->curr_resync_completed = sector;
set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0; s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) { while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@ -1825,8 +1853,8 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
if (err) if (err)
goto error; goto error;
printk(KERN_INFO "created bitmap (%lu pages) for device %s\n", pr_debug("created bitmap (%lu pages) for device %s\n",
bitmap->counts.pages, bmname(bitmap)); bitmap->counts.pages, bmname(bitmap));
err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0; err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
if (err) if (err)
@ -2029,8 +2057,10 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
!bitmap->mddev->bitmap_info.external, !bitmap->mddev->bitmap_info.external,
mddev_is_clustered(bitmap->mddev) mddev_is_clustered(bitmap->mddev)
? bitmap->cluster_slot : 0); ? bitmap->cluster_slot : 0);
if (ret) if (ret) {
bitmap_file_unmap(&store);
goto err; goto err;
}
pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO); pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
@ -2089,7 +2119,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift + bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
BITMAP_BLOCK_SHIFT); BITMAP_BLOCK_SHIFT);
blocks = old_counts.chunks << old_counts.chunkshift; blocks = old_counts.chunks << old_counts.chunkshift;
pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n"); pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
break; break;
} else } else
bitmap->counts.bp[page].count += 1; bitmap->counts.bp[page].count += 1;
@ -2266,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
/* Ensure new bitmap info is stored in /* Ensure new bitmap info is stored in
* metadata promptly. * metadata promptly.
*/ */
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
} }
rv = 0; rv = 0;

View File

@ -2011,7 +2011,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190); sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
/* Force writing of superblocks to disk */ /* Force writing of superblocks to disk */
set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
/* Any superblock is better than none, choose that if given */ /* Any superblock is better than none, choose that if given */
return refdev ? 0 : 1; return refdev ? 0 : 1;
@ -3497,7 +3497,7 @@ static void rs_update_sbs(struct raid_set *rs)
struct mddev *mddev = &rs->md; struct mddev *mddev = &rs->md;
int ro = mddev->ro; int ro = mddev->ro;
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev->ro = 0; mddev->ro = 0;
md_update_sb(mddev, 1); md_update_sb(mddev, 1);
mddev->ro = ro; mddev->ro = ro;

View File

@ -21,6 +21,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h" #include "md.h"
#include "linear.h" #include "linear.h"
@ -101,8 +102,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
sector_t sectors; sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) { if (j < 0 || j >= raid_disks || disk->rdev) {
printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n", pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
mdname(mddev)); mdname(mddev));
goto out; goto out;
} }
@ -123,8 +124,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
discard_supported = true; discard_supported = true;
} }
if (cnt != raid_disks) { if (cnt != raid_disks) {
printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n", pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
mdname(mddev)); mdname(mddev));
goto out; goto out;
} }
@ -227,22 +228,22 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
} }
do { do {
tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); sector_t bio_sector = bio->bi_iter.bi_sector;
tmp_dev = which_dev(mddev, bio_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
end_sector = tmp_dev->end_sector; end_sector = tmp_dev->end_sector;
data_offset = tmp_dev->rdev->data_offset; data_offset = tmp_dev->rdev->data_offset;
bio->bi_bdev = tmp_dev->rdev->bdev; bio->bi_bdev = tmp_dev->rdev->bdev;
if (unlikely(bio->bi_iter.bi_sector >= end_sector || if (unlikely(bio_sector >= end_sector ||
bio->bi_iter.bi_sector < start_sector)) bio_sector < start_sector))
goto out_of_bounds; goto out_of_bounds;
if (unlikely(bio_end_sector(bio) > end_sector)) { if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to /* This bio crosses a device boundary, so we have to
* split it. * split it.
*/ */
split = bio_split(bio, end_sector - split = bio_split(bio, end_sector - bio_sector,
bio->bi_iter.bi_sector,
GFP_NOIO, fs_bio_set); GFP_NOIO, fs_bio_set);
bio_chain(split, bio); bio_chain(split, bio);
} else { } else {
@ -256,15 +257,18 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */ /* Just ignore it */
bio_endio(split); bio_endio(split);
} else } else {
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
split, disk_devt(mddev->gendisk),
bio_sector);
generic_make_request(split); generic_make_request(split);
}
} while (split != bio); } while (split != bio);
return; return;
out_of_bounds: out_of_bounds:
printk(KERN_ERR pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
"md/linear:%s: make_request: Sector %llu out of bounds on "
"dev %s: %llu sectors, offset %llu\n",
mdname(mddev), mdname(mddev),
(unsigned long long)bio->bi_iter.bi_sector, (unsigned long long)bio->bi_iter.bi_sector,
bdevname(tmp_dev->rdev->bdev, b), bdevname(tmp_dev->rdev->bdev, b),
@ -275,7 +279,6 @@ out_of_bounds:
static void linear_status (struct seq_file *seq, struct mddev *mddev) static void linear_status (struct seq_file *seq, struct mddev *mddev)
{ {
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
} }

File diff suppressed because it is too large Load Diff

View File

@ -29,6 +29,16 @@
#define MaxSector (~(sector_t)0) #define MaxSector (~(sector_t)0)
/*
* These flags should really be called "NO_RETRY" rather than
* "FAILFAST" because they don't make any promise about time lapse,
* only about the number of retries, which will be zero.
* REQ_FAILFAST_DRIVER is not included because
* Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
* seems to suggest that the errors it avoids retrying should usually
* be retried.
*/
#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
/* /*
* MD's 'extended' device * MD's 'extended' device
*/ */
@ -168,6 +178,19 @@ enum flag_bits {
* so it is safe to remove without * so it is safe to remove without
* another synchronize_rcu() call. * another synchronize_rcu() call.
*/ */
ExternalBbl, /* External metadata provides bad
* block management for a disk
*/
FailFast, /* Minimal retries should be attempted on
* this device, so use REQ_FAILFAST_DEV.
* Also don't try to repair failed reads.
* It is expects that no bad block log
* is present.
*/
LastDev, /* Seems to be the last working dev as
* it didn't fail, so don't use FailFast
* any more for metadata
*/
}; };
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@ -189,6 +212,31 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new); int is_new);
struct md_cluster_info; struct md_cluster_info;
enum mddev_flags {
MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
MD_CLOSING, /* If set, we are closing the array, do not open
* it then */
MD_JOURNAL_CLEAN, /* A raid with journal is already clean */
MD_HAS_JOURNAL, /* The raid array has journal feature set */
MD_RELOAD_SB, /* Reload the superblock because another node
* updated it.
*/
MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
* already took resync lock, need to
* release the lock */
MD_FAILFAST_SUPPORTED, /* Using MD_FAILFAST on metadata writes is
* supported as calls to md_error() will
* never cause the array to become failed.
*/
};
enum mddev_sb_flags {
MD_SB_CHANGE_DEVS, /* Some device status has changed */
MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */
MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
struct mddev { struct mddev {
void *private; void *private;
struct md_personality *pers; struct md_personality *pers;
@ -196,21 +244,7 @@ struct mddev {
int md_minor; int md_minor;
struct list_head disks; struct list_head disks;
unsigned long flags; unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */ unsigned long sb_flags;
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
#define MD_CLOSING 4 /* If set, we are closing the array, do not open
* it then */
#define MD_JOURNAL_CLEAN 5 /* A raid with journal is already clean */
#define MD_HAS_JOURNAL 6 /* The raid array has journal feature set */
#define MD_RELOAD_SB 7 /* Reload the superblock because another node
* updated it.
*/
#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node
* already took resync lock, need to
* release the lock */
int suspended; int suspended;
atomic_t active_io; atomic_t active_io;
@ -304,31 +338,6 @@ struct mddev {
int parallel_resync; int parallel_resync;
int ok_start_degraded; int ok_start_degraded;
/* recovery/resync flags
* NEEDED: we might need to start a resync/recover
* RUNNING: a thread is running, or about to be started
* SYNC: actually doing a resync, not a recovery
* RECOVER: doing recovery, or need to try it.
* INTR: resync needs to be aborted for some reason
* DONE: thread is done and is waiting to be reaped
* REQUEST: user-space has requested a sync (used with SYNC)
* CHECK: user-space request for check-only, no repair
* RESHAPE: A reshape is happening
* ERROR: sync-action interrupted because io-error
*
* If neither SYNC or RESHAPE are set, then it is a recovery.
*/
#define MD_RECOVERY_RUNNING 0
#define MD_RECOVERY_SYNC 1
#define MD_RECOVERY_RECOVER 2
#define MD_RECOVERY_INTR 3
#define MD_RECOVERY_DONE 4
#define MD_RECOVERY_NEEDED 5
#define MD_RECOVERY_REQUESTED 6
#define MD_RECOVERY_CHECK 7
#define MD_RECOVERY_RESHAPE 8
#define MD_RECOVERY_FROZEN 9
#define MD_RECOVERY_ERROR 10
unsigned long recovery; unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular /* If a RAID personality determines that recovery (of a particular
@ -442,6 +451,23 @@ struct mddev {
unsigned int good_device_nr; /* good device num within cluster raid */ unsigned int good_device_nr; /* good device num within cluster raid */
}; };
enum recovery_flags {
/*
* If neither SYNC or RESHAPE are set, then it is a recovery.
*/
MD_RECOVERY_RUNNING, /* a thread is running, or about to be started */
MD_RECOVERY_SYNC, /* actually doing a resync, not a recovery */
MD_RECOVERY_RECOVER, /* doing recovery, or need to try it. */
MD_RECOVERY_INTR, /* resync needs to be aborted for some reason */
MD_RECOVERY_DONE, /* thread is done and is waiting to be reaped */
MD_RECOVERY_NEEDED, /* we might need to start a resync/recover */
MD_RECOVERY_REQUESTED, /* user-space has requested a sync (used with SYNC) */
MD_RECOVERY_CHECK, /* user-space request for check-only, no repair */
MD_RECOVERY_RESHAPE, /* A reshape is happening */
MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
};
static inline int __must_check mddev_lock(struct mddev *mddev) static inline int __must_check mddev_lock(struct mddev *mddev)
{ {
return mutex_lock_interruptible(&mddev->reconfig_mutex); return mutex_lock_interruptible(&mddev->reconfig_mutex);
@ -623,7 +649,7 @@ extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio); extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page); sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev); extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags, struct page *page, int op, int op_flags,
bool metadata_op); bool metadata_op);

View File

@ -52,7 +52,7 @@ static int multipath_map (struct mpconf *conf)
} }
rcu_read_unlock(); rcu_read_unlock();
printk(KERN_ERR "multipath_map(): no more operational IO paths?\n"); pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
return (-1); return (-1);
} }
@ -97,9 +97,9 @@ static void multipath_end_request(struct bio *bio)
*/ */
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev); md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", pr_info("multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b), bdevname(rdev->bdev,b),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh); multipath_reschedule_retry(mp_bh);
} else } else
multipath_end_bh_io(mp_bh, bio->bi_error); multipath_end_bh_io(mp_bh, bio->bi_error);
@ -194,8 +194,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
* first check if this is a queued request for a device * first check if this is a queued request for a device
* which has just failed. * which has just failed.
*/ */
printk(KERN_ALERT pr_warn("multipath: only one IO path left and IO error.\n");
"multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */ /* leave it active... it's all we have */
return; return;
} }
@ -209,11 +208,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
} }
set_bit(Faulty, &rdev->flags); set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
printk(KERN_ALERT "multipath: IO failure on %s," pr_err("multipath: IO failure on %s, disabling IO path.\n"
" disabling IO path.\n" "multipath: Operation continuing on %d IO paths.\n",
"multipath: Operation continuing"
" on %d IO paths.\n",
bdevname(rdev->bdev, b), bdevname(rdev->bdev, b),
conf->raid_disks - mddev->degraded); conf->raid_disks - mddev->degraded);
} }
@ -223,21 +220,21 @@ static void print_multipath_conf (struct mpconf *conf)
int i; int i;
struct multipath_info *tmp; struct multipath_info *tmp;
printk("MULTIPATH conf printout:\n"); pr_debug("MULTIPATH conf printout:\n");
if (!conf) { if (!conf) {
printk("(conf==NULL)\n"); pr_debug("(conf==NULL)\n");
return; return;
} }
printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks); conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) { for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
tmp = conf->multipaths + i; tmp = conf->multipaths + i;
if (tmp->rdev) if (tmp->rdev)
printk(" disk%d, o:%d, dev:%s\n", pr_debug(" disk%d, o:%d, dev:%s\n",
i,!test_bit(Faulty, &tmp->rdev->flags), i,!test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b)); bdevname(tmp->rdev->bdev,b));
} }
} }
@ -292,8 +289,7 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev == p->rdev) { if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) || if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) { atomic_read(&rdev->nr_pending)) {
printk(KERN_ERR "hot-remove-disk, slot %d is identified" pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
" but is still operational!\n", number);
err = -EBUSY; err = -EBUSY;
goto abort; goto abort;
} }
@ -346,16 +342,14 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) { if ((mp_bh->path = multipath_map (conf))<0) {
printk(KERN_ALERT "multipath: %s: unrecoverable IO read" pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
" error for block %llu\n", bdevname(bio->bi_bdev,b),
bdevname(bio->bi_bdev,b), (unsigned long long)bio->bi_iter.bi_sector);
(unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, -EIO); multipath_end_bh_io(mp_bh, -EIO);
} else { } else {
printk(KERN_ERR "multipath: %s: redirecting sector %llu" pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
" to another IO path\n", bdevname(bio->bi_bdev,b),
bdevname(bio->bi_bdev,b), (unsigned long long)bio->bi_iter.bi_sector);
(unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio); *bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector += bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset; conf->multipaths[mp_bh->path].rdev->data_offset;
@ -389,8 +383,8 @@ static int multipath_run (struct mddev *mddev)
return -EINVAL; return -EINVAL;
if (mddev->level != LEVEL_MULTIPATH) { if (mddev->level != LEVEL_MULTIPATH) {
printk("multipath: %s: raid level not set to multipath IO (%d)\n", pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
mdname(mddev), mddev->level); mdname(mddev), mddev->level);
goto out; goto out;
} }
/* /*
@ -401,21 +395,13 @@ static int multipath_run (struct mddev *mddev)
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL); conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
mddev->private = conf; mddev->private = conf;
if (!conf) { if (!conf)
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out; goto out;
}
conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks, conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL); GFP_KERNEL);
if (!conf->multipaths) { if (!conf->multipaths)
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf; goto out_free_conf;
}
working_disks = 0; working_disks = 0;
rdev_for_each(rdev, mddev) { rdev_for_each(rdev, mddev) {
@ -439,7 +425,7 @@ static int multipath_run (struct mddev *mddev)
INIT_LIST_HEAD(&conf->retry_list); INIT_LIST_HEAD(&conf->retry_list);
if (!working_disks) { if (!working_disks) {
printk(KERN_ERR "multipath: no operational IO paths for %s\n", pr_warn("multipath: no operational IO paths for %s\n",
mdname(mddev)); mdname(mddev));
goto out_free_conf; goto out_free_conf;
} }
@ -447,27 +433,17 @@ static int multipath_run (struct mddev *mddev)
conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS, conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh)); sizeof(struct multipath_bh));
if (conf->pool == NULL) { if (conf->pool == NULL)
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf; goto out_free_conf;
}
{ mddev->thread = md_register_thread(multipathd, mddev,
mddev->thread = md_register_thread(multipathd, mddev, "multipath");
"multipath"); if (!mddev->thread)
if (!mddev->thread) { goto out_free_conf;
printk(KERN_ERR "multipath: couldn't allocate thread"
" for %s\n", mdname(mddev));
goto out_free_conf;
}
}
printk(KERN_INFO pr_info("multipath: array %s active with %d out of %d IO paths\n",
"multipath: array %s active with %d out of %d IO paths\n",
mdname(mddev), conf->raid_disks - mddev->degraded, mdname(mddev), conf->raid_disks - mddev->degraded,
mddev->raid_disks); mddev->raid_disks);
/* /*
* Ok, everything is just fine now * Ok, everything is just fine now
*/ */

View File

@ -21,6 +21,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h" #include "md.h"
#include "raid0.h" #include "raid0.h"
#include "raid5.h" #include "raid5.h"
@ -51,20 +52,21 @@ static void dump_zones(struct mddev *mddev)
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
struct r0conf *conf = mddev->private; struct r0conf *conf = mddev->private;
int raid_disks = conf->strip_zone[0].nb_dev; int raid_disks = conf->strip_zone[0].nb_dev;
printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n", pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
mdname(mddev), mdname(mddev),
conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s"); conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
for (j = 0; j < conf->nr_strip_zones; j++) { for (j = 0; j < conf->nr_strip_zones; j++) {
printk(KERN_INFO "md: zone%d=[", j); char line[200];
int len = 0;
for (k = 0; k < conf->strip_zone[j].nb_dev; k++) for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
printk(KERN_CONT "%s%s", k?"/":"", len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
bdevname(conf->devlist[j*raid_disks bdevname(conf->devlist[j*raid_disks
+ k]->bdev, b)); + k]->bdev, b));
printk(KERN_CONT "]\n"); pr_debug("md: zone%d=[%s]\n", j, line);
zone_size = conf->strip_zone[j].zone_end - zone_start; zone_size = conf->strip_zone[j].zone_end - zone_start;
printk(KERN_INFO " zone-offset=%10lluKB, " pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
"device-offset=%10lluKB, size=%10lluKB\n",
(unsigned long long)zone_start>>1, (unsigned long long)zone_start>>1,
(unsigned long long)conf->strip_zone[j].dev_start>>1, (unsigned long long)conf->strip_zone[j].dev_start>>1,
(unsigned long long)zone_size>>1); (unsigned long long)zone_size>>1);
@ -142,9 +144,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
* chunk size is a multiple of that sector size * chunk size is a multiple of that sector size
*/ */
if ((mddev->chunk_sectors << 9) % blksize) { if ((mddev->chunk_sectors << 9) % blksize) {
printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n", pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
mdname(mddev), mdname(mddev),
mddev->chunk_sectors << 9, blksize); mddev->chunk_sectors << 9, blksize);
err = -EINVAL; err = -EINVAL;
goto abort; goto abort;
} }
@ -186,19 +188,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
} }
if (j < 0) { if (j < 0) {
printk(KERN_ERR pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
"md/raid0:%s: remove inactive devices before converting to RAID0\n", mdname(mddev));
mdname(mddev));
goto abort; goto abort;
} }
if (j >= mddev->raid_disks) { if (j >= mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: bad disk number %d - " pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
"aborting!\n", mdname(mddev), j); mdname(mddev), j);
goto abort; goto abort;
} }
if (dev[j]) { if (dev[j]) {
printk(KERN_ERR "md/raid0:%s: multiple devices for %d - " pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
"aborting!\n", mdname(mddev), j); mdname(mddev), j);
goto abort; goto abort;
} }
dev[j] = rdev1; dev[j] = rdev1;
@ -208,8 +209,8 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
cnt++; cnt++;
} }
if (cnt != mddev->raid_disks) { if (cnt != mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
"aborting!\n", mdname(mddev), cnt, mddev->raid_disks); mdname(mddev), cnt, mddev->raid_disks);
goto abort; goto abort;
} }
zone->nb_dev = cnt; zone->nb_dev = cnt;
@ -357,8 +358,7 @@ static int raid0_run(struct mddev *mddev)
int ret; int ret;
if (mddev->chunk_sectors == 0) { if (mddev->chunk_sectors == 0) {
printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n", pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
mdname(mddev));
return -EINVAL; return -EINVAL;
} }
if (md_check_no_bitmap(mddev)) if (md_check_no_bitmap(mddev))
@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
/* calculate array device size */ /* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0)); md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n", pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
mdname(mddev), mdname(mddev),
(unsigned long long)mddev->array_sectors); (unsigned long long)mddev->array_sectors);
if (mddev->queue) { if (mddev->queue) {
/* calculate the max read-ahead size. /* calculate the max read-ahead size.
@ -464,7 +464,8 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
} }
do { do {
sector_t sector = bio->bi_iter.bi_sector; sector_t bio_sector = bio->bi_iter.bi_sector;
sector_t sector = bio_sector;
unsigned chunk_sects = mddev->chunk_sectors; unsigned chunk_sects = mddev->chunk_sectors;
unsigned sectors = chunk_sects - unsigned sectors = chunk_sects -
@ -473,7 +474,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
: sector_div(sector, chunk_sects)); : sector_div(sector, chunk_sects));
/* Restore due to sector_div */ /* Restore due to sector_div */
sector = bio->bi_iter.bi_sector; sector = bio_sector;
if (sectors < bio_sectors(bio)) { if (sectors < bio_sectors(bio)) {
split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
@ -492,8 +493,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */ /* Just ignore it */
bio_endio(split); bio_endio(split);
} else } else {
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
split, disk_devt(mddev->gendisk),
bio_sector);
generic_make_request(split); generic_make_request(split);
}
} while (split != bio); } while (split != bio);
} }
@ -509,17 +515,17 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
struct r0conf *priv_conf; struct r0conf *priv_conf;
if (mddev->degraded != 1) { if (mddev->degraded != 1) {
printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n", pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
mdname(mddev), mdname(mddev),
mddev->degraded); mddev->degraded);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
rdev_for_each(rdev, mddev) { rdev_for_each(rdev, mddev) {
/* check slot number for a disk */ /* check slot number for a disk */
if (rdev->raid_disk == mddev->raid_disks-1) { if (rdev->raid_disk == mddev->raid_disks-1) {
printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n", pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
mdname(mddev)); mdname(mddev));
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
rdev->sectors = mddev->dev_sectors; rdev->sectors = mddev->dev_sectors;
@ -533,8 +539,11 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->delta_disks = -1; mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */ /* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector; mddev->recovery_cp = MaxSector;
clear_bit(MD_HAS_JOURNAL, &mddev->flags);
clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
create_strip_zones(mddev, &priv_conf); create_strip_zones(mddev, &priv_conf);
return priv_conf; return priv_conf;
} }
@ -549,19 +558,19 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
* - all mirrors must be already degraded * - all mirrors must be already degraded
*/ */
if (mddev->layout != ((1 << 8) + 2)) { if (mddev->layout != ((1 << 8) + 2)) {
printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n", pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
mdname(mddev), mdname(mddev),
mddev->layout); mddev->layout);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
if (mddev->raid_disks & 1) { if (mddev->raid_disks & 1) {
printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n", pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
mdname(mddev)); mdname(mddev));
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
if (mddev->degraded != (mddev->raid_disks>>1)) { if (mddev->degraded != (mddev->raid_disks>>1)) {
printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n", pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
mdname(mddev)); mdname(mddev));
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
@ -574,6 +583,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->degraded = 0; mddev->degraded = 0;
/* make sure it will be not marked as dirty */ /* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector; mddev->recovery_cp = MaxSector;
clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf); create_strip_zones(mddev, &priv_conf);
return priv_conf; return priv_conf;
@ -588,7 +598,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
* - (N - 1) mirror drives must be already faulty * - (N - 1) mirror drives must be already faulty
*/ */
if ((mddev->raid_disks - 1) != mddev->degraded) { if ((mddev->raid_disks - 1) != mddev->degraded) {
printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n", pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
mdname(mddev)); mdname(mddev));
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
@ -616,6 +626,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->raid_disks = 1; mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */ /* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector; mddev->recovery_cp = MaxSector;
clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
create_strip_zones(mddev, &priv_conf); create_strip_zones(mddev, &priv_conf);
return priv_conf; return priv_conf;
@ -631,8 +642,8 @@ static void *raid0_takeover(struct mddev *mddev)
*/ */
if (mddev->bitmap) { if (mddev->bitmap) {
printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n", pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
mdname(mddev)); mdname(mddev));
return ERR_PTR(-EBUSY); return ERR_PTR(-EBUSY);
} }
if (mddev->level == 4) if (mddev->level == 4)
@ -642,8 +653,8 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->layout == ALGORITHM_PARITY_N) if (mddev->layout == ALGORITHM_PARITY_N)
return raid0_takeover_raid45(mddev); return raid0_takeover_raid45(mddev);
printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n", pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
mdname(mddev), ALGORITHM_PARITY_N); mdname(mddev), ALGORITHM_PARITY_N);
} }
if (mddev->level == 10) if (mddev->level == 10)
@ -652,7 +663,7 @@ static void *raid0_takeover(struct mddev *mddev)
if (mddev->level == 1) if (mddev->level == 1)
return raid0_takeover_raid1(mddev); return raid0_takeover_raid1(mddev);
printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n", pr_warn("Takeover from raid%i to raid0 not supported\n",
mddev->level); mddev->level);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);

View File

@ -37,6 +37,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/ratelimit.h> #include <linux/ratelimit.h>
#include <trace/events/block.h>
#include "md.h" #include "md.h"
#include "raid1.h" #include "raid1.h"
#include "bitmap.h" #include "bitmap.h"
@ -70,6 +71,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
sector_t bi_sector); sector_t bi_sector);
static void lower_barrier(struct r1conf *conf); static void lower_barrier(struct r1conf *conf);
#define raid1_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{ {
struct pool_info *pi = data; struct pool_info *pi = data;
@ -325,6 +329,11 @@ static void raid1_end_read_request(struct bio *bio)
if (uptodate) if (uptodate)
set_bit(R1BIO_Uptodate, &r1_bio->state); set_bit(R1BIO_Uptodate, &r1_bio->state);
else if (test_bit(FailFast, &rdev->flags) &&
test_bit(R1BIO_FailFast, &r1_bio->state))
/* This was a fail-fast read so we definitely
* want to retry */
;
else { else {
/* If all other devices have failed, we want to return /* If all other devices have failed, we want to return
* the error upwards rather than fail the last device. * the error upwards rather than fail the last device.
@ -347,13 +356,10 @@ static void raid1_end_read_request(struct bio *bio)
* oops, read error: * oops, read error:
*/ */
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
printk_ratelimited( pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
KERN_ERR "md/raid1:%s: %s: " mdname(conf->mddev),
"rescheduling sector %llu\n", bdevname(rdev->bdev, b),
mdname(conf->mddev), (unsigned long long)r1_bio->sector);
bdevname(rdev->bdev,
b),
(unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state); set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio); reschedule_retry(r1_bio);
/* don't drop the reference on read_disk yet */ /* don't drop the reference on read_disk yet */
@ -416,7 +422,24 @@ static void raid1_end_write_request(struct bio *bio)
set_bit(MD_RECOVERY_NEEDED, & set_bit(MD_RECOVERY_NEEDED, &
conf->mddev->recovery); conf->mddev->recovery);
set_bit(R1BIO_WriteError, &r1_bio->state); if (test_bit(FailFast, &rdev->flags) &&
(bio->bi_opf & MD_FAILFAST) &&
/* We never try FailFast to WriteMostly devices */
!test_bit(WriteMostly, &rdev->flags)) {
md_error(r1_bio->mddev, rdev);
if (!test_bit(Faulty, &rdev->flags))
/* This is the only remaining device,
* We need to retry the write without
* FailFast
*/
set_bit(R1BIO_WriteError, &r1_bio->state);
else {
/* Finished with this branch */
r1_bio->bios[mirror] = NULL;
to_put = bio;
}
} else
set_bit(R1BIO_WriteError, &r1_bio->state);
} else { } else {
/* /*
* Set R1BIO_Uptodate in our master bio, so that we * Set R1BIO_Uptodate in our master bio, so that we
@ -534,6 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
best_good_sectors = 0; best_good_sectors = 0;
has_nonrot_disk = 0; has_nonrot_disk = 0;
choose_next_idle = 0; choose_next_idle = 0;
clear_bit(R1BIO_FailFast, &r1_bio->state);
if ((conf->mddev->recovery_cp < this_sector + sectors) || if ((conf->mddev->recovery_cp < this_sector + sectors) ||
(mddev_is_clustered(conf->mddev) && (mddev_is_clustered(conf->mddev) &&
@ -607,6 +631,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
} else } else
best_good_sectors = sectors; best_good_sectors = sectors;
if (best_disk >= 0)
/* At least two disks to choose from so failfast is OK */
set_bit(R1BIO_FailFast, &r1_bio->state);
nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
has_nonrot_disk |= nonrot; has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending); pending = atomic_read(&rdev->nr_pending);
@ -645,11 +673,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
} }
break; break;
} }
/* If device is idle, use it */
if (pending == 0) {
best_disk = disk;
break;
}
if (choose_next_idle) if (choose_next_idle)
continue; continue;
@ -672,7 +695,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
* mixed ratation/non-rotational disks depending on workload. * mixed ratation/non-rotational disks depending on workload.
*/ */
if (best_disk == -1) { if (best_disk == -1) {
if (has_nonrot_disk) if (has_nonrot_disk || min_pending == 0)
best_disk = best_pending_disk; best_disk = best_pending_disk;
else else
best_disk = best_dist_disk; best_disk = best_dist_disk;
@ -745,9 +768,14 @@ static void flush_pending_writes(struct r1conf *conf)
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next; struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL; bio->bi_next = NULL;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && bio->bi_bdev = rdev->bdev;
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) if (test_bit(Faulty, &rdev->flags)) {
bio->bi_error = -EIO;
bio_endio(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */ /* Just ignore it */
bio_endio(bio); bio_endio(bio);
else else
@ -832,7 +860,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
else if (conf->barrier && bio_data_dir(bio) == WRITE) { else if (conf->barrier && bio_data_dir(bio) == WRITE) {
if ((conf->mddev->curr_resync_completed if ((conf->mddev->curr_resync_completed
>= bio_end_sector(bio)) || >= bio_end_sector(bio)) ||
(conf->next_resync + NEXT_NORMALIO_DISTANCE (conf->start_next_window + NEXT_NORMALIO_DISTANCE
<= bio->bi_iter.bi_sector)) <= bio->bi_iter.bi_sector))
wait = false; wait = false;
else else
@ -858,6 +886,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
* that queue to allow conf->start_next_window * that queue to allow conf->start_next_window
* to increase. * to increase.
*/ */
raid1_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier, wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen && !conf->array_frozen &&
(!conf->barrier || (!conf->barrier ||
@ -937,6 +966,7 @@ static void freeze_array(struct r1conf *conf, int extra)
*/ */
spin_lock_irq(&conf->resync_lock); spin_lock_irq(&conf->resync_lock);
conf->array_frozen = 1; conf->array_frozen = 1;
raid1_log(conf->mddev, "wait freeze");
wait_event_lock_irq_cmd(conf->wait_barrier, wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra, conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock, conf->resync_lock,
@ -1019,9 +1049,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next; struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL; bio->bi_next = NULL;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && bio->bi_bdev = rdev->bdev;
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) if (test_bit(Faulty, &rdev->flags)) {
bio->bi_error = -EIO;
bio_endio(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */ /* Just ignore it */
bio_endio(bio); bio_endio(bio);
else else
@ -1136,6 +1171,7 @@ read_again:
* take care not to over-take any writes * take care not to over-take any writes
* that are 'behind' * that are 'behind'
*/ */
raid1_log(mddev, "wait behind writes");
wait_event(bitmap->behind_wait, wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0); atomic_read(&bitmap->behind_writes) == 0);
} }
@ -1153,8 +1189,16 @@ read_again:
read_bio->bi_bdev = mirror->rdev->bdev; read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request; read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync); bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &mirror->rdev->flags) &&
test_bit(R1BIO_FailFast, &r1_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r1_bio; read_bio->bi_private = r1_bio;
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
read_bio, disk_devt(mddev->gendisk),
r1_bio->sector);
if (max_sectors < r1_bio->sectors) { if (max_sectors < r1_bio->sectors) {
/* could not read all from this device, so we will /* could not read all from this device, so we will
* need another r1_bio. * need another r1_bio.
@ -1195,6 +1239,7 @@ read_again:
*/ */
if (conf->pending_count >= max_queued_requests) { if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
raid1_log(mddev, "wait queued");
wait_event(conf->wait_barrier, wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests); conf->pending_count < max_queued_requests);
} }
@ -1286,6 +1331,7 @@ read_again:
rdev_dec_pending(conf->mirrors[j].rdev, mddev); rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0; r1_bio->state = 0;
allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev); md_wait_for_blocked_rdev(blocked_rdev, mddev);
start_next_window = wait_barrier(conf, bio); start_next_window = wait_barrier(conf, bio);
/* /*
@ -1363,10 +1409,21 @@ read_again:
mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request; mbio->bi_end_io = raid1_end_write_request;
bio_set_op_attrs(mbio, op, do_flush_fua | do_sync); bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
!test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio; mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining); atomic_inc(&r1_bio->remaining);
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb) if (cb)
plug = container_of(cb, struct raid1_plug_cb, cb); plug = container_of(cb, struct raid1_plug_cb, cb);
@ -1436,6 +1493,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* next level up know. * next level up know.
* else mark the drive as failed * else mark the drive as failed
*/ */
spin_lock_irqsave(&conf->device_lock, flags);
if (test_bit(In_sync, &rdev->flags) if (test_bit(In_sync, &rdev->flags)
&& (conf->raid_disks - mddev->degraded) == 1) { && (conf->raid_disks - mddev->degraded) == 1) {
/* /*
@ -1445,10 +1503,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* it is very likely to fail. * it is very likely to fail.
*/ */
conf->recovery_disabled = mddev->recovery_disabled; conf->recovery_disabled = mddev->recovery_disabled;
spin_unlock_irqrestore(&conf->device_lock, flags);
return; return;
} }
set_bit(Blocked, &rdev->flags); set_bit(Blocked, &rdev->flags);
spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) { if (test_and_clear_bit(In_sync, &rdev->flags)) {
mddev->degraded++; mddev->degraded++;
set_bit(Faulty, &rdev->flags); set_bit(Faulty, &rdev->flags);
@ -1459,36 +1517,35 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* if recovery is running, make sure it aborts. * if recovery is running, make sure it aborts.
*/ */
set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_mask_bits(&mddev->flags, 0, set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
printk(KERN_ALERT pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
"md/raid1:%s: Disk failure on %s, disabling device.\n" "md/raid1:%s: Operation continuing on %d devices.\n",
"md/raid1:%s: Operation continuing on %d devices.\n", mdname(mddev), bdevname(rdev->bdev, b),
mdname(mddev), bdevname(rdev->bdev, b), mdname(mddev), conf->raid_disks - mddev->degraded);
mdname(mddev), conf->raid_disks - mddev->degraded);
} }
static void print_conf(struct r1conf *conf) static void print_conf(struct r1conf *conf)
{ {
int i; int i;
printk(KERN_DEBUG "RAID1 conf printout:\n"); pr_debug("RAID1 conf printout:\n");
if (!conf) { if (!conf) {
printk(KERN_DEBUG "(!conf)\n"); pr_debug("(!conf)\n");
return; return;
} }
printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks); conf->raid_disks);
rcu_read_lock(); rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) { for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev) if (rdev)
printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &rdev->flags), i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags), !test_bit(Faulty, &rdev->flags),
bdevname(rdev->bdev,b)); bdevname(rdev->bdev,b));
} }
rcu_read_unlock(); rcu_read_unlock();
} }
@ -1788,12 +1845,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
sector_t sect = r1_bio->sector; sector_t sect = r1_bio->sector;
int sectors = r1_bio->sectors; int sectors = r1_bio->sectors;
int idx = 0; int idx = 0;
struct md_rdev *rdev;
rdev = conf->mirrors[r1_bio->read_disk].rdev;
if (test_bit(FailFast, &rdev->flags)) {
/* Don't try recovering from here - just fail it
* ... unless it is the last working device of course */
md_error(mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
/* Don't try to read from here, but make sure
* put_buf does it's thing
*/
bio->bi_end_io = end_sync_write;
}
while(sectors) { while(sectors) {
int s = sectors; int s = sectors;
int d = r1_bio->read_disk; int d = r1_bio->read_disk;
int success = 0; int success = 0;
struct md_rdev *rdev;
int start; int start;
if (s > (PAGE_SIZE>>9)) if (s > (PAGE_SIZE>>9))
@ -1825,11 +1894,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
* work just disable and interrupt the recovery. * work just disable and interrupt the recovery.
* Don't fail devices as that won't really help. * Don't fail devices as that won't really help.
*/ */
printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error" pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
" for block %llu\n", mdname(mddev),
mdname(mddev), bdevname(bio->bi_bdev, b),
bdevname(bio->bi_bdev, b), (unsigned long long)r1_bio->sector);
(unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) { for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev; rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags)) if (!rdev || test_bit(Faulty, &rdev->flags))
@ -2013,6 +2081,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
continue; continue;
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
wbio->bi_opf |= MD_FAILFAST;
wbio->bi_end_io = end_sync_write; wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining); atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@ -2122,13 +2193,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (r1_sync_page_io(rdev, sect, s, if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) { conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors); atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
"md/raid1:%s: read error corrected " mdname(mddev), s,
"(%d sectors at %llu on %s)\n", (unsigned long long)(sect +
mdname(mddev), s, rdev->data_offset),
(unsigned long long)(sect + bdevname(rdev->bdev, b));
rdev->data_offset),
bdevname(rdev->bdev, b));
} }
rdev_dec_pending(rdev, mddev); rdev_dec_pending(rdev, mddev);
} else } else
@ -2287,6 +2356,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct bio *bio; struct bio *bio;
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
struct md_rdev *rdev; struct md_rdev *rdev;
dev_t bio_dev;
sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state); clear_bit(R1BIO_ReadError, &r1_bio->state);
/* we got a read error. Maybe the drive is bad. Maybe just /* we got a read error. Maybe the drive is bad. Maybe just
@ -2300,10 +2371,14 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
bio = r1_bio->bios[r1_bio->read_disk]; bio = r1_bio->bios[r1_bio->read_disk];
bdevname(bio->bi_bdev, b); bdevname(bio->bi_bdev, b);
bio_dev = bio->bi_bdev->bd_dev;
bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
bio_put(bio); bio_put(bio);
r1_bio->bios[r1_bio->read_disk] = NULL; r1_bio->bios[r1_bio->read_disk] = NULL;
if (mddev->ro == 0) { rdev = conf->mirrors[r1_bio->read_disk].rdev;
if (mddev->ro == 0
&& !test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1); freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk, fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors); r1_bio->sector, r1_bio->sectors);
@ -2312,14 +2387,13 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
} }
rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); rdev_dec_pending(rdev, conf->mddev);
read_more: read_more:
disk = read_balance(conf, r1_bio, &max_sectors); disk = read_balance(conf, r1_bio, &max_sectors);
if (disk == -1) { if (disk == -1) {
printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O" pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
" read error for block %llu\n", mdname(mddev), b, (unsigned long long)r1_bio->sector);
mdname(mddev), b, (unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio); raid_end_bio_io(r1_bio);
} else { } else {
const unsigned long do_sync const unsigned long do_sync
@ -2330,16 +2404,17 @@ read_more:
max_sectors); max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio; r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev; rdev = conf->mirrors[disk].rdev;
printk_ratelimited(KERN_ERR pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
"md/raid1:%s: redirecting sector %llu" mdname(mddev),
" to other mirror: %s\n", (unsigned long long)r1_bio->sector,
mdname(mddev), bdevname(rdev->bdev, b));
(unsigned long long)r1_bio->sector,
bdevname(rdev->bdev, b));
bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request; bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync); bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
if (test_bit(FailFast, &rdev->flags) &&
test_bit(R1BIO_FailFast, &r1_bio->state))
bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r1_bio; bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) { if (max_sectors < r1_bio->sectors) {
/* Drat - have to split this up more */ /* Drat - have to split this up more */
@ -2353,6 +2428,8 @@ read_more:
else else
mbio->bi_phys_segments++; mbio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
bio, bio_dev, bio_sector);
generic_make_request(bio); generic_make_request(bio);
bio = NULL; bio = NULL;
@ -2367,8 +2444,11 @@ read_more:
sectors_handled; sectors_handled;
goto read_more; goto read_more;
} else } else {
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
bio, bio_dev, bio_sector);
generic_make_request(bio); generic_make_request(bio);
}
} }
} }
@ -2384,10 +2464,10 @@ static void raid1d(struct md_thread *thread)
md_check_recovery(mddev); md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) && if (!list_empty_careful(&conf->bio_end_io_list) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp); LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) { while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp); list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--; conf->nr_queued--;
@ -2441,7 +2521,7 @@ static void raid1d(struct md_thread *thread)
generic_make_request(r1_bio->bios[r1_bio->read_disk]); generic_make_request(r1_bio->bios[r1_bio->read_disk]);
cond_resched(); cond_resched();
if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev); md_check_recovery(mddev);
} }
blk_finish_plug(&plug); blk_finish_plug(&plug);
@ -2623,6 +2703,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio; bio->bi_private = r1_bio;
if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
} }
} }
rcu_read_unlock(); rcu_read_unlock();
@ -2642,7 +2724,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
min_bad, 0 min_bad, 0
) && ok; ) && ok;
} }
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
*skipped = 1; *skipped = 1;
put_buf(r1_bio); put_buf(r1_bio);
@ -2753,6 +2835,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io == end_sync_read) { if (bio->bi_end_io == end_sync_read) {
read_targets--; read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors); md_sync_acct(bio->bi_bdev, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio); generic_make_request(bio);
} }
} }
@ -2760,6 +2844,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk]; bio = r1_bio->bios[r1_bio->read_disk];
md_sync_acct(bio->bi_bdev, nr_sectors); md_sync_acct(bio->bi_bdev, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio); generic_make_request(bio);
} }
@ -2875,12 +2961,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -ENOMEM; err = -ENOMEM;
conf->thread = md_register_thread(raid1d, mddev, "raid1"); conf->thread = md_register_thread(raid1d, mddev, "raid1");
if (!conf->thread) { if (!conf->thread)
printk(KERN_ERR
"md/raid1:%s: couldn't allocate thread\n",
mdname(mddev));
goto abort; goto abort;
}
return conf; return conf;
@ -2905,13 +2987,13 @@ static int raid1_run(struct mddev *mddev)
bool discard_supported = false; bool discard_supported = false;
if (mddev->level != 1) { if (mddev->level != 1) {
printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
mdname(mddev), mddev->level); mdname(mddev), mddev->level);
return -EIO; return -EIO;
} }
if (mddev->reshape_position != MaxSector) { if (mddev->reshape_position != MaxSector) {
printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n", pr_warn("md/raid1:%s: reshape_position set but not supported\n",
mdname(mddev)); mdname(mddev));
return -EIO; return -EIO;
} }
/* /*
@ -2950,11 +3032,9 @@ static int raid1_run(struct mddev *mddev)
mddev->recovery_cp = MaxSector; mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector) if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "md/raid1:%s: not clean" pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
" -- starting background reconstruction\n", mdname(mddev));
mdname(mddev)); pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
printk(KERN_INFO
"md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded, mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks); mddev->raid_disks);
@ -2964,6 +3044,7 @@ static int raid1_run(struct mddev *mddev)
mddev->thread = conf->thread; mddev->thread = conf->thread;
conf->thread = NULL; conf->thread = NULL;
mddev->private = conf; mddev->private = conf;
set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
@ -3107,9 +3188,8 @@ static int raid1_reshape(struct mddev *mddev)
rdev->raid_disk = d2; rdev->raid_disk = d2;
sysfs_unlink_rdev(mddev, rdev); sysfs_unlink_rdev(mddev, rdev);
if (sysfs_link_rdev(mddev, rdev)) if (sysfs_link_rdev(mddev, rdev))
printk(KERN_WARNING pr_warn("md/raid1:%s: cannot register rd%d\n",
"md/raid1:%s: cannot register rd%d\n", mdname(mddev), rdev->raid_disk);
mdname(mddev), rdev->raid_disk);
} }
if (rdev) if (rdev)
newmirrors[d2++].rdev = rdev; newmirrors[d2++].rdev = rdev;
@ -3163,9 +3243,12 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_layout = 0; mddev->new_layout = 0;
mddev->new_chunk_sectors = 0; mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev); conf = setup_conf(mddev);
if (!IS_ERR(conf)) if (!IS_ERR(conf)) {
/* Array must appear to be quiesced */ /* Array must appear to be quiesced */
conf->array_frozen = 1; conf->array_frozen = 1;
clear_bit(MD_HAS_JOURNAL, &mddev->flags);
clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
}
return conf; return conf;
} }
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);

View File

@ -161,14 +161,15 @@ struct r1bio {
}; };
/* bits for r1bio.state */ /* bits for r1bio.state */
#define R1BIO_Uptodate 0 enum r1bio_state {
#define R1BIO_IsSync 1 R1BIO_Uptodate,
#define R1BIO_Degraded 2 R1BIO_IsSync,
#define R1BIO_BehindIO 3 R1BIO_Degraded,
R1BIO_BehindIO,
/* Set ReadError on bios that experience a readerror so that /* Set ReadError on bios that experience a readerror so that
* raid1d knows what to do with them. * raid1d knows what to do with them.
*/ */
#define R1BIO_ReadError 4 R1BIO_ReadError,
/* For write-behind requests, we call bi_end_io when /* For write-behind requests, we call bi_end_io when
* the last non-write-behind device completes, providing * the last non-write-behind device completes, providing
* any write was successful. Otherwise we call when * any write was successful. Otherwise we call when
@ -176,10 +177,12 @@ struct r1bio {
* with failure when last write completes (and all failed). * with failure when last write completes (and all failed).
* Record that bi_end_io was called with this flag... * Record that bi_end_io was called with this flag...
*/ */
#define R1BIO_Returned 6 R1BIO_Returned,
/* If a write for this request means we can clear some /* If a write for this request means we can clear some
* known-bad-block records, we set this flag * known-bad-block records, we set this flag
*/ */
#define R1BIO_MadeGood 7 R1BIO_MadeGood,
#define R1BIO_WriteError 8 R1BIO_WriteError,
R1BIO_FailFast,
};
#endif #endif

View File

@ -25,6 +25,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/ratelimit.h> #include <linux/ratelimit.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <trace/events/block.h>
#include "md.h" #include "md.h"
#include "raid10.h" #include "raid10.h"
#include "raid0.h" #include "raid0.h"
@ -99,12 +100,16 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf); static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf); static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore); static int _enough(struct r10conf *conf, int previous, int ignore);
static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
int *skipped); int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio); static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf); static void end_reshape(struct r10conf *conf);
#define raid10_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{ {
struct r10conf *conf = data; struct r10conf *conf = data;
@ -404,8 +409,7 @@ static void raid10_end_read_request(struct bio *bio)
* oops, read error - keep the refcount on the rdev * oops, read error - keep the refcount on the rdev
*/ */
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
printk_ratelimited(KERN_ERR pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
"md/raid10:%s: %s: rescheduling sector %llu\n",
mdname(conf->mddev), mdname(conf->mddev),
bdevname(rdev->bdev, b), bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector); (unsigned long long)r10_bio->sector);
@ -447,6 +451,7 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private; struct r10conf *conf = r10_bio->mddev->private;
int slot, repl; int slot, repl;
struct md_rdev *rdev = NULL; struct md_rdev *rdev = NULL;
struct bio *to_put = NULL;
bool discard_error; bool discard_error;
discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
@ -474,8 +479,24 @@ static void raid10_end_write_request(struct bio *bio)
if (!test_and_set_bit(WantReplacement, &rdev->flags)) if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, set_bit(MD_RECOVERY_NEEDED,
&rdev->mddev->recovery); &rdev->mddev->recovery);
set_bit(R10BIO_WriteError, &r10_bio->state);
dec_rdev = 0; dec_rdev = 0;
if (test_bit(FailFast, &rdev->flags) &&
(bio->bi_opf & MD_FAILFAST)) {
md_error(rdev->mddev, rdev);
if (!test_bit(Faulty, &rdev->flags))
/* This is the only remaining device,
* We need to retry the write without
* FailFast
*/
set_bit(R10BIO_WriteError, &r10_bio->state);
else {
r10_bio->devs[slot].bio = NULL;
to_put = bio;
dec_rdev = 1;
}
} else
set_bit(R10BIO_WriteError, &r10_bio->state);
} }
} else { } else {
/* /*
@ -525,6 +546,8 @@ static void raid10_end_write_request(struct bio *bio)
one_write_done(r10_bio); one_write_done(r10_bio);
if (dec_rdev) if (dec_rdev)
rdev_dec_pending(rdev, conf->mddev); rdev_dec_pending(rdev, conf->mddev);
if (to_put)
bio_put(to_put);
} }
/* /*
@ -716,6 +739,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
best_dist = MaxSector; best_dist = MaxSector;
best_good_sectors = 0; best_good_sectors = 0;
do_balance = 1; do_balance = 1;
clear_bit(R10BIO_FailFast, &r10_bio->state);
/* /*
* Check if we can balance. We can balance on the whole * Check if we can balance. We can balance on the whole
* device if no resync is going on (recovery is ok), or below * device if no resync is going on (recovery is ok), or below
@ -780,15 +804,18 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (!do_balance) if (!do_balance)
break; break;
if (best_slot >= 0)
/* At least 2 disks to choose from so failfast is OK */
set_bit(R10BIO_FailFast, &r10_bio->state);
/* This optimisation is debatable, and completely destroys /* This optimisation is debatable, and completely destroys
* sequential read speed for 'far copies' arrays. So only * sequential read speed for 'far copies' arrays. So only
* keep it for 'near' arrays, and review those later. * keep it for 'near' arrays, and review those later.
*/ */
if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
break; new_distance = 0;
/* for far > 1 always use the lowest address */ /* for far > 1 always use the lowest address */
if (geo->far_copies > 1) else if (geo->far_copies > 1)
new_distance = r10_bio->devs[slot].addr; new_distance = r10_bio->devs[slot].addr;
else else
new_distance = abs(r10_bio->devs[slot].addr - new_distance = abs(r10_bio->devs[slot].addr -
@ -859,9 +886,14 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next; struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL; bio->bi_next = NULL;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && bio->bi_bdev = rdev->bdev;
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) if (test_bit(Faulty, &rdev->flags)) {
bio->bi_error = -EIO;
bio_endio(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */ /* Just ignore it */
bio_endio(bio); bio_endio(bio);
else else
@ -937,6 +969,7 @@ static void wait_barrier(struct r10conf *conf)
* that queue to get the nr_pending * that queue to get the nr_pending
* count down. * count down.
*/ */
raid10_log(conf->mddev, "wait barrier");
wait_event_lock_irq(conf->wait_barrier, wait_event_lock_irq(conf->wait_barrier,
!conf->barrier || !conf->barrier ||
(atomic_read(&conf->nr_pending) && (atomic_read(&conf->nr_pending) &&
@ -1037,9 +1070,14 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next; struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_bdev;
bio->bi_next = NULL; bio->bi_next = NULL;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && bio->bi_bdev = rdev->bdev;
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) if (test_bit(Faulty, &rdev->flags)) {
bio->bi_error = -EIO;
bio_endio(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */ /* Just ignore it */
bio_endio(bio); bio_endio(bio);
else else
@ -1083,6 +1121,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
/* IO spans the reshape position. Need to wait for /* IO spans the reshape position. Need to wait for
* reshape to pass * reshape to pass
*/ */
raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf); allow_barrier(conf);
wait_event(conf->wait_barrier, wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector || conf->reshape_progress <= bio->bi_iter.bi_sector ||
@ -1099,11 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
bio->bi_iter.bi_sector < conf->reshape_progress))) { bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */ /* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress; mddev->reshape_position = conf->reshape_progress;
set_mask_bits(&mddev->flags, 0, set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
raid10_log(conf->mddev, "wait reshape metadata");
wait_event(mddev->sb_wait, wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags)); !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
conf->reshape_safe = mddev->reshape_position; conf->reshape_safe = mddev->reshape_position;
} }
@ -1154,8 +1194,15 @@ read_again:
read_bio->bi_bdev = rdev->bdev; read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request; read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync); bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &rdev->flags) &&
test_bit(R10BIO_FailFast, &r10_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r10_bio; read_bio->bi_private = r10_bio;
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
read_bio, disk_devt(mddev->gendisk),
r10_bio->sector);
if (max_sectors < r10_bio->sectors) { if (max_sectors < r10_bio->sectors) {
/* Could not read all from this device, so we will /* Could not read all from this device, so we will
* need another r10_bio. * need another r10_bio.
@ -1195,6 +1242,7 @@ read_again:
*/ */
if (conf->pending_count >= max_queued_requests) { if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
raid10_log(mddev, "wait queued");
wait_event(conf->wait_barrier, wait_event(conf->wait_barrier,
conf->pending_count < max_queued_requests); conf->pending_count < max_queued_requests);
} }
@ -1322,6 +1370,7 @@ retry_write:
} }
} }
allow_barrier(conf); allow_barrier(conf);
raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev); md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf); wait_barrier(conf);
goto retry_write; goto retry_write;
@ -1361,8 +1410,18 @@ retry_write:
mbio->bi_bdev = rdev->bdev; mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request; mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua); bio_set_op_attrs(mbio, op, do_sync | do_fua);
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) &&
enough(conf, d))
mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r10_bio; mbio->bi_private = r10_bio;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void*)rdev;
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
cb = blk_check_plugged(raid10_unplug, mddev, cb = blk_check_plugged(raid10_unplug, mddev,
@ -1405,6 +1464,13 @@ retry_write:
bio_set_op_attrs(mbio, op, do_sync | do_fua); bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio; mbio->bi_private = r10_bio;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void*)rdev;
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio); bio_list_add(&conf->pending_bio_list, mbio);
@ -1586,14 +1652,13 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags); set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags); set_bit(Faulty, &rdev->flags);
set_mask_bits(&mddev->flags, 0, set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
printk(KERN_ALERT pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
"md/raid10:%s: Disk failure on %s, disabling device.\n" "md/raid10:%s: Operation continuing on %d devices.\n",
"md/raid10:%s: Operation continuing on %d devices.\n", mdname(mddev), bdevname(rdev->bdev, b),
mdname(mddev), bdevname(rdev->bdev, b), mdname(mddev), conf->geo.raid_disks - mddev->degraded);
mdname(mddev), conf->geo.raid_disks - mddev->degraded);
} }
static void print_conf(struct r10conf *conf) static void print_conf(struct r10conf *conf)
@ -1601,13 +1666,13 @@ static void print_conf(struct r10conf *conf)
int i; int i;
struct md_rdev *rdev; struct md_rdev *rdev;
printk(KERN_DEBUG "RAID10 conf printout:\n"); pr_debug("RAID10 conf printout:\n");
if (!conf) { if (!conf) {
printk(KERN_DEBUG "(!conf)\n"); pr_debug("(!conf)\n");
return; return;
} }
printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
conf->geo.raid_disks); conf->geo.raid_disks);
/* This is only called with ->reconfix_mutex held, so /* This is only called with ->reconfix_mutex held, so
* rcu protection of rdev is not needed */ * rcu protection of rdev is not needed */
@ -1615,10 +1680,10 @@ static void print_conf(struct r10conf *conf)
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
rdev = conf->mirrors[i].rdev; rdev = conf->mirrors[i].rdev;
if (rdev) if (rdev)
printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &rdev->flags), i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags), !test_bit(Faulty, &rdev->flags),
bdevname(rdev->bdev,b)); bdevname(rdev->bdev,b));
} }
} }
@ -1953,6 +2018,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
/* now find blocks with errors */ /* now find blocks with errors */
for (i=0 ; i < conf->copies ; i++) { for (i=0 ; i < conf->copies ; i++) {
int j, d; int j, d;
struct md_rdev *rdev;
tbio = r10_bio->devs[i].bio; tbio = r10_bio->devs[i].bio;
@ -1960,6 +2026,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue; continue;
if (i == first) if (i == first)
continue; continue;
d = r10_bio->devs[i].devnum;
rdev = conf->mirrors[d].rdev;
if (!r10_bio->devs[i].bio->bi_error) { if (!r10_bio->devs[i].bio->bi_error) {
/* We know that the bi_io_vec layout is the same for /* We know that the bi_io_vec layout is the same for
* both 'first' and 'i', so we just compare them. * both 'first' and 'i', so we just compare them.
@ -1982,6 +2050,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
/* Don't fix anything. */ /* Don't fix anything. */
continue; continue;
} else if (test_bit(FailFast, &rdev->flags)) {
/* Just give up on this device */
md_error(rdev->mddev, rdev);
continue;
} }
/* Ok, we need to write this bio, either to correct an /* Ok, we need to write this bio, either to correct an
* inconsistency or to correct an unreadable block. * inconsistency or to correct an unreadable block.
@ -1999,11 +2071,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_copy_data(tbio, fbio); bio_copy_data(tbio, fbio);
d = r10_bio->devs[i].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending); atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev; tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
generic_make_request(tbio); generic_make_request(tbio);
@ -2109,10 +2182,8 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
ok = rdev_set_badblocks(rdev2, addr, s, 0); ok = rdev_set_badblocks(rdev2, addr, s, 0);
if (!ok) { if (!ok) {
/* just abort the recovery */ /* just abort the recovery */
printk(KERN_NOTICE pr_notice("md/raid10:%s: recovery aborted due to read error\n",
"md/raid10:%s: recovery aborted" mdname(mddev));
" due to read error\n",
mdname(mddev));
conf->mirrors[dw].recovery_disabled conf->mirrors[dw].recovery_disabled
= mddev->recovery_disabled; = mddev->recovery_disabled;
@ -2259,14 +2330,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b); bdevname(rdev->bdev, b);
printk(KERN_NOTICE pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
"md/raid10:%s: %s: Raid device exceeded " mdname(mddev), b,
"read_error threshold [cur %d:max %d]\n", atomic_read(&rdev->read_errors), max_read_errors);
mdname(mddev), b, pr_notice("md/raid10:%s: %s: Failing raid device\n",
atomic_read(&rdev->read_errors), max_read_errors); mdname(mddev), b);
printk(KERN_NOTICE
"md/raid10:%s: %s: Failing raid device\n",
mdname(mddev), b);
md_error(mddev, rdev); md_error(mddev, rdev);
r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return; return;
@ -2356,20 +2424,16 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s, conf->tmppage, WRITE) s, conf->tmppage, WRITE)
== 0) { == 0) {
/* Well, this device is dead */ /* Well, this device is dead */
printk(KERN_NOTICE pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
"md/raid10:%s: read correction " mdname(mddev), s,
"write failed" (unsigned long long)(
" (%d sectors at %llu on %s)\n", sect +
mdname(mddev), s, choose_data_offset(r10_bio,
(unsigned long long)( rdev)),
sect + bdevname(rdev->bdev, b));
choose_data_offset(r10_bio, pr_notice("md/raid10:%s: %s: failing drive\n",
rdev)), mdname(mddev),
bdevname(rdev->bdev, b)); bdevname(rdev->bdev, b));
printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
mdname(mddev),
bdevname(rdev->bdev, b));
} }
rdev_dec_pending(rdev, mddev); rdev_dec_pending(rdev, mddev);
rcu_read_lock(); rcu_read_lock();
@ -2397,24 +2461,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
READ)) { READ)) {
case 0: case 0:
/* Well, this device is dead */ /* Well, this device is dead */
printk(KERN_NOTICE pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
"md/raid10:%s: unable to read back "
"corrected sectors"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s, mdname(mddev), s,
(unsigned long long)( (unsigned long long)(
sect + sect +
choose_data_offset(r10_bio, rdev)), choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b)); bdevname(rdev->bdev, b));
printk(KERN_NOTICE "md/raid10:%s: %s: failing " pr_notice("md/raid10:%s: %s: failing drive\n",
"drive\n",
mdname(mddev), mdname(mddev),
bdevname(rdev->bdev, b)); bdevname(rdev->bdev, b));
break; break;
case 1: case 1:
printk(KERN_INFO pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
"md/raid10:%s: read error corrected"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s, mdname(mddev), s,
(unsigned long long)( (unsigned long long)(
sect + sect +
@ -2503,6 +2561,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
unsigned long do_sync; unsigned long do_sync;
int max_sectors; int max_sectors;
dev_t bio_dev;
sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just /* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it. * the block and we can fix it.
@ -2514,38 +2574,38 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
*/ */
bio = r10_bio->devs[slot].bio; bio = r10_bio->devs[slot].bio;
bdevname(bio->bi_bdev, b); bdevname(bio->bi_bdev, b);
bio_dev = bio->bi_bdev->bd_dev;
bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio); bio_put(bio);
r10_bio->devs[slot].bio = NULL; r10_bio->devs[slot].bio = NULL;
if (mddev->ro == 0) { if (mddev->ro)
r10_bio->devs[slot].bio = IO_BLOCKED;
else if (!test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1); freeze_array(conf, 1);
fix_read_error(conf, mddev, r10_bio); fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf); unfreeze_array(conf);
} else } else
r10_bio->devs[slot].bio = IO_BLOCKED; md_error(mddev, rdev);
rdev_dec_pending(rdev, mddev); rdev_dec_pending(rdev, mddev);
read_more: read_more:
rdev = read_balance(conf, r10_bio, &max_sectors); rdev = read_balance(conf, r10_bio, &max_sectors);
if (rdev == NULL) { if (rdev == NULL) {
printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
" read error for block %llu\n", mdname(mddev), b,
mdname(mddev), b, (unsigned long long)r10_bio->sector);
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio); raid_end_bio_io(r10_bio);
return; return;
} }
do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC); do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
slot = r10_bio->read_slot; slot = r10_bio->read_slot;
printk_ratelimited( pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
KERN_ERR mdname(mddev),
"md/raid10:%s: %s: redirecting " bdevname(rdev->bdev, b),
"sector %llu to another mirror\n", (unsigned long long)r10_bio->sector);
mdname(mddev),
bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio, bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev); GFP_NOIO, mddev);
bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
@ -2555,8 +2615,15 @@ read_more:
+ choose_data_offset(r10_bio, rdev); + choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
bio_set_op_attrs(bio, REQ_OP_READ, do_sync); bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
if (test_bit(FailFast, &rdev->flags) &&
test_bit(R10BIO_FailFast, &r10_bio->state))
bio->bi_opf |= MD_FAILFAST;
bio->bi_private = r10_bio; bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request; bio->bi_end_io = raid10_end_read_request;
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
bio, bio_dev,
bio_last_sector - r10_bio->sectors);
if (max_sectors < r10_bio->sectors) { if (max_sectors < r10_bio->sectors) {
/* Drat - have to split this up more */ /* Drat - have to split this up more */
struct bio *mbio = r10_bio->master_bio; struct bio *mbio = r10_bio->master_bio;
@ -2694,10 +2761,10 @@ static void raid10d(struct md_thread *thread)
md_check_recovery(mddev); md_check_recovery(mddev);
if (!list_empty_careful(&conf->bio_end_io_list) && if (!list_empty_careful(&conf->bio_end_io_list) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp); LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) { while (!list_empty(&conf->bio_end_io_list)) {
list_move(conf->bio_end_io_list.prev, &tmp); list_move(conf->bio_end_io_list.prev, &tmp);
conf->nr_queued--; conf->nr_queued--;
@ -2755,7 +2822,7 @@ static void raid10d(struct md_thread *thread)
} }
cond_resched(); cond_resched();
if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
md_check_recovery(mddev); md_check_recovery(mddev);
} }
blk_finish_plug(&plug); blk_finish_plug(&plug);
@ -3072,6 +3139,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio; bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read; bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0); bio_set_op_attrs(bio, REQ_OP_READ, 0);
if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
from_addr = r10_bio->devs[j].addr; from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr + bio->bi_iter.bi_sector = from_addr +
rdev->data_offset; rdev->data_offset;
@ -3160,8 +3229,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (!any_working) { if (!any_working) {
if (!test_and_set_bit(MD_RECOVERY_INTR, if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery)) &mddev->recovery))
printk(KERN_INFO "md/raid10:%s: insufficient " pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
"working devices for recovery.\n",
mdname(mddev)); mdname(mddev));
mirror->recovery_disabled mirror->recovery_disabled
= mddev->recovery_disabled; = mddev->recovery_disabled;
@ -3178,6 +3246,23 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rdev_dec_pending(mrdev, mddev); rdev_dec_pending(mrdev, mddev);
if (mreplace) if (mreplace)
rdev_dec_pending(mreplace, mddev); rdev_dec_pending(mreplace, mddev);
if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
/* Only want this if there is elsewhere to
* read from. 'j' is currently the first
* readable copy.
*/
int targets = 1;
for (; j < conf->copies; j++) {
int d = r10_bio->devs[j].devnum;
if (conf->mirrors[d].rdev &&
test_bit(In_sync,
&conf->mirrors[d].rdev->flags))
targets++;
}
if (targets == 1)
r10_bio->devs[0].bio->bi_opf
&= ~MD_FAILFAST;
}
} }
if (biolist == NULL) { if (biolist == NULL) {
while (r10_bio) { while (r10_bio) {
@ -3256,6 +3341,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio; bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read; bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0); bio_set_op_attrs(bio, REQ_OP_READ, 0);
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset; bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
count++; count++;
@ -3279,6 +3366,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_private = r10_bio; bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write; bio->bi_end_io = end_sync_write;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset; bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev; bio->bi_bdev = rdev->bdev;
count++; count++;
@ -3489,15 +3578,14 @@ static struct r10conf *setup_conf(struct mddev *mddev)
copies = setup_geo(&geo, mddev, geo_new); copies = setup_geo(&geo, mddev, geo_new);
if (copies == -2) { if (copies == -2) {
printk(KERN_ERR "md/raid10:%s: chunk size must be " pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
"at least PAGE_SIZE(%ld) and be a power of 2.\n", mdname(mddev), PAGE_SIZE);
mdname(mddev), PAGE_SIZE);
goto out; goto out;
} }
if (copies < 2 || copies > mddev->raid_disks) { if (copies < 2 || copies > mddev->raid_disks) {
printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n", pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
mdname(mddev), mddev->new_layout); mdname(mddev), mddev->new_layout);
goto out; goto out;
} }
@ -3557,9 +3645,6 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf; return conf;
out: out:
if (err == -ENOMEM)
printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
mdname(mddev));
if (conf) { if (conf) {
mempool_destroy(conf->r10bio_pool); mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors); kfree(conf->mirrors);
@ -3656,7 +3741,7 @@ static int raid10_run(struct mddev *mddev)
} }
/* need to check that every block has at least one working mirror */ /* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) { if (!enough(conf, -1)) {
printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", pr_err("md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev)); mdname(mddev));
goto out_free_conf; goto out_free_conf;
} }
@ -3698,11 +3783,9 @@ static int raid10_run(struct mddev *mddev)
} }
if (mddev->recovery_cp != MaxSector) if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "md/raid10:%s: not clean" pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
" -- starting background reconstruction\n", mdname(mddev));
mdname(mddev)); pr_info("md/raid10:%s: active with %d out of %d devices\n",
printk(KERN_INFO
"md/raid10:%s: active with %d out of %d devices\n",
mdname(mddev), conf->geo.raid_disks - mddev->degraded, mdname(mddev), conf->geo.raid_disks - mddev->degraded,
conf->geo.raid_disks); conf->geo.raid_disks);
/* /*
@ -3712,6 +3795,7 @@ static int raid10_run(struct mddev *mddev)
size = raid10_size(mddev, 0, 0); size = raid10_size(mddev, 0, 0);
md_set_array_sectors(mddev, size); md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size; mddev->resync_max_sectors = size;
set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
if (mddev->queue) { if (mddev->queue) {
int stripe = conf->geo.raid_disks * int stripe = conf->geo.raid_disks *
@ -3739,7 +3823,7 @@ static int raid10_run(struct mddev *mddev)
if (max(before_length, after_length) > min_offset_diff) { if (max(before_length, after_length) > min_offset_diff) {
/* This cannot work */ /* This cannot work */
printk("md/raid10: offset difference not enough to continue reshape\n"); pr_warn("md/raid10: offset difference not enough to continue reshape\n");
goto out_free_conf; goto out_free_conf;
} }
conf->offset_diff = min_offset_diff; conf->offset_diff = min_offset_diff;
@ -3846,8 +3930,8 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
struct r10conf *conf; struct r10conf *conf;
if (mddev->degraded > 0) { if (mddev->degraded > 0) {
printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", pr_warn("md/raid10:%s: Error: degraded raid0!\n",
mdname(mddev)); mdname(mddev));
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
sector_div(size, devs); sector_div(size, devs);
@ -3887,9 +3971,8 @@ static void *raid10_takeover(struct mddev *mddev)
/* for raid0 takeover only one zone is supported */ /* for raid0 takeover only one zone is supported */
raid0_conf = mddev->private; raid0_conf = mddev->private;
if (raid0_conf->nr_strip_zones > 1) { if (raid0_conf->nr_strip_zones > 1) {
printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
" with more than one zone.\n", mdname(mddev));
mdname(mddev));
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
return raid10_takeover_raid0(mddev, return raid10_takeover_raid0(mddev,
@ -4078,8 +4161,8 @@ static int raid10_start_reshape(struct mddev *mddev)
sector_t size = raid10_size(mddev, 0, 0); sector_t size = raid10_size(mddev, 0, 0);
if (size < mddev->array_sectors) { if (size < mddev->array_sectors) {
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n", pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
mdname(mddev)); mdname(mddev));
return -EINVAL; return -EINVAL;
} }
mddev->resync_max_sectors = size; mddev->resync_max_sectors = size;
@ -4126,7 +4209,7 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
mddev->raid_disks = conf->geo.raid_disks; mddev->raid_disks = conf->geo.raid_disks;
mddev->reshape_position = conf->reshape_progress; mddev->reshape_position = conf->reshape_progress;
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@ -4321,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
else else
mddev->curr_resync_completed = conf->reshape_progress; mddev->curr_resync_completed = conf->reshape_progress;
conf->reshape_checkpoint = jiffies; conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait, mddev->flags == 0 || wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
test_bit(MD_RECOVERY_INTR, &mddev->recovery)); test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
allow_barrier(conf); allow_barrier(conf);

View File

@ -156,5 +156,7 @@ enum r10bio_state {
* flag is set * flag is set
*/ */
R10BIO_Previous, R10BIO_Previous,
/* failfast devices did receive failfast requests. */
R10BIO_FailFast,
}; };
#endif #endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -226,6 +226,8 @@ struct stripe_head {
struct r5l_io_unit *log_io; struct r5l_io_unit *log_io;
struct list_head log_list; struct list_head log_list;
sector_t log_start; /* first meta block on the journal */
struct list_head r5c; /* for r5c_cache->stripe_in_journal */
/** /**
* struct stripe_operations * struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target * @target - STRIPE_OP_COMPUTE_BLK target
@ -264,6 +266,7 @@ struct stripe_head_state {
int syncing, expanding, expanded, replacing; int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written; int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite; int to_fill, compute, req_compute, non_overwrite;
int injournal, just_cached;
int failed_num[2]; int failed_num[2];
int p_failed, q_failed; int p_failed, q_failed;
int dec_preread_active; int dec_preread_active;
@ -273,6 +276,7 @@ struct stripe_head_state {
struct md_rdev *blocked_rdev; struct md_rdev *blocked_rdev;
int handle_bad_blocks; int handle_bad_blocks;
int log_failed; int log_failed;
int waiting_extra_page;
}; };
/* Flags for struct r5dev.flags */ /* Flags for struct r5dev.flags */
@ -313,6 +317,11 @@ enum r5dev_flags {
*/ */
R5_Discard, /* Discard the stripe */ R5_Discard, /* Discard the stripe */
R5_SkipCopy, /* Don't copy data from bio to stripe cache */ R5_SkipCopy, /* Don't copy data from bio to stripe cache */
R5_InJournal, /* data being written is in the journal device.
* if R5_InJournal is set for parity pd_idx, all the
* data and parity being written are in the journal
* device
*/
}; };
/* /*
@ -345,7 +354,30 @@ enum {
STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
* to batch yet. * to batch yet.
*/ */
STRIPE_LOG_TRAPPED, /* trapped into log */ STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
* this bit is used in two scenarios:
*
* 1. write-out phase
* set in first entry of r5l_write_stripe
* clear in second entry of r5l_write_stripe
* used to bypass logic in handle_stripe
*
* 2. caching phase
* set in r5c_try_caching_write()
* clear when journal write is done
* used to initiate r5c_cache_data()
* also used to bypass logic in handle_stripe
*/
STRIPE_R5C_CACHING, /* the stripe is in caching phase
* see more detail in the raid5-cache.c
*/
STRIPE_R5C_PARTIAL_STRIPE, /* in r5c cache (to-be/being handled or
* in conf->r5c_partial_stripe_list)
*/
STRIPE_R5C_FULL_STRIPE, /* in r5c cache (to-be/being handled or
* in conf->r5c_full_stripe_list)
*/
STRIPE_R5C_PREFLUSH, /* need to flush journal device */
}; };
#define STRIPE_EXPAND_SYNC_FLAGS \ #define STRIPE_EXPAND_SYNC_FLAGS \
@ -408,8 +440,86 @@ enum {
struct disk_info { struct disk_info {
struct md_rdev *rdev, *replacement; struct md_rdev *rdev, *replacement;
struct page *extra_page; /* extra page to use in prexor */
}; };
/*
* Stripe cache
*/
#define NR_STRIPES 256
#define STRIPE_SIZE PAGE_SIZE
#define STRIPE_SHIFT (PAGE_SHIFT - 9)
#define STRIPE_SECTORS (STRIPE_SIZE>>9)
#define IO_THRESHOLD 1
#define BYPASS_THRESHOLD 1
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
#define MAX_STRIPE_BATCH 8
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
* order without overlap. There may be several bio's per stripe+device, and
* a bio could span several devices.
* When walking this list for a particular stripe+device, we must never proceed
* beyond a bio that extends past this device, as the next bio might no longer
* be valid.
* This function is used to determine the 'next' bio in the list, given the
* sector of the current stripe+device
*/
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
int sectors = bio_sectors(bio);
if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
return bio->bi_next;
else
return NULL;
}
/*
* We maintain a biased count of active stripes in the bottom 16 bits of
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
*/
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
return (atomic_read(segments) >> 16) & 0xffff;
}
static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
return atomic_sub_return(1, segments) & 0xffff;
}
static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
atomic_inc(segments);
}
static inline void raid5_set_bi_processed_stripes(struct bio *bio,
unsigned int cnt)
{
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
int old, new;
do {
old = atomic_read(segments);
new = (old & 0xffff) | (cnt << 16);
} while (atomic_cmpxchg(segments, old, new) != old);
}
static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
atomic_set(segments, cnt);
}
/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64. /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
* This is because we sometimes take all the spinlocks * This is because we sometimes take all the spinlocks
* and creating that much locking depth can cause * and creating that much locking depth can cause
@ -432,6 +542,30 @@ struct r5worker_group {
int stripes_cnt; int stripes_cnt;
}; };
enum r5_cache_state {
R5_INACTIVE_BLOCKED, /* release of inactive stripes blocked,
* waiting for 25% to be free
*/
R5_ALLOC_MORE, /* It might help to allocate another
* stripe.
*/
R5_DID_ALLOC, /* A stripe was allocated, don't allocate
* more until at least one has been
* released. This avoids flooding
* the cache.
*/
R5C_LOG_TIGHT, /* log device space tight, need to
* prioritize stripes at last_checkpoint
*/
R5C_LOG_CRITICAL, /* log device is running out of space,
* only process stripes that are already
* occupying the log
*/
R5C_EXTRA_PAGE_IN_USE, /* a stripe is using disk_info.extra_page
* for prexor
*/
};
struct r5conf { struct r5conf {
struct hlist_head *stripe_hashtbl; struct hlist_head *stripe_hashtbl;
/* only protect corresponding hash list and inactive_list */ /* only protect corresponding hash list and inactive_list */
@ -519,23 +653,18 @@ struct r5conf {
*/ */
atomic_t active_stripes; atomic_t active_stripes;
struct list_head inactive_list[NR_STRIPE_HASH_LOCKS]; struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
atomic_t r5c_cached_full_stripes;
struct list_head r5c_full_stripe_list;
atomic_t r5c_cached_partial_stripes;
struct list_head r5c_partial_stripe_list;
atomic_t empty_inactive_list_nr; atomic_t empty_inactive_list_nr;
struct llist_head released_stripes; struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent; wait_queue_head_t wait_for_quiescent;
wait_queue_head_t wait_for_stripe; wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap; wait_queue_head_t wait_for_overlap;
unsigned long cache_state; unsigned long cache_state;
#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
* waiting for 25% to be free
*/
#define R5_ALLOC_MORE 2 /* It might help to allocate another
* stripe.
*/
#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
* more until at least one has been
* released. This avoids flooding
* the cache.
*/
struct shrinker shrinker; struct shrinker shrinker;
int pool_size; /* number of disks in stripeheads in pool */ int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock; spinlock_t device_lock;
@ -633,4 +762,23 @@ extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio); extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
extern void r5l_quiesce(struct r5l_log *log, int state); extern void r5l_quiesce(struct r5l_log *log, int state);
extern bool r5l_log_disk_error(struct r5conf *conf); extern bool r5l_log_disk_error(struct r5conf *conf);
extern bool r5c_is_writeback(struct r5l_log *log);
extern int
r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
struct stripe_head_state *s, int disks);
extern void
r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
struct stripe_head_state *s);
extern void r5c_release_extra_page(struct stripe_head *sh);
extern void r5c_use_extra_page(struct stripe_head *sh);
extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
extern void r5c_handle_cached_data_endio(struct r5conf *conf,
struct stripe_head *sh, int disks, struct bio_list *return_bi);
extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
struct stripe_head_state *s);
extern void r5c_make_stripe_write_out(struct stripe_head *sh);
extern void r5c_flush_cache(struct r5conf *conf, int num);
extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
extern void r5c_check_cached_full_stripe(struct r5conf *conf);
extern struct md_sysfs_entry r5c_journal_mode;
#endif #endif

View File

@ -84,6 +84,10 @@
#define MD_DISK_CANDIDATE 5 /* disk is added as spare (local) until confirmed #define MD_DISK_CANDIDATE 5 /* disk is added as spare (local) until confirmed
* For clustered enviroments only. * For clustered enviroments only.
*/ */
#define MD_DISK_FAILFAST 10 /* Send REQ_FAILFAST if there are multiple
* devices available - and don't try to
* correct read errors.
*/
#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" is RAID1 config. #define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" is RAID1 config.
* read requests will only be sent here in * read requests will only be sent here in
@ -265,8 +269,9 @@ struct mdp_superblock_1 {
__le32 dev_number; /* permanent identifier of this device - not role in raid */ __le32 dev_number; /* permanent identifier of this device - not role in raid */
__le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */ __le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
__u8 device_uuid[16]; /* user-space setable, ignored by kernel */ __u8 device_uuid[16]; /* user-space setable, ignored by kernel */
__u8 devflags; /* per-device flags. Only one defined...*/ __u8 devflags; /* per-device flags. Only two defined...*/
#define WriteMostly1 1 /* mask for writemostly flag in above */ #define WriteMostly1 1 /* mask for writemostly flag in above */
#define FailFast1 2 /* Should avoid retries and fixups and just fail */
/* Bad block log. If there are any bad blocks the feature flag is set. /* Bad block log. If there are any bad blocks the feature flag is set.
* If offset and size are non-zero, that space is reserved and available * If offset and size are non-zero, that space is reserved and available
*/ */

View File

@ -87,9 +87,57 @@ static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end(); kernel_fpu_end();
} }
static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 32) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
}
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
}
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
/* Don't use movntdq for r/w memory area < cache line */
asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_avx2x1 = { const struct raid6_calls raid6_avx2x1 = {
raid6_avx21_gen_syndrome, raid6_avx21_gen_syndrome,
NULL, /* XOR not yet implemented */ raid6_avx21_xor_syndrome,
raid6_have_avx2, raid6_have_avx2,
"avx2x1", "avx2x1",
1 /* Has cache hints */ 1 /* Has cache hints */
@ -149,9 +197,77 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end(); kernel_fpu_end();
} }
static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 64) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
asm volatile("vpxor %ymm6,%ymm3,%ymm3");
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
asm volatile("vmovdqa %0,%%ymm7"
:: "m" (dptr[z][d+32]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
}
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
}
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
/* Don't use movntdq for r/w memory area < cache line */
asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_avx2x2 = { const struct raid6_calls raid6_avx2x2 = {
raid6_avx22_gen_syndrome, raid6_avx22_gen_syndrome,
NULL, /* XOR not yet implemented */ raid6_avx22_xor_syndrome,
raid6_have_avx2, raid6_have_avx2,
"avx2x2", "avx2x2",
1 /* Has cache hints */ 1 /* Has cache hints */
@ -242,9 +358,119 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
kernel_fpu_end(); kernel_fpu_end();
} }
static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 128) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
asm volatile("vpxor %ymm6,%ymm3,%ymm3");
asm volatile("vpxor %ymm12,%ymm10,%ymm10");
asm volatile("vpxor %ymm14,%ymm11,%ymm11");
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpxor %ymm13,%ymm13,%ymm13");
asm volatile("vpxor %ymm15,%ymm15,%ymm15");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpand %ymm0,%ymm13,%ymm13");
asm volatile("vpand %ymm0,%ymm15,%ymm15");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
asm volatile("vmovdqa %0,%%ymm7"
:: "m" (dptr[z][d+32]));
asm volatile("vmovdqa %0,%%ymm13"
:: "m" (dptr[z][d+64]));
asm volatile("vmovdqa %0,%%ymm15"
:: "m" (dptr[z][d+96]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm13,%ymm10,%ymm10");
asm volatile("vpxor %ymm15,%ymm11,%ymm11");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
}
asm volatile("prefetchnta %0" :: "m" (q[d]));
asm volatile("prefetchnta %0" :: "m" (q[d+64]));
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpxor %ymm13,%ymm13,%ymm13");
asm volatile("vpxor %ymm15,%ymm15,%ymm15");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpand %ymm0,%ymm13,%ymm13");
asm volatile("vpand %ymm0,%ymm15,%ymm15");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
}
asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_avx2x4 = { const struct raid6_calls raid6_avx2x4 = {
raid6_avx24_gen_syndrome, raid6_avx24_gen_syndrome,
NULL, /* XOR not yet implemented */ raid6_avx24_xor_syndrome,
raid6_have_avx2, raid6_have_avx2,
"avx2x4", "avx2x4",
1 /* Has cache hints */ 1 /* Has cache hints */