/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
        return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int bitmap_checkpage(struct bitmap_counts *bitmap,
                            unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
        unsigned char *mappage;

        if (page >= bitmap->pages) {
                /* This can happen if bitmap_start_sync goes beyond
                 * end-of-device while looking for a whole page.
                 * It is harmless.
                 */
                return -EINVAL;
        }

        if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
                return 0;

        if (bitmap->bp[page].map) /* page is already allocated, just return */
                return 0;

        if (!create)
                return -ENOENT;

        /* this page has not been allocated yet */

        spin_unlock_irq(&bitmap->lock);
        /* It is possible that this is being called inside a
         * prepare_to_wait/finish_wait loop from raid5.c:make_request().
         * In general it is not permitted to sleep in that context as it
         * can cause the loop to spin freely.
         * That doesn't apply here as we can only reach this point
         * once with any loop.
         * When this function completes, either bp[page].map or
         * bp[page].hijacked will be set.  In either case, this function
         * will abort before getting to this point again.  So there is
         * no risk of a free-spin, and so it is safe to assert
         * that sleeping here is allowed.
         */
        sched_annotate_sleep();
        mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
        spin_lock_irq(&bitmap->lock);

        if (mappage == NULL) {
                pr_debug("md/bitmap: map page allocation failed, hijacking\n");
                /* We don't support hijack for cluster raid */
                if (no_hijack)
                        return -ENOMEM;
                /* failed - set the hijacked flag so that we can use the
                 * pointer as a counter */
                if (!bitmap->bp[page].map)
                        bitmap->bp[page].hijacked = 1;
        } else if (bitmap->bp[page].map ||
                   bitmap->bp[page].hijacked) {
                /* somebody beat us to getting the page */
                kfree(mappage);
        } else {
                /* no page was in place and we have one, so install it */
                bitmap->bp[page].map = mappage;
                bitmap->missing_pages--;
        }
        return 0;
}

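/*
 * Worked sketch of the hijack fallback (not spelled out in the original
 * comments): once bp[page].hijacked is set, the bp[page].map pointer field
 * itself is reinterpreted as storage for two bitmap_counter_t values, one
 * shared by each half of the page's chunks; bitmap_get_counter() resolves
 * a hijacked page that way.  Resolution degrades, but writes can still
 * proceed under memory pressure instead of failing outright.
 */
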
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
        char *ptr;

        if (bitmap->bp[page].count) /* page is still busy */
                return;

        /* page is no longer in use, it can be released */

        if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
                bitmap->bp[page].hijacked = 0;
                bitmap->bp[page].map = NULL;
        } else {
                /* normal case, free the page */
                ptr = bitmap->bp[page].map;
                bitmap->bp[page].map = NULL;
                bitmap->missing_pages++;
                kfree(ptr);
        }
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
                        struct page *page,
                        unsigned long index, int size)
{
        /* choose a good rdev and read the page from there */

        struct md_rdev *rdev;
        sector_t target;

        rdev_for_each(rdev, mddev) {
                if (! test_bit(In_sync, &rdev->flags)
                    || test_bit(Faulty, &rdev->flags)
                    || test_bit(Bitmap_sync, &rdev->flags))
                        continue;

                target = offset + index * (PAGE_SIZE/512);

                if (sync_page_io(rdev, target,
                                 roundup(size, bdev_logical_block_size(rdev->bdev)),
                                 page, REQ_OP_READ, 0, true)) {
                        page->index = index;
                        return 0;
                }
        }
        return -EIO;
}

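/*
 * Example of the target arithmetic above, assuming 4K pages: with the
 * bitmap at 'offset' 8 sectors past each superblock and 'index' 2,
 * target = 8 + 2 * (4096/512) = 24 sectors from rdev->sb_start (the
 * final 'true' argument makes sync_page_io treat this as a metadata
 * read relative to the superblock), rounded up to the device's logical
 * block size.
 */
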
static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
        /* Iterate the disks of an mddev, using rcu to protect access to the
         * linked list, and raising the refcount of devices we return to ensure
         * they don't disappear while in use.
         * As devices are only added or removed when raid_disk is < 0 and
         * nr_pending is 0 and In_sync is clear, the entries we return will
         * still be in the same position on the list when we re-enter
         * list_for_each_entry_continue_rcu.
         *
         * Note that if entered with 'rdev == NULL' to start at the
         * beginning, we temporarily assign 'rdev' to an address which
         * isn't really an rdev, but which can be used by
         * list_for_each_entry_continue_rcu() to find the first entry.
         */
        rcu_read_lock();
        if (rdev == NULL)
                /* start at the beginning */
                rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
        else {
                /* release the previous rdev and start from there. */
                rdev_dec_pending(rdev, mddev);
        }
        list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
                        /* this is a usable device */
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        return rdev;
                }
        }
        rcu_read_unlock();
        return NULL;
}

static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
        struct md_rdev *rdev;
        struct block_device *bdev;
        struct mddev *mddev = bitmap->mddev;
        struct bitmap_storage *store = &bitmap->storage;

restart:
        rdev = NULL;
        while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
                int size = PAGE_SIZE;
                loff_t offset = mddev->bitmap_info.offset;

                bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

                if (page->index == store->file_pages-1) {
                        int last_page_size = store->bytes & (PAGE_SIZE-1);
                        if (last_page_size == 0)
                                last_page_size = PAGE_SIZE;
                        size = roundup(last_page_size,
                                       bdev_logical_block_size(bdev));
                }
                /* Just make sure we aren't corrupting data or
                 * metadata
                 */
                if (mddev->external) {
                        /* Bitmap could be anywhere. */
                        if (rdev->sb_start + offset + (page->index
                                                       * (PAGE_SIZE/512))
                            > rdev->data_offset
                            &&
                            rdev->sb_start + offset
                            < (rdev->data_offset + mddev->dev_sectors
                               + (PAGE_SIZE/512)))
                                goto bad_alignment;
                } else if (offset < 0) {
                        /* DATA  BITMAP METADATA  */
                        if (offset
                            + (long)(page->index * (PAGE_SIZE/512))
                            + size/512 > 0)
                                /* bitmap runs in to metadata */
                                goto bad_alignment;
                        if (rdev->data_offset + mddev->dev_sectors
                            > rdev->sb_start + offset)
                                /* data runs in to bitmap */
                                goto bad_alignment;
                } else if (rdev->sb_start < rdev->data_offset) {
                        /* METADATA BITMAP DATA */
                        if (rdev->sb_start
                            + offset
                            + page->index*(PAGE_SIZE/512) + size/512
                            > rdev->data_offset)
                                /* bitmap runs in to data */
                                goto bad_alignment;
                } else {
                        /* DATA METADATA BITMAP - no problems */
                }
                md_super_write(mddev, rdev,
                               rdev->sb_start + offset
                               + page->index * (PAGE_SIZE/512),
                               size,
                               page);
        }

        if (wait && md_super_wait(mddev) < 0)
                goto restart;
        return 0;

 bad_alignment:
        return -EINVAL;
}

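/*
 * Worked example of the "offset < 0" (DATA BITMAP METADATA) check above,
 * assuming 4K pages: with offset = -16 sectors and size = 4096, page 0
 * ends at -16 + 0 + 8 = -8 and page 1 at -16 + 8 + 8 = 0, both before the
 * superblock; page 2 would end at +8, overlapping the metadata, so it
 * takes the bad_alignment exit instead of corrupting it.
 */
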
static void bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
        struct buffer_head *bh;

        if (bitmap->storage.file == NULL) {
                switch (write_sb_page(bitmap, page, wait)) {
                case -EINVAL:
                        set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
                }
        } else {

                bh = page_buffers(page);

                while (bh && bh->b_blocknr) {
                        atomic_inc(&bitmap->pending_writes);
                        set_buffer_locked(bh);
                        set_buffer_mapped(bh);
                        submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
                        bh = bh->b_this_page;
                }

                if (wait)
                        wait_event(bitmap->write_wait,
                                   atomic_read(&bitmap->pending_writes)==0);
        }
        if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
                bitmap_file_kick(bitmap);
}

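/*
 * The write loop above stops at the first buffer with b_blocknr == 0:
 * read_page() (below) leaves b_blocknr at zero for blocks beyond the
 * last byte of the bitmap, so a zero block number acts as a sentinel
 * and those trailing buffers are never submitted for writeback.
 */
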
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
        struct bitmap *bitmap = bh->b_private;

        if (!uptodate)
                set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
        if (atomic_dec_and_test(&bitmap->pending_writes))
                wake_up(&bitmap->write_wait);
}

/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        put_page(page);
}

static void free_buffers(struct page *page)
{
        struct buffer_head *bh;

        if (!PagePrivate(page))
                return;

        bh = page_buffers(page);
        while (bh) {
                struct buffer_head *next = bh->b_this_page;
                free_buffer_head(bh);
                bh = next;
        }
        __clear_page_buffers(page);
        put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
                     struct bitmap *bitmap,
                     unsigned long count,
                     struct page *page)
{
        int ret = 0;
        struct inode *inode = file_inode(file);
        struct buffer_head *bh;
        sector_t block;

        pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
                 (unsigned long long)index << PAGE_SHIFT);

        bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
        if (!bh) {
                ret = -ENOMEM;
                goto out;
        }
        attach_page_buffers(page, bh);
        block = index << (PAGE_SHIFT - inode->i_blkbits);
        while (bh) {
                if (count == 0)
                        bh->b_blocknr = 0;
                else {
                        bh->b_blocknr = bmap(inode, block);
                        if (bh->b_blocknr == 0) {
                                /* Cannot use this file! */
                                ret = -EINVAL;
                                goto out;
                        }
                        bh->b_bdev = inode->i_sb->s_bdev;
                        if (count < (1<<inode->i_blkbits))
                                count = 0;
                        else
                                count -= (1<<inode->i_blkbits);

                        bh->b_end_io = end_bitmap_write;
                        bh->b_private = bitmap;
                        atomic_inc(&bitmap->pending_writes);
                        set_buffer_locked(bh);
                        set_buffer_mapped(bh);
                        submit_bh(REQ_OP_READ, 0, bh);
                }
                block++;
                bh = bh->b_this_page;
        }
        page->index = index;

        wait_event(bitmap->write_wait,
                   atomic_read(&bitmap->pending_writes)==0);
        if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
                ret = -EIO;
out:
        if (ret)
                pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
                       (int)PAGE_SIZE,
                       (unsigned long long)index << PAGE_SHIFT,
                       ret);
        return ret;
}

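/*
 * Example of the block mapping above, assuming a filesystem with 1K
 * blocks (i_blkbits == 10) and 4K pages: page index 3 covers file blocks
 * 12..15 (3 << 2), so four buffer_heads are attached and each b_blocknr
 * is filled in via bmap(); any blocks past 'count' keep b_blocknr == 0.
 */
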
/*
 * bitmap file superblock operations
 */

/*
 * bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * bitmap_daemon_work(), have completed.
 */
static void bitmap_wait_writes(struct bitmap *bitmap)
{
        if (bitmap->storage.file)
                wait_event(bitmap->write_wait,
                           atomic_read(&bitmap->pending_writes)==0);
        else
                /* Note that we ignore the return value.  The writes
                 * might have failed, but that would just mean that
                 * some bits which should be cleared haven't been,
                 * which is safe.  The relevant bitmap blocks will
                 * probably get written again, but there is no great
                 * loss if they aren't.
                 */
                md_super_wait(bitmap->mddev);
}

/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
        bitmap_super_t *sb;

        if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
                return;
        if (bitmap->mddev->bitmap_info.external)
                return;
        if (!bitmap->storage.sb_page) /* no superblock */
                return;
        sb = kmap_atomic(bitmap->storage.sb_page);
        sb->events = cpu_to_le64(bitmap->mddev->events);
        if (bitmap->mddev->events < bitmap->events_cleared)
                /* rocking back to read-only */
                bitmap->events_cleared = bitmap->mddev->events;
        sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
        /*
         * clear BITMAP_WRITE_ERROR bit to protect against the case that
         * a bitmap write error occurred but the later writes succeeded.
         */
        sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
        /* Just in case these have been changed via sysfs: */
        sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
        sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
        /* This might have been changed by a reshape */
        sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
        sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
        sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
        sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
                                           bitmap_info.space);
        kunmap_atomic(sb);
        write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(bitmap_update_sb);

/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
        bitmap_super_t *sb;

        if (!bitmap || !bitmap->storage.sb_page)
                return;
        sb = kmap_atomic(bitmap->storage.sb_page);
        pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
        pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
        pr_debug("       version: %d\n", le32_to_cpu(sb->version));
        pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
                 le32_to_cpu(*(__u32 *)(sb->uuid+0)),
                 le32_to_cpu(*(__u32 *)(sb->uuid+4)),
                 le32_to_cpu(*(__u32 *)(sb->uuid+8)),
                 le32_to_cpu(*(__u32 *)(sb->uuid+12)));
        pr_debug("        events: %llu\n",
                 (unsigned long long) le64_to_cpu(sb->events));
        pr_debug("events cleared: %llu\n",
                 (unsigned long long) le64_to_cpu(sb->events_cleared));
        pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
        pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
        pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
        pr_debug("     sync size: %llu KB\n",
                 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
        pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
        kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
        bitmap_super_t *sb;
        unsigned long chunksize, daemon_sleep, write_behind;

        bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (bitmap->storage.sb_page == NULL)
                return -ENOMEM;
        bitmap->storage.sb_page->index = 0;

        sb = kmap_atomic(bitmap->storage.sb_page);

        sb->magic = cpu_to_le32(BITMAP_MAGIC);
        sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

        chunksize = bitmap->mddev->bitmap_info.chunksize;
        BUG_ON(!chunksize);
        if (!is_power_of_2(chunksize)) {
                kunmap_atomic(sb);
                pr_warn("bitmap chunksize not a power of 2\n");
                return -EINVAL;
        }
        sb->chunksize = cpu_to_le32(chunksize);

        daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
        if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
                pr_debug("Choosing daemon_sleep default (5 sec)\n");
                daemon_sleep = 5 * HZ;
        }
        sb->daemon_sleep = cpu_to_le32(daemon_sleep);
        bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

        /*
         * FIXME: write_behind for RAID1.  If not specified, what
         * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
         */
        write_behind = bitmap->mddev->bitmap_info.max_write_behind;
        if (write_behind > COUNTER_MAX)
                write_behind = COUNTER_MAX / 2;
        sb->write_behind = cpu_to_le32(write_behind);
        bitmap->mddev->bitmap_info.max_write_behind = write_behind;

        /* keep the array size field of the bitmap superblock up to date */
        sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

        memcpy(sb->uuid, bitmap->mddev->uuid, 16);

        set_bit(BITMAP_STALE, &bitmap->flags);
        sb->state = cpu_to_le32(bitmap->flags);
        bitmap->events_cleared = bitmap->mddev->events;
        sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
        bitmap->mddev->bitmap_info.nodes = 0;

        kunmap_atomic(sb);

        return 0;
}

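/*
 * Note on the write_behind clamp above: COUNTER_MAX is 16383 (14 usable
 * bits per counter -- see the "out of range (0 - 16383)" check in
 * bitmap_read_sb() below), so the arbitrary fallback permits up to 8191
 * outstanding write-behind requests.
 */
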
/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
        char *reason = NULL;
        bitmap_super_t *sb;
        unsigned long chunksize, daemon_sleep, write_behind;
        unsigned long long events;
        int nodes = 0;
        unsigned long sectors_reserved = 0;
        int err = -EINVAL;
        struct page *sb_page;
        loff_t offset = bitmap->mddev->bitmap_info.offset;

        if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
                chunksize = 128 * 1024 * 1024;
                daemon_sleep = 5 * HZ;
                write_behind = 0;
                set_bit(BITMAP_STALE, &bitmap->flags);
                err = 0;
                goto out_no_sb;
        }
        /* page 0 is the superblock, read it... */
        sb_page = alloc_page(GFP_KERNEL);
        if (!sb_page)
                return -ENOMEM;
        bitmap->storage.sb_page = sb_page;

re_read:
        /* If cluster_slot is set, the cluster is setup */
        if (bitmap->cluster_slot >= 0) {
                sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

                sector_div(bm_blocks,
                           bitmap->mddev->bitmap_info.chunksize >> 9);
                /* bits to bytes */
                bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
                /* to 4k blocks */
                bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
                offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
                pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
                         bitmap->cluster_slot, offset);
        }

        if (bitmap->storage.file) {
                loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
                int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

                err = read_page(bitmap->storage.file, 0,
                                bitmap, bytes, sb_page);
        } else {
                err = read_sb_page(bitmap->mddev,
                                   offset,
                                   sb_page,
                                   0, sizeof(bitmap_super_t));
        }
        if (err)
                return err;

        err = -EINVAL;
        sb = kmap_atomic(sb_page);

        chunksize = le32_to_cpu(sb->chunksize);
        daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
        write_behind = le32_to_cpu(sb->write_behind);
        sectors_reserved = le32_to_cpu(sb->sectors_reserved);
        /* Setup nodes/clustername only if bitmap version is
         * cluster-compatible
         */
        if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
                nodes = le32_to_cpu(sb->nodes);
                strlcpy(bitmap->mddev->bitmap_info.cluster_name,
                        sb->cluster_name, 64);
        }

        /* verify that the bitmap-specific fields are valid */
        if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
                reason = "bad magic";
        else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
                 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
                reason = "unrecognized superblock version";
        else if (chunksize < 512)
                reason = "bitmap chunksize too small";
        else if (!is_power_of_2(chunksize))
                reason = "bitmap chunksize not a power of 2";
        else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
                reason = "daemon sleep period out of range";
        else if (write_behind > COUNTER_MAX)
                reason = "write-behind limit out of range (0 - 16383)";
        if (reason) {
                pr_warn("%s: invalid bitmap file superblock: %s\n",
                        bmname(bitmap), reason);
                goto out;
        }

        /* keep the array size field of the bitmap superblock up to date */
        sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

        if (bitmap->mddev->persistent) {
                /*
                 * We have a persistent array superblock, so compare the
                 * bitmap's UUID and event counter to the mddev's
                 */
                if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
                        pr_warn("%s: bitmap superblock UUID mismatch\n",
                                bmname(bitmap));
                        goto out;
                }
                events = le64_to_cpu(sb->events);
                if (!nodes && (events < bitmap->mddev->events)) {
                        pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
                                bmname(bitmap), events,
                                (unsigned long long) bitmap->mddev->events);
                        set_bit(BITMAP_STALE, &bitmap->flags);
                }
        }

        /* assign fields using values from superblock */
        bitmap->flags |= le32_to_cpu(sb->state);
        if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
                set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
        bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
        strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
        err = 0;

out:
        kunmap_atomic(sb);
        /* Assigning chunksize is required for "re_read" */
        bitmap->mddev->bitmap_info.chunksize = chunksize;
        if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
                err = md_setup_cluster(bitmap->mddev, nodes);
                if (err) {
                        pr_warn("%s: Could not setup cluster service (%d)\n",
                                bmname(bitmap), err);
                        goto out_no_sb;
                }
                bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
                goto re_read;
        }

out_no_sb:
        if (test_bit(BITMAP_STALE, &bitmap->flags))
                bitmap->events_cleared = bitmap->mddev->events;
        bitmap->mddev->bitmap_info.chunksize = chunksize;
        bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
        bitmap->mddev->bitmap_info.max_write_behind = write_behind;
        bitmap->mddev->bitmap_info.nodes = nodes;
        if (bitmap->mddev->bitmap_info.space == 0 ||
            bitmap->mddev->bitmap_info.space > sectors_reserved)
                bitmap->mddev->bitmap_info.space = sectors_reserved;
        if (err) {
                bitmap_print_sb(bitmap);
                if (bitmap->cluster_slot < 0)
                        md_cluster_stop(bitmap->mddev);
        }
        return err;
}

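/*
 * Worked example of the re_read slot arithmetic above: with
 * resync_max_sectors = 2097152 (1 GiB) and a 64 KiB chunksize
 * (128 sectors per chunk), bm_blocks is 16384 chunks -> 2048 bitmap
 * bytes + the superblock -> one 4 KiB block, so each cluster slot's
 * bitmap starts (bm_blocks << 3) = 8 sectors after the previous one.
 */
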
/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set).  We do the disk I/O on the bitmap
 * file a page at a time.  There's a superblock at the start of the file.
 */

/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
                                            unsigned long chunk)
{
        if (store->sb_page)
                chunk += sizeof(bitmap_super_t) << 3;
        return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
                                             unsigned long chunk)
{
        if (store->sb_page)
                chunk += sizeof(bitmap_super_t) << 3;
        return chunk & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
                                            unsigned long chunk)
{
        if (file_page_index(store, chunk) >= store->file_pages)
                return NULL;
        return store->filemap[file_page_index(store, chunk)];
}

static int bitmap_storage_alloc(struct bitmap_storage *store,
                                unsigned long chunks, int with_super,
                                int slot_number)
{
        int pnum, offset = 0;
        unsigned long num_pages;
        unsigned long bytes;

        bytes = DIV_ROUND_UP(chunks, 8);
        if (with_super)
                bytes += sizeof(bitmap_super_t);

        num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
        offset = slot_number * num_pages;

        store->filemap = kmalloc(sizeof(struct page *)
                                 * num_pages, GFP_KERNEL);
        if (!store->filemap)
                return -ENOMEM;

        if (with_super && !store->sb_page) {
                store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
                if (store->sb_page == NULL)
                        return -ENOMEM;
        }

        pnum = 0;
        if (store->sb_page) {
                store->filemap[0] = store->sb_page;
                pnum = 1;
                store->sb_page->index = offset;
        }

        for ( ; pnum < num_pages; pnum++) {
                store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
                if (!store->filemap[pnum]) {
                        store->file_pages = pnum;
                        return -ENOMEM;
                }
                store->filemap[pnum]->index = pnum + offset;
        }
        store->file_pages = pnum;

        /* We need 4 bits per page, rounded up to a multiple
         * of sizeof(unsigned long) */
        store->filemap_attr = kzalloc(
                roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
                GFP_KERNEL);
        if (!store->filemap_attr)
                return -ENOMEM;

        store->bytes = bytes;

        return 0;
}

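/*
 * For clustered arrays each node gets its own on-disk copy of the bitmap,
 * so the page->index values assigned above are offset by
 * slot_number * num_pages; e.g. with num_pages == 3, slot 2's pages carry
 * indexes 6, 7 and 8 and land in the third copy.
 */
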
static void bitmap_file_unmap(struct bitmap_storage *store)
{
        struct page **map, *sb_page;
        int pages;
        struct file *file;

        file = store->file;
        map = store->filemap;
        pages = store->file_pages;
        sb_page = store->sb_page;

        while (pages--)
                if (map[pages] != sb_page) /* 0 is sb_page, release it below */
                        free_buffers(map[pages]);
        kfree(map);
        kfree(store->filemap_attr);

        if (sb_page)
                free_buffers(sb_page);

        if (file) {
                struct inode *inode = file_inode(file);
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
                fput(file);
        }
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
        char *path, *ptr = NULL;

        if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
                bitmap_update_sb(bitmap);

                if (bitmap->storage.file) {
                        path = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (path)
                                ptr = file_path(bitmap->storage.file,
                                                path, PAGE_SIZE);

                        pr_warn("%s: kicking failed bitmap file %s from array!\n",
                                bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

                        kfree(path);
                } else
                        pr_warn("%s: disabling internal bitmap due to errors\n",
                                bmname(bitmap));
        }
}

enum bitmap_page_attr {
        BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
        BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
                                    * i.e. counter is 1 or 2. */
        BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

static inline void set_page_attr(struct bitmap *bitmap, int pnum,
                                 enum bitmap_page_attr attr)
{
        set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
                                   enum bitmap_page_attr attr)
{
        clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
                                 enum bitmap_page_attr attr)
{
        return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
                                           enum bitmap_page_attr attr)
{
        return test_and_clear_bit((pnum<<2) + attr,
                                  bitmap->storage.filemap_attr);
}

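/*
 * filemap_attr packs four attribute bits per filemap page at bit
 * (pnum << 2) + attr, so with the enum above page 5's DIRTY bit is
 * bit 20, PENDING is bit 21 and NEEDWRITE is bit 22 (the fourth bit
 * is currently unused).
 */
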
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
        unsigned long bit;
        struct page *page;
        void *kaddr;
        unsigned long chunk = block >> bitmap->counts.chunkshift;
        struct bitmap_storage *store = &bitmap->storage;
        unsigned long node_offset = 0;

        if (mddev_is_clustered(bitmap->mddev))
                node_offset = bitmap->cluster_slot * store->file_pages;

        page = filemap_get_page(&bitmap->storage, chunk);
        if (!page)
                return;
        bit = file_page_offset(&bitmap->storage, chunk);

        /* set the bit */
        kaddr = kmap_atomic(page);
        if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
                set_bit(bit, kaddr);
        else
                set_bit_le(bit, kaddr);
        kunmap_atomic(kaddr);
        pr_debug("set file bit %lu page %lu\n", bit, page->index);
        /* record page number so it gets flushed to disk when unplug occurs */
        set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}

static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
        unsigned long bit;
        struct page *page;
        void *paddr;
        unsigned long chunk = block >> bitmap->counts.chunkshift;
        struct bitmap_storage *store = &bitmap->storage;
        unsigned long node_offset = 0;

        if (mddev_is_clustered(bitmap->mddev))
                node_offset = bitmap->cluster_slot * store->file_pages;

        page = filemap_get_page(&bitmap->storage, chunk);
        if (!page)
                return;
        bit = file_page_offset(&bitmap->storage, chunk);
        paddr = kmap_atomic(page);
        if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
                clear_bit(bit, paddr);
        else
                clear_bit_le(bit, paddr);
        kunmap_atomic(paddr);
        if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
                set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
                bitmap->allclean = 0;
        }
}

static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
        unsigned long bit;
        struct page *page;
        void *paddr;
        unsigned long chunk = block >> bitmap->counts.chunkshift;
        int set = 0;

        page = filemap_get_page(&bitmap->storage, chunk);
        if (!page)
                return -EINVAL;
        bit = file_page_offset(&bitmap->storage, chunk);
        paddr = kmap_atomic(page);
        if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
                set = test_bit(bit, paddr);
        else
                set = test_bit_le(bit, paddr);
        kunmap_atomic(paddr);
        return set;
}

/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void bitmap_unplug(struct bitmap *bitmap)
{
        unsigned long i;
        int dirty, need_write;
        int writing = 0;

        if (!bitmap || !bitmap->storage.filemap ||
            test_bit(BITMAP_STALE, &bitmap->flags))
                return;

        /* look at each page to see if there are any set bits that need to be
         * flushed out to disk */
        for (i = 0; i < bitmap->storage.file_pages; i++) {
                if (!bitmap->storage.filemap)
                        return;
                dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
                need_write = test_and_clear_page_attr(bitmap, i,
                                                      BITMAP_PAGE_NEEDWRITE);
                if (dirty || need_write) {
                        if (!writing) {
                                bitmap_wait_writes(bitmap);
                                if (bitmap->mddev->queue)
                                        blk_add_trace_msg(bitmap->mddev->queue,
                                                          "md bitmap_unplug");
                        }
                        clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
                        write_page(bitmap, bitmap->storage.filemap[i], 0);
                        writing = 1;
                }
        }
        if (writing)
                bitmap_wait_writes(bitmap);

        if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
                bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(bitmap_unplug);

2005-09-09 23:23:44 +00:00
|
|
|
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
|
2005-06-22 00:17:14 +00:00
|
|
|
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
|
|
|
|
* the in-memory bitmap from the on-disk bitmap -- also, sets up the
|
|
|
|
* memory mapping of the bitmap file
|
|
|
|
* Special cases:
|
|
|
|
* if there's no bitmap file, or if the bitmap file had been
|
|
|
|
* previously kicked from the array, we mark all the bits as
|
|
|
|
* 1's in order to cause a full resync.
|
2005-09-09 23:23:44 +00:00
|
|
|
*
|
|
|
|
* We ignore all bits for sectors that end earlier than 'start'.
|
|
|
|
* This is used when reading an out-of-date bitmap...
|
2005-06-22 00:17:14 +00:00
|
|
|
*/
|
2005-09-09 23:23:44 +00:00
|
|
|
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
2014-06-06 16:50:56 +00:00
|
|
|
unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
|
2012-05-22 03:55:08 +00:00
|
|
|
struct page *page = NULL;
|
2012-05-22 03:55:12 +00:00
|
|
|
unsigned long bit_cnt = 0;
|
2005-06-22 00:17:14 +00:00
|
|
|
struct file *file;
|
2012-05-22 03:55:12 +00:00
|
|
|
unsigned long offset;
|
2005-06-22 00:17:14 +00:00
|
|
|
int outofdate;
|
|
|
|
int ret = -ENOSPC;
|
2006-01-06 08:20:34 +00:00
|
|
|
void *paddr;
|
2012-05-22 03:55:10 +00:00
|
|
|
struct bitmap_storage *store = &bitmap->storage;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
chunks = bitmap->counts.chunks;
|
2012-05-22 03:55:10 +00:00
|
|
|
file = store->file;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2012-05-22 03:55:08 +00:00
|
|
|
if (!file && !bitmap->mddev->bitmap_info.offset) {
|
|
|
|
/* No permanent bitmap - fill with '1s'. */
|
2012-05-22 03:55:10 +00:00
|
|
|
store->filemap = NULL;
|
|
|
|
store->file_pages = 0;
|
2012-05-22 03:55:08 +00:00
|
|
|
for (i = 0; i < chunks ; i++) {
|
|
|
|
/* if the disk bit is set, set the memory bit */
|
2012-05-22 03:55:24 +00:00
|
|
|
int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
|
2012-05-22 03:55:08 +00:00
|
|
|
>= start);
|
|
|
|
bitmap_set_memory_bits(bitmap,
|
2012-05-22 03:55:24 +00:00
|
|
|
(sector_t)i << bitmap->counts.chunkshift,
|
2012-05-22 03:55:08 +00:00
|
|
|
needed);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2012-05-22 03:55:15 +00:00
|
|
|
outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
|
2005-06-22 00:17:14 +00:00
|
|
|
if (outofdate)
|
2016-11-02 03:16:49 +00:00
|
|
|
pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2012-05-22 03:55:12 +00:00
|
|
|
if (file && i_size_read(file->f_mapping->host) < store->bytes) {
|
2016-11-02 03:16:49 +00:00
|
|
|
pr_warn("%s: bitmap file too short %lu < %lu\n",
|
|
|
|
bmname(bitmap),
|
|
|
|
(unsigned long) i_size_read(file->f_mapping->host),
|
|
|
|
store->bytes);
|
2007-07-17 11:06:13 +00:00
|
|
|
goto err;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
2005-06-22 00:17:17 +00:00
|
|
|
|
2012-05-22 03:55:12 +00:00
|
|
|
oldindex = ~0L;
|
2012-05-22 03:55:08 +00:00
|
|
|
offset = 0;
|
2012-05-22 03:55:12 +00:00
|
|
|
if (!bitmap->mddev->bitmap_info.external)
|
2012-05-22 03:55:08 +00:00
|
|
|
offset = sizeof(bitmap_super_t);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2014-06-06 16:50:56 +00:00
|
|
|
if (mddev_is_clustered(bitmap->mddev))
|
|
|
|
node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
|
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
for (i = 0; i < chunks; i++) {
|
2005-11-09 05:39:32 +00:00
|
|
|
int b;
|
2012-05-22 03:55:10 +00:00
|
|
|
index = file_page_index(&bitmap->storage, i);
|
|
|
|
bit = file_page_offset(&bitmap->storage, i);
|
2005-06-22 00:17:14 +00:00
|
|
|
if (index != oldindex) { /* this is a new page, read it in */
|
2006-06-26 07:27:48 +00:00
|
|
|
int count;
|
2005-06-22 00:17:14 +00:00
|
|
|
/* unmap the old page, we're done with it */
|
2012-05-22 03:55:12 +00:00
|
|
|
if (index == store->file_pages-1)
|
|
|
|
count = store->bytes - index * PAGE_SIZE;
|
2006-06-26 07:27:48 +00:00
|
|
|
else
|
|
|
|
count = PAGE_SIZE;
|
2012-05-22 03:55:10 +00:00
|
|
|
page = store->filemap[index];
|
2012-05-22 03:55:08 +00:00
|
|
|
if (file)
|
|
|
|
ret = read_page(file, index, bitmap,
|
|
|
|
count, page);
|
|
|
|
else
|
|
|
|
ret = read_sb_page(
|
|
|
|
bitmap->mddev,
|
|
|
|
bitmap->mddev->bitmap_info.offset,
|
|
|
|
page,
|
2014-06-06 16:50:56 +00:00
|
|
|
index + node_offset, count);
|
2012-05-22 03:55:08 +00:00
|
|
|
|
|
|
|
if (ret)
|
2007-07-17 11:06:13 +00:00
|
|
|
goto err;
|
2005-06-22 00:17:27 +00:00
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
oldindex = index;
|
|
|
|
|
|
|
|
if (outofdate) {
|
|
|
|
/*
|
|
|
|
* if bitmap is out of date, dirty the
|
2010-06-01 09:37:31 +00:00
|
|
|
* whole page and write it out
|
2005-06-22 00:17:14 +00:00
|
|
|
*/
|
2011-11-28 05:25:44 +00:00
|
|
|
paddr = kmap_atomic(page);
|
2006-01-06 08:20:34 +00:00
|
|
|
memset(paddr + offset, 0xff,
|
2005-09-09 23:23:44 +00:00
|
|
|
PAGE_SIZE - offset);
|
2011-11-28 05:25:44 +00:00
|
|
|
kunmap_atomic(paddr);
|
2007-07-17 11:06:13 +00:00
|
|
|
write_page(bitmap, page, 1);
|
|
|
|
|
|
|
|
ret = -EIO;
|
2012-05-22 03:55:15 +00:00
|
|
|
if (test_bit(BITMAP_WRITE_ERROR,
|
|
|
|
&bitmap->flags))
|
2007-07-17 11:06:13 +00:00
|
|
|
goto err;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
|
|
|
}
|
2011-11-28 05:25:44 +00:00
|
|
|
paddr = kmap_atomic(page);
|
2012-05-22 03:55:15 +00:00
|
|
|
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
|
2006-01-06 08:20:34 +00:00
|
|
|
b = test_bit(bit, paddr);
|
2005-11-09 05:39:32 +00:00
|
|
|
else
|
2011-03-23 23:42:13 +00:00
|
|
|
b = test_bit_le(bit, paddr);
|
2011-11-28 05:25:44 +00:00
|
|
|
kunmap_atomic(paddr);
|
2005-11-09 05:39:32 +00:00
|
|
|
if (b) {
|
2005-06-22 00:17:14 +00:00
|
|
|
/* if the disk bit is set, set the memory bit */
|
2012-05-22 03:55:24 +00:00
|
|
|
int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
|
2009-05-07 02:49:06 +00:00
|
|
|
>= start);
|
|
|
|
bitmap_set_memory_bits(bitmap,
|
2012-05-22 03:55:24 +00:00
|
|
|
(sector_t)i << bitmap->counts.chunkshift,
|
2009-05-07 02:49:06 +00:00
|
|
|
needed);
|
2005-06-22 00:17:14 +00:00
|
|
|
bit_cnt++;
|
|
|
|
}
|
2012-05-22 03:55:08 +00:00
|
|
|
offset = 0;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
|
|
|
|
2016-11-02 03:16:49 +00:00
|
|
|
pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
|
|
|
|
bmname(bitmap), store->file_pages,
|
|
|
|
bit_cnt, chunks);
|
2007-07-17 11:06:13 +00:00
|
|
|
|
|
|
|
return 0;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2007-07-17 11:06:13 +00:00
|
|
|
err:
|
2016-11-02 03:16:49 +00:00
|
|
|
pr_warn("%s: bitmap initialisation failed: %d\n",
|
|
|
|
bmname(bitmap), ret);
|
2005-06-22 00:17:14 +00:00
|
|
|
return ret;
|
|
|
|
}
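/*
 * Illustrative sketch (not part of the driver): how the loop above
 * locates a chunk's bit on disk.  One bit per chunk; for internal
 * bitmaps the bit stream starts sizeof(bitmap_super_t) bytes into the
 * file, which is what file_page_index()/file_page_offset() account
 * for.  A minimal restatement of that arithmetic, assuming the
 * superblock (when present) occupies the start of page 0:
 */
static inline void example_chunk_location(unsigned long chunk, int has_sb,
					  unsigned long *page_index,
					  unsigned long *bit_in_page)
{
	unsigned long bit = chunk;

	if (has_sb)				/* skip embedded superblock */
		bit += sizeof(bitmap_super_t) << 3;
	*page_index = bit >> (PAGE_SHIFT + 3);	/* PAGE_SIZE * 8 bits per page */
	*bit_in_page = bit & ((PAGE_SIZE << 3) - 1);
}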
|
|
|
|
|
2005-06-22 00:17:27 +00:00
|
|
|
void bitmap_write_all(struct bitmap *bitmap)
|
|
|
|
{
|
|
|
|
/* We don't actually write all bitmap blocks here,
|
|
|
|
* just flag them as needing to be written
|
|
|
|
*/
|
2006-06-26 07:27:45 +00:00
|
|
|
int i;
|
2005-06-22 00:17:27 +00:00
|
|
|
|
2012-05-22 03:55:10 +00:00
|
|
|
if (!bitmap || !bitmap->storage.filemap)
|
2012-05-22 03:55:08 +00:00
|
|
|
return;
|
2012-05-22 03:55:10 +00:00
|
|
|
if (bitmap->storage.file)
|
2012-05-22 03:55:08 +00:00
|
|
|
/* Only one copy, so nothing needed */
|
|
|
|
return;
|
|
|
|
|
2012-05-22 03:55:10 +00:00
|
|
|
for (i = 0; i < bitmap->storage.file_pages; i++)
|
2012-05-22 03:55:09 +00:00
|
|
|
set_page_attr(bitmap, i,
|
2006-06-26 07:27:45 +00:00
|
|
|
BITMAP_PAGE_NEEDWRITE);
|
2011-09-21 05:37:46 +00:00
|
|
|
bitmap->allclean = 0;
|
2005-06-22 00:17:27 +00:00
|
|
|
}
|
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
static void bitmap_count_page(struct bitmap_counts *bitmap,
|
|
|
|
sector_t offset, int inc)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
2012-03-19 01:46:41 +00:00
|
|
|
sector_t chunk = offset >> bitmap->chunkshift;
|
2005-06-22 00:17:14 +00:00
|
|
|
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
|
|
|
|
bitmap->bp[page].count += inc;
|
|
|
|
bitmap_checkfree(bitmap, page);
|
|
|
|
}
|
2012-05-22 03:55:06 +00:00
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
|
2012-05-22 03:55:06 +00:00
|
|
|
{
|
|
|
|
sector_t chunk = offset >> bitmap->chunkshift;
|
|
|
|
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
|
|
|
|
struct bitmap_page *bp = &bitmap->bp[page];
|
|
|
|
|
|
|
|
if (!bp->pending)
|
|
|
|
bp->pending = 1;
|
|
|
|
}
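/*
 * Illustrative sketch (not part of the driver): the two-level lookup
 * shared by the helpers above.  A sector offset reduces to a chunk,
 * and a chunk to a page of counters; each struct bitmap_page holds
 * PAGE_COUNTER_RATIO counters (2048 with 4KiB pages and 16-bit
 * counters), so with 64KiB chunks one bp[] entry covers
 * 2048 * 64KiB = 128MiB of the array.
 */
static inline unsigned long example_bp_index(struct bitmap_counts *counts,
					     sector_t offset)
{
	sector_t chunk = offset >> counts->chunkshift;	/* sectors -> chunk */

	return chunk >> PAGE_COUNTER_SHIFT;		/* chunk -> bp[] slot */
}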
|
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t offset, sector_t *blocks,
|
2005-06-22 00:17:14 +00:00
|
|
|
int create);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* bitmap daemon -- periodically wakes up to clean bits and flush pages
|
|
|
|
* out to disk
|
|
|
|
*/
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
void bitmap_daemon_work(struct mddev *mddev)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
2009-12-14 01:49:46 +00:00
|
|
|
struct bitmap *bitmap;
|
2005-06-22 00:17:22 +00:00
|
|
|
unsigned long j;
|
2012-05-22 03:55:06 +00:00
|
|
|
unsigned long nextpage;
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t blocks;
|
2012-05-22 03:55:24 +00:00
|
|
|
struct bitmap_counts *counts;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2009-12-14 01:49:46 +00:00
|
|
|
/* Use a mutex to guard daemon_work against
|
|
|
|
* bitmap_destroy.
|
|
|
|
*/
|
2009-12-14 01:49:52 +00:00
|
|
|
mutex_lock(&mddev->bitmap_info.mutex);
|
2009-12-14 01:49:46 +00:00
|
|
|
bitmap = mddev->bitmap;
|
|
|
|
if (bitmap == NULL) {
|
2009-12-14 01:49:52 +00:00
|
|
|
mutex_unlock(&mddev->bitmap_info.mutex);
|
2007-07-17 11:06:13 +00:00
|
|
|
return;
|
2009-12-14 01:49:46 +00:00
|
|
|
}
|
2009-12-14 01:49:53 +00:00
|
|
|
if (time_before(jiffies, bitmap->daemon_lastrun
|
2011-12-22 23:17:50 +00:00
|
|
|
+ mddev->bitmap_info.daemon_sleep))
|
2008-03-10 18:43:48 +00:00
|
|
|
goto done;
|
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
bitmap->daemon_lastrun = jiffies;
|
2008-03-04 22:29:30 +00:00
|
|
|
if (bitmap->allclean) {
|
2011-12-22 23:17:50 +00:00
|
|
|
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
|
2009-12-14 01:49:46 +00:00
|
|
|
goto done;
|
2008-03-04 22:29:30 +00:00
|
|
|
}
|
|
|
|
bitmap->allclean = 1;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2016-11-14 05:30:21 +00:00
|
|
|
if (bitmap->mddev->queue)
|
|
|
|
blk_add_trace_msg(bitmap->mddev->queue,
|
|
|
|
"md bitmap_daemon_work");
|
|
|
|
|
2012-05-22 03:55:06 +00:00
|
|
|
/* Any file-page which is PENDING now needs to be written.
|
|
|
|
* So set NEEDWRITE now, then after we make any last-minute changes
|
|
|
|
* we will write it.
|
|
|
|
*/
|
2012-05-22 03:55:10 +00:00
|
|
|
for (j = 0; j < bitmap->storage.file_pages; j++)
|
2012-05-22 03:55:22 +00:00
|
|
|
if (test_and_clear_page_attr(bitmap, j,
|
|
|
|
BITMAP_PAGE_PENDING))
|
2012-05-22 03:55:09 +00:00
|
|
|
set_page_attr(bitmap, j,
|
2012-05-22 03:55:06 +00:00
|
|
|
BITMAP_PAGE_NEEDWRITE);
|
|
|
|
|
|
|
|
if (bitmap->need_sync &&
|
|
|
|
mddev->bitmap_info.external == 0) {
|
|
|
|
/* Arrange for superblock update as well as
|
|
|
|
* other changes */
|
|
|
|
bitmap_super_t *sb;
|
|
|
|
bitmap->need_sync = 0;
|
2012-05-22 03:55:10 +00:00
|
|
|
if (bitmap->storage.filemap) {
|
|
|
|
sb = kmap_atomic(bitmap->storage.sb_page);
|
2012-05-22 03:55:08 +00:00
|
|
|
sb->events_cleared =
|
|
|
|
cpu_to_le64(bitmap->events_cleared);
|
|
|
|
kunmap_atomic(sb);
|
2012-05-22 03:55:09 +00:00
|
|
|
set_page_attr(bitmap, 0,
|
2012-05-22 03:55:08 +00:00
|
|
|
BITMAP_PAGE_NEEDWRITE);
|
|
|
|
}
|
2012-05-22 03:55:06 +00:00
|
|
|
}
|
|
|
|
/* Now look at the bitmap counters and if any are '2' or '1',
|
|
|
|
* decrement and handle accordingly.
|
|
|
|
*/
|
2012-05-22 03:55:24 +00:00
|
|
|
counts = &bitmap->counts;
|
|
|
|
spin_lock_irq(&counts->lock);
|
2012-05-22 03:55:06 +00:00
|
|
|
nextpage = 0;
|
2012-05-22 03:55:24 +00:00
|
|
|
for (j = 0; j < counts->chunks; j++) {
|
2005-06-22 00:17:14 +00:00
|
|
|
bitmap_counter_t *bmc;
|
2012-05-22 03:55:24 +00:00
|
|
|
sector_t block = (sector_t)j << counts->chunkshift;
|
2011-07-27 01:00:37 +00:00
|
|
|
|
2012-05-22 03:55:06 +00:00
|
|
|
if (j == nextpage) {
|
|
|
|
nextpage += PAGE_COUNTER_RATIO;
|
2012-05-22 03:55:24 +00:00
|
|
|
if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
|
2012-05-22 03:55:06 +00:00
|
|
|
j |= PAGE_COUNTER_MASK;
|
2005-06-22 00:17:22 +00:00
|
|
|
continue;
|
|
|
|
}
|
2012-05-22 03:55:24 +00:00
|
|
|
counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
2012-05-22 03:55:24 +00:00
|
|
|
bmc = bitmap_get_counter(counts,
|
2012-05-22 03:55:08 +00:00
|
|
|
block,
|
2009-05-07 02:49:06 +00:00
|
|
|
&blocks, 0);
|
2012-05-22 03:55:06 +00:00
|
|
|
|
|
|
|
if (!bmc) {
|
2011-09-21 05:37:46 +00:00
|
|
|
j |= PAGE_COUNTER_MASK;
|
2012-05-22 03:55:06 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (*bmc == 1 && !bitmap->need_sync) {
|
|
|
|
/* We can clear the bit */
|
|
|
|
*bmc = 0;
|
2012-05-22 03:55:24 +00:00
|
|
|
bitmap_count_page(counts, block, -1);
|
2012-05-22 03:55:08 +00:00
|
|
|
bitmap_file_clear_bit(bitmap, block);
|
2012-05-22 03:55:06 +00:00
|
|
|
} else if (*bmc && *bmc <= 2) {
|
|
|
|
*bmc = 1;
|
2012-05-22 03:55:24 +00:00
|
|
|
bitmap_set_pending(counts, block);
|
2012-05-22 03:55:06 +00:00
|
|
|
bitmap->allclean = 0;
|
2011-09-21 05:37:46 +00:00
|
|
|
}
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irq(&counts->lock);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2016-11-04 05:46:03 +00:00
|
|
|
bitmap_wait_writes(bitmap);
|
2012-05-22 03:55:06 +00:00
|
|
|
/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
|
|
|
|
* DIRTY pages need to be written by bitmap_unplug so it can wait
|
|
|
|
* for them.
|
|
|
|
* If we find any DIRTY page we stop there and let bitmap_unplug
|
|
|
|
* handle all the rest. This is important in the case where
|
|
|
|
* the first block holds the superblock and it has been updated.
|
|
|
|
* We mustn't write any other blocks before the superblock.
|
|
|
|
*/
|
2012-05-22 03:55:21 +00:00
|
|
|
for (j = 0;
|
|
|
|
j < bitmap->storage.file_pages
|
|
|
|
&& !test_bit(BITMAP_STALE, &bitmap->flags);
|
|
|
|
j++) {
|
2012-05-22 03:55:09 +00:00
|
|
|
if (test_page_attr(bitmap, j,
|
2012-05-22 03:55:06 +00:00
|
|
|
BITMAP_PAGE_DIRTY))
|
|
|
|
/* bitmap_unplug will handle the rest */
|
|
|
|
break;
|
2012-05-22 03:55:22 +00:00
|
|
|
if (test_and_clear_page_attr(bitmap, j,
|
|
|
|
BITMAP_PAGE_NEEDWRITE)) {
|
2012-05-22 03:55:10 +00:00
|
|
|
write_page(bitmap, bitmap->storage.filemap[j], 0);
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-03-10 18:43:48 +00:00
|
|
|
done:
|
2008-03-04 22:29:30 +00:00
|
|
|
if (bitmap->allclean == 0)
|
2011-12-22 23:17:50 +00:00
|
|
|
mddev->thread->timeout =
|
|
|
|
mddev->bitmap_info.daemon_sleep;
|
2009-12-14 01:49:52 +00:00
|
|
|
mutex_unlock(&mddev->bitmap_info.mutex);
|
2005-06-22 00:17:14 +00:00
|
|
|
}
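/*
 * Illustrative sketch (not part of the driver): the lazy-clear ladder a
 * single counter descends across successive bitmap_daemon_work() passes.
 * A raw value of 1 or 2 (no flag bits, no writes in flight) drops one
 * level per pass, so a disk bit only clears after the region has been
 * idle for a full daemon period:
 *
 *   pass 1: 2 -> 1                    (recently active, keep disk bit)
 *   pass 2: 1 -> 0, file bit cleared  (page goes PENDING/NEEDWRITE)
 *   pass 3: NEEDWRITE page written    (bit finally clears on disk)
 */
static inline bitmap_counter_t example_daemon_step(bitmap_counter_t c)
{
	if (c == 1)
		return 0;	/* idle for a full period: clear the bit */
	if (c == 2)
		return 1;	/* one more period of grace */
	return c;		/* flags set or writes in flight: leave it */
}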
|
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t offset, sector_t *blocks,
|
2005-06-22 00:17:14 +00:00
|
|
|
int create)
|
2009-09-23 08:06:44 +00:00
|
|
|
__releases(bitmap->lock)
|
|
|
|
__acquires(bitmap->lock)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
|
|
|
/* If 'create', we might release the lock and reclaim it.
|
|
|
|
* The lock must have been taken with interrupts enabled.
|
|
|
|
* If !create, we don't release the lock.
|
|
|
|
*/
|
2012-03-19 01:46:41 +00:00
|
|
|
sector_t chunk = offset >> bitmap->chunkshift;
|
2005-06-22 00:17:14 +00:00
|
|
|
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
|
|
|
|
unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
|
|
|
|
sector_t csize;
|
2010-06-01 09:37:33 +00:00
|
|
|
int err;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2016-05-02 15:50:11 +00:00
|
|
|
err = bitmap_checkpage(bitmap, page, create, 0);
|
2010-06-01 09:37:33 +00:00
|
|
|
|
|
|
|
if (bitmap->bp[page].hijacked ||
|
|
|
|
bitmap->bp[page].map == NULL)
|
2012-03-19 01:46:41 +00:00
|
|
|
csize = ((sector_t)1) << (bitmap->chunkshift +
|
2010-06-01 09:37:33 +00:00
|
|
|
PAGE_COUNTER_SHIFT - 1);
|
|
|
|
else
|
2012-03-19 01:46:41 +00:00
|
|
|
csize = ((sector_t)1) << bitmap->chunkshift;
|
2010-06-01 09:37:33 +00:00
|
|
|
*blocks = csize - (offset & (csize - 1));
|
|
|
|
|
|
|
|
if (err < 0)
|
2005-06-22 00:17:14 +00:00
|
|
|
return NULL;
|
2010-06-01 09:37:33 +00:00
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
/* now locked ... */
|
|
|
|
|
|
|
|
if (bitmap->bp[page].hijacked) { /* hijacked pointer */
|
|
|
|
/* should we use the first or second counter field
|
|
|
|
* of the hijacked pointer? */
|
|
|
|
int hi = (pageoff > PAGE_COUNTER_MASK);
|
|
|
|
return &((bitmap_counter_t *)
|
|
|
|
&bitmap->bp[page].map)[hi];
|
2010-06-01 09:37:33 +00:00
|
|
|
} else /* page is allocated */
|
2005-06-22 00:17:14 +00:00
|
|
|
return (bitmap_counter_t *)
|
|
|
|
&(bitmap->bp[page].map[pageoff]);
|
|
|
|
}
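/*
 * Illustrative sketch (not part of the driver): the "hijacked" case
 * returned above.  When allocating a page of counters failed, the idle
 * bp[page].map pointer itself is pressed into service as storage for
 * exactly two counters, each covering half of the page's normal range -
 * which is why *blocks is widened by PAGE_COUNTER_SHIFT - 1 for
 * hijacked pages.  Assuming 16-bit counters and at least a 32-bit
 * pointer:
 */
static inline bitmap_counter_t *example_hijacked_counter(struct bitmap_page *bp,
							 unsigned long pageoff)
{
	int hi = (pageoff > PAGE_COUNTER_MASK);	/* second half of the range? */

	return &((bitmap_counter_t *)&bp->map)[hi];
}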
|
|
|
|
|
2005-09-09 23:23:47 +00:00
|
|
|
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
2010-06-01 09:37:31 +00:00
|
|
|
if (!bitmap)
|
|
|
|
return 0;
|
2005-09-09 23:23:47 +00:00
|
|
|
|
|
|
|
if (behind) {
|
2010-03-08 05:02:37 +00:00
|
|
|
int bw;
|
2005-09-09 23:23:47 +00:00
|
|
|
atomic_inc(&bitmap->behind_writes);
|
2010-03-08 05:02:37 +00:00
|
|
|
bw = atomic_read(&bitmap->behind_writes);
|
|
|
|
if (bw > bitmap->behind_writes_used)
|
|
|
|
bitmap->behind_writes_used = bw;
|
|
|
|
|
2011-10-07 03:23:17 +00:00
|
|
|
pr_debug("inc write-behind count %d/%lu\n",
|
|
|
|
bw, bitmap->mddev->bitmap_info.max_write_behind);
|
2005-09-09 23:23:47 +00:00
|
|
|
}
|
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
while (sectors) {
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t blocks;
|
2005-06-22 00:17:14 +00:00
|
|
|
bitmap_counter_t *bmc;
|
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_lock_irq(&bitmap->counts.lock);
|
|
|
|
bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
|
2005-06-22 00:17:14 +00:00
|
|
|
if (!bmc) {
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irq(&bitmap->counts.lock);
|
2005-06-22 00:17:14 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-06-09 01:42:57 +00:00
|
|
|
if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
|
2007-02-08 22:20:37 +00:00
|
|
|
DEFINE_WAIT(__wait);
|
|
|
|
/* note that it is safe to do the prepare_to_wait
|
|
|
|
* after the test as long as we do it before dropping
|
|
|
|
* the spinlock.
|
|
|
|
*/
|
|
|
|
prepare_to_wait(&bitmap->overflow_wait, &__wait,
|
|
|
|
TASK_UNINTERRUPTIBLE);
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irq(&bitmap->counts.lock);
|
2012-08-01 22:33:20 +00:00
|
|
|
schedule();
|
2007-02-08 22:20:37 +00:00
|
|
|
finish_wait(&bitmap->overflow_wait, &__wait);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2010-06-01 09:37:31 +00:00
|
|
|
switch (*bmc) {
|
2005-06-22 00:17:14 +00:00
|
|
|
case 0:
|
|
|
|
bitmap_file_set_bit(bitmap, offset);
|
2012-05-22 03:55:24 +00:00
|
|
|
bitmap_count_page(&bitmap->counts, offset, 1);
|
2005-06-22 00:17:14 +00:00
|
|
|
/* fall through */
|
|
|
|
case 1:
|
|
|
|
*bmc = 2;
|
|
|
|
}
|
2007-02-08 22:20:37 +00:00
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
(*bmc)++;
|
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irq(&bitmap->counts.lock);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
|
|
|
offset += blocks;
|
|
|
|
if (sectors > blocks)
|
|
|
|
sectors -= blocks;
|
2010-06-01 09:37:31 +00:00
|
|
|
else
|
|
|
|
sectors = 0;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2010-06-01 09:37:31 +00:00
|
|
|
EXPORT_SYMBOL(bitmap_startwrite);
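/*
 * Illustrative sketch (not part of the driver): how a personality
 * brackets an I/O with the hooks above.  While a region is dirty the
 * low 14 bits of its counter hold 2 + the number of writes in flight
 * (the top two bits are the NEEDED and RESYNC flags), so the first
 * startwrite takes a counter from 0 to 3, endwrite drops it back to 2,
 * and the daemon clears it lazily later.
 */
static void example_bracketed_write(struct bitmap *bitmap, sector_t sector,
				    unsigned long nsectors, int success)
{
	bitmap_startwrite(bitmap, sector, nsectors, 0);	/* before submission */
	/* ... submit the I/O and wait for it to complete ... */
	bitmap_endwrite(bitmap, sector, nsectors, success, 0);
}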
|
2005-06-22 00:17:14 +00:00
|
|
|
|
|
|
|
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
|
2005-09-09 23:23:47 +00:00
|
|
|
int success, int behind)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
2010-06-01 09:37:31 +00:00
|
|
|
if (!bitmap)
|
|
|
|
return;
|
2005-09-09 23:23:47 +00:00
|
|
|
if (behind) {
|
2010-03-31 00:21:44 +00:00
|
|
|
if (atomic_dec_and_test(&bitmap->behind_writes))
|
|
|
|
wake_up(&bitmap->behind_wait);
|
2011-10-07 03:23:17 +00:00
|
|
|
pr_debug("dec write-behind count %d/%lu\n",
|
|
|
|
atomic_read(&bitmap->behind_writes),
|
|
|
|
bitmap->mddev->bitmap_info.max_write_behind);
|
2005-09-09 23:23:47 +00:00
|
|
|
}
|
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
while (sectors) {
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t blocks;
|
2005-06-22 00:17:14 +00:00
|
|
|
unsigned long flags;
|
|
|
|
bitmap_counter_t *bmc;
|
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_lock_irqsave(&bitmap->counts.lock, flags);
|
|
|
|
bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
|
2005-06-22 00:17:14 +00:00
|
|
|
if (!bmc) {
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
|
2005-06-22 00:17:14 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-12-22 22:57:48 +00:00
|
|
|
if (success && !bitmap->mddev->degraded &&
|
2008-06-27 22:31:22 +00:00
|
|
|
bitmap->events_cleared < bitmap->mddev->events) {
|
|
|
|
bitmap->events_cleared = bitmap->mddev->events;
|
|
|
|
bitmap->need_sync = 1;
|
2010-06-01 09:37:32 +00:00
|
|
|
sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
|
2008-06-27 22:31:22 +00:00
|
|
|
}
|
|
|
|
|
2011-06-09 01:42:57 +00:00
|
|
|
if (!success && !NEEDED(*bmc))
|
2005-06-22 00:17:14 +00:00
|
|
|
*bmc |= NEEDED_MASK;
|
|
|
|
|
2011-06-09 01:42:57 +00:00
|
|
|
if (COUNTER(*bmc) == COUNTER_MAX)
|
2007-02-08 22:20:37 +00:00
|
|
|
wake_up(&bitmap->overflow_wait);
|
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
(*bmc)--;
|
2011-09-21 05:37:46 +00:00
|
|
|
if (*bmc <= 2) {
|
2012-05-22 03:55:24 +00:00
|
|
|
bitmap_set_pending(&bitmap->counts, offset);
|
2011-09-21 05:37:46 +00:00
|
|
|
bitmap->allclean = 0;
|
|
|
|
}
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
|
2005-06-22 00:17:14 +00:00
|
|
|
offset += blocks;
|
|
|
|
if (sectors > blocks)
|
|
|
|
sectors -= blocks;
|
2010-06-01 09:37:31 +00:00
|
|
|
else
|
|
|
|
sectors = 0;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
|
|
|
}
|
2010-06-01 09:37:31 +00:00
|
|
|
EXPORT_SYMBOL(bitmap_endwrite);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2010-10-18 23:03:39 +00:00
|
|
|
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
|
2009-03-31 03:27:02 +00:00
|
|
|
int degraded)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
|
|
|
bitmap_counter_t *bmc;
|
|
|
|
int rv;
|
|
|
|
if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
|
|
|
|
*blocks = 1024;
|
|
|
|
return 1; /* always resync if no bitmap */
|
|
|
|
}
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_lock_irq(&bitmap->counts.lock);
|
|
|
|
bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
|
2005-06-22 00:17:14 +00:00
|
|
|
rv = 0;
|
|
|
|
if (bmc) {
|
|
|
|
/* locked */
|
|
|
|
if (RESYNC(*bmc))
|
|
|
|
rv = 1;
|
|
|
|
else if (NEEDED(*bmc)) {
|
|
|
|
rv = 1;
|
2005-07-15 10:56:35 +00:00
|
|
|
if (!degraded) { /* don't set/clear bits if degraded */
|
|
|
|
*bmc |= RESYNC_MASK;
|
|
|
|
*bmc &= ~NEEDED_MASK;
|
|
|
|
}
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
|
|
|
}
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irq(&bitmap->counts.lock);
|
2005-06-22 00:17:14 +00:00
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
2010-10-18 23:03:39 +00:00
|
|
|
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
|
2009-03-31 03:27:02 +00:00
|
|
|
int degraded)
|
|
|
|
{
|
|
|
|
/* bitmap_start_sync must always report on multiples of whole
|
|
|
|
* pages, otherwise resync (which is very PAGE_SIZE based) will
|
|
|
|
* get confused.
|
|
|
|
* So call __bitmap_start_sync repeatedly (if needed) until
|
|
|
|
* at least PAGE_SIZE>>9 blocks are covered.
|
|
|
|
* Return the 'or' of the result.
|
|
|
|
*/
|
|
|
|
int rv = 0;
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t blocks1;
|
2009-03-31 03:27:02 +00:00
|
|
|
|
|
|
|
*blocks = 0;
|
|
|
|
while (*blocks < (PAGE_SIZE>>9)) {
|
|
|
|
rv |= __bitmap_start_sync(bitmap, offset,
|
|
|
|
&blocks1, degraded);
|
|
|
|
offset += blocks1;
|
|
|
|
*blocks += blocks1;
|
|
|
|
}
|
|
|
|
return rv;
|
|
|
|
}
|
2010-06-01 09:37:31 +00:00
|
|
|
EXPORT_SYMBOL(bitmap_start_sync);
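/*
 * Illustrative sketch (not part of the driver): how a resync loop
 * consumes the interface above - the same pattern bitmap_load() uses
 * below.  Ask about each region in turn and advance by whatever
 * *blocks reports back:
 */
static void example_resync_scan(struct bitmap *bitmap, sector_t max_sectors)
{
	sector_t sector = 0;

	while (sector < max_sectors) {
		sector_t blocks;

		if (bitmap_start_sync(bitmap, sector, &blocks, 0))
			;	/* dirty region: personality would resync it */
		sector += blocks;
	}
}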
|
2009-03-31 03:27:02 +00:00
|
|
|
|
2010-10-18 23:03:39 +00:00
|
|
|
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
|
|
|
bitmap_counter_t *bmc;
|
|
|
|
unsigned long flags;
|
2010-06-01 09:37:31 +00:00
|
|
|
|
|
|
|
if (bitmap == NULL) {
|
2005-06-22 00:17:14 +00:00
|
|
|
*blocks = 1024;
|
|
|
|
return;
|
|
|
|
}
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_lock_irqsave(&bitmap->counts.lock, flags);
|
|
|
|
bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
|
2005-06-22 00:17:14 +00:00
|
|
|
if (bmc == NULL)
|
|
|
|
goto unlock;
|
|
|
|
/* locked */
|
|
|
|
if (RESYNC(*bmc)) {
|
|
|
|
*bmc &= ~RESYNC_MASK;
|
|
|
|
|
|
|
|
if (!NEEDED(*bmc) && aborted)
|
|
|
|
*bmc |= NEEDED_MASK;
|
|
|
|
else {
|
2011-09-21 05:37:46 +00:00
|
|
|
if (*bmc <= 2) {
|
2012-05-22 03:55:24 +00:00
|
|
|
bitmap_set_pending(&bitmap->counts, offset);
|
2011-09-21 05:37:46 +00:00
|
|
|
bitmap->allclean = 0;
|
|
|
|
}
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
unlock:
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
2010-06-01 09:37:31 +00:00
|
|
|
EXPORT_SYMBOL(bitmap_end_sync);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
|
|
|
void bitmap_close_sync(struct bitmap *bitmap)
|
|
|
|
{
|
|
|
|
/* Sync has finished, and any bitmap chunks that weren't synced
|
|
|
|
* properly have been aborted. It remains to us to clear the
|
|
|
|
* RESYNC bit wherever it is still on
|
|
|
|
*/
|
|
|
|
sector_t sector = 0;
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t blocks;
|
2008-02-06 09:39:50 +00:00
|
|
|
if (!bitmap)
|
|
|
|
return;
|
2005-06-22 00:17:14 +00:00
|
|
|
while (sector < bitmap->mddev->resync_max_sectors) {
|
|
|
|
bitmap_end_sync(bitmap, sector, &blocks, 0);
|
2008-02-06 09:39:50 +00:00
|
|
|
sector += blocks;
|
|
|
|
}
|
|
|
|
}
|
2010-06-01 09:37:31 +00:00
|
|
|
EXPORT_SYMBOL(bitmap_close_sync);
|
2008-02-06 09:39:50 +00:00
|
|
|
|
2015-08-18 22:14:42 +00:00
|
|
|
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
|
2008-02-06 09:39:50 +00:00
|
|
|
{
|
|
|
|
sector_t s = 0;
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t blocks;
|
2008-02-06 09:39:50 +00:00
|
|
|
|
|
|
|
if (!bitmap)
|
|
|
|
return;
|
|
|
|
if (sector == 0) {
|
|
|
|
bitmap->last_end_sync = jiffies;
|
|
|
|
return;
|
|
|
|
}
|
2015-08-18 22:14:42 +00:00
|
|
|
if (!force && time_before(jiffies, (bitmap->last_end_sync
|
2009-12-14 01:49:53 +00:00
|
|
|
+ bitmap->mddev->bitmap_info.daemon_sleep)))
|
2008-02-06 09:39:50 +00:00
|
|
|
return;
|
|
|
|
wait_event(bitmap->mddev->recovery_wait,
|
|
|
|
atomic_read(&bitmap->mddev->recovery_active) == 0);
|
|
|
|
|
2011-01-13 22:14:34 +00:00
|
|
|
bitmap->mddev->curr_resync_completed = sector;
|
2016-12-08 23:48:19 +00:00
|
|
|
set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
|
2012-05-22 03:55:24 +00:00
|
|
|
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
|
2008-02-06 09:39:50 +00:00
|
|
|
s = 0;
|
|
|
|
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
|
|
|
|
bitmap_end_sync(bitmap, s, &blocks, 0);
|
|
|
|
s += blocks;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
2008-02-06 09:39:50 +00:00
|
|
|
bitmap->last_end_sync = jiffies;
|
2009-04-14 06:28:34 +00:00
|
|
|
sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
2010-06-01 09:37:31 +00:00
|
|
|
EXPORT_SYMBOL(bitmap_cond_end_sync);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2016-05-02 15:50:12 +00:00
|
|
|
void bitmap_sync_with_cluster(struct mddev *mddev,
|
|
|
|
sector_t old_lo, sector_t old_hi,
|
|
|
|
sector_t new_lo, sector_t new_hi)
|
|
|
|
{
|
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
|
|
|
sector_t sector, blocks = 0;
|
|
|
|
|
|
|
|
for (sector = old_lo; sector < new_lo; ) {
|
|
|
|
bitmap_end_sync(bitmap, sector, &blocks, 0);
|
|
|
|
sector += blocks;
|
|
|
|
}
|
|
|
|
WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
|
|
|
|
|
|
|
|
for (sector = old_hi; sector < new_hi; ) {
|
|
|
|
bitmap_start_sync(bitmap, sector, &blocks, 0);
|
|
|
|
sector += blocks;
|
|
|
|
}
|
|
|
|
WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(bitmap_sync_with_cluster);
|
|
|
|
|
2005-09-09 23:23:44 +00:00
|
|
|
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
|
|
|
/* For each chunk covered by any of these sectors, set the
|
2012-05-22 03:55:08 +00:00
|
|
|
* counter to 2 and possibly set resync_needed. They should all
|
2005-06-22 00:17:14 +00:00
|
|
|
* be 0 at this point
|
|
|
|
*/
|
2005-08-04 19:53:33 +00:00
|
|
|
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t secs;
|
2005-08-04 19:53:33 +00:00
|
|
|
bitmap_counter_t *bmc;
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_lock_irq(&bitmap->counts.lock);
|
|
|
|
bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
|
2005-08-04 19:53:33 +00:00
|
|
|
if (!bmc) {
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irq(&bitmap->counts.lock);
|
2005-08-04 19:53:33 +00:00
|
|
|
return;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
2010-06-01 09:37:31 +00:00
|
|
|
if (!*bmc) {
|
2014-06-07 05:36:26 +00:00
|
|
|
*bmc = 2;
|
2012-05-22 03:55:24 +00:00
|
|
|
bitmap_count_page(&bitmap->counts, offset, 1);
|
|
|
|
bitmap_set_pending(&bitmap->counts, offset);
|
2011-09-21 05:37:46 +00:00
|
|
|
bitmap->allclean = 0;
|
2005-08-04 19:53:33 +00:00
|
|
|
}
|
2014-06-07 05:36:26 +00:00
|
|
|
if (needed)
|
|
|
|
*bmc |= NEEDED_MASK;
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_unlock_irq(&bitmap->counts.lock);
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
|
|
|
|
2006-10-03 08:15:49 +00:00
|
|
|
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
|
|
|
|
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
|
|
|
|
{
|
|
|
|
unsigned long chunk;
|
|
|
|
|
|
|
|
for (chunk = s; chunk <= e; chunk++) {
|
2012-05-22 03:55:24 +00:00
|
|
|
sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
|
2006-10-03 08:15:49 +00:00
|
|
|
bitmap_set_memory_bits(bitmap, sec, 1);
|
|
|
|
bitmap_file_set_bit(bitmap, sec);
|
2009-12-14 01:49:56 +00:00
|
|
|
if (sec < bitmap->mddev->recovery_cp)
|
|
|
|
/* We are asserting that the array is dirty,
|
|
|
|
* so move the recovery_cp address back so
|
|
|
|
* that it is obvious that it is dirty
|
|
|
|
*/
|
|
|
|
bitmap->mddev->recovery_cp = sec;
|
2006-10-03 08:15:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-08-04 19:53:35 +00:00
|
|
|
/*
|
|
|
|
* flush out any pending updates
|
|
|
|
*/
|
2011-10-11 05:47:53 +00:00
|
|
|
void bitmap_flush(struct mddev *mddev)
|
2005-08-04 19:53:35 +00:00
|
|
|
{
|
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
2009-12-14 01:49:53 +00:00
|
|
|
long sleep;
|
2005-08-04 19:53:35 +00:00
|
|
|
|
|
|
|
if (!bitmap) /* there was no bitmap */
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* run the daemon_work three times to ensure everything is flushed
|
|
|
|
* that can be
|
|
|
|
*/
|
2009-12-14 01:49:53 +00:00
|
|
|
sleep = mddev->bitmap_info.daemon_sleep * 2;
|
2009-12-14 01:49:53 +00:00
|
|
|
bitmap->daemon_lastrun -= sleep;
|
2009-12-14 01:49:46 +00:00
|
|
|
bitmap_daemon_work(mddev);
|
2009-12-14 01:49:53 +00:00
|
|
|
bitmap->daemon_lastrun -= sleep;
|
2009-12-14 01:49:46 +00:00
|
|
|
bitmap_daemon_work(mddev);
|
2009-12-14 01:49:53 +00:00
|
|
|
bitmap->daemon_lastrun -= sleep;
|
2009-12-14 01:49:46 +00:00
|
|
|
bitmap_daemon_work(mddev);
|
2005-08-04 19:53:35 +00:00
|
|
|
bitmap_update_sb(bitmap);
|
|
|
|
}
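/*
 * Illustrative note (not part of the driver): bitmap_flush() above gets
 * its extra passes by rewinding the timestamp rather than sleeping.
 * Each pass advances the lazy-clear ladder one step (2 -> 1 -> 0 ->
 * page written), so three back-dated passes push every idle counter
 * all the way out to disk.  A single forced pass looks like:
 */
static inline void example_force_daemon_pass(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	/* make the time_before() check in bitmap_daemon_work() not skip */
	bitmap->daemon_lastrun -= mddev->bitmap_info.daemon_sleep * 2;
	bitmap_daemon_work(mddev);
}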
|
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
/*
|
|
|
|
* free memory that was allocated
|
|
|
|
*/
|
2017-03-01 08:42:39 +00:00
|
|
|
void bitmap_free(struct bitmap *bitmap)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
|
|
|
unsigned long k, pages;
|
|
|
|
struct bitmap_page *bp;
|
|
|
|
|
|
|
|
if (!bitmap) /* there was no bitmap */
|
|
|
|
return;
|
|
|
|
|
2016-04-01 09:08:49 +00:00
|
|
|
if (bitmap->sysfs_can_clear)
|
|
|
|
sysfs_put(bitmap->sysfs_can_clear);
|
|
|
|
|
2014-06-06 17:43:49 +00:00
|
|
|
if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
|
|
|
|
bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
|
2014-06-06 16:50:56 +00:00
|
|
|
md_cluster_stop(bitmap->mddev);
|
|
|
|
|
2012-05-22 03:55:21 +00:00
|
|
|
/* Shouldn't be needed - but just in case.... */
|
|
|
|
wait_event(bitmap->write_wait,
|
|
|
|
atomic_read(&bitmap->pending_writes) == 0);
|
|
|
|
|
|
|
|
/* release the bitmap file */
|
|
|
|
bitmap_file_unmap(&bitmap->storage);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
bp = bitmap->counts.bp;
|
|
|
|
pages = bitmap->counts.pages;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
|
|
|
/* free all allocated memory */
|
|
|
|
|
|
|
|
if (bp) /* deallocate the page memory */
|
|
|
|
for (k = 0; k < pages; k++)
|
|
|
|
if (bp[k].map && !bp[k].hijacked)
|
|
|
|
kfree(bp[k].map);
|
|
|
|
kfree(bp);
|
|
|
|
kfree(bitmap);
|
|
|
|
}
|
2017-03-01 08:42:39 +00:00
|
|
|
EXPORT_SYMBOL(bitmap_free);
|
2009-12-14 01:49:46 +00:00
|
|
|
|
2017-03-14 01:40:20 +00:00
|
|
|
void bitmap_wait_behind_writes(struct mddev *mddev)
|
|
|
|
{
|
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
|
|
|
|
|
|
|
/* wait for behind writes to complete */
|
|
|
|
if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
|
|
|
|
pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
|
|
|
|
mdname(mddev));
|
|
|
|
/* need to kick something here to make sure I/O goes? */
|
|
|
|
wait_event(bitmap->behind_wait,
|
|
|
|
atomic_read(&bitmap->behind_writes) == 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
void bitmap_destroy(struct mddev *mddev)
|
2005-09-09 23:23:50 +00:00
|
|
|
{
|
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
|
|
|
|
|
|
|
if (!bitmap) /* there was no bitmap */
|
|
|
|
return;
|
|
|
|
|
2017-03-14 01:40:20 +00:00
|
|
|
bitmap_wait_behind_writes(mddev);
|
|
|
|
|
2009-12-14 01:49:52 +00:00
|
|
|
mutex_lock(&mddev->bitmap_info.mutex);
|
2014-12-15 01:56:58 +00:00
|
|
|
spin_lock(&mddev->lock);
|
2005-09-09 23:23:50 +00:00
|
|
|
mddev->bitmap = NULL; /* disconnect from the md device */
|
2014-12-15 01:56:58 +00:00
|
|
|
spin_unlock(&mddev->lock);
|
2009-12-14 01:49:52 +00:00
|
|
|
mutex_unlock(&mddev->bitmap_info.mutex);
|
2006-01-06 08:20:16 +00:00
|
|
|
if (mddev->thread)
|
|
|
|
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
|
2005-09-09 23:23:50 +00:00
|
|
|
|
|
|
|
bitmap_free(bitmap);
|
|
|
|
}
|
2005-06-22 00:17:14 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* initialize the bitmap structure
|
|
|
|
* if this returns an error, bitmap_destroy must be called to do clean up
|
2016-04-01 09:08:49 +00:00
|
|
|
* once mddev->bitmap is set
|
2005-06-22 00:17:14 +00:00
|
|
|
*/
|
2014-06-06 17:43:49 +00:00
|
|
|
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
|
2005-06-22 00:17:14 +00:00
|
|
|
{
|
|
|
|
struct bitmap *bitmap;
|
2009-04-20 01:50:24 +00:00
|
|
|
sector_t blocks = mddev->resync_max_sectors;
|
2009-12-14 01:49:52 +00:00
|
|
|
struct file *file = mddev->bitmap_info.file;
|
2005-06-22 00:17:14 +00:00
|
|
|
int err;
|
2013-12-11 19:11:53 +00:00
|
|
|
struct kernfs_node *bm = NULL;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2006-10-11 08:22:26 +00:00
|
|
|
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2009-12-14 01:49:52 +00:00
|
|
|
BUG_ON(file && mddev->bitmap_info.offset);
|
2005-06-22 00:17:27 +00:00
|
|
|
|
2017-10-17 03:24:09 +00:00
|
|
|
if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
|
|
|
|
pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
|
|
|
|
mdname(mddev));
|
|
|
|
return ERR_PTR(-EBUSY);
|
|
|
|
}
|
|
|
|
|
2006-01-06 08:20:32 +00:00
|
|
|
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
|
2005-06-22 00:17:14 +00:00
|
|
|
if (!bitmap)
|
2014-06-06 17:43:49 +00:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
spin_lock_init(&bitmap->counts.lock);
|
2006-06-26 07:27:49 +00:00
|
|
|
atomic_set(&bitmap->pending_writes, 0);
|
|
|
|
init_waitqueue_head(&bitmap->write_wait);
|
2007-02-08 22:20:37 +00:00
|
|
|
init_waitqueue_head(&bitmap->overflow_wait);
|
2010-03-31 00:21:44 +00:00
|
|
|
init_waitqueue_head(&bitmap->behind_wait);
|
2006-06-26 07:27:49 +00:00
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
bitmap->mddev = mddev;
|
2014-06-06 17:43:49 +00:00
|
|
|
bitmap->cluster_slot = slot;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2010-06-01 09:37:32 +00:00
|
|
|
if (mddev->kobj.sd)
|
2013-09-12 03:19:13 +00:00
|
|
|
bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
|
2009-12-14 01:49:56 +00:00
|
|
|
if (bm) {
|
2013-09-12 03:19:13 +00:00
|
|
|
bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
|
2009-12-14 01:49:56 +00:00
|
|
|
sysfs_put(bm);
|
|
|
|
} else
|
|
|
|
bitmap->sysfs_can_clear = NULL;
|
|
|
|
|
2012-05-22 03:55:10 +00:00
|
|
|
bitmap->storage.file = file;
|
2006-06-26 07:27:49 +00:00
|
|
|
if (file) {
|
|
|
|
get_file(file);
|
2009-10-16 04:56:01 +00:00
|
|
|
/* As future accesses to this file will use bmap,
|
|
|
|
* and bypass the page cache, we must sync the file
|
|
|
|
* first.
|
|
|
|
*/
|
2010-03-22 16:32:25 +00:00
|
|
|
vfs_fsync(file, 1);
|
2006-06-26 07:27:49 +00:00
|
|
|
}
|
2009-12-14 01:49:53 +00:00
|
|
|
/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
|
2011-06-08 22:59:30 +00:00
|
|
|
if (!mddev->bitmap_info.external) {
|
|
|
|
/*
|
|
|
|
* If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
|
|
|
|
* instructing us to create a new on-disk bitmap instance.
|
|
|
|
*/
|
|
|
|
if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
|
|
|
|
err = bitmap_new_disk_sb(bitmap);
|
|
|
|
else
|
|
|
|
err = bitmap_read_sb(bitmap);
|
|
|
|
} else {
|
2009-12-14 01:49:56 +00:00
|
|
|
err = 0;
|
|
|
|
if (mddev->bitmap_info.chunksize == 0 ||
|
|
|
|
mddev->bitmap_info.daemon_sleep == 0)
|
|
|
|
/* chunksize and time_base need to be
|
|
|
|
* set first. */
|
|
|
|
err = -EINVAL;
|
|
|
|
}
|
2005-06-22 00:17:14 +00:00
|
|
|
if (err)
|
2005-09-09 23:23:50 +00:00
|
|
|
goto error;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2009-12-14 01:49:56 +00:00
|
|
|
bitmap->daemon_lastrun = jiffies;
|
2012-05-22 03:55:25 +00:00
|
|
|
err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
|
|
|
|
if (err)
|
2005-09-09 23:23:50 +00:00
|
|
|
goto error;
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2016-11-02 03:16:49 +00:00
|
|
|
pr_debug("created bitmap (%lu pages) for device %s\n",
|
|
|
|
bitmap->counts.pages, bmname(bitmap));
|
2010-06-01 09:37:35 +00:00
|
|
|
|
2014-06-06 17:43:49 +00:00
|
|
|
err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
|
|
|
|
if (err)
|
|
|
|
goto error;
|
2010-06-01 09:37:35 +00:00
|
|
|
|
2014-06-06 17:43:49 +00:00
|
|
|
return bitmap;
|
2010-06-01 09:37:35 +00:00
|
|
|
error:
|
|
|
|
bitmap_free(bitmap);
|
2014-06-06 17:43:49 +00:00
|
|
|
return ERR_PTR(err);
|
2010-06-01 09:37:35 +00:00
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
int bitmap_load(struct mddev *mddev)
|
2010-06-01 09:37:35 +00:00
|
|
|
{
|
|
|
|
int err = 0;
|
2011-07-27 01:00:37 +00:00
|
|
|
sector_t start = 0;
|
2010-06-01 09:37:35 +00:00
|
|
|
sector_t sector = 0;
|
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
|
|
|
|
|
|
|
if (!bitmap)
|
|
|
|
goto out;
|
|
|
|
|
2016-05-04 06:17:09 +00:00
|
|
|
if (mddev_is_clustered(mddev))
|
|
|
|
md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
|
|
|
|
|
2010-06-01 09:37:35 +00:00
|
|
|
/* Clear out old bitmap info first: Either there is none, or we
|
|
|
|
* are resuming after someone else has possibly changed things,
|
|
|
|
* so we should forget old cached info.
|
|
|
|
* All chunks should be clean, but some might need_sync.
|
|
|
|
*/
|
|
|
|
while (sector < mddev->resync_max_sectors) {
|
2010-10-18 23:03:39 +00:00
|
|
|
sector_t blocks;
|
2010-06-01 09:37:35 +00:00
|
|
|
bitmap_start_sync(bitmap, sector, &blocks, 0);
|
|
|
|
sector += blocks;
|
|
|
|
}
|
|
|
|
bitmap_close_sync(bitmap);
|
|
|
|
|
2011-07-27 01:00:37 +00:00
|
|
|
if (mddev->degraded == 0
|
|
|
|
|| bitmap->events_cleared == mddev->events)
|
|
|
|
/* no need to keep dirty bits to optimise a
|
|
|
|
* re-add of a missing device */
|
|
|
|
start = mddev->recovery_cp;
|
|
|
|
|
2012-04-12 06:05:06 +00:00
|
|
|
mutex_lock(&mddev->bitmap_info.mutex);
|
2011-07-27 01:00:37 +00:00
|
|
|
err = bitmap_init_from_disk(bitmap, start);
|
2012-04-12 06:05:06 +00:00
|
|
|
mutex_unlock(&mddev->bitmap_info.mutex);
|
2011-07-27 01:00:37 +00:00
|
|
|
|
2005-06-22 00:17:14 +00:00
|
|
|
if (err)
|
2010-06-01 09:37:35 +00:00
|
|
|
goto out;
|
2012-05-22 03:55:15 +00:00
|
|
|
clear_bit(BITMAP_STALE, &bitmap->flags);
|
2012-05-22 03:55:08 +00:00
|
|
|
|
|
|
|
/* Kick recovery in case any bits were set */
|
|
|
|
set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
|
2005-09-09 23:23:50 +00:00
|
|
|
|
2009-12-14 01:49:53 +00:00
|
|
|
mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
|
2009-12-14 01:49:54 +00:00
|
|
|
md_wakeup_thread(mddev->thread);
|
2006-01-06 08:20:16 +00:00
|
|
|
|
2007-07-17 11:06:13 +00:00
|
|
|
bitmap_update_sb(bitmap);
|
|
|
|
|
2012-05-22 03:55:15 +00:00
|
|
|
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
|
2010-06-01 09:37:35 +00:00
|
|
|
err = -EIO;
|
|
|
|
out:
|
2005-09-09 23:23:50 +00:00
|
|
|
return err;
|
2005-06-22 00:17:14 +00:00
|
|
|
}
|
2010-06-01 09:37:35 +00:00
|
|
|
EXPORT_SYMBOL_GPL(bitmap_load);
|
2005-06-22 00:17:14 +00:00
|
|
|
|
2017-03-01 08:42:39 +00:00
|
|
|
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
|
|
|
|
{
|
|
|
|
int rv = 0;
|
|
|
|
struct bitmap *bitmap;
|
|
|
|
|
|
|
|
bitmap = bitmap_create(mddev, slot);
|
|
|
|
if (IS_ERR(bitmap)) {
|
|
|
|
rv = PTR_ERR(bitmap);
|
|
|
|
return ERR_PTR(rv);
|
|
|
|
}
|
|
|
|
|
|
|
|
rv = bitmap_init_from_disk(bitmap, 0);
|
|
|
|
if (rv) {
|
|
|
|
bitmap_free(bitmap);
|
|
|
|
return ERR_PTR(rv);
|
|
|
|
}
|
|
|
|
|
|
|
|
return bitmap;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(get_bitmap_from_slot);
|
|
|
|
|
2014-06-07 05:36:26 +00:00
|
|
|
/* Loads the bitmap associated with slot and copies the resync information
|
|
|
|
* to our bitmap
|
|
|
|
*/
|
|
|
|
int bitmap_copy_from_slot(struct mddev *mddev, int slot,
|
2015-04-14 15:45:42 +00:00
|
|
|
sector_t *low, sector_t *high, bool clear_bits)
|
2014-06-07 05:36:26 +00:00
|
|
|
{
|
|
|
|
int rv = 0, i, j;
|
|
|
|
sector_t block, lo = 0, hi = 0;
|
|
|
|
struct bitmap_counts *counts;
|
2017-03-01 08:42:39 +00:00
|
|
|
struct bitmap *bitmap;
|
2014-06-07 05:36:26 +00:00
|
|
|
|
2017-03-01 08:42:39 +00:00
|
|
|
bitmap = get_bitmap_from_slot(mddev, slot);
|
|
|
|
if (IS_ERR(bitmap)) {
|
|
|
|
pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
|
|
|
|
return -1;
|
|
|
|
}
|
2014-06-07 05:36:26 +00:00
|
|
|
|
|
|
|
counts = &bitmap->counts;
|
|
|
|
for (j = 0; j < counts->chunks; j++) {
|
|
|
|
block = (sector_t)j << counts->chunkshift;
|
|
|
|
if (bitmap_file_test_bit(bitmap, block)) {
|
|
|
|
if (!lo)
|
|
|
|
lo = block;
|
|
|
|
hi = block;
|
|
|
|
bitmap_file_clear_bit(bitmap, block);
|
|
|
|
bitmap_set_memory_bits(mddev->bitmap, block, 1);
|
|
|
|
bitmap_file_set_bit(mddev->bitmap, block);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-14 15:45:42 +00:00
|
|
|
if (clear_bits) {
|
|
|
|
bitmap_update_sb(bitmap);
|
2016-05-02 15:50:15 +00:00
|
|
|
/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
|
|
|
|
* BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
|
2015-04-14 15:45:42 +00:00
|
|
|
for (i = 0; i < bitmap->storage.file_pages; i++)
|
2016-05-02 15:50:15 +00:00
|
|
|
if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
|
|
|
|
set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
|
2015-04-14 15:45:42 +00:00
|
|
|
bitmap_unplug(bitmap);
|
|
|
|
}
|
2016-05-02 15:50:15 +00:00
|
|
|
bitmap_unplug(mddev->bitmap);
|
2014-06-07 05:36:26 +00:00
|
|
|
*low = lo;
|
|
|
|
*high = hi;
|
2017-03-01 08:42:39 +00:00
|
|
|
|
2014-06-07 05:36:26 +00:00
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
|
|
|
|
|
|
|
|
|
2012-03-19 01:46:40 +00:00
|
|
|
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
|
|
|
|
{
|
|
|
|
unsigned long chunk_kb;
|
2012-05-22 03:55:24 +00:00
|
|
|
struct bitmap_counts *counts;
|
2012-03-19 01:46:40 +00:00
|
|
|
|
|
|
|
if (!bitmap)
|
|
|
|
return;
|
|
|
|
|
2012-05-22 03:55:24 +00:00
|
|
|
counts = &bitmap->counts;
|
|
|
|
|
2012-03-19 01:46:40 +00:00
|
|
|
chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
|
|
|
|
seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
|
|
|
|
"%lu%s chunk",
|
2012-05-22 03:55:24 +00:00
|
|
|
counts->pages - counts->missing_pages,
|
|
|
|
counts->pages,
|
|
|
|
(counts->pages - counts->missing_pages)
|
2012-03-19 01:46:40 +00:00
|
|
|
<< (PAGE_SHIFT - 10),
|
|
|
|
chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
|
|
|
|
chunk_kb ? "KB" : "B");
|
2012-05-22 03:55:10 +00:00
|
|
|
if (bitmap->storage.file) {
|
2012-03-19 01:46:40 +00:00
|
|
|
seq_printf(seq, ", file: ");
|
2015-06-19 08:30:28 +00:00
|
|
|
seq_file_path(seq, bitmap->storage.file, " \t\n");
|
2012-03-19 01:46:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
seq_printf(seq, "\n");
|
|
|
|
}
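/*
 * Example of the /proc/mdstat line emitted above (illustrative values):
 * a bitmap with 3 of 29 counter pages allocated and 64KB chunks, on a
 * 4KiB-page machine, prints
 *
 *   bitmap: 3/29 pages [12KB], 64KB chunk
 *
 * with ", file: /path/to/bitmap" appended for file-backed bitmaps.
 */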
|
|
|
|
|
2012-05-22 03:55:25 +00:00
|
|
|
int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
|
|
|
int chunksize, int init)
|
|
|
|
{
|
|
|
|
/* If chunksize is 0, choose an appropriate chunk size.
|
|
|
|
* Then possibly allocate new storage space.
|
|
|
|
* Then quiesce, copy bits, replace bitmap, and re-start
|
|
|
|
*
|
|
|
|
* This function is called both to set up the initial bitmap
|
|
|
|
* and to resize the bitmap while the array is active.
|
|
|
|
* If this happens as a result of the array being resized,
|
|
|
|
* chunksize will be zero, and we need to choose a suitable
|
|
|
|
* chunksize, otherwise we use what we are given.
|
|
|
|
*/
|
|
|
|
struct bitmap_storage store;
|
|
|
|
struct bitmap_counts old_counts;
|
|
|
|
unsigned long chunks;
|
|
|
|
sector_t block;
|
|
|
|
sector_t old_blocks, new_blocks;
|
|
|
|
int chunkshift;
|
|
|
|
int ret = 0;
|
|
|
|
long pages;
|
|
|
|
struct bitmap_page *new_bp;
|
|
|
|
|
2017-08-31 00:23:25 +00:00
|
|
|
if (bitmap->storage.file && !init) {
|
|
|
|
pr_info("md: cannot resize file-based bitmap\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2012-05-22 03:55:25 +00:00
|
|
|
if (chunksize == 0) {
|
|
|
|
/* If there is enough space, leave the chunk size unchanged,
|
|
|
|
* else increase by factor of two until there is enough space.
|
|
|
|
*/
|
|
|
|
long bytes;
|
|
|
|
long space = bitmap->mddev->bitmap_info.space;
|
|
|
|
|
|
|
|
if (space == 0) {
|
|
|
|
/* We don't know how much space there is, so limit
|
|
|
|
* to current size - in sectors.
|
|
|
|
*/
|
|
|
|
bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
|
|
|
|
if (!bitmap->mddev->bitmap_info.external)
|
|
|
|
bytes += sizeof(bitmap_super_t);
|
|
|
|
space = DIV_ROUND_UP(bytes, 512);
|
|
|
|
bitmap->mddev->bitmap_info.space = space;
|
|
|
|
}
|
|
|
|
chunkshift = bitmap->counts.chunkshift;
|
|
|
|
chunkshift--;
|
|
|
|
do {
|
|
|
|
/* 'chunkshift' is shift from block size to chunk size */
|
|
|
|
chunkshift++;
|
|
|
|
chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
|
|
|
|
bytes = DIV_ROUND_UP(chunks, 8);
|
|
|
|
if (!bitmap->mddev->bitmap_info.external)
|
|
|
|
bytes += sizeof(bitmap_super_t);
|
|
|
|
} while (bytes > (space << 9));
|
|
|
|
} else
|
|
|
|
chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
|
|
|
|
|
|
|
|
chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
|
|
|
|
memset(&store, 0, sizeof(store));
|
|
|
|
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
|
|
|
|
ret = bitmap_storage_alloc(&store, chunks,
|
2014-06-06 16:50:56 +00:00
|
|
|
!bitmap->mddev->bitmap_info.external,
|
2015-10-01 06:03:38 +00:00
|
|
|
mddev_is_clustered(bitmap->mddev)
|
|
|
|
? bitmap->cluster_slot : 0);
|
2016-10-31 02:19:00 +00:00
|
|
|
if (ret) {
|
|
|
|
bitmap_file_unmap(&store);
|
2012-05-22 03:55:25 +00:00
|
|
|
goto err;
|
2016-10-31 02:19:00 +00:00
|
|
|
}
|
2012-05-22 03:55:25 +00:00
|
|
|
|
|
|
|
pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
|
|
|
|
|
|
|
|
new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
|
|
|
|
ret = -ENOMEM;
|
|
|
|
if (!new_bp) {
|
|
|
|
bitmap_file_unmap(&store);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!init)
|
|
|
|
bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
|
|
|
|
|
|
|
|
store.file = bitmap->storage.file;
|
|
|
|
bitmap->storage.file = NULL;
|
|
|
|
|
|
|
|
if (store.sb_page && bitmap->storage.sb_page)
|
|
|
|
memcpy(page_address(store.sb_page),
|
|
|
|
page_address(bitmap->storage.sb_page),
|
2017-10-17 02:03:44 +00:00
|
|
|
sizeof(bitmap_super_t));
|
2012-05-22 03:55:25 +00:00
|
|
|
bitmap_file_unmap(&bitmap->storage);
|
|
|
|
bitmap->storage = store;
|
|
|
|
|
|
|
|
old_counts = bitmap->counts;
|
|
|
|
bitmap->counts.bp = new_bp;
|
|
|
|
bitmap->counts.pages = pages;
|
|
|
|
bitmap->counts.missing_pages = pages;
|
|
|
|
bitmap->counts.chunkshift = chunkshift;
|
|
|
|
bitmap->counts.chunks = chunks;
|
|
|
|
bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
|
|
|
|
BITMAP_BLOCK_SHIFT);
|
|
|
|
|
|
|
|
blocks = min(old_counts.chunks << old_counts.chunkshift,
|
|
|
|
chunks << chunkshift);
|
|
|
|
|
|
|
|
spin_lock_irq(&bitmap->counts.lock);
|
2016-05-02 15:50:11 +00:00
|
|
|
/* For cluster raid, need to pre-allocate bitmap */
|
|
|
|
if (mddev_is_clustered(bitmap->mddev)) {
|
|
|
|
unsigned long page;
|
|
|
|
for (page = 0; page < pages; page++) {
|
|
|
|
ret = bitmap_checkpage(&bitmap->counts, page, 1, 1);
|
|
|
|
if (ret) {
|
|
|
|
unsigned long k;
|
|
|
|
|
|
|
|
/* deallocate the page memory */
|
|
|
|
for (k = 0; k < page; k++) {
|
2016-05-02 15:50:16 +00:00
|
|
|
kfree(new_bp[k].map);
|
2016-05-02 15:50:11 +00:00
|
|
|
}
|
2017-11-08 12:44:56 +00:00
|
|
|
kfree(new_bp);
|
2016-05-02 15:50:11 +00:00
|
|
|
|
|
|
|
/* restore some fields from old_counts */
|
|
|
|
bitmap->counts.bp = old_counts.bp;
|
|
|
|
bitmap->counts.pages = old_counts.pages;
|
|
|
|
bitmap->counts.missing_pages = old_counts.pages;
|
|
|
|
bitmap->counts.chunkshift = old_counts.chunkshift;
|
|
|
|
bitmap->counts.chunks = old_counts.chunks;
|
|
|
|
bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
|
|
|
|
BITMAP_BLOCK_SHIFT);
|
|
|
|
blocks = old_counts.chunks << old_counts.chunkshift;
|
2016-11-02 03:16:49 +00:00
|
|
|
pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
|
2016-05-02 15:50:11 +00:00
|
|
|
break;
|
|
|
|
} else
|
|
|
|
bitmap->counts.bp[page].count += 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-22 03:55:25 +00:00
|
|
|
for (block = 0; block < blocks; ) {
|
|
|
|
bitmap_counter_t *bmc_old, *bmc_new;
|
|
|
|
int set;
|
|
|
|
|
|
|
|
bmc_old = bitmap_get_counter(&old_counts, block,
|
|
|
|
&old_blocks, 0);
|
|
|
|
set = bmc_old && NEEDED(*bmc_old);
|
|
|
|
|
|
|
|
if (set) {
|
|
|
|
bmc_new = bitmap_get_counter(&bitmap->counts, block,
|
|
|
|
&new_blocks, 1);
|
|
|
|
if (*bmc_new == 0) {
|
|
|
|
/* need to set on-disk bits too. */
|
|
|
|
sector_t end = block + new_blocks;
|
|
|
|
sector_t start = block >> chunkshift;
|
|
|
|
start <<= chunkshift;
|
|
|
|
while (start < end) {
|
|
|
|
bitmap_file_set_bit(bitmap, block);
|
|
|
|
start += 1 << chunkshift;
|
|
|
|
}
|
|
|
|
*bmc_new = 2;
|
|
|
|
bitmap_count_page(&bitmap->counts,
|
|
|
|
block, 1);
|
|
|
|
bitmap_set_pending(&bitmap->counts,
|
|
|
|
block);
|
|
|
|
}
|
|
|
|
*bmc_new |= NEEDED_MASK;
|
|
|
|
if (new_blocks < old_blocks)
|
|
|
|
old_blocks = new_blocks;
|
|
|
|
}
|
|
|
|
block += old_blocks;
|
|
|
|
}
|
|
|
|
|
2017-11-08 12:44:56 +00:00
|
|
|
if (bitmap->counts.bp != old_counts.bp) {
|
|
|
|
unsigned long k;
|
|
|
|
for (k = 0; k < old_counts.pages; k++)
|
|
|
|
if (!old_counts.bp[k].hijacked)
|
|
|
|
kfree(old_counts.bp[k].map);
|
|
|
|
kfree(old_counts.bp);
|
|
|
|
}
|
|
|
|
|
2012-05-22 03:55:25 +00:00
|
|
|
if (!init) {
|
|
|
|
int i;
|
|
|
|
while (block < (chunks << chunkshift)) {
|
|
|
|
bitmap_counter_t *bmc;
|
|
|
|
bmc = bitmap_get_counter(&bitmap->counts, block,
|
|
|
|
&new_blocks, 1);
|
|
|
|
if (bmc) {
|
|
|
|
/* new space. It needs to be resynced, so
|
|
|
|
* we set NEEDED_MASK.
|
|
|
|
*/
|
|
|
|
if (*bmc == 0) {
|
|
|
|
*bmc = NEEDED_MASK | 2;
|
|
|
|
bitmap_count_page(&bitmap->counts,
|
|
|
|
block, 1);
|
|
|
|
bitmap_set_pending(&bitmap->counts,
|
|
|
|
block);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
block += new_blocks;
|
|
|
|
}
|
|
|
|
for (i = 0; i < bitmap->storage.file_pages; i++)
|
|
|
|
set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
|
|
|
|
}
|
|
|
|
spin_unlock_irq(&bitmap->counts.lock);
|
|
|
|
|
|
|
|
if (!init) {
|
|
|
|
bitmap_unplug(bitmap);
|
|
|
|
bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(bitmap_resize);
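/*
 * Worked example (illustrative numbers) of the chunk-size search above,
 * assuming an internal bitmap, blocks = 2^32 sectors (2TiB) and
 * space = 512 sectors (2^18 bytes).  The loop starts from the current
 * chunkshift and only ever grows the chunk:
 *
 *   chunkshift = 10: chunks = 2^22, bytes = 2^19 + 256 > 2^18   too big
 *   chunkshift = 11: chunks = 2^21, bytes = 2^18 + 256 > 2^18   sb tips it over
 *   chunkshift = 12: chunks = 2^20, bytes = 2^17 + 256 <= 2^18  fits
 *
 * giving a chunk size of 1 << (12 + BITMAP_BLOCK_SHIFT) = 2MiB.
 */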
|
|
|
|
|
2009-12-14 01:49:55 +00:00
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
location_show(struct mddev *mddev, char *page)
|
2009-12-14 01:49:55 +00:00
|
|
|
{
|
|
|
|
ssize_t len;
|
2010-06-01 09:37:31 +00:00
|
|
|
if (mddev->bitmap_info.file)
|
2009-12-14 01:49:55 +00:00
|
|
|
len = sprintf(page, "file");
|
2010-06-01 09:37:31 +00:00
|
|
|
else if (mddev->bitmap_info.offset)
|
2009-12-14 01:49:55 +00:00
|
|
|
len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
|
2010-06-01 09:37:31 +00:00
|
|
|
else
|
2009-12-14 01:49:55 +00:00
|
|
|
len = sprintf(page, "none");
|
|
|
|
len += sprintf(page+len, "\n");
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
location_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 01:49:55 +00:00
|
|
|
{
|
2016-07-30 17:05:31 +00:00
|
|
|
int rv;
|
2009-12-14 01:49:55 +00:00
|
|
|
|
2016-07-30 17:05:31 +00:00
|
|
|
rv = mddev_lock(mddev);
|
|
|
|
if (rv)
|
|
|
|
return rv;
|
2009-12-14 01:49:55 +00:00
|
|
|
if (mddev->pers) {
|
2016-07-30 17:05:31 +00:00
|
|
|
if (!mddev->pers->quiesce) {
|
|
|
|
rv = -EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (mddev->recovery || mddev->sync_thread) {
|
|
|
|
rv = -EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
2009-12-14 01:49:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (mddev->bitmap || mddev->bitmap_info.file ||
|
|
|
|
mddev->bitmap_info.offset) {
|
|
|
|
/* bitmap already configured. Only option is to clear it */
|
2016-07-30 17:05:31 +00:00
|
|
|
if (strncmp(buf, "none", 4) != 0) {
|
|
|
|
rv = -EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
2009-12-14 01:49:55 +00:00
|
|
|
if (mddev->pers) {
|
|
|
|
mddev->pers->quiesce(mddev, 1);
|
|
|
|
bitmap_destroy(mddev);
|
|
|
|
mddev->pers->quiesce(mddev, 0);
|
|
|
|
}
|
|
|
|
mddev->bitmap_info.offset = 0;
|
|
|
|
if (mddev->bitmap_info.file) {
|
|
|
|
struct file *f = mddev->bitmap_info.file;
|
|
|
|
mddev->bitmap_info.file = NULL;
|
|
|
|
fput(f);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* No bitmap, OK to set a location */
|
|
|
|
long long offset;
|
|
|
|
if (strncmp(buf, "none", 4) == 0)
|
|
|
|
/* nothing to be done */;
|
|
|
|
else if (strncmp(buf, "file:", 5) == 0) {
|
|
|
|
/* Not supported yet */
|
2016-07-30 17:05:31 +00:00
|
|
|
rv = -EINVAL;
|
|
|
|
goto out;
|
2009-12-14 01:49:55 +00:00
|
|
|
} else {
|
|
|
|
if (buf[0] == '+')
|
2013-06-01 07:15:16 +00:00
|
|
|
rv = kstrtoll(buf+1, 10, &offset);
|
2009-12-14 01:49:55 +00:00
|
|
|
else
|
2013-06-01 07:15:16 +00:00
|
|
|
rv = kstrtoll(buf, 10, &offset);
|
2009-12-14 01:49:55 +00:00
|
|
|
if (rv)
|
2016-07-30 17:05:31 +00:00
|
|
|
goto out;
|
|
|
|
if (offset == 0) {
|
|
|
|
rv = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2009-12-14 01:49:56 +00:00
|
|
|
if (mddev->bitmap_info.external == 0 &&
|
|
|
|
mddev->major_version == 0 &&
|
2016-07-30 17:05:31 +00:00
|
|
|
offset != mddev->bitmap_info.default_offset) {
|
|
|
|
rv = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2009-12-14 01:49:55 +00:00
|
|
|
mddev->bitmap_info.offset = offset;
|
|
|
|
if (mddev->pers) {
|
2014-06-06 17:43:49 +00:00
|
|
|
struct bitmap *bitmap;
|
2009-12-14 01:49:55 +00:00
|
|
|
mddev->pers->quiesce(mddev, 1);
|
2014-06-06 17:43:49 +00:00
|
|
|
bitmap = bitmap_create(mddev, -1);
|
|
|
|
if (IS_ERR(bitmap))
|
|
|
|
rv = PTR_ERR(bitmap);
|
|
|
|
else {
|
|
|
|
mddev->bitmap = bitmap;
|
2012-03-19 01:46:37 +00:00
|
|
|
rv = bitmap_load(mddev);
|
2016-04-01 09:08:49 +00:00
|
|
|
if (rv)
|
2014-06-06 17:43:49 +00:00
|
|
|
mddev->bitmap_info.offset = 0;
|
2009-12-14 01:49:55 +00:00
|
|
|
}
|
|
|
|
mddev->pers->quiesce(mddev, 0);
|
2016-04-01 09:08:49 +00:00
|
|
|
if (rv) {
|
|
|
|
bitmap_destroy(mddev);
|
2016-07-30 17:05:31 +00:00
|
|
|
goto out;
|
2016-04-01 09:08:49 +00:00
|
|
|
}
|
2009-12-14 01:49:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!mddev->external) {
|
|
|
|
/* Ensure new bitmap info is stored in
|
|
|
|
* metadata promptly.
|
|
|
|
*/
|
2016-12-08 23:48:19 +00:00
|
|
|
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
|
2009-12-14 01:49:55 +00:00
|
|
|
md_wakeup_thread(mddev->thread);
|
|
|
|
}
|
2016-07-30 17:05:31 +00:00
|
|
|
rv = 0;
|
|
|
|
out:
|
|
|
|
mddev_unlock(mddev);
|
|
|
|
if (rv)
|
|
|
|
return rv;
|
2009-12-14 01:49:55 +00:00
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_location =
|
|
|
|
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
|
|
|
|
|
2012-05-22 03:55:07 +00:00
|
|
|
/* 'bitmap/space' is the space available at 'location' for the
|
|
|
|
* bitmap. This allows the kernel to know when it is safe to
|
|
|
|
* resize the bitmap to match a resized array.
|
|
|
|
*/
|
|
|
|
static ssize_t
|
|
|
|
space_show(struct mddev *mddev, char *page)
|
|
|
|
{
|
|
|
|
return sprintf(page, "%lu\n", mddev->bitmap_info.space);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
space_store(struct mddev *mddev, const char *buf, size_t len)
|
|
|
|
{
|
|
|
|
unsigned long sectors;
|
|
|
|
int rv;
|
|
|
|
|
|
|
|
rv = kstrtoul(buf, 10, &sectors);
|
|
|
|
if (rv)
|
|
|
|
return rv;
|
|
|
|
|
|
|
|
if (sectors == 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (mddev->bitmap &&
|
2012-05-22 03:55:11 +00:00
|
|
|
sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
|
2012-05-22 03:55:07 +00:00
|
|
|
return -EFBIG; /* Bitmap is too big for this small space */
|
|
|
|
|
|
|
|
/* could make sure it isn't too big, but that isn't really
|
|
|
|
* needed - user-space should be careful.
|
|
|
|
*/
|
|
|
|
mddev->bitmap_info.space = sectors;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_space =
|
|
|
|
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
|
|
|
|
|
2009-12-14 01:49:55 +00:00
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
timeout_show(struct mddev *mddev, char *page)
|
2009-12-14 01:49:55 +00:00
|
|
|
{
|
|
|
|
ssize_t len;
|
|
|
|
unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
|
|
|
|
unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
|
2010-06-01 09:37:31 +00:00
|
|
|
|
2009-12-14 01:49:55 +00:00
|
|
|
len = sprintf(page, "%lu", secs);
|
|
|
|
if (jifs)
|
|
|
|
len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
|
|
|
|
len += sprintf(page+len, "\n");
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
timeout_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 01:49:55 +00:00
|
|
|
{
|
|
|
|
/* timeout can be set at any time */
|
|
|
|
unsigned long timeout;
|
|
|
|
int rv = strict_strtoul_scaled(buf, &timeout, 4);
|
|
|
|
if (rv)
|
|
|
|
return rv;
|
|
|
|
|
|
|
|
/* just to make sure we don't overflow... */
|
|
|
|
if (timeout >= LONG_MAX / HZ)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
timeout = timeout * HZ / 10000;
|
|
|
|
|
|
|
|
if (timeout >= MAX_SCHEDULE_TIMEOUT)
|
|
|
|
timeout = MAX_SCHEDULE_TIMEOUT-1;
|
|
|
|
if (timeout < 1)
|
|
|
|
timeout = 1;
|
|
|
|
mddev->bitmap_info.daemon_sleep = timeout;
|
|
|
|
if (mddev->thread) {
|
|
|
|
/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
|
|
|
|
* the bitmap is all clean and we don't need to
|
|
|
|
* adjust the timeout right now
|
|
|
|
*/
|
|
|
|
if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
|
|
|
|
mddev->thread->timeout = timeout;
|
|
|
|
md_wakeup_thread(mddev->thread);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_timeout =
|
|
|
|
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
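/*
 * Worked example (illustrative input) for timeout_store() above,
 * assuming strict_strtoul_scaled() parses a decimal with up to four
 * fractional digits scaled by 10^4:
 *
 *   echo 5.73 > md/bitmap/time_base
 *
 * yields timeout = 57300, so timeout * HZ / 10000 = 5.73 * HZ jiffies
 * (1432 jiffies with HZ == 250), clamped into [1, MAX_SCHEDULE_TIMEOUT).
 */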
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
backlog_show(struct mddev *mddev, char *page)
|
2009-12-14 01:49:55 +00:00
|
|
|
{
|
|
|
|
return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
backlog_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 01:49:55 +00:00
|
|
|
{
|
|
|
|
unsigned long backlog;
|
2013-06-01 07:15:16 +00:00
|
|
|
int rv = kstrtoul(buf, 10, &backlog);
|
2009-12-14 01:49:55 +00:00
|
|
|
if (rv)
|
|
|
|
return rv;
|
|
|
|
if (backlog > COUNTER_MAX)
|
|
|
|
return -EINVAL;
|
|
|
|
mddev->bitmap_info.max_write_behind = backlog;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_backlog =
|
|
|
|
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
chunksize_show(struct mddev *mddev, char *page)
|
2009-12-14 01:49:55 +00:00
|
|
|
{
|
|
|
|
return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 01:49:55 +00:00
|
|
|
{
|
|
|
|
/* Can only be changed when no bitmap is active */
|
|
|
|
int rv;
|
|
|
|
unsigned long csize;
|
|
|
|
if (mddev->bitmap)
|
|
|
|
return -EBUSY;
|
2013-06-01 07:15:16 +00:00
|
|
|
rv = kstrtoul(buf, 10, &csize);
|
2009-12-14 01:49:55 +00:00
|
|
|
if (rv)
|
|
|
|
return rv;
|
|
|
|
if (csize < 512 ||
|
|
|
|
!is_power_of_2(csize))
|
|
|
|
return -EINVAL;
|
|
|
|
mddev->bitmap_info.chunksize = csize;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_chunksize =
|
|
|
|
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static ssize_t metadata_show(struct mddev *mddev, char *page)
|
2009-12-14 01:49:56 +00:00
|
|
|
{
|
2014-03-29 15:20:02 +00:00
|
|
|
if (mddev_is_clustered(mddev))
|
|
|
|
return sprintf(page, "clustered\n");
|
2009-12-14 01:49:56 +00:00
|
|
|
return sprintf(page, "%s\n", (mddev->bitmap_info.external
|
|
|
|
? "external" : "internal"));
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 01:49:56 +00:00
|
|
|
{
|
|
|
|
if (mddev->bitmap ||
|
|
|
|
mddev->bitmap_info.file ||
|
|
|
|
mddev->bitmap_info.offset)
|
|
|
|
return -EBUSY;
|
|
|
|
if (strncmp(buf, "external", 8) == 0)
|
|
|
|
mddev->bitmap_info.external = 1;
|
2014-03-29 15:20:02 +00:00
|
|
|
else if ((strncmp(buf, "internal", 8) == 0) ||
|
|
|
|
(strncmp(buf, "clustered", 9) == 0))
|
2009-12-14 01:49:56 +00:00
|
|
|
mddev->bitmap_info.external = 0;
|
|
|
|
else
|
|
|
|
return -EINVAL;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_metadata =
|
|
|
|
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static ssize_t can_clear_show(struct mddev *mddev, char *page)
|
2009-12-14 01:49:56 +00:00
|
|
|
{
|
|
|
|
int len;
|
2014-12-15 01:56:59 +00:00
|
|
|
spin_lock(&mddev->lock);
|
2009-12-14 01:49:56 +00:00
|
|
|
if (mddev->bitmap)
|
|
|
|
len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
|
|
|
|
"false" : "true"));
|
|
|
|
else
|
|
|
|
len = sprintf(page, "\n");
|
2014-12-15 01:56:59 +00:00
|
|
|
spin_unlock(&mddev->lock);
|
2009-12-14 01:49:56 +00:00
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2011-10-11 05:47:53 +00:00
|
|
|
static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 01:49:56 +00:00
|
|
|
{
|
|
|
|
if (mddev->bitmap == NULL)
|
|
|
|
return -ENOENT;
|
|
|
|
if (strncmp(buf, "false", 5) == 0)
|
|
|
|
mddev->bitmap->need_sync = 1;
|
|
|
|
else if (strncmp(buf, "true", 4) == 0) {
|
|
|
|
if (mddev->degraded)
|
|
|
|
return -EBUSY;
|
|
|
|
mddev->bitmap->need_sync = 0;
|
|
|
|
} else
|
|
|
|
return -EINVAL;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_can_clear =
|
|
|
|
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
|
|
|
|
|
2010-03-08 05:02:37 +00:00
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
behind_writes_used_show(struct mddev *mddev, char *page)
|
2010-03-08 05:02:37 +00:00
|
|
|
{
|
2014-12-15 01:56:59 +00:00
|
|
|
ssize_t ret;
|
|
|
|
spin_lock(&mddev->lock);
|
2010-03-08 05:02:37 +00:00
|
|
|
if (mddev->bitmap == NULL)
|
2014-12-15 01:56:59 +00:00
|
|
|
ret = sprintf(page, "0\n");
|
|
|
|
else
|
|
|
|
ret = sprintf(page, "%lu\n",
|
|
|
|
mddev->bitmap->behind_writes_used);
|
|
|
|
spin_unlock(&mddev->lock);
|
|
|
|
return ret;
|
2010-03-08 05:02:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 05:47:53 +00:00
|
|
|
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
|
2010-03-08 05:02:37 +00:00
|
|
|
{
|
|
|
|
if (mddev->bitmap)
|
|
|
|
mddev->bitmap->behind_writes_used = 0;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry max_backlog_used =
|
|
|
|
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
|
|
|
|
behind_writes_used_show, behind_writes_used_reset);
|
|
|
|
|
2009-12-14 01:49:55 +00:00
|
|
|
static struct attribute *md_bitmap_attrs[] = {
|
|
|
|
&bitmap_location.attr,
|
2012-05-22 03:55:07 +00:00
|
|
|
&bitmap_space.attr,
|
2009-12-14 01:49:55 +00:00
|
|
|
&bitmap_timeout.attr,
|
|
|
|
&bitmap_backlog.attr,
|
|
|
|
&bitmap_chunksize.attr,
|
2009-12-14 01:49:56 +00:00
|
|
|
&bitmap_metadata.attr,
|
|
|
|
&bitmap_can_clear.attr,
|
2010-03-08 05:02:37 +00:00
|
|
|
&max_backlog_used.attr,
|
2009-12-14 01:49:55 +00:00
|
|
|
NULL
|
|
|
|
};
|
|
|
|
struct attribute_group md_bitmap_group = {
|
|
|
|
.name = "bitmap",
|
|
|
|
.attrs = md_bitmap_attrs,
|
|
|
|
};
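Because the group is named "bitmap", the attributes above appear as files in a bitmap/ subdirectory of the array's md sysfs directory. A hedged user-space sketch that sets the write-behind backlog through that interface (the md0 device name is an assumption made for illustration):

/*
 * Minimal sketch of driving one of these attributes from user space;
 * the md0 path is an assumption, not something the driver fixes.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/md0/md/bitmap/backlog";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* backlog_store() parses this with kstrtoul(buf, 10, ...) and
	 * rejects values above COUNTER_MAX */
	fprintf(f, "256\n");
	return fclose(f) ? 1 : 0;
}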
|
|
|
|
|