drbd: Consider the disk-timeout also for meta-data IO operations
If the backing device is already frozen during attach, we failed to recognize that. The current disk-timeout code works on top of the drbd_request objects; during attach we do not allow IO and therefore never generate a drbd_request object, but block before that in drbd_make_request().

This patch adds the timeout to all drbd_md_sync_page_io() operations.

Before this patch we used to go from D_ATTACHING directly to D_DISKLESS if IO failed during attach. We can no longer do this, since we have to stay in D_FAILED until all IO operations issued to the backing device have returned.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
commit 32db80f6f6
parent 25b0d6c8c1
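The heart of the change is the timeout policy for meta-data IO waits: the configured disk-timeout is given in deciseconds, converted to jiffies via dt * HZ / 10, and a value of 0 disables the timeout (MAX_SCHEDULE_TIMEOUT). As a rough illustration of that policy outside the kernel, the userspace C sketch below converts a deciseconds timeout into an absolute deadline, with 0 meaning "wait without bound"; md_wait_deadline() and DECISECOND_NS are invented names for this example and are not part of DRBD.

/*
 * Minimal userspace sketch of the timeout policy the patch applies to
 * meta-data IO waits: the disk-timeout is configured in deciseconds and
 * a value of 0 means "no timeout".  md_wait_deadline() and DECISECOND_NS
 * are illustrative names only.
 */
#include <stdio.h>
#include <time.h>

#define DECISECOND_NS 100000000L	/* 0.1 s expressed in nanoseconds */

/* Turn a disk-timeout in deciseconds into an absolute CLOCK_MONOTONIC
 * deadline.  Returns 0 (and leaves *deadline untouched) when the timeout
 * is disabled, mirroring dt == 0 -> MAX_SCHEDULE_TIMEOUT in the patch. */
static int md_wait_deadline(long disk_timeout_ds, struct timespec *deadline)
{
	if (disk_timeout_ds == 0)
		return 0;
	clock_gettime(CLOCK_MONOTONIC, deadline);
	deadline->tv_sec  += disk_timeout_ds / 10;
	deadline->tv_nsec += (disk_timeout_ds % 10) * DECISECOND_NS;
	if (deadline->tv_nsec >= 1000000000L) {
		deadline->tv_sec += 1;
		deadline->tv_nsec -= 1000000000L;
	}
	return 1;
}

int main(void)
{
	struct timespec dl;

	if (md_wait_deadline(30, &dl))	/* 30 deciseconds == 3 seconds */
		printf("bounded wait: give up at monotonic %ld.%09ld\n",
		       (long)dl.tv_sec, dl.tv_nsec);
	else
		printf("disk-timeout is 0: wait without bound\n");
	return 0;
}

In the first hunk below, the same policy appears as dt = dt * HZ / 10 followed by wait_event_timeout(); a return value of 0 means the wait timed out and "meta-data IO operation timed out" is logged.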
@@ -129,9 +129,21 @@ static bool md_io_allowed(struct drbd_conf *mdev)
 	return ds >= D_NEGOTIATING || ds == D_ATTACHING;
 }
 
-void wait_until_done_or_disk_failure(struct drbd_conf *mdev, unsigned int *done)
+void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+				     unsigned int *done)
 {
-	wait_event(mdev->misc_wait, *done || !md_io_allowed(mdev));
+	long dt;
+
+	rcu_read_lock();
+	dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
+	rcu_read_unlock();
+	dt = dt * HZ / 10;
+	if (dt == 0)
+		dt = MAX_SCHEDULE_TIMEOUT;
+
+	dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
+	if (dt == 0)
+		dev_err(DEV, "meta-data IO operation timed out\n");
 }
 
 static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
@@ -171,7 +183,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 		bio_endio(bio, -EIO);
 	else
 		submit_bio(rw, bio);
-	wait_until_done_or_disk_failure(mdev, &mdev->md_io.done);
+	wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
 	if (bio_flagged(bio, BIO_UPTODATE))
 		err = mdev->md_io.error;
 
@@ -1123,7 +1123,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
 	 * "in_flight reached zero, all done" event.
 	 */
 	if (!atomic_dec_and_test(&ctx->in_flight))
-		wait_until_done_or_disk_failure(mdev, &ctx->done);
+		wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
 	else
 		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
 
@@ -1242,7 +1242,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
 	}
 
 	bm_page_io_async(ctx, idx, WRITE_SYNC);
-	wait_until_done_or_disk_failure(mdev, &ctx->done);
+	wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
 
 	if (ctx->error)
 		drbd_chk_io_error(mdev, 1, true);
@@ -1428,7 +1428,8 @@ extern void drbd_md_put_buffer(struct drbd_conf *mdev);
 extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
 		struct drbd_backing_dev *bdev, sector_t sector, int rw);
 extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int);
-extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, unsigned int *done);
+extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+		unsigned int *done);
 extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
 
 static inline void ov_out_of_sync_print(struct drbd_conf *mdev)
@@ -1308,37 +1308,41 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	/* first half of local IO error, failure to attach,
 	 * or administrative detach */
 	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
-		enum drbd_io_error_p eh;
-		int was_io_error;
+		enum drbd_io_error_p eh = EP_PASS_ON;
+		int was_io_error = 0;
 		/* corresponding get_ldev was in __drbd_set_state, to serialize
-		 * our cleanup here with the transition to D_DISKLESS,
-		 * so it is safe to dreference ldev here. */
-		rcu_read_lock();
-		eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
-		rcu_read_unlock();
-		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
-		/* Immediately allow completion of all application IO, that waits
-		   for completion from the local disk. */
-		tl_abort_disk_io(mdev);
-
-		/* current state still has to be D_FAILED,
-		 * there is only one way out: to D_DISKLESS,
-		 * and that may only happen after our put_ldev below. */
-		if (mdev->state.disk != D_FAILED)
-			dev_err(DEV,
-				"ASSERT FAILED: disk is %s during detach\n",
-				drbd_disk_str(mdev->state.disk));
-
-		if (ns.conn >= C_CONNECTED)
-			drbd_send_state(mdev, ns);
-
-		drbd_rs_cancel_all(mdev);
-
-		/* In case we want to get something to stable storage still,
-		 * this may be the last chance.
-		 * Following put_ldev may transition to D_DISKLESS. */
-		drbd_md_sync(mdev);
+		 * our cleanup here with the transition to D_DISKLESS.
+		 * But is is still not save to dreference ldev here, since
+		 * we might come from an failed Attach before ldev was set. */
+		if (mdev->ldev) {
+			rcu_read_lock();
+			eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+			rcu_read_unlock();
+
+			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+			/* Immediately allow completion of all application IO, that waits
+			   for completion from the local disk. */
+			tl_abort_disk_io(mdev);
+
+			/* current state still has to be D_FAILED,
+			 * there is only one way out: to D_DISKLESS,
+			 * and that may only happen after our put_ldev below. */
+			if (mdev->state.disk != D_FAILED)
+				dev_err(DEV,
+					"ASSERT FAILED: disk is %s during detach\n",
+					drbd_disk_str(mdev->state.disk));
+
+			if (ns.conn >= C_CONNECTED)
+				drbd_send_state(mdev, ns);
+
+			drbd_rs_cancel_all(mdev);
+
+			/* In case we want to get something to stable storage still,
+			 * this may be the last chance.
+			 * Following put_ldev may transition to D_DISKLESS. */
+			drbd_md_sync(mdev);
+		}
 		put_ldev(mdev);
 
 		if (was_io_error && eh == EP_CALL_HELPER)