mirror of https://github.com/torvalds/linux.git
ide-tape: remove pipeline-specific code from idetape_add_chrdev_write_request
Refrain from adding more write requests to the pipeline and queue them
directly on the device's request queue instead.

[bart: re-do for minimal behavior changes]

Signed-off-by: Borislav Petkov <petkovbb@gmail.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
commit 97c566cebe
parent f64eee7bb2
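For orientation, below is a minimal sketch of the fully non-pipelined write path this series is heading toward: the character-device write is queued directly on the device's request queue via idetape_queue_rw_tail(). It is distilled from the lines kept by the diff that follows, not the exact result of this patch; as the hunks show, this patch deliberately keeps the wait-for-an-active-pipeline loop so that behavior changes stay minimal.

/*
 * Sketch only: the chrdev write path once the pipeline bookkeeping is gone.
 * Distilled from the diff below; the function after *this* patch still
 * contains the while (1) wait loop shown in the hunks.
 */
static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
        idetape_tape_t *tape = drive->driver_data;

        debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

        /* Queue the write directly on the device's request queue. */
        return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
                                     tape->merge_stage->bh);
}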
drivers/ide/ide-tape.c

@@ -2202,28 +2202,16 @@ static void idetape_wait_first_stage(ide_drive_t *drive)
 	spin_unlock_irqrestore(&tape->lock, flags);
 }
 
-/*
- * Try to add a character device originated write request to our pipeline. In
- * case we don't succeed, we revert to non-pipelined operation mode for this
- * request. In order to accomplish that, we
- *
- * 1. Try to allocate a new pipeline stage.
- * 2. If we can't, wait for more and more requests to be serviced and try again
- * each time.
- * 3. If we still can't allocate a stage, fallback to non-pipelined operation
- * mode for this request.
- */
+/* Queue up a character device originated write request. */
 static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
 {
 	idetape_tape_t *tape = drive->driver_data;
-	idetape_stage_t *new_stage;
 	unsigned long flags;
 	struct request *rq;
 
 	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
 
-	/* Attempt to allocate a new stage. Beware possible race conditions. */
-	while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
+	while (1) {
 		spin_lock_irqsave(&tape->lock, flags);
 		if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
 			idetape_wait_for_request(drive, tape->active_data_rq);
@@ -2234,49 +2222,10 @@ static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
 			if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
 					&tape->flags))
 				continue;
-			/*
-			 * The machine is short on memory. Fallback to non-
-			 * pipelined operation mode for this request.
-			 */
 			return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
 						blocks, tape->merge_stage->bh);
 		}
 	}
-	rq = &new_stage->rq;
-	idetape_init_rq(rq, REQ_IDETAPE_WRITE);
-	/* Doesn't actually matter - We always assume sequential access */
-	rq->sector = tape->first_frame;
-	rq->current_nr_sectors = blocks;
-	rq->nr_sectors = blocks;
-
-	idetape_switch_buffers(tape, new_stage);
-	idetape_add_stage_tail(drive, new_stage);
-	tape->pipeline_head++;
-	idetape_calculate_speeds(drive);
-
-	/*
-	 * Estimate whether the tape has stopped writing by checking if our
-	 * write pipeline is currently empty. If we are not writing anymore,
-	 * wait for the pipeline to be almost completely full (90%) before
-	 * starting to service requests, so that we will be able to keep up with
-	 * the higher speeds of the tape.
-	 */
-	if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
-		if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
-		    tape->nr_stages >= tape->max_stages -
-			tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
-			tape->blk_size) {
-			tape->measure_insert_time = 1;
-			tape->insert_time = jiffies;
-			tape->insert_size = 0;
-			tape->insert_speed = 0;
-			idetape_plug_pipeline(drive);
-		}
-	}
-	if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
-		/* Return a deferred error */
-		return -EIO;
-	return blocks;
 }
 
 /*
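For reference, here is how the function reads with this patch applied, reconstructed from the two hunks above (context lines plus added lines). The few unchanged lines that fall in the gap between the hunks (the unlock, the else branch, and the idetape_plug_pipeline() call) are not quoted in the diff and are filled in from the surrounding locking pattern, so treat this as an approximation rather than a verbatim copy of the tree.

/* Queue up a character device originated write request. */
static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
        idetape_tape_t *tape = drive->driver_data;
        unsigned long flags;
        struct request *rq;     /* still declared per the context lines, though unused here */

        debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

        while (1) {
                spin_lock_irqsave(&tape->lock, flags);
                if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
                        /* Pipeline still draining: wait for its active request. */
                        idetape_wait_for_request(drive, tape->active_data_rq);
                        spin_unlock_irqrestore(&tape->lock, flags);
                } else {
                        /* This branch sits between the hunks and is reconstructed. */
                        spin_unlock_irqrestore(&tape->lock, flags);
                        idetape_plug_pipeline(drive);
                        if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
                                        &tape->flags))
                                continue;
                        /* No pipeline activity left: queue the write directly. */
                        return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
                                                blocks, tape->merge_stage->bh);
                }
        }
}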