[PATCH] Revert ide softirq handling
There's a problem with the REQ_BLOCK_PC handling as well (bad ->data_len
handling) where it could actually complete a request ahead of time. I
suggest we just back this out for now; I will resubmit it later when I'm
fully confident in it.
This reverts commit 8672d57138
Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit ba027def7b
parent 661dd5c840
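As a rough illustration of the failure mode described above (all names below are invented for illustration; this is not kernel code): a packet request's ->data_len is byte-accurate, while the reverted fast path judged completion from a sector-derived byte count (nr_sectors << 9), so sector-granular rounding could declare such a request finished before all of its data had been accounted for. A minimal standalone C sketch of that mismatch, under those assumptions:

/* Standalone model of the suspected early-completion bug; every name
 * here is invented and does not come from the kernel. */
#include <stdio.h>

struct fake_request {
	unsigned int data_len;          /* bytes expected, byte-accurate */
};

/* Completion test driven by sector-granular accounting, mimicking the
 * reverted fast path's nr_sectors << 9 arithmetic. */
static int looks_all_done(const struct fake_request *rq, int nr_sectors)
{
	unsigned int nbytes = nr_sectors << 9;  /* sectors -> bytes */
	return nbytes >= rq->data_len;
}

int main(void)
{
	/* A packet command expecting a 96-byte transfer. */
	struct fake_request rq = { .data_len = 96 };

	/* One sector's worth of accounting (512 bytes) overshoots the
	 * 96-byte length, so the request looks complete regardless of
	 * how much data has actually moved. */
	printf("done after 1 sector of accounting: %s\n",
	       looks_all_done(&rq, 1) ? "yes" : "no");
	return 0;
}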
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,22 +55,9 @@
 #include <asm/io.h>
 #include <asm/bitops.h>
 
-void ide_softirq_done(struct request *rq)
-{
-	request_queue_t *q = rq->q;
-
-	add_disk_randomness(rq->rq_disk);
-	end_that_request_chunk(rq, 1, rq->data_len);
-
-	spin_lock_irq(q->queue_lock);
-	end_that_request_last(rq, 1);
-	spin_unlock_irq(q->queue_lock);
-}
-
 int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 		      int nr_sectors)
 {
-	unsigned int nbytes;
 	int ret = 1;
 
 	BUG_ON(!(rq->flags & REQ_STARTED));
@@ -94,27 +81,12 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 		HWGROUP(drive)->hwif->ide_dma_on(drive);
 	}
 
-	/*
-	 * For partial completions (or non fs/pc requests), use the regular
-	 * direct completion path. Same thing for requests that failed, to
-	 * preserve the ->errors value we use the normal completion path
-	 * for those
-	 */
-	nbytes = nr_sectors << 9;
-	if (!rq->errors && rq_all_done(rq, nbytes)) {
-		rq->data_len = nbytes;
+	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+		add_disk_randomness(rq->rq_disk);
 		blkdev_dequeue_request(rq);
 		HWGROUP(drive)->rq = NULL;
-		blk_complete_request(rq);
+		end_that_request_last(rq, uptodate);
 		ret = 0;
-	} else {
-		if (!end_that_request_first(rq, uptodate, nr_sectors)) {
-			add_disk_randomness(rq->rq_disk);
-			blkdev_dequeue_request(rq);
-			HWGROUP(drive)->rq = NULL;
-			end_that_request_last(rq, uptodate);
-			ret = 0;
-		}
 	}
 
 	return ret;
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1011,8 +1011,6 @@ static int ide_init_queue(ide_drive_t *drive)
 	blk_queue_max_hw_segments(q, max_sg_entries);
 	blk_queue_max_phys_segments(q, max_sg_entries);
 
-	blk_queue_softirq_done(q, ide_softirq_done);
-
 	/* assign drive queue */
 	drive->queue = q;
 
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1002,7 +1002,6 @@ extern int noautodma;
 
 extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
 extern int __ide_end_request (ide_drive_t *drive, struct request *rq, int uptodate, int nrsecs);
-extern void ide_softirq_done(struct request *rq);
 
 /*
  * This is used on exit from the driver to designate the next irq handler
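For context on what the revert removes, a toy standalone sketch (invented names throughout; not the block layer API): blk_queue_softirq_done() registered ide_softirq_done() as a per-queue hook so request completion could be deferred out of the hot path and run later in softirq context, whereas the revert restores finishing the request inline under the queue lock.

/* Toy model of deferred completion; all names are invented. */
#include <stdio.h>

struct fake_queue;
typedef void softirq_done_fn(struct fake_queue *q);

struct fake_queue {
	softirq_done_fn *done_fn;   /* hook registered at queue init */
	int pending;                /* a request awaiting completion */
};

static void fake_ide_softirq_done(struct fake_queue *q)
{
	q->pending = 0;             /* finish the request "later" */
	puts("request completed in deferred (softirq-like) context");
}

int main(void)
{
	struct fake_queue q = { .done_fn = fake_ide_softirq_done, .pending = 1 };

	/* Hot path: only note that completion is pending, then return. */
	puts("I/O finished, completion deferred");

	/* Later, the deferred hook runs and completes the request. */
	q.done_fn(&q);
	return 0;
}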