Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cciss: fix build for PROC_FS disabled
  block: fix amiga and atari floppy driver compile warning
  blk-throttle: Fix calculation of max number of WRITES to be dispatched
  ioprio: grab rcu_read_lock in sys_ioprio_{set,get}()
  xen/blkfront: cope with backend that fail empty BLKIF_OP_WRITE_BARRIER requests
  xen/blkfront: Implement FUA with BLKIF_OP_WRITE_BARRIER
  xen/blkfront: change blk_shadow.request to proper pointer
  xen/blkfront: map REQ_FLUSH into a full barrier
commit 78daa87b1d
block/blk-throttle.c
@@ -645,7 +645,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 {
     unsigned int nr_reads = 0, nr_writes = 0;
     unsigned int max_nr_reads = throtl_grp_quantum*3/4;
-    unsigned int max_nr_writes = throtl_grp_quantum - nr_reads;
+    unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
     struct bio *bio;
 
     /* Try to dispatch 75% READS and 25% WRITES */
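Note on the one-liner above: at the point of the calculation nr_reads is still zero, so the old expression always budgeted the entire quantum to writes instead of the intended 25%. A standalone sketch of the arithmetic, assuming throtl_grp_quantum is 8 as defined in block/blk-throttle.c of this period (illustrative userspace code, not the kernel's):

#include <stdio.h>

#define THROTL_GRP_QUANTUM 8   /* assumed value of throtl_grp_quantum */

int main(void)
{
    unsigned int nr_reads = 0;                                  /* nothing dispatched yet */
    unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;     /* 6 */
    unsigned int buggy = THROTL_GRP_QUANTUM - nr_reads;         /* 8: the whole quantum */
    unsigned int fixed = THROTL_GRP_QUANTUM - max_nr_reads;     /* 2: the intended 25% */

    printf("read budget %u, write budget buggy=%u fixed=%u\n",
           max_nr_reads, buggy, fixed);
    return 0;
}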
drivers/block/amiflop.c
@@ -1341,7 +1341,7 @@ static struct request *set_next_request(void)
 {
     struct request_queue *q;
     int cnt = FD_MAX_UNITS;
-    struct request *rq;
+    struct request *rq = NULL;
 
     /* Find next queue we can dispatch from */
     fdc_queue = fdc_queue + 1;
drivers/block/ataflop.c
@@ -1399,7 +1399,7 @@ static struct request *set_next_request(void)
 {
     struct request_queue *q;
     int old_pos = fdc_queue;
-    struct request *rq;
+    struct request *rq = NULL;
 
     do {
         q = unit[fdc_queue].disk->queue;
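Both floppy hunks are the same compile-warning fix: in each driver's set_next_request() the assignment to rq happens only on a conditional path inside a loop, so the function can reach its return with rq never written. Initializing it to NULL silences gcc's "may be used uninitialized" warning and makes the "no request available" result explicit. A hypothetical reduction of the pattern (names invented for illustration):

#include <stddef.h>

#define FD_MAX_UNITS 4

/* Stand-in for a per-unit queue lookup; NULL means "nothing queued". */
static int *fetch_from_unit(int unit) { (void)unit; return NULL; }

static int *set_next_request(void)
{
    int *rq = NULL; /* without "= NULL", the return below may read garbage */
    int cnt;

    for (cnt = 0; cnt < FD_MAX_UNITS; cnt++) {
        int *q = fetch_from_unit(cnt);
        if (q) {        /* rq is only ever assigned on this path */
            rq = q;
            break;
        }
    }
    return rq;          /* reached with no assignment when all units are idle */
}

int main(void) { return set_next_request() != NULL; }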
drivers/block/cciss.c
@@ -66,6 +66,7 @@ MODULE_VERSION("3.6.26");
 MODULE_LICENSE("GPL");
 
 static DEFINE_MUTEX(cciss_mutex);
+static struct proc_dir_entry *proc_cciss;
 
 #include "cciss_cmd.h"
 #include "cciss.h"
@@ -363,8 +364,6 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 #define ENG_GIG_FACTOR (ENG_GIG/512)
 #define ENGAGE_SCSI "engage scsi"
 
-static struct proc_dir_entry *proc_cciss;
-
 static void cciss_seq_show_header(struct seq_file *seq)
 {
     ctlr_info_t *h = seq->private;
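The cciss breakage only shows up with CONFIG_PROC_FS=n: the declaration of proc_cciss lived inside the procfs-only region of the file while a reference to it remained in unconditionally compiled code, so the two hunks above simply move the declaration to the top of the file. A hypothetical reduction, with WITH_PROC standing in for CONFIG_PROC_FS:

#include <stdio.h>

/* Declared unconditionally, as proc_cciss is after the fix. */
static const char *proc_entry;

#ifdef WITH_PROC
static void register_proc(void) { proc_entry = "driver/demo"; }
#else
static void register_proc(void) { proc_entry = NULL; }
#endif

int main(void)
{
    register_proc();
    /* This reference compiles either way; had proc_entry been declared
     * inside #ifdef WITH_PROC, the build without -DWITH_PROC would fail here. */
    printf("proc entry: %s\n", proc_entry ? proc_entry : "(none)");
    return 0;
}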
drivers/block/xen-blkfront.c
@@ -65,7 +65,7 @@ enum blkif_state {
 
 struct blk_shadow {
     struct blkif_request req;
-    unsigned long request;
+    struct request *request;
     unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
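blk_shadow.request previously held a struct request pointer smuggled through an unsigned long, so every reader needed a cast the compiler could not check (several such casts disappear in the hunks below). A small sketch of what the type change buys, using illustrative types rather than the kernel's:

#include <assert.h>

struct request { int tag; };

struct shadow_old { unsigned long request; };   /* pointer hidden in an integer */
struct shadow_new { struct request *request; }; /* plain, type-checked pointer */

int main(void)
{
    struct request rq = { .tag = 7 };
    struct shadow_old o = { .request = (unsigned long)&rq };
    struct shadow_new n = { .request = &rq };

    assert(((struct request *)o.request)->tag == 7); /* cast at every use */
    assert(n.request->tag == 7);                     /* checked by the compiler */
    return 0;
}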
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
                                unsigned long id)
 {
     info->shadow[id].req.id = info->shadow_free;
-    info->shadow[id].request = 0;
+    info->shadow[id].request = NULL;
     info->shadow_free = id;
 }
 
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request.  Reads
+ * and writes are handled as expected.  Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
     /* Fill out a communications ring structure. */
     ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
     id = get_id_from_freelist(info);
-    info->shadow[id].request = (unsigned long)req;
+    info->shadow[id].request = req;
 
     ring_req->id = id;
     ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -290,6 +287,18 @@ static int blkif_queue_request(struct request *req)
     ring_req->operation = rq_data_dir(req) ?
         BLKIF_OP_WRITE : BLKIF_OP_READ;
 
+    if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+        /*
+         * Ideally we could just do an unordered
+         * flush-to-disk, but all we have is a full write
+         * barrier at the moment.  However, a barrier write is
+         * a superset of FUA, so we can implement it the same
+         * way.  (It's also a FLUSH+FUA, since it is
+         * guaranteed ordered WRT previous writes.)
+         */
+        ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+    }
+
     ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
     BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
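The in-code comment above is the heart of the FUA commit: a barrier write both orders itself after all earlier writes (FLUSH semantics) and is durable on completion (FUA semantics), so one opcode can stand in for either flag, at the cost of being stronger than strictly necessary. A compact illustration of the mapping (assumed enum values, not the protocol's):

#include <assert.h>

enum op { OP_READ, OP_WRITE, OP_WRITE_BARRIER };

/* Mirror of the hunk's logic: any flush or FUA request becomes a barrier. */
static enum op xen_op(int is_write, int flush, int fua)
{
    if (flush || fua)
        return OP_WRITE_BARRIER;
    return is_write ? OP_WRITE : OP_READ;
}

int main(void)
{
    assert(xen_op(1, 0, 0) == OP_WRITE);
    assert(xen_op(1, 1, 0) == OP_WRITE_BARRIER); /* REQ_FLUSH */
    assert(xen_op(1, 0, 1) == OP_WRITE_BARRIER); /* REQ_FUA */
    assert(xen_op(0, 0, 0) == OP_READ);
    return 0;
}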
@@ -634,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
         bret = RING_GET_RESPONSE(&info->ring, i);
         id = bret->id;
-        req = (struct request *)info->shadow[id].request;
+        req = info->shadow[id].request;
 
         blkif_completion(&info->shadow[id]);
 
@@ -647,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
                        info->gd->disk_name);
                 error = -EOPNOTSUPP;
             }
+            if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+                         info->shadow[id].req.nr_segments == 0)) {
+                printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+                       info->gd->disk_name);
+                error = -EOPNOTSUPP;
+            }
+            if (unlikely(error)) {
+                if (error == -EOPNOTSUPP)
+                    error = 0;
+                info->feature_flush = 0;
+                xlvbd_flush(info);
+            }
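This is the "cope with backend that fail empty BLKIF_OP_WRITE_BARRIER requests" change: some backends reject a barrier that carries no segments, so the handler treats both the explicit EOPNOTSUPP status and an error on an empty barrier as "feature missing" rather than as an I/O error, clears feature_flush, and calls xlvbd_flush() so the block layer stops generating flushes. A sketch of that degrade-on-first-failure pattern (illustrative only, not the driver's code):

#include <errno.h>
#include <stdio.h>

static int feature_flush = 1;   /* advertised until proven unsupported */

/* Complete one barrier request; returns the status handed up the stack. */
static int complete_barrier(int backend_status)
{
    if (backend_status == -EOPNOTSUPP) {
        feature_flush = 0;      /* stop issuing flushes from now on */
        return 0;               /* data reached the backend; not an I/O error */
    }
    return backend_status;
}

int main(void)
{
    printf("status=%d feature_flush=%d\n",
           complete_barrier(-EOPNOTSUPP), feature_flush);
    return 0;
}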
@@ -899,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
     /* Stage 3: Find pending requests and requeue them. */
     for (i = 0; i < BLK_RING_SIZE; i++) {
         /* Not in use? */
-        if (copy[i].request == 0)
+        if (!copy[i].request)
             continue;
 
         /* Grab a request slot and copy shadow state into it. */
@@ -916,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
                 req->seg[j].gref,
                 info->xbdev->otherend_id,
                 pfn_to_mfn(info->shadow[req->id].frame[j]),
-                rq_data_dir(
-                    (struct request *)
-                    info->shadow[req->id].request));
+                rq_data_dir(info->shadow[req->id].request));
         info->shadow[req->id].req = *req;
 
         info->ring.req_prod_pvt++;
@@ -1067,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
      */
     info->feature_flush = 0;
 
-    /*
-     * The driver doesn't properly handled empty flushes, so
-     * lets disable barrier support for now.
-     */
-#if 0
     if (!err && barrier)
-        info->feature_flush = REQ_FLUSH;
-#endif
+        info->feature_flush = REQ_FLUSH | REQ_FUA;
 
     err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
     if (err) {
fs/ioprio.c
@@ -103,22 +103,15 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
     }
 
     ret = -ESRCH;
-    /*
-     * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
-     * so we can't use rcu_read_lock(). See re-copy of ->ioprio
-     * in copy_process().
-     */
-    read_lock(&tasklist_lock);
+    rcu_read_lock();
     switch (which) {
         case IOPRIO_WHO_PROCESS:
-            rcu_read_lock();
             if (!who)
                 p = current;
             else
                 p = find_task_by_vpid(who);
             if (p)
                 ret = set_task_ioprio(p, ioprio);
-            rcu_read_unlock();
             break;
         case IOPRIO_WHO_PGRP:
             if (!who)
@@ -141,12 +134,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
                 break;
 
             do_each_thread(g, p) {
-                int match;
-
-                rcu_read_lock();
-                match = __task_cred(p)->uid == who;
-                rcu_read_unlock();
-                if (!match)
+                if (__task_cred(p)->uid != who)
                     continue;
                 ret = set_task_ioprio(p, ioprio);
                 if (ret)
@@ -160,7 +148,7 @@ free_uid:
         ret = -EINVAL;
     }
 
-    read_unlock(&tasklist_lock);
+    rcu_read_unlock();
     return ret;
 }
 
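Nothing changes at the syscall boundary here; the rework only swaps tasklist_lock for RCU inside. For reference, a minimal userspace exerciser of the two syscalls touched by this patch; glibc ships no wrappers, so it goes through syscall(2), with constants copied from include/linux/ioprio.h:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IOPRIO_WHO_PROCESS  1
#define IOPRIO_CLASS_BE     2
#define IOPRIO_CLASS_SHIFT  13
#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
    /* Best-effort class, level 4, applied to the calling process (who=0). */
    if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
                IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4)) < 0) {
        perror("ioprio_set");
        return 1;
    }
    printf("ioprio: %ld\n", syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
    return 0;
}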
@@ -204,17 +192,15 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
     int ret = -ESRCH;
     int tmpio;
 
-    read_lock(&tasklist_lock);
+    rcu_read_lock();
     switch (which) {
         case IOPRIO_WHO_PROCESS:
-            rcu_read_lock();
             if (!who)
                 p = current;
             else
                 p = find_task_by_vpid(who);
             if (p)
                 ret = get_task_ioprio(p);
-            rcu_read_unlock();
             break;
         case IOPRIO_WHO_PGRP:
             if (!who)
@@ -241,12 +227,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                 break;
 
             do_each_thread(g, p) {
-                int match;
-
-                rcu_read_lock();
-                match = __task_cred(p)->uid == user->uid;
-                rcu_read_unlock();
-                if (!match)
+                if (__task_cred(p)->uid != user->uid)
                     continue;
                 tmpio = get_task_ioprio(p);
                 if (tmpio < 0)
@@ -264,6 +245,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
         ret = -EINVAL;
     }
 
-    read_unlock(&tasklist_lock);
+    rcu_read_unlock();
     return ret;
 }
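The conversion holds because these lookups only need the task_struct and its cred to stay valid while they are inspected, which a single RCU read-side section guarantees without taking the global tasklist_lock on every syscall; the per-case rcu_read_lock()/unlock() pairs then become redundant and are removed. For a feel of the reader-side pattern outside the kernel, a sketch using userspace RCU (liburcu, link with -lurcu; assumed available on the build host):

#include <stdio.h>
#include <urcu.h>   /* userspace RCU, default flavor */

struct task { int uid; int ioprio; };

static struct task *current_task;

int main(void)
{
    static struct task t = { .uid = 1000, .ioprio = 4 };

    rcu_register_thread();
    rcu_assign_pointer(current_task, &t);

    rcu_read_lock();    /* cheap, non-blocking reader-side section */
    struct task *p = rcu_dereference(current_task);
    if (p)
        printf("uid=%d ioprio=%d\n", p->uid, p->ioprio);
    rcu_read_unlock();  /* the object is guaranteed valid until here */

    rcu_unregister_thread();
    return 0;
}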