mirror of https://github.com/torvalds/linux.git, synced 2024-11-11 22:51:42 +00:00
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block:
  Export __splice_from_pipe()
  2/2 splice: dont readpage
  1/2 splice: dont steal
  make elv_register() output atomic
  block: blk_max_pfn is somtimes wrong
commit e0ab0bb6d2
@@ -964,17 +964,18 @@ void elv_unregister_queue(struct request_queue *q)
 
 int elv_register(struct elevator_type *e)
 {
+	char *def = "";
 	spin_lock_irq(&elv_list_lock);
 	BUG_ON(elevator_find(e->elevator_name));
 	list_add_tail(&e->list, &elv_list);
 	spin_unlock_irq(&elv_list_lock);
 
-	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
 	if (!strcmp(e->elevator_name, chosen_elevator) ||
 			(!*chosen_elevator &&
 			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
-				printk(" (default)");
-	printk("\n");
+				def = " (default)";
+
+	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(elv_register);
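For context, elv_register() is what an I/O scheduler calls from its module init. The sketch below is hypothetical (the names and the omitted elevator_ops are placeholders, modelled on the in-tree schedulers) and is not part of this merge; it only shows where the now-single printk() line comes from:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/elevator.h>

/* Placeholder scheduler: a real one also fills in .ops with its
 * merge/dispatch callbacks. */
static struct elevator_type iosched_myiosched = {
	.elevator_name	= "my_iosched",
	.elevator_owner	= THIS_MODULE,
};

static int __init my_iosched_init(void)
{
	/* With this change, "io scheduler my_iosched registered (default)"
	 * is emitted by one printk() and can no longer be interleaved with
	 * other console output. */
	return elv_register(&iosched_myiosched);
}

static void __exit my_iosched_exit(void)
{
	elv_unregister(&iosched_myiosched);
}

module_init(my_iosched_init);
module_exit(my_iosched_exit);
MODULE_LICENSE("GPL");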
@@ -1221,7 +1221,7 @@ void blk_recount_segments(request_queue_t *q, struct bio *bio)
 		 * considered part of another segment, since that might
 		 * change with the bounce page.
 		 */
-		high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
 		if (high || highprv)
 			goto new_hw_segment;
 		if (cluster) {
@@ -3658,8 +3658,8 @@ int __init blk_dev_init(void)
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
 	register_hotcpu_notifier(&blk_cpu_notifier);
 
-	blk_max_low_pfn = max_low_pfn;
-	blk_max_pfn = max_pfn;
+	blk_max_low_pfn = max_low_pfn - 1;
+	blk_max_pfn = max_pfn - 1;
 
 	return 0;
 }
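The reasoning behind the two "- 1"s, as a standalone toy (not kernel code): max_pfn and max_low_pfn are counts of page frames, while blk_max_pfn and blk_max_low_pfn are used as inclusive limits, so they must hold the highest valid frame number.

#include <assert.h>

int main(void)
{
	/* 1 GiB of RAM with 4 KiB pages: 262144 frames, numbered 0..262143. */
	unsigned long max_pfn = 262144;			/* a count of frames */
	unsigned long highest_valid_pfn = max_pfn - 1;	/* the last usable index */

	unsigned long blk_max_pfn_old = max_pfn;	/* pre-fix value: off by one */
	unsigned long blk_max_pfn_new = max_pfn - 1;	/* value after this hunk */

	/* A limit used in inclusive "pfn <= limit" checks must be an index,
	 * not a count, or the first frame past the end is accepted. */
	assert(blk_max_pfn_new == highest_valid_pfn);
	assert(blk_max_pfn_old == highest_valid_pfn + 1);
	return 0;
}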
84 fs/splice.c
@@ -576,76 +576,21 @@ static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	if (this_len + offset > PAGE_CACHE_SIZE)
 		this_len = PAGE_CACHE_SIZE - offset;
 
-	/*
-	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
-	 * page.
-	 */
-	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
-		/*
-		 * If steal succeeds, buf->page is now pruned from the
-		 * pagecache and we can reuse it. The page will also be
-		 * locked on successful return.
-		 */
-		if (buf->ops->steal(pipe, buf))
-			goto find_page;
-
-		page = buf->page;
-		if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
-			unlock_page(page);
-			goto find_page;
-		}
-
-		page_cache_get(page);
-
-		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
-			lru_cache_add(page);
-	} else {
 find_page:
-		page = find_lock_page(mapping, index);
-		if (!page) {
-			ret = -ENOMEM;
-			page = page_cache_alloc_cold(mapping);
-			if (unlikely(!page))
-				goto out_ret;
-
-			/*
-			 * This will also lock the page
-			 */
-			ret = add_to_page_cache_lru(page, mapping, index,
-						GFP_KERNEL);
-			if (unlikely(ret))
-				goto out;
-		}
-
-		/*
-		 * We get here with the page locked. If the page is also
-		 * uptodate, we don't need to do more. If it isn't, we
-		 * may need to bring it in if we are not going to overwrite
-		 * the full page.
-		 */
-		if (!PageUptodate(page)) {
-			if (this_len < PAGE_CACHE_SIZE) {
-				ret = mapping->a_ops->readpage(file, page);
-				if (unlikely(ret))
-					goto out;
-
-				lock_page(page);
-
-				if (!PageUptodate(page)) {
-					/*
-					 * Page got invalidated, repeat.
-					 */
-					if (!page->mapping) {
-						unlock_page(page);
-						page_cache_release(page);
-						goto find_page;
-					}
-					ret = -EIO;
-					goto out;
-				}
-			} else
-				SetPageUptodate(page);
-		}
-	}
+	page = find_lock_page(mapping, index);
+	if (!page) {
+		ret = -ENOMEM;
+		page = page_cache_alloc_cold(mapping);
+		if (unlikely(!page))
+			goto out_ret;
+
+		/*
+		 * This will also lock the page
+		 */
+		ret = add_to_page_cache_lru(page, mapping, index,
+						GFP_KERNEL);
+		if (unlikely(ret))
+			goto out;
+	}
 
 	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
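The user-visible side of the two splice patches above (my illustration, not from the commit): SPLICE_F_MOVE is still accepted, but with the steal path gone the kernel is free to copy the pipe page into the page cache instead of moving it, so the flag becomes a hint. A minimal userspace sketch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Drain a pipe into a file with splice(2).  Whether SPLICE_F_MOVE actually
 * moves pages or ends up copying is now up to the kernel; the result in
 * out.dat is the same either way. */
int main(void)
{
	static const char msg[] = "hello splice\n";
	int pfd[2], out;
	loff_t off = 0;

	if (pipe(pfd) < 0 || (out = open("out.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644)) < 0) {
		perror("setup");
		return 1;
	}
	if (write(pfd[1], msg, sizeof(msg) - 1) < 0) {
		perror("write");
		return 1;
	}
	if (splice(pfd[0], NULL, out, &off, sizeof(msg) - 1, SPLICE_F_MOVE) < 0) {
		perror("splice");
		return 1;
	}
	close(out);
	close(pfd[0]);
	close(pfd[1]);
	return 0;
}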
@@ -706,9 +651,9 @@ out_ret:
  * key here is the 'actor' worker passed in that actually moves the data
  * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
  */
-static ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
-				  struct file *out, loff_t *ppos, size_t len,
-				  unsigned int flags, splice_actor *actor)
+ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
+			   struct file *out, loff_t *ppos, size_t len,
+			   unsigned int flags, splice_actor *actor)
 {
 	int ret, do_wakeup, err;
 	struct splice_desc sd;
@@ -802,6 +747,7 @@ static ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
 
 	return ret;
 }
+EXPORT_SYMBOL(__splice_from_pipe);
 
 ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
 			 loff_t *ppos, size_t len, unsigned int flags,
@@ -99,4 +99,8 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
 				loff_t *, size_t, unsigned int,
 				splice_actor *);
 
+extern ssize_t __splice_from_pipe(struct pipe_inode_info *, struct file *,
+				  loff_t *, size_t, unsigned int,
+				  splice_actor *);
+
 #endif
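What the export plus the declaration above enables, as a hypothetical sketch (my_fs_splice_write and my_actor are invented names): a filesystem can wrap the generic pipe-draining loop in its own locking and supply its own actor instead of going through splice_from_pipe().

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/pipe_fs_i.h>

/* Hypothetical actor: a real one copies sd->len bytes from buf to the
 * destination and returns the number of bytes it consumed (or -errno). */
static int my_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		    struct splice_desc *sd)
{
	return sd->len;
}

/* Hypothetical .splice_write built on the newly exported helper. */
static ssize_t my_fs_splice_write(struct pipe_inode_info *pipe, struct file *out,
				  loff_t *ppos, size_t len, unsigned int flags)
{
	struct inode *inode = out->f_mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);	/* filesystem-specific serialization */
	ret = __splice_from_pipe(pipe, out, ppos, len, flags, my_actor);
	mutex_unlock(&inode->i_mutex);

	return ret;
}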
@@ -204,7 +204,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if (page_to_pfn(page) < q->bounce_pfn)
+		if (page_to_pfn(page) <= q->bounce_pfn)
 			continue;
 
 		/*
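Together with the blk_recount_segments() change further up, the convention after this merge is that bounce_pfn holds the highest page frame the device can address directly, so the tests are inclusive. A tiny illustrative predicate (not the kernel function) capturing that boundary:

/* Illustrative only: with bounce_pfn holding the highest directly
 * addressable PFN, exactly the pages strictly above it need bouncing. */
static inline int page_needs_bounce(unsigned long pfn, unsigned long bounce_pfn)
{
	return pfn > bounce_pfn;
}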