/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/loop.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>		/* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/splice.h>

#include <asm/uaccess.h>

static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);

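/*
 * "max_part" is a module parameter (e.g. "modprobe loop max_part=63"): it
 * reserves that many partition minors per loop device, so a partitioned
 * disk image attached with losetup exposes nodes such as /dev/loop0p1
 * directly, without stacking device-mapper (kpartx) on top of loop.
 */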
static int max_part;
static int part_shift;

/*
 * Transfer functions
 */
static int transfer_none(struct loop_device *lo, int cmd,
			 struct page *raw_page, unsigned raw_off,
			 struct page *loop_page, unsigned loop_off,
			 int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;

	if (cmd == READ)
		memcpy(loop_buf, raw_buf, size);
	else
		memcpy(raw_buf, loop_buf, size);

	kunmap_atomic(raw_buf, KM_USER0);
	kunmap_atomic(loop_buf, KM_USER1);
	cond_resched();
	return 0;
}

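/*
 * Simple XOR obfuscation: every byte moved between the raw page and the
 * loop page is XORed with the device's encryption key, cycling through
 * the key bytes; the copy direction depends on READ vs. WRITE.
 */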
static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(raw_buf, KM_USER0);
	kunmap_atomic(loop_buf, KM_USER1);
	cond_resched();
	return 0;
}

static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
	.transfer = transfer_none,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};

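/*
 * Size of the loop device in 512-byte sectors, derived from the backing
 * file size minus lo_offset and clamped to lo_sizelimit when one is set.
 */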
static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	loff_t size, offset, loopsize;

	/* Compute loopsize in bytes */
	size = i_size_read(file->f_mapping->host);
	offset = lo->lo_offset;
	loopsize = size - offset;
	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
		loopsize = lo->lo_sizelimit;

	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}

static int
figure_loop_size(struct loop_device *lo)
{
	loff_t size = get_loop_size(lo, lo->lo_backing_file);
	sector_t x = (sector_t)size;

	if (unlikely((loff_t)x != size))
		return -EFBIG;

	set_capacity(lo->lo_disk, x);
	return 0;
}

static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	if (unlikely(!lo->transfer))
		return 0;

	return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
}

/**
 * do_lo_send_aops - helper for writing data to a loop device
 *
 * This is the fast version for backing filesystems which implement the address
 * space operations write_begin and write_end.
 */
static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
		loff_t pos, struct page *unused)
{
	struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
	struct address_space *mapping = file->f_mapping;
	pgoff_t index;
	unsigned offset, bv_offs;
	int len, ret;

	mutex_lock(&mapping->host->i_mutex);
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
	bv_offs = bvec->bv_offset;
	len = bvec->bv_len;
	while (len > 0) {
		sector_t IV;
		unsigned size, copied;
		int transfer_result;
		struct page *page;
		void *fsdata;

		IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
		size = PAGE_CACHE_SIZE - offset;
		if (size > len)
			size = len;

		ret = pagecache_write_begin(file, mapping, pos, size, 0,
					    &page, &fsdata);
		if (ret)
			goto fail;

		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
				bvec->bv_page, bv_offs, size, IV);
		copied = size;
		if (unlikely(transfer_result))
			copied = 0;

		ret = pagecache_write_end(file, mapping, pos, size, copied,
					  page, fsdata);
		if (ret < 0 || ret != copied)
			goto fail;

		if (unlikely(transfer_result))
			goto fail;

		bv_offs += copied;
		len -= copied;
		offset = 0;
		index++;
		pos += copied;
	}
	ret = 0;
out:
	mutex_unlock(&mapping->host->i_mutex);
	return ret;
fail:
	ret = -1;
	goto out;
}

/**
 * __do_lo_send_write - helper for writing data to a loop device
 *
 * This helper just factors out common code between do_lo_send_direct_write()
 * and do_lo_send_write().
 */
static int __do_lo_send_write(struct file *file,
		u8 *buf, const int len, loff_t pos)
{
	ssize_t bw;
	mm_segment_t old_fs = get_fs();

	set_fs(get_ds());
	bw = file->f_op->write(file, buf, len, &pos);
	set_fs(old_fs);
	if (likely(bw == len))
		return 0;
	printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
			(unsigned long long)pos, len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}

/**
 * do_lo_send_direct_write - helper for writing data to a loop device
 *
 * This is the fast, non-transforming version for backing filesystems which do
 * not implement the address space operations write_begin and write_end.
 * It uses the write file operation which should be present on all writeable
 * filesystems.
 */
static int do_lo_send_direct_write(struct loop_device *lo,
		struct bio_vec *bvec, loff_t pos, struct page *page)
{
	ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
			kmap(bvec->bv_page) + bvec->bv_offset,
			bvec->bv_len, pos);
	kunmap(bvec->bv_page);
	cond_resched();
	return bw;
}

/**
 * do_lo_send_write - helper for writing data to a loop device
 *
 * This is the slow, transforming version for filesystems which do not
 * implement the address space operations write_begin and write_end.  It
 * uses the write file operation which should be present on all writeable
 * filesystems.
 *
 * Using fops->write is slower than using aops->{prepare,commit}_write in the
 * transforming case because we need to double buffer the data as we cannot do
 * the transformations in place as we do not have direct access to the
 * destination pages of the backing file.
 */
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
		loff_t pos, struct page *page)
{
	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
			bvec->bv_offset, bvec->bv_len, pos >> 9);
	if (likely(!ret))
		return __do_lo_send_write(lo->lo_backing_file,
				page_address(page), bvec->bv_len,
				pos);
	printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
	if (ret > 0)
		ret = -EIO;
	return ret;
}

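/*
 * Write a bio to the backing file, picking one of the helpers above:
 * do_lo_send_aops() when the backing filesystem provides write_begin/
 * write_end, do_lo_send_direct_write() for untransformed data otherwise,
 * and do_lo_send_write() with a bounce page when a transfer function
 * other than transfer_none is active.
 */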
static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
{
	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
			struct page *page);
	struct bio_vec *bvec;
	struct page *page = NULL;
	int i, ret = 0;

	do_lo_send = do_lo_send_aops;
	if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
		do_lo_send = do_lo_send_direct_write;
		if (lo->transfer != transfer_none) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page))
				goto fail;
			kmap(page);
			do_lo_send = do_lo_send_write;
		}
	}
	bio_for_each_segment(bvec, bio, i) {
		ret = do_lo_send(lo, bvec, pos, page);
		if (ret < 0)
			break;
		pos += bvec->bv_len;
	}
	if (page) {
		kunmap(page);
		__free_page(page);
	}
out:
	return ret;
fail:
	printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
	ret = -ENOMEM;
	goto out;
}

struct lo_read_data {
	struct loop_device *lo;
	struct page *page;
	unsigned offset;
	int bsize;
};

static int
lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		struct splice_desc *sd)
{
	struct lo_read_data *p = sd->u.data;
	struct loop_device *lo = p->lo;
	struct page *page = buf->page;
	sector_t IV;
	int size, ret;

	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
							(buf->offset >> 9);
	size = sd->len;
	if (size > p->bsize)
		size = p->bsize;

	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
		printk(KERN_ERR "loop: transfer error block %ld\n",
		       page->index);
		size = -EINVAL;
	}

	flush_dcache_page(p->page);

	if (size > 0)
		p->offset += size;

	return size;
}

static int
lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, lo_splice_actor);
}

static int
do_lo_receive(struct loop_device *lo,
	      struct bio_vec *bvec, int bsize, loff_t pos)
{
	struct lo_read_data cookie;
	struct splice_desc sd;
	struct file *file;
	long retval;

	cookie.lo = lo;
	cookie.page = bvec->bv_page;
	cookie.offset = bvec->bv_offset;
	cookie.bsize = bsize;

	sd.len = 0;
	sd.total_len = bvec->bv_len;
	sd.flags = 0;
	sd.pos = pos;
	sd.u.data = &cookie;

	file = lo->lo_backing_file;
	retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);

	if (retval < 0)
		return retval;

	return 0;
}

static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
	struct bio_vec *bvec;
	int i, ret = 0;

	bio_for_each_segment(bvec, bio, i) {
		ret = do_lo_receive(lo, bvec, bsize, pos);
		if (ret < 0)
			break;
		pos += bvec->bv_len;
	}
	return ret;
}

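/*
 * Handle one bio: writes go through lo_send(), reads through lo_receive().
 * Barrier writes are bracketed by vfs_fsync() calls on the backing file;
 * if the file has no fsync operation the barrier fails with -EOPNOTSUPP.
 */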
static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
{
	loff_t pos;
	int ret;

	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;

	if (bio_rw(bio) == WRITE) {
		int barrier = bio_barrier(bio);
		struct file *file = lo->lo_backing_file;

		if (barrier) {
			if (unlikely(!file->f_op->fsync)) {
				ret = -EOPNOTSUPP;
				goto out;
			}

			ret = vfs_fsync(file, file->f_path.dentry, 0);
			if (unlikely(ret)) {
				ret = -EIO;
				goto out;
			}
		}

		ret = lo_send(lo, bio, pos);

		if (barrier && !ret) {
			ret = vfs_fsync(file, file->f_path.dentry, 0);
			if (unlikely(ret))
				ret = -EIO;
		}
	} else
		ret = lo_receive(lo, bio, lo->lo_blocksize, pos);

out:
	return ret;
}

/*
 * Add bio to back of pending list
 */
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
	bio_list_add(&lo->lo_bio_list, bio);
}

/*
 * Grab first pending buffer
 */
static struct bio *loop_get_bio(struct loop_device *lo)
{
	return bio_list_pop(&lo->lo_bio_list);
}

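/*
 * make_request_fn for the loop queue: queue the incoming bio on
 * lo_bio_list and wake the worker thread, failing the bio if the device
 * is not bound or a write is attempted on a read-only loop device.
 */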
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
{
	struct loop_device *lo = q->queuedata;
	int rw = bio_rw(old_bio);

	if (rw == READA)
		rw = READ;

	BUG_ON(!lo || (rw != READ && rw != WRITE));

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_state != Lo_bound)
		goto out;
	if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
		goto out;
	loop_add_bio(lo, old_bio);
	wake_up(&lo->lo_event);
	spin_unlock_irq(&lo->lo_lock);
	return 0;

out:
	spin_unlock_irq(&lo->lo_lock);
	bio_io_error(old_bio);
	return 0;
}

/*
 * kick off io on the underlying address space
 */
static void loop_unplug(struct request_queue *q)
{
	struct loop_device *lo = q->queuedata;

	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
	blk_run_address_space(lo->lo_backing_file->f_mapping);
}

struct switch_request {
	struct file *file;
	struct completion wait;
};

static void do_loop_switch(struct loop_device *, struct switch_request *);

static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
{
	if (unlikely(!bio->bi_bdev)) {
		do_loop_switch(lo, bio->bi_private);
		bio_put(bio);
	} else {
		int ret = do_bio_filebacked(lo, bio);
		bio_endio(bio, ret);
	}
}

/*
 * worker thread that handles reads/writes to file backed loop devices,
 * to avoid blocking in our make_request_fn. it also does loop decrypting
 * on reads for block backed loop, as that is too heavy to do from
 * b_end_io context where irqs may be disabled.
 *
 * Loop explanation:  loop_clr_fd() sets lo_state to Lo_rundown before
 * calling kthread_stop().  Therefore once kthread_should_stop() is
 * true, make_request will not place any more requests.  Therefore
 * once kthread_should_stop() is true and lo_bio_list is empty, we are
 * done with the loop.
 */
static int loop_thread(void *data)
{
	struct loop_device *lo = data;
	struct bio *bio;

	set_user_nice(current, -20);

	while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {

		wait_event_interruptible(lo->lo_event,
				!bio_list_empty(&lo->lo_bio_list) ||
				kthread_should_stop());

		if (bio_list_empty(&lo->lo_bio_list))
			continue;
		spin_lock_irq(&lo->lo_lock);
		bio = loop_get_bio(lo);
		spin_unlock_irq(&lo->lo_lock);

		BUG_ON(!bio);
		loop_handle_bio(lo, bio);
	}

	return 0;
}

/*
 * loop_switch performs the hard work of switching a backing store.
 * First it needs to flush existing IO, it does this by sending a magic
 * BIO down the pipe. The completion of this BIO does the actual switch.
 */
static int loop_switch(struct loop_device *lo, struct file *file)
{
	struct switch_request w;
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return -ENOMEM;
	init_completion(&w.wait);
	w.file = file;
	bio->bi_private = &w;
	bio->bi_bdev = NULL;
	loop_make_request(lo->lo_queue, bio);
	wait_for_completion(&w.wait);
	return 0;
}

/*
 * Helper to flush the IOs in loop, but keeping loop thread running
 */
static int loop_flush(struct loop_device *lo)
{
	/* loop not yet configured, no running thread, nothing to flush */
	if (!lo->lo_thread)
		return 0;

	return loop_switch(lo, NULL);
}

/*
 * Do the actual switch; called from the BIO completion routine
 */
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
	struct file *file = p->file;
	struct file *old_file = lo->lo_backing_file;
	struct address_space *mapping;

	/* if no new file, only flush of queued bios requested */
	if (!file)
		goto out;

	mapping = file->f_mapping;
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
		mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
out:
	complete(&p->wait);
}

/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file, *old_file;
	struct inode *inode;
	int error;

	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out;

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	inode = file->f_mapping->host;
	old_file = lo->lo_backing_file;

	error = -EINVAL;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		goto out_putf;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_putf;

	/* and ... switch */
	error = loop_switch(lo, file);
	if (error)
		goto out_putf;

	fput(old_file);
	if (max_part > 0)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	return 0;

out_putf:
	fput(file);
out:
	return error;
}

static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}

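/*
 * loop_set_fd() binds a backing file to an unbound loop device: it
 * rejects recursive setups onto the same device, works out the flags,
 * block size and capacity, wires up the request queue, and starts the
 * per-device worker thread.
 */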
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
		       struct block_device *bdev, unsigned int arg)
{
	struct file *file, *f;
	struct inode *inode;
	struct address_space *mapping;
	unsigned lo_blocksize;
	int lo_flags = 0;
	int error;
	loff_t size;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_putf;

	/* Avoid recursion */
	f = file;
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)
			goto out_putf;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state == Lo_unbound) {
			error = -EINVAL;
			goto out_putf;
		}
		f = l->lo_backing_file;
	}

	mapping = file->f_mapping;
	inode = mapping->host;

	if (!(file->f_mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_READ_ONLY;

	error = -EINVAL;
	if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		const struct address_space_operations *aops = mapping->a_ops;

		if (aops->write_begin)
			lo_flags |= LO_FLAGS_USE_AOPS;
		if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
			lo_flags |= LO_FLAGS_READ_ONLY;

		lo_blocksize = S_ISBLK(inode->i_mode) ?
			inode->i_bdev->bd_block_size : PAGE_SIZE;

		error = 0;
	} else {
		goto out_putf;
	}

	size = get_loop_size(lo, file);

	if ((loff_t)(sector_t)size != size) {
		error = -EFBIG;
		goto out_putf;
	}

	if (!(mode & FMODE_WRITE))
		lo_flags |= LO_FLAGS_READ_ONLY;

	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->lo_blocksize = lo_blocksize;
	lo->lo_device = bdev;
	lo->lo_flags = lo_flags;
	lo->lo_backing_file = file;
	lo->transfer = transfer_none;
	lo->ioctl = NULL;
	lo->lo_sizelimit = 0;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	bio_list_init(&lo->lo_bio_list);

	/*
	 * set queue make_request_fn, and add limits based on lower level
	 * device
	 */
	blk_queue_make_request(lo->lo_queue, loop_make_request);
	lo->lo_queue->queuedata = lo;
	lo->lo_queue->unplug_fn = loop_unplug;

	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);

	set_capacity(lo->lo_disk, size);
	bd_set_size(bdev, size << 9);

	set_blocksize(bdev, lo_blocksize);

	lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
						lo->lo_number);
	if (IS_ERR(lo->lo_thread)) {
		error = PTR_ERR(lo->lo_thread);
		goto out_clr;
	}
	lo->lo_state = Lo_bound;
	wake_up_process(lo->lo_thread);
	if (max_part > 0)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	return 0;

out_clr:
	lo->lo_thread = NULL;
	lo->lo_device = NULL;
	lo->lo_backing_file = NULL;
	lo->lo_flags = 0;
	set_capacity(lo->lo_disk, 0);
	invalidate_bdev(bdev);
	bd_set_size(bdev, 0);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
	lo->lo_state = Lo_unbound;
out_putf:
	fput(file);
out:
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}

static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}

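/*
 * loop_clr_fd() detaches the backing file: the worker thread is stopped,
 * the transfer module released, device state and capacity cleared, and
 * the backing file reference dropped.
 */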
static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
{
	struct file *filp = lo->lo_backing_file;
	gfp_t gfp = lo->old_gfp_mask;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	if (lo->lo_refcnt > 1)	/* we needed one fd for the ioctl */
		return -EBUSY;

	if (filp == NULL)
		return -EINVAL;

	spin_lock_irq(&lo->lo_lock);
	lo->lo_state = Lo_rundown;
	spin_unlock_irq(&lo->lo_lock);

	kthread_stop(lo->lo_thread);

	lo->lo_queue->unplug_fn = NULL;
	lo->lo_backing_file = NULL;

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	lo->lo_flags = 0;
	lo->lo_thread = NULL;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	if (bdev)
		invalidate_bdev(bdev);
	set_capacity(lo->lo_disk, 0);
	if (bdev)
		bd_set_size(bdev, 0);
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	lo->lo_state = Lo_unbound;
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	if (max_part > 0)
		ioctl_by_bdev(bdev, BLKRRPART, 0);
	mutex_unlock(&lo->lo_ctl_mutex);
	/*
	 * Need not hold lo_ctl_mutex to fput backing file.
	 * Calling fput holding lo_ctl_mutex triggers a circular
	 * lock dependency possibility warning as fput can take
	 * bd_mutex which is usually taken before lo_ctl_mutex.
	 */
	fput(filp);
	return 0;
}

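/*
 * Apply a loop_info64 to a bound device: (re)select the transfer
 * function, update offset/sizelimit (re-reading the size if they change),
 * and copy the names, flags and encryption key.  Reconfiguring a device
 * whose encryption key is owned by another user requires CAP_SYS_ADMIN.
 */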
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	uid_t uid = current_uid();

	if (lo->lo_encrypt_key_size &&
	    lo->lo_key_owner != uid &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	err = loop_release_xfer(lo);
	if (err)
		return err;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT)
			return -EINVAL;
		xfer = xfer_funcs[type];
		if (xfer == NULL)
			return -EINVAL;
	} else
		xfer = NULL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		return err;

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit) {
		lo->lo_offset = info->lo_offset;
		lo->lo_sizelimit = info->lo_sizelimit;
		if (figure_loop_size(lo))
			return -EFBIG;
	}

	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
	     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
		lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	return 0;
}

static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct file *file = lo->lo_backing_file;
	struct kstat stat;
	int error;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;
	error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
	if (error)
		return error;
	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_device = huge_encode_dev(stat.dev);
	info->lo_inode = stat.ino;
	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}
	return 0;
}

static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}

static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}

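/*
 * LOOP_SET_CAPACITY: re-read the size of the backing file and propagate
 * it to the gendisk and the block device, so a loop device can grow
 * without being detached (e.g. after appending to the backing file and
 * before running a filesystem grow tool).
 *
 * A minimal userspace sketch (illustrative only; "fd" is an open file
 * descriptor on the loop device node):
 *
 *	if (ioctl(fd, LOOP_SET_CAPACITY, 0) < 0)
 *		perror("ioctl LOOP_SET_CAPACITY");
 */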
static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
sector_t sec;
|
|
|
|
loff_t sz;
|
|
|
|
|
|
|
|
err = -ENXIO;
|
|
|
|
if (unlikely(lo->lo_state != Lo_bound))
|
|
|
|
goto out;
|
|
|
|
err = figure_loop_size(lo);
|
|
|
|
if (unlikely(err))
|
|
|
|
goto out;
|
|
|
|
sec = get_capacity(lo->lo_disk);
|
|
|
|
/* the width of sector_t may be too narrow for the bit shift */
|
|
|
|
sz = sec;
|
|
|
|
sz <<= 9;
|
|
|
|
mutex_lock(&bdev->bd_mutex);
|
|
|
|
bd_set_size(bdev, sz);
|
|
|
|
mutex_unlock(&bdev->bd_mutex);
|
|
|
|
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2008-03-02 14:29:48 +00:00
|
|
|
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned int cmd, unsigned long arg)
|
|
|
|
{
|
2008-03-02 14:29:48 +00:00
|
|
|
struct loop_device *lo = bdev->bd_disk->private_data;
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
|
loop: fix circular locking in loop_clr_fd()
With CONFIG_PROVE_LOCKING enabled
$ losetup /dev/loop0 file
$ losetup -o 32256 /dev/loop1 /dev/loop0
$ losetup -d /dev/loop1
$ losetup -d /dev/loop0
triggers a [ INFO: possible circular locking dependency detected ]
I think this warning is a false positive.
Open/close on a loop device acquires the device's bd_mutex before
acquiring its lo_ctl_mutex. For ioctl(LOOP_CLR_FD), after acquiring
lo_ctl_mutex, fput on the backing_file might acquire the bd_mutex of a
device, if the backing file is a device and this is the last reference
to the file being dropped. But a circular chain of backing devices (say
loop2->loop1->loop0->loop2) is guaranteed to be impossible, which
guarantees that this can never deadlock.
So this warning should be suppressed. It is very difficult to annotate
lockdep correctly so that it does not warn here. A simple way to silence
lockdep would be to mark the lo_ctl_mutex taken in ioctl as a subclass,
but this might mask other real bugs.
@@ -1164,7 +1164,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock_nested(&lo->lo_ctl_mutex, 1);
switch (cmd) {
case LOOP_SET_FD:
err = loop_set_fd(lo, mode, bdev, arg);
Or actually marking the bd_mutex taken after lo_ctl_mutex as a subclass
could be a better solution.
Luckily it is easy to avoid calling fput on the backing file with
lo_ctl_mutex held, so no lockdep annotation is required.
If you do not like the special handling of the lo_ctl_mutex just for the
LOOP_CLR_FD ioctl in lo_ioctl(), the mutex handling could be moved inside
each of the individual ioctl handlers and I could send you another patch.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
2009-03-24 11:33:41 +00:00
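A minimal sketch of the teardown pattern described above (the helper name
is illustrative, not the actual patch): all device state is cleared while
lo_ctl_mutex is held, the mutex is dropped, and only then is the final
fput() issued, so any bd_mutex taken inside fput() never nests under
lo_ctl_mutex.

static void loop_clr_fd_finish(struct loop_device *lo, struct file *filp)
{
	/* device state has already been torn down under lo_ctl_mutex */
	mutex_unlock(&lo->lo_ctl_mutex);	/* drop the control mutex first... */
	fput(filp);				/* ...so fput() cannot nest bd_mutex inside it */
}

The caller (the LOOP_CLR_FD case in lo_ioctl() below) then skips its own
mutex_unlock(), which is what the out_unlocked label is for.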
|
|
|
mutex_lock_nested(&lo->lo_ctl_mutex, 1);
|
2005-04-16 22:20:36 +00:00
|
|
|
switch (cmd) {
|
|
|
|
case LOOP_SET_FD:
|
2008-03-02 14:29:48 +00:00
|
|
|
err = loop_set_fd(lo, mode, bdev, arg);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
case LOOP_CHANGE_FD:
|
2008-03-02 14:29:48 +00:00
|
|
|
err = loop_change_fd(lo, bdev, arg);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
case LOOP_CLR_FD:
|
2009-03-24 11:33:41 +00:00
|
|
|
/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
|
2008-03-02 14:29:48 +00:00
|
|
|
err = loop_clr_fd(lo, bdev);
|
2009-03-24 11:33:41 +00:00
|
|
|
if (!err)
|
|
|
|
goto out_unlocked;
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
case LOOP_SET_STATUS:
|
|
|
|
err = loop_set_status_old(lo, (struct loop_info __user *) arg);
|
|
|
|
break;
|
|
|
|
case LOOP_GET_STATUS:
|
|
|
|
err = loop_get_status_old(lo, (struct loop_info __user *) arg);
|
|
|
|
break;
|
|
|
|
case LOOP_SET_STATUS64:
|
|
|
|
err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
|
|
|
|
break;
|
|
|
|
case LOOP_GET_STATUS64:
|
|
|
|
err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
|
|
|
|
break;
|
2009-03-31 22:23:43 +00:00
|
|
|
case LOOP_SET_CAPACITY:
|
|
|
|
err = -EPERM;
|
|
|
|
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
|
|
|
|
err = loop_set_capacity(lo, bdev);
|
|
|
|
break;
|
2005-04-16 22:20:36 +00:00
|
|
|
default:
|
|
|
|
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
|
|
|
|
}
|
2006-03-23 11:00:38 +00:00
|
|
|
mutex_unlock(&lo->lo_ctl_mutex);
|
2009-03-24 11:33:41 +00:00
|
|
|
|
|
|
|
out_unlocked:
|
2005-04-16 22:20:36 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2006-08-29 18:06:14 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
struct compat_loop_info {
|
|
|
|
compat_int_t lo_number; /* ioctl r/o */
|
|
|
|
compat_dev_t lo_device; /* ioctl r/o */
|
|
|
|
compat_ulong_t lo_inode; /* ioctl r/o */
|
|
|
|
compat_dev_t lo_rdevice; /* ioctl r/o */
|
|
|
|
compat_int_t lo_offset;
|
|
|
|
compat_int_t lo_encrypt_type;
|
|
|
|
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
|
|
|
|
compat_int_t lo_flags; /* ioctl r/o */
|
|
|
|
char lo_name[LO_NAME_SIZE];
|
|
|
|
unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
|
|
|
|
compat_ulong_t lo_init[2];
|
|
|
|
char reserved[4];
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Transfer 32-bit compatibility structure in userspace to 64-bit loop info
|
|
|
|
* - noinlined to reduce stack space usage in main part of driver
|
|
|
|
*/
|
|
|
|
static noinline int
|
2006-10-10 21:48:27 +00:00
|
|
|
loop_info64_from_compat(const struct compat_loop_info __user *arg,
|
2006-08-29 18:06:14 +00:00
|
|
|
struct loop_info64 *info64)
|
|
|
|
{
|
|
|
|
struct compat_loop_info info;
|
|
|
|
|
|
|
|
if (copy_from_user(&info, arg, sizeof(info)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
memset(info64, 0, sizeof(*info64));
|
|
|
|
info64->lo_number = info.lo_number;
|
|
|
|
info64->lo_device = info.lo_device;
|
|
|
|
info64->lo_inode = info.lo_inode;
|
|
|
|
info64->lo_rdevice = info.lo_rdevice;
|
|
|
|
info64->lo_offset = info.lo_offset;
|
|
|
|
info64->lo_sizelimit = 0;
|
|
|
|
info64->lo_encrypt_type = info.lo_encrypt_type;
|
|
|
|
info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
|
|
|
|
info64->lo_flags = info.lo_flags;
|
|
|
|
info64->lo_init[0] = info.lo_init[0];
|
|
|
|
info64->lo_init[1] = info.lo_init[1];
|
|
|
|
if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
|
|
|
|
memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
|
|
|
|
else
|
|
|
|
memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
|
|
|
|
memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Transfer 64-bit loop info to 32-bit compatibility structure in userspace
|
|
|
|
* - noinlined to reduce stack space usage in main part of driver
|
|
|
|
*/
|
|
|
|
static noinline int
|
|
|
|
loop_info64_to_compat(const struct loop_info64 *info64,
|
|
|
|
struct compat_loop_info __user *arg)
|
|
|
|
{
|
|
|
|
struct compat_loop_info info;
|
|
|
|
|
|
|
|
memset(&info, 0, sizeof(info));
|
|
|
|
info.lo_number = info64->lo_number;
|
|
|
|
info.lo_device = info64->lo_device;
|
|
|
|
info.lo_inode = info64->lo_inode;
|
|
|
|
info.lo_rdevice = info64->lo_rdevice;
|
|
|
|
info.lo_offset = info64->lo_offset;
|
|
|
|
info.lo_encrypt_type = info64->lo_encrypt_type;
|
|
|
|
info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
|
|
|
|
info.lo_flags = info64->lo_flags;
|
|
|
|
info.lo_init[0] = info64->lo_init[0];
|
|
|
|
info.lo_init[1] = info64->lo_init[1];
|
|
|
|
if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
|
|
|
|
memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
|
|
|
|
else
|
|
|
|
memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
|
|
|
|
memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
|
|
|
|
|
|
|
|
/* error in case values were truncated */
|
|
|
|
if (info.lo_device != info64->lo_device ||
|
|
|
|
info.lo_rdevice != info64->lo_rdevice ||
|
|
|
|
info.lo_inode != info64->lo_inode ||
|
|
|
|
info.lo_offset != info64->lo_offset ||
|
|
|
|
info.lo_init[0] != info64->lo_init[0] ||
|
|
|
|
info.lo_init[1] != info64->lo_init[1])
|
|
|
|
return -EOVERFLOW;
|
|
|
|
|
|
|
|
if (copy_to_user(arg, &info, sizeof(info)))
|
|
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_set_status_compat(struct loop_device *lo,
|
|
|
|
const struct compat_loop_info __user *arg)
|
|
|
|
{
|
|
|
|
struct loop_info64 info64;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = loop_info64_from_compat(arg, &info64);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
return loop_set_status(lo, &info64);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_get_status_compat(struct loop_device *lo,
|
|
|
|
struct compat_loop_info __user *arg)
|
|
|
|
{
|
|
|
|
struct loop_info64 info64;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (!arg)
|
|
|
|
err = -EINVAL;
|
|
|
|
if (!err)
|
|
|
|
err = loop_get_status(lo, &info64);
|
|
|
|
if (!err)
|
|
|
|
err = loop_info64_to_compat(&info64, arg);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2008-03-02 14:29:48 +00:00
|
|
|
static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
|
|
|
|
unsigned int cmd, unsigned long arg)
|
2006-08-29 18:06:14 +00:00
|
|
|
{
|
2008-03-02 14:29:48 +00:00
|
|
|
struct loop_device *lo = bdev->bd_disk->private_data;
|
2006-08-29 18:06:14 +00:00
|
|
|
int err;
|
|
|
|
|
|
|
|
switch(cmd) {
|
|
|
|
case LOOP_SET_STATUS:
|
|
|
|
mutex_lock(&lo->lo_ctl_mutex);
|
|
|
|
err = loop_set_status_compat(
|
|
|
|
lo, (const struct compat_loop_info __user *) arg);
|
|
|
|
mutex_unlock(&lo->lo_ctl_mutex);
|
|
|
|
break;
|
|
|
|
case LOOP_GET_STATUS:
|
|
|
|
mutex_lock(&lo->lo_ctl_mutex);
|
|
|
|
err = loop_get_status_compat(
|
|
|
|
lo, (struct compat_loop_info __user *) arg);
|
|
|
|
mutex_unlock(&lo->lo_ctl_mutex);
|
|
|
|
break;
|
2009-03-31 22:23:43 +00:00
|
|
|
case LOOP_SET_CAPACITY:
|
2006-08-29 18:06:14 +00:00
|
|
|
case LOOP_CLR_FD:
|
|
|
|
case LOOP_GET_STATUS64:
|
|
|
|
case LOOP_SET_STATUS64:
|
|
|
|
arg = (unsigned long) compat_ptr(arg);
|
|
|
|
case LOOP_SET_FD:
|
|
|
|
case LOOP_CHANGE_FD:
|
2008-03-02 14:29:48 +00:00
|
|
|
err = lo_ioctl(bdev, mode, cmd, arg);
|
2006-08-29 18:06:14 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
err = -ENOIOCTLCMD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-03-02 14:29:48 +00:00
|
|
|
static int lo_open(struct block_device *bdev, fmode_t mode)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2008-03-02 14:29:48 +00:00
|
|
|
struct loop_device *lo = bdev->bd_disk->private_data;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-03-23 11:00:38 +00:00
|
|
|
mutex_lock(&lo->lo_ctl_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
lo->lo_refcnt++;
|
2006-03-23 11:00:38 +00:00
|
|
|
mutex_unlock(&lo->lo_ctl_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-03-02 14:29:48 +00:00
|
|
|
static int lo_release(struct gendisk *disk, fmode_t mode)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2008-03-02 14:29:48 +00:00
|
|
|
struct loop_device *lo = disk->private_data;
|
2009-04-07 11:48:21 +00:00
|
|
|
int err;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-03-23 11:00:38 +00:00
|
|
|
mutex_lock(&lo->lo_ctl_mutex);
|
2008-02-06 09:36:27 +00:00
|
|
|
|
2008-12-12 13:48:27 +00:00
|
|
|
if (--lo->lo_refcnt)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
|
|
|
|
/*
|
|
|
|
* In autoclear mode, stop the loop thread
|
|
|
|
* and remove configuration after last close.
|
|
|
|
*/
|
2009-04-07 11:48:21 +00:00
|
|
|
err = loop_clr_fd(lo, NULL);
|
|
|
|
if (!err)
|
|
|
|
goto out_unlocked;
|
2008-12-12 13:48:27 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Otherwise keep thread (if running) and config,
|
|
|
|
* but flush possible ongoing bios in thread.
|
|
|
|
*/
|
|
|
|
loop_flush(lo);
|
|
|
|
}
|
2008-02-06 09:36:27 +00:00
|
|
|
|
2008-12-12 13:48:27 +00:00
|
|
|
out:
|
2006-03-23 11:00:38 +00:00
|
|
|
mutex_unlock(&lo->lo_ctl_mutex);
|
2009-04-07 11:48:21 +00:00
|
|
|
out_unlocked:
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct block_device_operations lo_fops = {
|
|
|
|
.owner = THIS_MODULE,
|
2008-03-02 14:29:48 +00:00
|
|
|
.open = lo_open,
|
|
|
|
.release = lo_release,
|
|
|
|
.ioctl = lo_ioctl,
|
2006-08-29 18:06:14 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
2008-03-02 14:29:48 +00:00
|
|
|
.compat_ioctl = lo_compat_ioctl,
|
2006-08-29 18:06:14 +00:00
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* And now the modules code and kernel interface.
|
|
|
|
*/
|
2007-05-08 07:28:20 +00:00
|
|
|
static int max_loop;
|
2005-04-16 22:20:36 +00:00
|
|
|
module_param(max_loop, int, 0);
|
2007-06-08 20:46:44 +00:00
|
|
|
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
|
loop: manage partitions in disk image
This patch allows the loop device to be used with a partitioned disk image.
The original behavior of loop is not modified.
A new parameter, "max_part", is introduced to define how many partitions
we want to be able to manage per loop device.
For instance, to manage 63 partitions per loop device, we do:
# modprobe loop max_part=63
# ls -l /dev/loop?*
brw-rw---- 1 root disk 7, 0 2008-03-05 14:55 /dev/loop0
brw-rw---- 1 root disk 7, 64 2008-03-05 14:55 /dev/loop1
brw-rw---- 1 root disk 7, 128 2008-03-05 14:55 /dev/loop2
brw-rw---- 1 root disk 7, 192 2008-03-05 14:55 /dev/loop3
brw-rw---- 1 root disk 7, 256 2008-03-05 14:55 /dev/loop4
brw-rw---- 1 root disk 7, 320 2008-03-05 14:55 /dev/loop5
brw-rw---- 1 root disk 7, 384 2008-03-05 14:55 /dev/loop6
brw-rw---- 1 root disk 7, 448 2008-03-05 14:55 /dev/loop7
And to attach a raw partitioned disk image, the original losetup is used:
# losetup -f etch.img
# ls -l /dev/loop?*
brw-rw---- 1 root disk 7, 0 2008-03-05 14:55 /dev/loop0
brw-rw---- 1 root disk 7, 1 2008-03-05 14:57 /dev/loop0p1
brw-rw---- 1 root disk 7, 2 2008-03-05 14:57 /dev/loop0p2
brw-rw---- 1 root disk 7, 5 2008-03-05 14:57 /dev/loop0p5
brw-rw---- 1 root disk 7, 64 2008-03-05 14:55 /dev/loop1
brw-rw---- 1 root disk 7, 128 2008-03-05 14:55 /dev/loop2
brw-rw---- 1 root disk 7, 192 2008-03-05 14:55 /dev/loop3
brw-rw---- 1 root disk 7, 256 2008-03-05 14:55 /dev/loop4
brw-rw---- 1 root disk 7, 320 2008-03-05 14:55 /dev/loop5
brw-rw---- 1 root disk 7, 384 2008-03-05 14:55 /dev/loop6
brw-rw---- 1 root disk 7, 448 2008-03-05 14:55 /dev/loop7
# mount /dev/loop0p1 /mnt
# ls /mnt
bench cdrom home lib mnt root srv usr
bin dev initrd lost+found opt sbin sys var
boot etc initrd.img media proc selinux tmp vmlinuz
# umount /mnt
# losetup -d /dev/loop0
Of course, the same behavior can be achieved by using kpartx on a loop
device, but modifying loop avoids stacking several layers of block
devices (loop + device mapper), and this is a very light modification
(40% of the changes are to manage the new parameter).
Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
2008-03-26 11:11:53 +00:00
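A hedged userspace sketch of the minor-number layout shown in the listings
above; fls_approx() stands in for the kernel's fls(), and the arithmetic
mirrors part_shift = fls(max_part) and first_minor = i << part_shift used
later in this file.

#include <stdio.h>

/* 1-based position of the highest set bit, like the kernel's fls() */
static int fls_approx(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int max_part = 63;
	int part_shift = fls_approx(max_part);	/* 6: 64 minors per loop device */

	printf("loop1   -> first minor %u\n", 1u << part_shift);	/* 64 */
	printf("loop0p5 -> minor %u\n", (0u << part_shift) + 5);	/* 5 */
	return 0;
}

With max_part=63 this reproduces the numbers in the listing: /dev/loop1
gets minor 64 and /dev/loop0p5 gets minor 5.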
|
|
|
module_param(max_part, int, 0);
|
|
|
|
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
|
2005-04-16 22:20:36 +00:00
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
|
|
|
|
|
|
|
|
int loop_register_transfer(struct loop_func_table *funcs)
|
|
|
|
{
|
|
|
|
unsigned int n = funcs->number;
|
|
|
|
|
|
|
|
if (n >= MAX_LO_CRYPT || xfer_funcs[n])
|
|
|
|
return -EINVAL;
|
|
|
|
xfer_funcs[n] = funcs;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int loop_unregister_transfer(int number)
|
|
|
|
{
|
|
|
|
unsigned int n = number;
|
|
|
|
struct loop_device *lo;
|
|
|
|
struct loop_func_table *xfer;
|
|
|
|
|
|
|
|
if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
xfer_funcs[n] = NULL;
|
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
list_for_each_entry(lo, &loop_devices, lo_list) {
|
2006-03-23 11:00:38 +00:00
|
|
|
mutex_lock(&lo->lo_ctl_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (lo->lo_encryption == xfer)
|
|
|
|
loop_release_xfer(lo);
|
|
|
|
|
2006-03-23 11:00:38 +00:00
|
|
|
mutex_unlock(&lo->lo_ctl_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(loop_register_transfer);
|
|
|
|
EXPORT_SYMBOL(loop_unregister_transfer);
|
|
|
|
|
2007-06-08 20:46:44 +00:00
|
|
|
static struct loop_device *loop_alloc(int i)
|
2007-05-08 07:28:20 +00:00
|
|
|
{
|
|
|
|
struct loop_device *lo;
|
|
|
|
struct gendisk *disk;
|
|
|
|
|
|
|
|
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
|
|
|
|
if (!lo)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
|
|
|
|
if (!lo->lo_queue)
|
|
|
|
goto out_free_dev;
|
|
|
|
|
2008-03-26 11:11:53 +00:00
|
|
|
disk = lo->lo_disk = alloc_disk(1 << part_shift);
|
2007-05-08 07:28:20 +00:00
|
|
|
if (!disk)
|
|
|
|
goto out_free_queue;
|
|
|
|
|
|
|
|
mutex_init(&lo->lo_ctl_mutex);
|
|
|
|
lo->lo_number = i;
|
|
|
|
lo->lo_thread = NULL;
|
|
|
|
init_waitqueue_head(&lo->lo_event);
|
|
|
|
spin_lock_init(&lo->lo_lock);
|
|
|
|
disk->major = LOOP_MAJOR;
|
2008-03-26 11:11:53 +00:00
|
|
|
disk->first_minor = i << part_shift;
|
2007-05-08 07:28:20 +00:00
|
|
|
disk->fops = &lo_fops;
|
|
|
|
disk->private_data = lo;
|
|
|
|
disk->queue = lo->lo_queue;
|
|
|
|
sprintf(disk->disk_name, "loop%d", i);
|
|
|
|
return lo;
|
|
|
|
|
|
|
|
out_free_queue:
|
|
|
|
blk_cleanup_queue(lo->lo_queue);
|
|
|
|
out_free_dev:
|
|
|
|
kfree(lo);
|
|
|
|
out:
|
2007-05-12 20:23:15 +00:00
|
|
|
return NULL;
|
2007-05-08 07:28:20 +00:00
|
|
|
}
|
|
|
|
|
2007-06-08 20:46:44 +00:00
|
|
|
static void loop_free(struct loop_device *lo)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2007-05-08 07:28:20 +00:00
|
|
|
blk_cleanup_queue(lo->lo_queue);
|
|
|
|
put_disk(lo->lo_disk);
|
|
|
|
list_del(&lo->lo_list);
|
|
|
|
kfree(lo);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-06-08 20:46:44 +00:00
|
|
|
static struct loop_device *loop_init_one(int i)
|
|
|
|
{
|
|
|
|
struct loop_device *lo;
|
|
|
|
|
|
|
|
list_for_each_entry(lo, &loop_devices, lo_list) {
|
|
|
|
if (lo->lo_number == i)
|
|
|
|
return lo;
|
|
|
|
}
|
|
|
|
|
|
|
|
lo = loop_alloc(i);
|
|
|
|
if (lo) {
|
|
|
|
add_disk(lo->lo_disk);
|
|
|
|
list_add_tail(&lo->lo_list, &loop_devices);
|
|
|
|
}
|
|
|
|
return lo;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void loop_del_one(struct loop_device *lo)
|
|
|
|
{
|
|
|
|
del_gendisk(lo->lo_disk);
|
|
|
|
loop_free(lo);
|
|
|
|
}
|
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
static struct kobject *loop_probe(dev_t dev, int *part, void *data)
|
|
|
|
{
|
2007-05-13 09:52:32 +00:00
|
|
|
struct loop_device *lo;
|
2007-05-12 20:23:15 +00:00
|
|
|
struct kobject *kobj;
|
2007-05-08 07:28:20 +00:00
|
|
|
|
2007-05-13 09:52:32 +00:00
|
|
|
mutex_lock(&loop_devices_mutex);
|
|
|
|
lo = loop_init_one(dev & MINORMASK);
|
2007-05-12 20:23:15 +00:00
|
|
|
kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
|
2007-05-08 07:28:20 +00:00
|
|
|
mutex_unlock(&loop_devices_mutex);
|
|
|
|
|
|
|
|
*part = 0;
|
2007-05-12 20:23:15 +00:00
|
|
|
return kobj;
|
2007-05-08 07:28:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int __init loop_init(void)
|
|
|
|
{
|
2007-06-08 20:46:44 +00:00
|
|
|
int i, nr;
|
|
|
|
unsigned long range;
|
|
|
|
struct loop_device *lo, *next;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* loop module now has a feature to instantiate underlying device
|
|
|
|
* structure on-demand, provided that there is an accessible dev node.
|
|
|
|
* However, this will not work well with user space tools that don't
|
|
|
|
* know about such a "feature". In order not to break any existing
|
|
|
|
* tool, we do the following:
|
|
|
|
*
|
|
|
|
* (1) if max_loop is specified, create that many upfront, and this
|
|
|
|
* also becomes a hard limit.
|
|
|
|
* (2) if max_loop is not specified, create 8 loop devices on module
|
|
|
|
* load; users can further extend the number of loop devices by
|
|
|
|
* creating dev nodes themselves and having the kernel automatically
|
|
|
|
* instantiate the actual device on-demand.
|
|
|
|
*/
|
2008-03-26 11:11:53 +00:00
|
|
|
|
|
|
|
part_shift = 0;
|
|
|
|
if (max_part > 0)
|
|
|
|
part_shift = fls(max_part);
|
|
|
|
|
|
|
|
if (max_loop > 1UL << (MINORBITS - part_shift))
|
2007-06-08 20:46:44 +00:00
|
|
|
return -EINVAL;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
if (max_loop) {
|
2007-06-08 20:46:44 +00:00
|
|
|
nr = max_loop;
|
|
|
|
range = max_loop;
|
|
|
|
} else {
|
|
|
|
nr = 8;
|
loop: manage partitions in disk image
This patch allows to use loop device with partitionned disk image.
Original behavior of loop is not modified.
A new parameter is introduced to define how many partition we want to be
able to manage per loop device. This parameter is "max_part".
For instance, to manage 63 partitions / loop device, we will do:
# modprobe loop max_part=63
# ls -l /dev/loop?*
brw-rw---- 1 root disk 7, 0 2008-03-05 14:55 /dev/loop0
brw-rw---- 1 root disk 7, 64 2008-03-05 14:55 /dev/loop1
brw-rw---- 1 root disk 7, 128 2008-03-05 14:55 /dev/loop2
brw-rw---- 1 root disk 7, 192 2008-03-05 14:55 /dev/loop3
brw-rw---- 1 root disk 7, 256 2008-03-05 14:55 /dev/loop4
brw-rw---- 1 root disk 7, 320 2008-03-05 14:55 /dev/loop5
brw-rw---- 1 root disk 7, 384 2008-03-05 14:55 /dev/loop6
brw-rw---- 1 root disk 7, 448 2008-03-05 14:55 /dev/loop7
And to attach a raw partitionned disk image, the original losetup is used:
# losetup -f etch.img
# ls -l /dev/loop?*
brw-rw---- 1 root disk 7, 0 2008-03-05 14:55 /dev/loop0
brw-rw---- 1 root disk 7, 1 2008-03-05 14:57 /dev/loop0p1
brw-rw---- 1 root disk 7, 2 2008-03-05 14:57 /dev/loop0p2
brw-rw---- 1 root disk 7, 5 2008-03-05 14:57 /dev/loop0p5
brw-rw---- 1 root disk 7, 64 2008-03-05 14:55 /dev/loop1
brw-rw---- 1 root disk 7, 128 2008-03-05 14:55 /dev/loop2
brw-rw---- 1 root disk 7, 192 2008-03-05 14:55 /dev/loop3
brw-rw---- 1 root disk 7, 256 2008-03-05 14:55 /dev/loop4
brw-rw---- 1 root disk 7, 320 2008-03-05 14:55 /dev/loop5
brw-rw---- 1 root disk 7, 384 2008-03-05 14:55 /dev/loop6
brw-rw---- 1 root disk 7, 448 2008-03-05 14:55 /dev/loop7
# mount /dev/loop0p1 /mnt
# ls /mnt
bench cdrom home lib mnt root srv usr
bin dev initrd lost+found opt sbin sys var
boot etc initrd.img media proc selinux tmp vmlinuz
# umount /mnt
# losetup -d /dev/loop0
Of course, the same behavior can be done using kpartx on a loop device,
but modifying loop avoids to stack several layers of block device (loop +
device mapper), this is a very light modification (40% of modifications
are to manage the new parameter).
Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
2008-03-26 11:11:53 +00:00
|
|
|
range = 1UL << (MINORBITS - part_shift);
|
2007-06-08 20:46:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (register_blkdev(LOOP_MAJOR, "loop"))
|
|
|
|
return -EIO;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-06-08 20:46:44 +00:00
|
|
|
for (i = 0; i < nr; i++) {
|
|
|
|
lo = loop_alloc(i);
|
|
|
|
if (!lo)
|
|
|
|
goto Enomem;
|
|
|
|
list_add_tail(&lo->lo_list, &loop_devices);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2007-06-08 20:46:44 +00:00
|
|
|
|
|
|
|
/* point of no return */
|
|
|
|
|
|
|
|
list_for_each_entry(lo, &loop_devices, lo_list)
|
|
|
|
add_disk(lo->lo_disk);
|
|
|
|
|
|
|
|
blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
|
|
|
|
THIS_MODULE, loop_probe, NULL, NULL);
|
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
printk(KERN_INFO "loop: module loaded\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
2007-06-08 20:46:44 +00:00
|
|
|
|
|
|
|
Enomem:
|
|
|
|
printk(KERN_INFO "loop: out of memory\n");
|
|
|
|
|
|
|
|
list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
|
|
|
|
loop_free(lo);
|
|
|
|
|
|
|
|
unregister_blkdev(LOOP_MAJOR, "loop");
|
|
|
|
return -ENOMEM;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
static void __exit loop_exit(void)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2007-06-08 20:46:44 +00:00
|
|
|
unsigned long range;
|
2007-05-08 07:28:20 +00:00
|
|
|
struct loop_device *lo, *next;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-03-26 11:11:53 +00:00
|
|
|
range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift);
|
2007-06-08 20:46:44 +00:00
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
|
|
|
|
loop_del_one(lo);
|
|
|
|
|
2007-06-08 20:46:44 +00:00
|
|
|
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
|
2007-07-17 11:03:46 +00:00
|
|
|
unregister_blkdev(LOOP_MAJOR, "loop");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
module_init(loop_init);
|
|
|
|
module_exit(loop_exit);
|
|
|
|
|
|
|
|
#ifndef MODULE
|
|
|
|
static int __init max_loop_setup(char *str)
|
|
|
|
{
|
|
|
|
max_loop = simple_strtol(str, NULL, 0);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
__setup("max_loop=", max_loop_setup);
|
|
|
|
#endif
|