// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 1993 by Theodore Ts'o.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/uaccess.h>
#include <linux/blk-mq.h>
#include <linux/spinlock.h>
#include <uapi/linux/loop.h>

/* Possible states of device */
enum {
        Lo_unbound,
        Lo_bound,
        Lo_rundown,
        Lo_deleting,
};
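
/*
 * Rough lifecycle, as the states are used elsewhere in this file: a device
 * goes Lo_unbound -> Lo_bound when a backing file is attached, and
 * Lo_bound -> Lo_rundown -> Lo_unbound while it is torn down; Lo_deleting
 * marks a device that is being removed via /dev/loop-control.
 */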

struct loop_func_table;

struct loop_device {
        int             lo_number;      /* device index (/dev/loopN) */
        loff_t          lo_offset;      /* data start offset in the backing file */
        loff_t          lo_sizelimit;   /* max exposed size, 0 = no limit */
        int             lo_flags;
        char            lo_file_name[LO_NAME_SIZE];

        struct file     *lo_backing_file;
        struct block_device *lo_device;

        gfp_t           old_gfp_mask;

        spinlock_t      lo_lock;
        int             lo_state;
        spinlock_t      lo_work_lock;
        struct workqueue_struct *workqueue;
        struct work_struct      rootcg_work;
        struct list_head        rootcg_cmd_list;
        struct list_head        idle_worker_list;
        struct rb_root          worker_tree;
        struct timer_list       timer;  /* reaps idle I/O workers */
        bool            use_dio;
        bool            sysfs_inited;

        struct request_queue    *lo_queue;
        struct blk_mq_tag_set   tag_set;
        struct gendisk          *lo_disk;
        struct mutex            lo_mutex;
        bool                    idr_visible;
};

struct loop_cmd {
        struct list_head list_entry;
        bool use_aio; /* use AIO interface to handle I/O */
        atomic_t ref; /* only for aio */
        long ret;
        struct kiocb iocb;
        struct bio_vec *bvec;
        struct cgroup_subsys_state *blkcg_css;
        struct cgroup_subsys_state *memcg_css;
};
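
/*
 * struct loop_cmd is the per-request payload (the blk-mq PDU): it lives in
 * the extra space allocated alongside each struct request and is retrieved
 * with blk_mq_rq_to_pdu(), as the completion paths below do.
 */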

/* Reap an I/O worker after it has been idle for this long. */
#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
/* Default blk-mq hardware queue depth. */
#define LOOP_DEFAULT_HW_Q_DEPTH 128

/* Maps minor numbers to loop_device instances. */
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
static DEFINE_MUTEX(loop_validate_mutex);
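
/*
 * Userspace finds or creates devices through /dev/loop-control, whose idr
 * operations are serialized by loop_ctl_mutex. A typical sequence, adapted
 * from the interface's original description (illustrative, error handling
 * elided):
 *
 *      cfd = open("/dev/loop-control", O_RDWR);
 *      devnr = ioctl(cfd, LOOP_CTL_GET_FREE);  // find or allocate a device
 *      sprintf(loopname, "/dev/loop%i", devnr);
 *      ffd = open("backing-file", O_RDWR);
 *      lfd = open(loopname, O_RDWR);
 *      err = ioctl(lfd, LOOP_SET_FD, ffd);
 */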

/**
 * loop_global_lock_killable() - take locks for safe loop_validate_file() test
 *
 * @lo: struct loop_device
 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
 *
 * Returns 0 on success, -EINTR otherwise.
 *
 * Since loop_validate_file() traverses other "struct loop_device" instances
 * when is_loop_device() is true, we need a global lock for serializing
 * concurrent loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
 */
static int loop_global_lock_killable(struct loop_device *lo, bool global)
{
        int err;

        if (global) {
                err = mutex_lock_killable(&loop_validate_mutex);
                if (err)
                        return err;
        }
        err = mutex_lock_killable(&lo->lo_mutex);
        if (err && global)
                mutex_unlock(&loop_validate_mutex);
        return err;
}

/**
 * loop_global_unlock() - release locks taken by loop_global_lock_killable()
 *
 * @lo: struct loop_device
 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
 */
static void loop_global_unlock(struct loop_device *lo, bool global)
{
        mutex_unlock(&lo->lo_mutex);
        if (global)
                mutex_unlock(&loop_validate_mutex);
}
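
/*
 * Illustrative pairing (a sketch, not a function in this driver): callers
 * that may rebind one loop device on top of another take both locks using
 * the is_loop_device() test referenced by the kerneldoc above:
 *
 *      err = loop_global_lock_killable(lo, is_loop_device(file));
 *      if (err)
 *              return err;
 *      ... validate and attach the backing file ...
 *      loop_global_unlock(lo, is_loop_device(file));
 */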
static int max_part;
static int part_shift;

static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
        loff_t loopsize;

        /* Compute loopsize in bytes */
        loopsize = i_size_read(file->f_mapping->host);
        if (offset > 0)
                loopsize -= offset;
        /* offset is beyond i_size, weird but possible */
        if (loopsize < 0)
                return 0;

        if (sizelimit > 0 && sizelimit < loopsize)
                loopsize = sizelimit;
        /*
         * Unfortunately, if we want to do I/O on the device,
         * the number of 512-byte sectors has to fit into a sector_t.
         */
        return loopsize >> 9;
}
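
/*
 * Worked example (illustrative): for a 1 GiB backing file attached with
 * offset 4096 and no sizelimit, get_size() returns
 * (1073741824 - 4096) >> 9 = 2097144 sectors of 512 bytes.
 */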

static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
        return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}

/*
 * We support direct I/O only if lo_offset is aligned with the logical I/O
 * size of the backing device, and the logical block size of the loop device
 * is not smaller than that of the backing device.
 */
static bool lo_bdev_can_use_dio(struct loop_device *lo,
                struct block_device *backing_bdev)
{
        unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);

        if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
                return false;
        if (lo->lo_offset & (sb_bsize - 1))
                return false;
        return true;
}
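
/*
 * Example (illustrative): with a backing device using 4096-byte logical
 * blocks, a loop device configured with 512-byte logical blocks fails the
 * first check above, and one configured with lo_offset = 2048 fails the
 * second; either way direct I/O is refused and buffered I/O is used.
 */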

static void __loop_update_dio(struct loop_device *lo, bool dio)
{
        struct file *file = lo->lo_backing_file;
        struct inode *inode = file->f_mapping->host;
        struct block_device *backing_bdev = NULL;
        bool use_dio;

        if (S_ISBLK(inode->i_mode))
                backing_bdev = I_BDEV(inode);
        else if (inode->i_sb->s_bdev)
                backing_bdev = inode->i_sb->s_bdev;

        use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
                (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));

        if (lo->use_dio == use_dio)
                return;

        /* flush dirty pages before changing direct IO */
        vfs_fsync(file, 0);

        /*
         * LO_FLAGS_DIRECT_IO is handled like LO_FLAGS_READ_ONLY: both are
         * set by the kernel, and losetup picks up the updated value via
         * ioctl(LOOP_GET_STATUS).
         */
        if (lo->lo_state == Lo_bound)
                blk_mq_freeze_queue(lo->lo_queue);
        lo->use_dio = use_dio;
        if (use_dio)
                lo->lo_flags |= LO_FLAGS_DIRECT_IO;
        else
                lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
        if (lo->lo_state == Lo_bound)
                blk_mq_unfreeze_queue(lo->lo_queue);
}
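
/*
 * Note on the freeze/unfreeze pair above: while the device is bound, the
 * request queue is frozen so that no requests are in flight while the I/O
 * path is switched between buffered and direct mode.
 */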

/**
 * loop_set_size() - sets device size and notifies userspace
 * @lo: struct loop_device to set the size for
 * @size: new size of the loop device
 *
 * Callers must validate that the size passed into this function fits into
 * a sector_t, e.g. using loop_validate_size().
 */
static void loop_set_size(struct loop_device *lo, loff_t size)
{
        /* Send a fallback uevent if set_capacity_and_notify() did not. */
        if (!set_capacity_and_notify(lo->lo_disk, size))
                kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}

static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
        struct iov_iter i;
        ssize_t bw;

        iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);

        bw = vfs_iter_write(file, &i, ppos, 0);

        if (likely(bw == bvec->bv_len))
                return 0;

        printk_ratelimited(KERN_ERR
                "loop: Write error at byte offset %llu, length %i.\n",
                (unsigned long long)*ppos, bvec->bv_len);
        if (bw >= 0)
                bw = -EIO;
        return bw;
}

static int lo_write_simple(struct loop_device *lo, struct request *rq,
                loff_t pos)
{
        struct bio_vec bvec;
        struct req_iterator iter;
        int ret = 0;

        rq_for_each_segment(bvec, rq, iter) {
                ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
                if (ret < 0)
                        break;
                cond_resched();
        }

        return ret;
}

static int lo_read_simple(struct loop_device *lo, struct request *rq,
                loff_t pos)
{
        struct bio_vec bvec;
        struct req_iterator iter;
        struct iov_iter i;
        ssize_t len;

        rq_for_each_segment(bvec, rq, iter) {
                iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
                len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
                if (len < 0)
                        return len;

                flush_dcache_page(bvec.bv_page);

                if (len != bvec.bv_len) {
                        struct bio *bio;

                        /* Short read: zero-fill the rest of the request. */
                        __rq_for_each_bio(bio, rq)
                                zero_fill_bio(bio);
                        break;
                }
                cond_resched();
        }

        return 0;
}

static void loop_clear_limits(struct loop_device *lo, int mode)
{
        struct queue_limits lim = queue_limits_start_update(lo->lo_queue);

        if (mode & FALLOC_FL_ZERO_RANGE)
                lim.max_write_zeroes_sectors = 0;

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                lim.max_hw_discard_sectors = 0;
                lim.discard_granularity = 0;
        }

        queue_limits_commit_update(lo->lo_queue, &lim);
}
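
/*
 * queue_limits_start_update() / queue_limits_commit_update() form an atomic
 * read-modify-write cycle on the queue limits: the snapshot taken at the
 * start is mutated locally and only published on commit, so readers never
 * observe a half-updated set of limits.
 */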

static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
                        int mode)
{
        /*
         * We use fallocate to manipulate the space mappings used by the
         * image file, i.e. to implement discard and zero-range.
         */
        struct file *file = lo->lo_backing_file;
        int ret;

        mode |= FALLOC_FL_KEEP_SIZE;

        if (!bdev_max_discard_sectors(lo->lo_device))
                return -EOPNOTSUPP;

        ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
        if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
                return -EIO;

        /*
         * We initially configure the limits in the hope that fallocate is
         * supported and clear them here if that turns out not to be true.
         */
        if (unlikely(ret == -EOPNOTSUPP))
                loop_clear_limits(lo, mode);

        return ret;
}
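
/*
 * For context (based on the request dispatch elsewhere in the driver, not
 * shown in this excerpt): REQ_OP_DISCARD is mapped to FALLOC_FL_PUNCH_HOLE,
 * and REQ_OP_WRITE_ZEROES to FALLOC_FL_ZERO_RANGE or FALLOC_FL_PUNCH_HOLE
 * depending on whether the caller allows deallocation; FALLOC_FL_KEEP_SIZE
 * is OR'ed in above so the backing file is never extended.
 */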

static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
        int ret = vfs_fsync(lo->lo_backing_file, 0);

        if (unlikely(ret && ret != -EINVAL))
                ret = -EIO;

        return ret;
}

static void lo_complete_rq(struct request *rq)
{
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
        blk_status_t ret = BLK_STS_OK;

        if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
            req_op(rq) != REQ_OP_READ) {
                if (cmd->ret < 0)
                        ret = errno_to_blk_status(cmd->ret);
                goto end_io;
        }

        /*
         * Short READ - if we got some data, advance our request and
         * retry it. If we got no data, end the rest with EIO.
         */
        if (cmd->ret) {
                blk_update_request(rq, BLK_STS_OK, cmd->ret);
                cmd->ret = 0;
                blk_mq_requeue_request(rq, true);
        } else {
                if (cmd->use_aio) {
                        struct bio *bio = rq->bio;

                        while (bio) {
                                zero_fill_bio(bio);
                                bio = bio->bi_next;
                        }
                }
                ret = BLK_STS_IOERR;
end_io:
                blk_mq_end_request(rq, ret);
        }
}

static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
        struct request *rq = blk_mq_rq_from_pdu(cmd);

        /* Only the last reference holder tears down and completes. */
        if (!atomic_dec_and_test(&cmd->ref))
                return;
        kfree(cmd->bvec);
        cmd->bvec = NULL;
        if (likely(!blk_should_fake_timeout(rq->q)))
                blk_mq_complete_request(rq);
}
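
/*
 * Completion flow sketch: in the full driver, lo_rw_aio() initializes
 * cmd->ref to 2 before submitting the kiocb, one reference for the
 * submitting context and one for the completion callback. Whichever of
 * lo_rw_aio() and lo_rw_aio_complete() drops the last reference frees the
 * bvec array and completes the blk-mq request.
 */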

static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
{
        struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

        cmd->ret = ret;
        lo_rw_aio_do_completion(cmd);
}

static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
                     loff_t pos, int rw)
{
        struct iov_iter iter;
        struct req_iterator rq_iter;
        struct bio_vec *bvec;
        struct request *rq = blk_mq_rq_from_pdu(cmd);
        struct bio *bio = rq->bio;
        struct file *file = lo->lo_backing_file;
        struct bio_vec tmp;
        unsigned int offset;
        int nr_bvec = 0;
        int ret;

        rq_for_each_bvec(tmp, rq, rq_iter)
                nr_bvec++;
|
|
|
|
|
2017-09-01 05:09:46 +00:00
|
|
|
if (rq->bio != rq->biotail) {
|
|
|
|
|
2019-02-15 11:13:17 +00:00
|
|
|
bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
|
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 20:55:00 +00:00
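To see concretely what the script buys us at the call site above, here is
the allocation in before/after form (a sketch; the "before" line is
reconstructed for illustration, not quoted from an older tree):

	/* Before: the multiplication can overflow silently. */
	bvec = kmalloc(nr_bvec * sizeof(struct bio_vec), GFP_NOIO);

	/* After: kmalloc_array() returns NULL if the product overflows. */
	bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), GFP_NOIO);
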
		if (!bvec)
			return -EIO;
		cmd->bvec = bvec;

		/*
		 * The bios of the request may be started from the middle of
		 * the 'bvec' because of bio splitting, so we can't directly
		 * copy bio->bi_io_vec to new bvec. The rq_for_each_bvec
		 * API will take care of all details for us.
		 */
		rq_for_each_bvec(tmp, rq, rq_iter) {
			*bvec = tmp;
			bvec++;
		}
		bvec = cmd->bvec;
		offset = 0;
	} else {
		/*
		 * Same here, this bio may be started from the middle of the
		 * 'bvec' because of bio splitting, so the offset from the
		 * bvec must be passed to the iov iterator.
		 */
		offset = bio->bi_iter.bi_bvec_done;
		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	}
	atomic_set(&cmd->ref, 2);
	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
	iter.iov_offset = offset;
	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_complete = lo_rw_aio_complete;
	cmd->iocb.ki_flags = IOCB_DIRECT;
	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
	if (rw == ITER_SOURCE)
		ret = file->f_op->write_iter(&cmd->iocb, &iter);
	else
		ret = file->f_op->read_iter(&cmd->iocb, &iter);

	lo_rw_aio_do_completion(cmd);

	if (ret != -EIOCBQUEUED)
		lo_rw_aio_complete(&cmd->iocb, ret);
	return 0;
}

static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	/*
	 * lo_write_simple and lo_read_simple should have been covered
	 * by an io-submit-style function like lo_rw_aio(); one blocker
	 * is that lo_read_simple() needs to call flush_dcache_page()
	 * after a page is written from the kernel, and that isn't easy
	 * to handle in an io-submit-style function, which submits all
	 * segments of the request at once. Direct read I/O doesn't need
	 * to run flush_dcache_page().
	 */
	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_WRITE_ZEROES:
		/*
		 * If the caller doesn't want deallocation, call zeroout to
		 * write zeroes to the range. Otherwise, punch the range out.
		 */
		return lo_fallocate(lo, rq, pos,
				(rq->cmd_flags & REQ_NOUNMAP) ?
					FALLOC_FL_ZERO_RANGE :
					FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_DISCARD:
		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_WRITE:
		if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
		else
			return lo_write_simple(lo, rq, pos);
	case REQ_OP_READ:
		if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, ITER_DEST);
		else
			return lo_read_simple(lo, rq, pos);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static inline void loop_update_dio(struct loop_device *lo)
{
	__loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
				lo->use_dio);
}

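For reference, the dio state that loop_update_dio() recomputes is what
userspace toggles with the LOOP_SET_DIRECT_IO ioctl. A minimal userspace
sketch, assuming /dev/loop0 is already bound to a backing file:

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/loop0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* 1 = direct I/O against the backing file, 0 = buffered */
		if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
			perror("LOOP_SET_DIRECT_IO");
		close(fd);
		return 0;
	}

The ioctl can fail if the backing file's offset or block size rules out
O_DIRECT; checking that is roughly __loop_update_dio()'s job.
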
static void loop_reread_partitions(struct loop_device *lo)
{
	int rc;

	mutex_lock(&lo->lo_disk->open_mutex);
	rc = bdev_disk_changed(lo->lo_disk, false);
	mutex_unlock(&lo->lo_disk->open_mutex);
	if (rc)
		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
			__func__, lo->lo_number, lo->lo_file_name, rc);
}

static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
}

static int loop_validate_file(struct file *file, struct block_device *bdev)
{
	struct inode *inode = file->f_mapping->host;
	struct file *f = file;

	/* Avoid recursion */
	while (is_loop_device(f)) {
		struct loop_device *l;

		lockdep_assert_held(&loop_validate_mutex);
		if (f->f_mapping->host->i_rdev == bdev->bd_dev)
			return -EBADF;

		l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
		if (l->lo_state != Lo_bound)
			return -EINVAL;
		/* Order wrt setting lo->lo_backing_file in loop_configure(). */
		rmb();
		f = l->lo_backing_file;
	}
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		return -EINVAL;
	return 0;
}

/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file *file = fget(arg);
	struct file *old_file;
	int error;
	bool partscan;
	bool is_loop;

	if (!file)
		return -EBADF;

	/* suppress uevents while reconfiguring the device */
	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);

	is_loop = is_loop_device(file);
	error = loop_global_lock_killable(lo, is_loop);
	if (error)
		goto out_putf;
	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out_err;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out_err;

	error = loop_validate_file(file, bdev);
	if (error)
		goto out_err;

	old_file = lo->lo_backing_file;

	error = -EINVAL;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_err;

	/* and ... switch */
	disk_force_media_change(lo->lo_disk);
	blk_mq_freeze_queue(lo->lo_queue);
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
	mapping_set_gfp_mask(file->f_mapping,
			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
	loop_update_dio(lo);
	blk_mq_unfreeze_queue(lo->lo_queue);
	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
	loop_global_unlock(lo, is_loop);

	/*
	 * Flush loop_validate_file() before fput(), for l->lo_backing_file
	 * might be pointing at old_file which might be the last reference.
	 */
	if (!is_loop) {
		mutex_lock(&loop_validate_mutex);
		mutex_unlock(&loop_validate_mutex);
	}
	/*
	 * We must drop the file reference outside of lo_mutex, as dropping
	 * the file ref can take open_mutex which creates a circular locking
	 * dependency.
	 */
	fput(old_file);
	if (partscan)
		loop_reread_partitions(lo);

	error = 0;
done:
	/* enable and uncork uevent now that we are done */
	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
	return error;

out_err:
	loop_global_unlock(lo, is_loop);
out_putf:
	fput(file);
	goto done;
}

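For reference, this path is driven from userspace through the
LOOP_CHANGE_FD ioctl. A minimal sketch, assuming /dev/loop0 is bound
read-only and new.img (a hypothetical file name) has exactly the same
size as the current backing file:

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int lfd = open("/dev/loop0", O_RDWR);
		int ffd = open("new.img", O_RDWR);

		if (lfd < 0 || ffd < 0) {
			perror("open");
			return 1;
		}
		/* The ioctl argument is the fd of the replacement backing file. */
		if (ioctl(lfd, LOOP_CHANGE_FD, ffd) < 0)
			perror("LOOP_CHANGE_FD");
		close(ffd);
		close(lfd);
		return 0;
	}
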
/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);

static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
	ssize_t ret;
	char *p = NULL;

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_backing_file)
		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
	spin_unlock_irq(&lo->lo_lock);

	if (IS_ERR_OR_NULL(p))
		ret = PTR_ERR(p);
	else {
		ret = strlen(p);
		memmove(buf, p, ret);
		buf[ret++] = '\n';
		buf[ret] = 0;
	}

	return ret;
}

static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}

static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

	return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
}

loop: always allow userspace partitions and optionally support automatic scanning
Automatic partition scanning can be requested individually per loop
device during its setup by setting LO_FLAGS_PARTSCAN. By default, no
partition tables are scanned.
Userspace can now always add and remove partitions from all loop
devices, regardless of whether the in-kernel partition scanner is
enabled.
The needed partition minor numbers are allocated from the extended
minors space, the main loop device numbers will continue to match the
loop minors, regardless of the number of partitions used.
# grep . /sys/class/block/loop1/loop/*
/sys/block/loop1/loop/autoclear:0
/sys/block/loop1/loop/backing_file:/home/kay/data/stuff/part.img
/sys/block/loop1/loop/offset:0
/sys/block/loop1/loop/partscan:1
/sys/block/loop1/loop/sizelimit:0
# ls -l /dev/loop*
brw-rw---- 1 root disk 7, 0 Aug 14 20:22 /dev/loop0
brw-rw---- 1 root disk 7, 1 Aug 14 20:23 /dev/loop1
brw-rw---- 1 root disk 259, 0 Aug 14 20:23 /dev/loop1p1
brw-rw---- 1 root disk 259, 1 Aug 14 20:23 /dev/loop1p2
brw-rw---- 1 root disk 7, 99 Aug 14 20:23 /dev/loop99
brw-rw---- 1 root disk 259, 2 Aug 14 20:23 /dev/loop99p1
brw-rw---- 1 root disk 259, 3 Aug 14 20:23 /dev/loop99p2
crw------T 1 root root 10, 237 Aug 14 20:22 /dev/loop-control
Cc: Karel Zak <kzak@redhat.com>
Cc: Davidlohr Bueso <dave@gnu.org>
Acked-By: Tejun Heo <tj@kernel.org>
Signed-off-by: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
2011-08-23 18:12:04 +00:00
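A minimal userspace sketch of requesting the partition scan described
above, assuming /dev/loop1 is already bound to a partitioned image:

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		struct loop_info64 info;
		int fd = open("/dev/loop1", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&info, 0, sizeof(info));
		if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
			perror("LOOP_GET_STATUS64");
			close(fd);
			return 1;
		}
		/* Ask the kernel to scan for loop1p1, loop1p2, ... */
		info.lo_flags |= LO_FLAGS_PARTSCAN;
		if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0)
			perror("LOOP_SET_STATUS64");
		close(fd);
		return 0;
	}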

static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

	return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
}

static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
}

LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
LOOP_ATTR_RO(dio);

static struct attribute *loop_attrs[] = {
	&loop_attr_backing_file.attr,
	&loop_attr_offset.attr,
	&loop_attr_sizelimit.attr,
	&loop_attr_autoclear.attr,
	&loop_attr_partscan.attr,
	&loop_attr_dio.attr,
	NULL,
};

static struct attribute_group loop_attribute_group = {
	.name = "loop",
	.attrs = loop_attrs,
};

static void loop_sysfs_init(struct loop_device *lo)
{
	lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
						&loop_attribute_group);
}

static void loop_sysfs_exit(struct loop_device *lo)
{
	if (lo->sysfs_inited)
		sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
				   &loop_attribute_group);
}

static void loop_config_discard(struct loop_device *lo,
		struct queue_limits *lim)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	u32 granularity = 0, max_discard_sectors = 0;
	struct kstatfs sbuf;

	/*
	 * If the backing device is a block device, mirror its zeroing
	 * capability. Set the discard sectors to the block device's zeroing
	 * capabilities because loop discards result in blkdev_issue_zeroout(),
	 * not blkdev_issue_discard(). This maintains consistent behavior with
	 * file-backed loop devices: discarded regions read back as zero.
	 */
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));

		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
		granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
			queue_physical_block_size(backingq);

	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard.
	 */
	} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
		max_discard_sectors = UINT_MAX >> 9;
		granularity = sbuf.f_bsize;
	}

	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_write_zeroes_sectors = max_discard_sectors;
	if (max_discard_sectors)
		lim->discard_granularity = granularity;
	else
		lim->discard_granularity = 0;
}
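To exercise this path from userspace, a discard can be issued against the
loop device with the BLKDISCARD ioctl; with a file-backed device it ends
up in lo_fallocate(). A minimal sketch, assuming /dev/loop0 is bound and
its backing filesystem implements ->fallocate():

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Byte range to discard: start and length. */
		uint64_t range[2] = { 0, 1024 * 1024 };
		int fd = open("/dev/loop0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKDISCARD, &range) < 0)
			perror("BLKDISCARD");
		close(fd);
		return 0;
	}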

struct loop_worker {
	struct rb_node rb_node;
	struct work_struct work;
	struct list_head cmd_list;
	struct list_head idle_list;
	struct loop_device *lo;
	struct cgroup_subsys_state *blkcg_css;
	unsigned long last_ran_at;
};

static void loop_workfn(struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
{
	return !css || css == blkcg_root_css;
}
#else
static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
loop: Add PF_LESS_THROTTLE to block/loop device thread.
When a filesystem is mounted from a loop device, writes are
throttled by balance_dirty_pages() twice: once when writing
to the filesystem and once when the loop_handle_cmd() writes
to the backing file. This double-throttling can trigger
positive feedback loops that create significant delays. The
throttling at the lower level is seen by the upper level as
a slow device, so it throttles extra hard.
The PF_LESS_THROTTLE flag was created to handle exactly this
circumstance, though with an NFS filesystem mounted from a
local NFS server. It reduces the throttling on the lower
layer so that it can proceed largely unthrottled.
To demonstrate this, create a filesystem on a loop device
and write (e.g. with dd) several large files which combine
to consume significantly more than the limit set by
/proc/sys/vm/dirty_ratio or dirty_bytes. Measure the total
time taken.
When I do this directly on a device (no loop device) the
total time for several runs (mkfs, mount, write 200 files,
umount) is fairly stable: 28-35 seconds.
When I do this over a loop device the times are much worse
and less stable. 52-460 seconds. Half below 100seconds,
half above.
When I apply this patch, the times become stable again,
though not as fast as the no-loop-back case: 53-72 seconds.
There may be room for further improvement as the total overhead still
seems too high, but this is a big improvement.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <tom.leiming@gmail.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
2017-06-16 05:02:09 +00:00
|
|
|
{
|
2021-06-29 02:38:15 +00:00
|
|
|
return !css;
|
}
#endif

static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
{
	struct rb_node **node, *parent = NULL;
	struct loop_worker *cur_worker, *worker = NULL;
	struct work_struct *work;
	struct list_head *cmd_list;

	spin_lock_irq(&lo->lo_work_lock);

if (queue_on_root_worker(cmd->blkcg_css))
|
2021-06-29 02:38:15 +00:00
|
|
|
goto queue_work;
|
|
|
|
|
|
|
|
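/* Look up an existing worker for this command's blkcg in the rb-tree keyed by blkcg_css. */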
node = &lo->worker_tree.rb_node;
|
|
|
|
|
|
|
|
while (*node) {
|
|
|
|
parent = *node;
|
|
|
|
cur_worker = container_of(*node, struct loop_worker, rb_node);
|
2021-06-29 02:38:21 +00:00
|
|
|
if (cur_worker->blkcg_css == cmd->blkcg_css) {
|
2021-06-29 02:38:15 +00:00
|
|
|
worker = cur_worker;
|
|
|
|
break;
|
2021-06-29 02:38:21 +00:00
|
|
|
} else if ((long)cur_worker->blkcg_css < (long)cmd->blkcg_css) {
|
2021-06-29 02:38:15 +00:00
|
|
|
node = &(*node)->rb_left;
|
|
|
|
} else {
|
|
|
|
node = &(*node)->rb_right;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (worker)
|
|
|
|
goto queue_work;
|
|
|
|
|
|
|
|
worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
|
|
|
|
/*
|
|
|
|
* In the event we cannot allocate a worker, just queue on the
|
2021-06-29 02:38:21 +00:00
|
|
|
* rootcg worker and issue the I/O as the rootcg.
|
2021-06-29 02:38:15 +00:00
|
|
|
*/
|
2021-06-29 02:38:21 +00:00
|
|
|
if (!worker) {
|
|
|
|
cmd->blkcg_css = NULL;
|
|
|
|
if (cmd->memcg_css)
|
|
|
|
css_put(cmd->memcg_css);
|
|
|
|
cmd->memcg_css = NULL;
|
2021-06-29 02:38:15 +00:00
|
|
|
goto queue_work;
|
2021-06-29 02:38:21 +00:00
|
|
|
}
|
2021-06-29 02:38:15 +00:00
|
|
|
|
2021-06-29 02:38:21 +00:00
|
|
|
worker->blkcg_css = cmd->blkcg_css;
|
|
|
|
css_get(worker->blkcg_css);
|
2021-06-29 02:38:15 +00:00
|
|
|
INIT_WORK(&worker->work, loop_workfn);
|
|
|
|
INIT_LIST_HEAD(&worker->cmd_list);
|
|
|
|
INIT_LIST_HEAD(&worker->idle_list);
|
|
|
|
worker->lo = lo;
|
|
|
|
rb_link_node(&worker->rb_node, parent, node);
|
|
|
|
rb_insert_color(&worker->rb_node, &lo->worker_tree);
|
|
|
|
queue_work:
|
|
|
|
if (worker) {
|
|
|
|
/*
|
|
|
|
* We need to remove from the idle list here while
|
|
|
|
* holding the lock so that the idle timer doesn't
|
|
|
|
* free the worker
|
|
|
|
*/
|
|
|
|
if (!list_empty(&worker->idle_list))
|
|
|
|
list_del_init(&worker->idle_list);
|
|
|
|
work = &worker->work;
|
|
|
|
cmd_list = &worker->cmd_list;
|
|
|
|
} else {
|
|
|
|
work = &lo->rootcg_work;
|
|
|
|
cmd_list = &lo->rootcg_cmd_list;
|
|
|
|
}
|
|
|
|
list_add_tail(&cmd->list_entry, cmd_list);
|
|
|
|
queue_work(lo->workqueue, work);
|
|
|
|
spin_unlock_irq(&lo->lo_work_lock);
|
2015-08-17 02:31:48 +00:00
|
|
|
}
|
|
|
|
|
2022-03-30 05:29:08 +00:00
|
|
|
static void loop_set_timer(struct loop_device *lo)
|
|
|
|
{
|
|
|
|
timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void loop_free_idle_workers(struct loop_device *lo, bool delete_all)
|
|
|
|
{
|
|
|
|
struct loop_worker *pos, *worker;
|
|
|
|
|
|
|
|
spin_lock_irq(&lo->lo_work_lock);
|
|
|
|
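/* Free workers idle for longer than LOOP_IDLE_WORKER_TIMEOUT; delete_all reaps them regardless of idle time. */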
list_for_each_entry_safe(worker, pos, &lo->idle_worker_list,
|
|
|
|
idle_list) {
|
|
|
|
if (!delete_all &&
|
|
|
|
time_is_after_jiffies(worker->last_ran_at +
|
|
|
|
LOOP_IDLE_WORKER_TIMEOUT))
|
|
|
|
break;
|
|
|
|
list_del(&worker->idle_list);
|
|
|
|
rb_erase(&worker->rb_node, &lo->worker_tree);
|
|
|
|
css_put(worker->blkcg_css);
|
|
|
|
kfree(worker);
|
|
|
|
}
|
|
|
|
if (!list_empty(&lo->idle_worker_list))
|
|
|
|
loop_set_timer(lo);
|
|
|
|
spin_unlock_irq(&lo->lo_work_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void loop_free_idle_workers_timer(struct timer_list *timer)
|
|
|
|
{
|
|
|
|
struct loop_device *lo = container_of(timer, struct loop_device, timer);
|
|
|
|
|
|
|
|
return loop_free_idle_workers(lo, false);
|
|
|
|
}
|
|
|
|
|
2020-05-13 13:38:42 +00:00
|
|
|
/**
|
|
|
|
* loop_set_status_from_info - configure device from loop_info
|
|
|
|
* @lo: struct loop_device to configure
|
|
|
|
* @info: struct loop_info64 to configure the device with
|
|
|
|
*
|
|
|
|
* Configures the loop device parameters according to the passed
|
|
|
|
* in loop_info64 configuration.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
loop_set_status_from_info(struct loop_device *lo,
|
|
|
|
const struct loop_info64 *info)
|
|
|
|
{
|
|
|
|
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2021-10-19 07:56:39 +00:00
|
|
|
switch (info->lo_encrypt_type) {
|
|
|
|
case LO_CRYPT_NONE:
|
|
|
|
break;
|
|
|
|
case LO_CRYPT_XOR:
|
|
|
|
pr_warn("support for the xor transformation has been removed.\n");
|
|
|
|
return -EINVAL;
|
|
|
|
case LO_CRYPT_CRYPTOAPI:
|
|
|
|
pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n");
|
|
|
|
return -EINVAL;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2020-05-13 13:38:42 +00:00
|
|
|
|
2023-02-21 09:50:27 +00:00
|
|
|
/* Avoid assigning overflow values */
|
|
|
|
if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
|
|
|
|
return -EOVERFLOW;
|
|
|
|
|
2020-05-13 13:38:42 +00:00
|
|
|
lo->lo_offset = info->lo_offset;
|
|
|
|
lo->lo_sizelimit = info->lo_sizelimit;
|
2022-08-23 16:08:10 +00:00
|
|
|
|
2020-05-13 13:38:42 +00:00
|
|
|
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
|
|
|
|
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
|
2020-05-13 13:38:44 +00:00
|
|
|
lo->lo_flags = info->lo_flags;
|
2020-05-13 13:38:42 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-06-17 06:04:33 +00:00
|
|
|
static unsigned short loop_default_blocksize(struct loop_device *lo,
|
|
|
|
struct block_device *backing_bdev)
|
2024-02-13 07:34:25 +00:00
|
|
|
{
|
2024-06-17 06:04:33 +00:00
|
|
|
/* In case of direct I/O, match underlying block size */
|
|
|
|
if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev)
|
|
|
|
return bdev_logical_block_size(backing_bdev);
|
|
|
|
return SECTOR_SIZE;
|
|
|
|
}
|
|
|
|
|
2024-06-17 06:04:32 +00:00
|
|
|
static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
|
2024-02-13 07:34:25 +00:00
|
|
|
{
|
2024-06-17 06:04:33 +00:00
|
|
|
struct file *file = lo->lo_backing_file;
|
|
|
|
struct inode *inode = file->f_mapping->host;
|
2024-06-17 06:04:34 +00:00
|
|
|
struct block_device *backing_bdev = NULL;
|
2024-02-13 07:34:25 +00:00
|
|
|
struct queue_limits lim;
|
|
|
|
|
2024-06-17 06:04:34 +00:00
|
|
|
if (S_ISBLK(inode->i_mode))
|
|
|
|
backing_bdev = I_BDEV(inode);
|
|
|
|
else if (inode->i_sb->s_bdev)
|
|
|
|
backing_bdev = inode->i_sb->s_bdev;
|
|
|
|
|
2024-06-17 06:04:33 +00:00
|
|
|
if (!bsize)
|
2024-06-17 06:04:34 +00:00
|
|
|
bsize = loop_default_blocksize(lo, backing_bdev);
|
2024-06-17 06:04:33 +00:00
|
|
|
|
2024-02-13 07:34:25 +00:00
|
|
|
lim = queue_limits_start_update(lo->lo_queue);
|
|
|
|
lim.logical_block_size = bsize;
|
|
|
|
lim.physical_block_size = bsize;
|
|
|
|
lim.io_min = bsize;
|
2024-06-17 06:04:41 +00:00
|
|
|
lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
|
2024-06-17 06:04:40 +00:00
|
|
|
if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
|
|
|
|
lim.features |= BLK_FEAT_WRITE_CACHE;
|
2024-06-17 06:04:41 +00:00
|
|
|
if (backing_bdev && !bdev_nonrot(backing_bdev))
|
|
|
|
lim.features |= BLK_FEAT_ROTATIONAL;
|
2024-06-17 06:04:32 +00:00
|
|
|
loop_config_discard(lo, &lim);
|
2024-02-13 07:34:25 +00:00
|
|
|
return queue_limits_commit_update(lo->lo_queue, &lim);
|
|
|
|
}
|
|
|
|
|
2023-06-08 11:02:55 +00:00
|
|
|
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
|
2020-05-13 13:38:45 +00:00
|
|
|
struct block_device *bdev,
|
|
|
|
const struct loop_config *config)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2021-07-06 14:40:34 +00:00
|
|
|
struct file *file = fget(config->fd);
|
2005-04-16 22:20:36 +00:00
|
|
|
struct address_space *mapping;
|
2021-07-06 14:40:34 +00:00
|
|
|
int error;
|
|
|
|
loff_t size;
|
|
|
|
bool partscan;
|
|
|
|
bool is_loop;
|
|
|
|
|
|
|
|
if (!file)
|
|
|
|
return -EBADF;
|
|
|
|
is_loop = is_loop_device(file);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* This is safe, since we have a reference from open(). */
|
|
|
|
__module_get(THIS_MODULE);
|
|
|
|
|
2019-05-16 14:01:27 +00:00
|
|
|
/*
|
|
|
|
* If we don't hold exclusive handle for the device, upgrade to it
|
|
|
|
* here to avoid changing device under exclusive owner.
|
|
|
|
*/
|
2023-06-08 11:02:55 +00:00
|
|
|
if (!(mode & BLK_OPEN_EXCL)) {
|
2023-06-01 09:44:52 +00:00
|
|
|
error = bd_prepare_to_claim(bdev, loop_configure, NULL);
|
2020-07-16 14:33:09 +00:00
|
|
|
if (error)
|
2019-05-16 14:01:27 +00:00
|
|
|
goto out_putf;
|
|
|
|
}
|
|
|
|
|
2021-07-06 14:40:34 +00:00
|
|
|
error = loop_global_lock_killable(lo, is_loop);
|
2018-11-08 13:01:10 +00:00
|
|
|
if (error)
|
2019-05-16 14:01:27 +00:00
|
|
|
goto out_bdev;
|
2018-11-08 13:01:10 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
error = -EBUSY;
|
|
|
|
if (lo->lo_state != Lo_unbound)
|
2018-11-08 13:01:10 +00:00
|
|
|
goto out_unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-05-07 15:37:58 +00:00
|
|
|
error = loop_validate_file(file, bdev);
|
|
|
|
if (error)
|
2018-11-08 13:01:10 +00:00
|
|
|
goto out_unlock;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
mapping = file->f_mapping;
|
|
|
|
|
2020-05-13 13:38:45 +00:00
|
|
|
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
|
|
|
|
error = -EINVAL;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
error = loop_set_status_from_info(lo, &config->info);
|
|
|
|
if (error)
|
|
|
|
goto out_unlock;
|
|
|
|
|
2023-06-08 11:02:55 +00:00
|
|
|
if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
|
2015-04-03 19:21:59 +00:00
|
|
|
!file->f_op->write_iter)
|
2020-05-13 13:38:45 +00:00
|
|
|
lo->lo_flags |= LO_FLAGS_READ_ONLY;
|
2006-09-27 08:50:49 +00:00
|
|
|
|
2021-06-29 02:38:15 +00:00
|
|
|
if (!lo->workqueue) {
|
2022-03-30 05:29:17 +00:00
|
|
|
lo->workqueue = alloc_workqueue("loop%d",
|
|
|
|
WQ_UNBOUND | WQ_FREEZABLE,
|
|
|
|
0, lo->lo_number);
|
|
|
|
if (!lo->workqueue) {
|
|
|
|
error = -ENOMEM;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
2021-06-29 02:38:15 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
loop: LOOP_CONFIGURE: send uevents for partitions
LOOP_CONFIGURE is, as far as I understand it, supposed to be a way to
combine LOOP_SET_FD and LOOP_SET_STATUS64 into a single syscall. When
using LOOP_SET_FD+LOOP_SET_STATUS64, a single uevent would be sent for
each partition found on the loop device after the second ioctl(), but
when using LOOP_CONFIGURE, no such uevent was being sent.
In the old setup, uevents are disabled for LOOP_SET_FD, but not for
LOOP_SET_STATUS64. This makes sense, as it prevents uevents being
sent for a partially configured device during LOOP_SET_FD - they're
only sent at the end of LOOP_SET_STATUS64. But for LOOP_CONFIGURE,
uevents were disabled for the entire operation, so that final
notification was never issued. To fix this, shrink the critical
section so that the loop_reread_partitions() call, which causes
the uevents to be issued, runs after uevents are re-enabled, matching
the behaviour of the LOOP_SET_FD+LOOP_SET_STATUS64 combination.
I noticed this because Busybox's losetup program recently changed from
using LOOP_SET_FD+LOOP_SET_STATUS64 to LOOP_CONFIGURE, and this broke
my setup, for which I want a notification from the kernel any time a
new partition becomes available.
Signed-off-by: Alyssa Ross <hi@alyssa.is>
[hch: reduced the critical section]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Fixes: 3448914e8cc5 ("loop: Add LOOP_CONFIGURE ioctl")
Link: https://lore.kernel.org/r/20230320125430.55367-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2023-03-20 12:54:30 +00:00
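As an illustration of the single-ioctl path this commit describes, here is a
minimal userspace sketch (not from the kernel tree; the device and file paths
are hypothetical and error handling is abbreviated) that binds a backing file
and requests a partition scan with one LOOP_CONFIGURE call instead of
LOOP_SET_FD followed by LOOP_SET_STATUS64:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int attach_with_configure(void)
{
	int lfd = open("/dev/loop0", O_RDWR);	/* hypothetical loop device */
	int bfd = open("backing.img", O_RDWR);	/* hypothetical backing file */
	struct loop_config cfg;

	if (lfd < 0 || bfd < 0)
		return -1;
	memset(&cfg, 0, sizeof(cfg));
	cfg.fd = bfd;
	cfg.info.lo_flags = LO_FLAGS_PARTSCAN;	/* scan partitions; uevents follow */
	/* one syscall replaces LOOP_SET_FD + LOOP_SET_STATUS64 */
	return ioctl(lfd, LOOP_CONFIGURE, &cfg);
}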
|
|
|
/* suppress uevents while reconfiguring the device */
|
|
|
|
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
|
|
|
|
|
2023-08-11 10:08:19 +00:00
|
|
|
disk_force_media_change(lo->lo_disk);
|
2020-11-03 10:00:16 +00:00
|
|
|
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-05-13 13:38:45 +00:00
|
|
|
lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
|
2005-04-16 22:20:36 +00:00
|
|
|
lo->lo_device = bdev;
|
|
|
|
lo->lo_backing_file = file;
|
|
|
|
lo->old_gfp_mask = mapping_gfp_mask(mapping);
|
|
|
|
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
|
|
|
|
|
2024-06-17 06:04:33 +00:00
|
|
|
error = loop_reconfigure_limits(lo, config->block_size);
|
2024-07-08 09:16:51 +00:00
|
|
|
if (error)
|
2024-02-13 07:34:25 +00:00
|
|
|
goto out_unlock;
|
2019-09-04 19:49:01 +00:00
|
|
|
|
2015-08-17 02:31:49 +00:00
|
|
|
loop_update_dio(lo);
|
2010-08-23 13:16:00 +00:00
|
|
|
loop_sysfs_init(lo);
|
2020-08-25 07:18:29 +00:00
|
|
|
|
|
|
|
size = get_loop_size(lo, file);
|
2020-05-13 13:38:37 +00:00
|
|
|
loop_set_size(lo, size);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2021-07-06 14:40:34 +00:00
|
|
|
/* Order wrt reading lo_state in loop_validate_file(). */
|
|
|
|
wmb();
|
|
|
|
|
2006-09-29 08:59:11 +00:00
|
|
|
lo->lo_state = Lo_bound;
|
loop: always allow userspace partitions and optionally support automatic scanning
Automatic partition scanning can be requested individually per loop
device during its setup by setting LO_FLAGS_PARTSCAN. By default, no
partition tables are scanned.
Userspace can now always add and remove partitions from all loop
devices, regardless of whether the in-kernel partition scanner is enabled or
not.
The needed partition minor numbers are allocated from the extended
minors space, the main loop device numbers will continue to match the
loop minors, regardless of the number of partitions used.
# grep . /sys/class/block/loop1/loop/*
/sys/block/loop1/loop/autoclear:0
/sys/block/loop1/loop/backing_file:/home/kay/data/stuff/part.img
/sys/block/loop1/loop/offset:0
/sys/block/loop1/loop/partscan:1
/sys/block/loop1/loop/sizelimit:0
# ls -l /dev/loop*
brw-rw---- 1 root disk 7, 0 Aug 14 20:22 /dev/loop0
brw-rw---- 1 root disk 7, 1 Aug 14 20:23 /dev/loop1
brw-rw---- 1 root disk 259, 0 Aug 14 20:23 /dev/loop1p1
brw-rw---- 1 root disk 259, 1 Aug 14 20:23 /dev/loop1p2
brw-rw---- 1 root disk 7, 99 Aug 14 20:23 /dev/loop99
brw-rw---- 1 root disk 259, 2 Aug 14 20:23 /dev/loop99p1
brw-rw---- 1 root disk 259, 3 Aug 14 20:23 /dev/loop99p2
crw------T 1 root root 10, 237 Aug 14 20:22 /dev/loop-control
Cc: Karel Zak <kzak@redhat.com>
Cc: Davidlohr Bueso <dave@gnu.org>
Acked-By: Tejun Heo <tj@kernel.org>
Signed-off-by: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
2011-08-23 18:12:04 +00:00
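To make the opt-in concrete, here is a hedged userspace sketch (device path
hypothetical, error handling minimal) that turns on the in-kernel partition
scanner for an already-bound device by setting LO_FLAGS_PARTSCAN through the
LOOP_GET_STATUS64/LOOP_SET_STATUS64 ioctls:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int request_partscan(const char *dev)	/* e.g. "/dev/loop1" (hypothetical) */
{
	struct loop_info64 info;
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0)
		return -1;
	info.lo_flags |= LO_FLAGS_PARTSCAN;	/* newly-set flag triggers a partition reread */
	return ioctl(fd, LOOP_SET_STATUS64, &info);
}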
|
|
|
if (part_shift)
|
|
|
|
lo->lo_flags |= LO_FLAGS_PARTSCAN;
|
2018-11-08 13:01:13 +00:00
|
|
|
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
|
2020-08-10 17:16:32 +00:00
|
|
|
if (partscan)
|
2022-05-27 05:58:06 +00:00
|
|
|
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
|
2013-04-01 16:47:56 +00:00
|
|
|
|
2023-03-20 12:54:30 +00:00
|
|
|
/* enable and uncork uevents now that we are done */
|
|
|
|
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
|
|
|
|
|
2021-07-06 14:40:34 +00:00
|
|
|
loop_global_unlock(lo, is_loop);
|
2018-11-08 13:01:13 +00:00
|
|
|
if (partscan)
|
2021-06-24 12:32:40 +00:00
|
|
|
loop_reread_partitions(lo);
|
2023-03-20 12:54:30 +00:00
|
|
|
|
2023-06-08 11:02:55 +00:00
|
|
|
if (!(mode & BLK_OPEN_EXCL))
|
2020-11-25 20:20:08 +00:00
|
|
|
bd_abort_claiming(bdev, loop_configure);
|
2022-03-30 05:29:14 +00:00
|
|
|
|
2023-03-20 12:54:30 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-11-08 13:01:10 +00:00
|
|
|
out_unlock:
|
2021-07-06 14:40:34 +00:00
|
|
|
loop_global_unlock(lo, is_loop);
|
2019-05-16 14:01:27 +00:00
|
|
|
out_bdev:
|
2023-06-08 11:02:55 +00:00
|
|
|
if (!(mode & BLK_OPEN_EXCL))
|
2020-11-25 20:20:08 +00:00
|
|
|
bd_abort_claiming(bdev, loop_configure);
|
2018-11-08 13:01:10 +00:00
|
|
|
out_putf:
|
2005-04-16 22:20:36 +00:00
|
|
|
fput(file);
|
|
|
|
/* This is safe: open() is still holding a reference. */
|
|
|
|
module_put(THIS_MODULE);
|
2023-03-20 12:54:30 +00:00
|
|
|
return error;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
loop: Fix a race between loop detach and loop open
1. Userspace sends the command "losetup -d" which uses the open() call
to open the device
2. Kernel receives the ioctl command "LOOP_CLR_FD" which calls the
function loop_clr_fd()
3. If LOOP_CLR_FD is the first command received at the time, then the
AUTOCLEAR flag is not set, and deletion of the
loop device proceeds and rescans the partitions (drop/add
partitions):
if (disk_openers(lo->lo_disk) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
loop_global_unlock(lo, true);
return 0;
}
4. Before scanning partitions, it will check to see if any partition of
the loop device is currently opened
5. If any partition is opened, then it will return EBUSY:
if (disk->open_partitions)
return -EBUSY;
6. So, after receiving the "LOOP_CLR_FD" command and just before the above
check for open_partitions, if any other command
(like blkid) opens any partition of the loop device, then the partition
scan will not proceed and EBUSY is returned, as shown in the code above
7. But in "__loop_clr_fd()", this EBUSY error is not propagated
8. We have noticed that this causes the partitions of the loop device to
remain stale even after the loop device is detached, resulting in
I/O errors on the partitions
Fix:
Defer the detach of the loop device to the release function, which is called
when the last close happens, by setting LO_FLAGS_AUTOCLEAR at
the time of detach, i.e. in the loop_clr_fd() function.
Test case involves the following two scripts:
script1.sh:
while [ 1 ];
do
losetup -P -f /home/opt/looptest/test10.img
blkid /dev/loop0p1
done
script2.sh:
while [ 1 ];
do
losetup -d /dev/loop0
done
Without fix, the following IO errors have been observed:
kernel: __loop_clr_fd: partition scan of loop0 failed (rc=-16)
kernel: I/O error, dev loop0, sector 20971392 op 0x0:(READ) flags 0x80700
phys_seg 1 prio class 0
kernel: I/O error, dev loop0, sector 108868 op 0x0:(READ) flags 0x0
phys_seg 1 prio class 0
kernel: Buffer I/O error on dev loop0p1, logical block 27201, async page
read
Signed-off-by: Gulam Mohamed <gulam.mohamed@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240618164042.343777-1-gulam.mohamed@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2024-06-18 16:40:42 +00:00
|
|
|
static void __loop_clr_fd(struct loop_device *lo)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2024-06-17 06:04:31 +00:00
|
|
|
struct queue_limits lim;
|
2021-11-24 10:47:40 +00:00
|
|
|
struct file *filp;
|
2005-10-21 07:22:34 +00:00
|
|
|
gfp_t gfp = lo->old_gfp_mask;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
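/* Clear the backing file pointer under lo_lock so concurrent readers see either the old file or NULL. */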
spin_lock_irq(&lo->lo_lock);
|
2021-11-24 10:47:40 +00:00
|
|
|
filp = lo->lo_backing_file;
|
2005-04-16 22:20:36 +00:00
|
|
|
lo->lo_backing_file = NULL;
|
2011-07-31 20:21:35 +00:00
|
|
|
spin_unlock_irq(&lo->lo_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
lo->lo_device = NULL;
|
|
|
|
lo->lo_offset = 0;
|
|
|
|
lo->lo_sizelimit = 0;
|
|
|
|
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
|
2024-06-17 06:04:31 +00:00
|
|
|
|
|
|
|
/* reset the block size to the default */
|
|
|
|
lim = queue_limits_start_update(lo->lo_queue);
|
|
|
|
lim.logical_block_size = SECTOR_SIZE;
|
|
|
|
lim.physical_block_size = SECTOR_SIZE;
|
|
|
|
lim.io_min = SECTOR_SIZE;
|
|
|
|
queue_limits_commit_update(lo->lo_queue, &lim);
|
|
|
|
|
2021-09-22 12:37:09 +00:00
|
|
|
invalidate_disk(lo->lo_disk);
|
2010-10-28 01:51:30 +00:00
|
|
|
loop_sysfs_exit(lo);
|
2021-09-22 12:37:10 +00:00
|
|
|
/* let user-space know about this change */
|
|
|
|
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
|
2005-04-16 22:20:36 +00:00
|
|
|
mapping_set_gfp_mask(filp->f_mapping, gfp);
|
2022-02-11 07:15:54 +00:00
|
|
|
/* This is safe: open() is still holding a reference. */
|
|
|
|
module_put(THIS_MODULE);
|
2015-05-06 04:26:23 +00:00
|
|
|
|
2023-08-11 10:08:19 +00:00
|
|
|
disk_force_media_change(lo->lo_disk);
|
2021-11-24 10:47:40 +00:00
|
|
|
|
|
|
|
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
|
|
|
|
int err;
|
|
|
|
|
2022-02-11 07:15:54 +00:00
|
|
|
/*
|
|
|
|
* open_mutex has been held already in release path, so don't
|
|
|
|
* acquire it if this function is called in that case.
|
|
|
|
*
|
|
|
|
* If the partition reread isn't from the release path, lo_refcnt
|
|
|
|
* must be at least one and it can only become zero when the
|
|
|
|
* current holder is released.
|
|
|
|
*/
|
2021-06-24 12:32:40 +00:00
|
|
|
err = bdev_disk_changed(lo->lo_disk, false);
|
2019-02-22 14:10:19 +00:00
|
|
|
if (err)
|
|
|
|
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
|
2021-11-24 10:47:40 +00:00
|
|
|
__func__, lo->lo_number, err);
|
2018-11-08 13:01:12 +00:00
|
|
|
/* Device is gone, no point in returning error */
|
|
|
|
}
|
2019-02-22 14:10:20 +00:00
|
|
|
|
2022-02-11 07:15:54 +00:00
|
|
|
/*
|
|
|
|
* lo->lo_state is set to Lo_unbound here after the above partscan has
|
|
|
|
* finished. There cannot be anybody else entering __loop_clr_fd() as
|
|
|
|
* Lo_rundown state protects us from all the other places trying to
|
|
|
|
* change the 'lo' device.
|
|
|
|
*/
|
2019-02-22 14:10:20 +00:00
|
|
|
lo->lo_flags = 0;
|
|
|
|
if (!part_shift)
|
2022-05-27 05:58:06 +00:00
|
|
|
set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
|
2021-11-24 10:47:40 +00:00
|
|
|
mutex_lock(&lo->lo_mutex);
|
2019-02-22 14:10:20 +00:00
|
|
|
lo->lo_state = Lo_unbound;
|
2021-01-26 14:46:30 +00:00
|
|
|
mutex_unlock(&lo->lo_mutex);
|
2021-12-13 12:55:27 +00:00
|
|
|
|
2022-02-11 07:15:54 +00:00
|
|
|
/*
|
|
|
|
* Need not hold lo_mutex to fput backing file. Calling fput holding
|
|
|
|
* lo_mutex triggers a circular lock dependency possibility warning as
|
|
|
|
* fput can take open_mutex which is usually taken before lo_mutex.
|
|
|
|
*/
|
|
|
|
fput(filp);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2018-11-08 13:01:06 +00:00
|
|
|
static int loop_clr_fd(struct loop_device *lo)
|
|
|
|
{
|
2018-11-08 13:01:07 +00:00
|
|
|
int err;
|
|
|
|
|
loop: avoid loop_validate_mutex/lo_mutex in ->release
Since ->release is called with disk->open_mutex held, and __loop_clr_fd()
from lo_release() is called via ->release when disk_openers() == 0, we are
guaranteed that "struct file" which will be passed to loop_validate_file()
via fget() cannot be the loop device __loop_clr_fd(lo, true) will clear.
Thus, there is no need to hold loop_validate_mutex from __loop_clr_fd()
if release == true.
When I made commit 3ce6e1f662a91097 ("loop: reintroduce global lock for
safe loop_validate_file() traversal"), I wrote "It is acceptable for
loop_validate_file() to succeed, for actual clear operation has not started
yet.". But now I came to feel why it is acceptable to succeed.
It seems that the loop driver was added in Linux 1.3.68, and
if (lo->lo_refcnt > 1)
return -EBUSY;
check in loop_clr_fd() was there from the beginning. The intent of this
check was unclear. But now I think that current
disk_openers(lo->lo_disk) > 1
form is there for three reasons.
(1) Avoid I/O errors when some process opens and reads from this
loop device in response to a uevent notification (e.g. systemd-udevd),
as described in commit a1ecac3b0656a682 ("loop: Make explicit loop
device destruction lazy"). This opener is short-lived because it is
likely that the file descriptor used by that process is closed soon.
(2) Avoid I/O errors caused by underlying layer of stacked loop devices
(i.e. ioctl(some_loop_fd, LOOP_SET_FD, other_loop_fd)) suddenly
disappearing. This opener is long-lived because the reference is
associated not with a file descriptor but with lo->lo_backing_file.
(3) Avoid I/O errors caused by underlying layer of mounted loop device
(i.e. mount(some_loop_device, some_mount_point)) suddenly
disappearing. This opener is long-lived because the reference is
associated not with a file descriptor but with a mount.
While the race in (1) might be acceptable, (2) and (3) should be checked
racelessly. That is, make sure that __loop_clr_fd() will not run if
loop_validate_file() succeeds, by doing refcount check with global lock
held when explicit loop device destruction is requested.
As a result of no longer waiting for lo->lo_mutex after setting Lo_rundown,
we can remove pointless BUG_ON(lo->lo_state != Lo_rundown) check.
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220330052917.2566582-14-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-03-30 05:29:15 +00:00
|
|
|
/*
|
|
|
|
* Since lo_ioctl() is called without locks held, it is possible that
|
|
|
|
* loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel.
|
|
|
|
*
|
|
|
|
* Therefore, use global lock when setting Lo_rundown state in order to
|
|
|
|
* make sure that loop_validate_file() will fail if the "struct file"
|
|
|
|
* which loop_configure()/loop_change_fd() found via fget() was this
|
|
|
|
* loop device.
|
|
|
|
*/
|
|
|
|
err = loop_global_lock_killable(lo, true);
|
2018-11-08 13:01:07 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
if (lo->lo_state != Lo_bound) {
|
2022-03-30 05:29:15 +00:00
|
|
|
loop_global_unlock(lo, true);
|
2018-11-08 13:01:06 +00:00
|
|
|
return -ENXIO;
|
2018-11-08 13:01:07 +00:00
|
|
|
}
|
2018-11-08 13:01:06 +00:00
|
|
|
/*
|
2024-06-18 16:40:42 +00:00
|
|
|
* Mark the device for removing the backing file on last close.
|
|
|
|
* If we are the only opener, also switch the state to Lo_rundown here to
|
|
|
|
* prevent new openers from coming in.
|
2018-11-08 13:01:06 +00:00
|
|
|
*/
|
2024-06-18 16:40:42 +00:00
|
|
|
|
|
|
|
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
|
|
|
|
if (disk_openers(lo->lo_disk) == 1)
|
|
|
|
lo->lo_state = Lo_rundown;
|
2022-03-30 05:29:15 +00:00
|
|
|
loop_global_unlock(lo, true);
|
2018-11-08 13:01:06 +00:00
|
|
|
|
2021-11-24 10:47:40 +00:00
|
|
|
return 0;
|
2018-11-08 13:01:06 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static int
|
|
|
|
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
|
|
|
|
{
|
|
|
|
int err;
|
2020-05-13 13:38:44 +00:00
|
|
|
int prev_lo_flags;
|
2018-11-08 13:01:13 +00:00
|
|
|
bool partscan = false;
|
2020-05-13 13:38:41 +00:00
|
|
|
bool size_changed = false;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2021-01-26 14:46:30 +00:00
|
|
|
err = mutex_lock_killable(&lo->lo_mutex);
|
2018-11-08 13:01:09 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
if (lo->lo_state != Lo_bound) {
|
|
|
|
err = -ENXIO;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2019-01-10 03:17:14 +00:00
|
|
|
if (lo->lo_offset != info->lo_offset ||
|
|
|
|
lo->lo_sizelimit != info->lo_sizelimit) {
|
2020-05-13 13:38:41 +00:00
|
|
|
size_changed = true;
|
2019-01-10 03:17:14 +00:00
|
|
|
sync_blockdev(lo->lo_device);
|
2020-06-18 04:21:37 +00:00
|
|
|
invalidate_bdev(lo->lo_device);
|
2019-01-10 03:17:14 +00:00
|
|
|
}
|
|
|
|
|
2017-02-11 03:40:45 +00:00
|
|
|
/* I/O needs to be drained during the transfer transition */
|
|
|
|
blk_mq_freeze_queue(lo->lo_queue);
|
|
|
|
|
2020-05-13 13:38:44 +00:00
|
|
|
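/* Snapshot the flags so bits that LOOP_SET_STATUS cannot set or clear can be restored below. */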
prev_lo_flags = lo->lo_flags;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-05-13 13:38:41 +00:00
|
|
|
err = loop_set_status_from_info(lo, info);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (err)
|
2018-11-08 13:01:09 +00:00
|
|
|
goto out_unfreeze;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-05-13 13:38:44 +00:00
|
|
|
/* Mask out flags that can't be set using LOOP_SET_STATUS. */
|
2020-06-04 20:25:20 +00:00
|
|
|
lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
|
2020-05-13 13:38:44 +00:00
|
|
|
/* For those flags, use the previous values instead */
|
|
|
|
lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
|
|
|
|
/* For flags that can't be cleared, use previous values too */
|
|
|
|
lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
|
|
|
|
|
2020-05-13 13:38:39 +00:00
|
|
|
if (size_changed) {
|
|
|
|
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
|
|
|
|
lo->lo_backing_file);
|
|
|
|
loop_set_size(lo, new_size);
|
2017-06-09 10:19:18 +00:00
|
|
|
}
|
2013-02-21 23:16:46 +00:00
|
|
|
|
2015-08-17 02:31:49 +00:00
|
|
|
/* update dio if lo_offset or transfer has changed */
|
|
|
|
__loop_update_dio(lo, lo->use_dio);
|
|
|
|
|
2018-11-08 13:01:09 +00:00
|
|
|
out_unfreeze:
|
2017-02-11 03:40:45 +00:00
|
|
|
blk_mq_unfreeze_queue(lo->lo_queue);
|
2017-03-01 18:42:38 +00:00
|
|
|
|
2020-05-13 13:38:44 +00:00
|
|
|
if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
|
|
|
|
!(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
|
2022-05-27 05:58:06 +00:00
|
|
|
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
|
2018-11-08 13:01:13 +00:00
|
|
|
partscan = true;
|
2017-03-01 18:42:38 +00:00
|
|
|
}
|
2018-11-08 13:01:09 +00:00
|
|
|
out_unlock:
|
2021-01-26 14:46:30 +00:00
|
|
|
mutex_unlock(&lo->lo_mutex);
|
2018-11-08 13:01:13 +00:00
|
|
|
if (partscan)
|
2021-06-24 12:32:40 +00:00
|
|
|
loop_reread_partitions(lo);
|
2017-03-01 18:42:38 +00:00
|
|
|
|
2017-02-11 03:40:45 +00:00
|
|
|
return err;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
|
|
|
|
{
|
2018-11-08 13:01:01 +00:00
|
|
|
struct path path;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct kstat stat;
|
2018-03-27 04:39:11 +00:00
|
|
|
int ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2021-01-26 14:46:30 +00:00
|
|
|
ret = mutex_lock_killable(&lo->lo_mutex);
|
2018-11-08 13:01:08 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2018-03-27 04:39:11 +00:00
|
|
|
if (lo->lo_state != Lo_bound) {
|
2021-01-26 14:46:30 +00:00
|
|
|
mutex_unlock(&lo->lo_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
return -ENXIO;
|
2018-03-27 04:39:11 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
memset(info, 0, sizeof(*info));
|
|
|
|
info->lo_number = lo->lo_number;
|
|
|
|
info->lo_offset = lo->lo_offset;
|
|
|
|
info->lo_sizelimit = lo->lo_sizelimit;
|
|
|
|
info->lo_flags = lo->lo_flags;
|
|
|
|
memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
|
2018-03-27 04:39:11 +00:00
|
|
|
|
2021-01-26 14:46:30 +00:00
|
|
|
/* Drop lo_mutex while we call into the filesystem. */
|
2018-11-08 13:01:01 +00:00
|
|
|
path = lo->lo_backing_file->f_path;
|
|
|
|
path_get(&path);
|
2021-01-26 14:46:30 +00:00
|
|
|
mutex_unlock(&lo->lo_mutex);
|
2018-11-08 13:01:01 +00:00
|
|
|
ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
|
2018-03-27 04:39:11 +00:00
|
|
|
if (!ret) {
|
|
|
|
info->lo_device = huge_encode_dev(stat.dev);
|
|
|
|
info->lo_inode = stat.ino;
|
|
|
|
info->lo_rdevice = huge_encode_dev(stat.rdev);
|
|
|
|
}
|
2018-11-08 13:01:01 +00:00
|
|
|
path_put(&path);
|
2018-03-27 04:39:11 +00:00
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
|
|
|
|
{
|
|
|
|
memset(info64, 0, sizeof(*info64));
|
|
|
|
info64->lo_number = info->lo_number;
|
|
|
|
info64->lo_device = info->lo_device;
|
|
|
|
info64->lo_inode = info->lo_inode;
|
|
|
|
info64->lo_rdevice = info->lo_rdevice;
|
|
|
|
info64->lo_offset = info->lo_offset;
|
|
|
|
info64->lo_sizelimit = 0;
|
|
|
|
info64->lo_flags = info->lo_flags;
|
2021-10-19 07:56:39 +00:00
|
|
|
memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
|
|
|
|
{
|
|
|
|
memset(info, 0, sizeof(*info));
|
|
|
|
info->lo_number = info64->lo_number;
|
|
|
|
info->lo_device = info64->lo_device;
|
|
|
|
info->lo_inode = info64->lo_inode;
|
|
|
|
info->lo_rdevice = info64->lo_rdevice;
|
|
|
|
info->lo_offset = info64->lo_offset;
|
|
|
|
info->lo_flags = info64->lo_flags;
|
2021-10-19 07:56:39 +00:00
|
|
|
memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* error in case values were truncated */
|
|
|
|
if (info->lo_device != info64->lo_device ||
|
|
|
|
info->lo_rdevice != info64->lo_rdevice ||
|
|
|
|
info->lo_inode != info64->lo_inode ||
|
|
|
|
info->lo_offset != info64->lo_offset)
|
|
|
|
return -EOVERFLOW;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
|
|
|
|
{
|
|
|
|
struct loop_info info;
|
|
|
|
struct loop_info64 info64;
|
|
|
|
|
|
|
|
if (copy_from_user(&info, arg, sizeof (struct loop_info)))
|
|
|
|
return -EFAULT;
|
|
|
|
loop_info64_from_old(&info, &info64);
|
|
|
|
return loop_set_status(lo, &info64);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
|
|
|
|
{
|
|
|
|
struct loop_info64 info64;
|
|
|
|
|
|
|
|
if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
|
|
|
|
return -EFAULT;
|
|
|
|
return loop_set_status(lo, &info64);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg)
{
|
|
|
|
struct loop_info info;
|
|
|
|
struct loop_info64 info64;
|
2018-04-06 16:57:03 +00:00
|
|
|
int err;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-11-08 13:01:08 +00:00
|
|
|
if (!arg)
|
2018-04-06 16:57:03 +00:00
|
|
|
return -EINVAL;
|
|
|
|
err = loop_get_status(lo, &info64);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!err)
|
|
|
|
err = loop_info64_to_old(&info64, &info);
|
|
|
|
if (!err && copy_to_user(arg, &info, sizeof(info)))
|
|
|
|
err = -EFAULT;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg)
{
|
|
|
|
struct loop_info64 info64;
|
2018-04-06 16:57:03 +00:00
|
|
|
int err;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-11-08 13:01:08 +00:00
|
|
|
if (!arg)
|
2018-04-06 16:57:03 +00:00
|
|
|
return -EINVAL;
|
|
|
|
err = loop_get_status(lo, &info64);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!err && copy_to_user(arg, &info64, sizeof(info64)))
|
|
|
|
err = -EFAULT;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-06-08 11:46:44 +00:00
|
|
|
static int loop_set_capacity(struct loop_device *lo)
|
loop: add ioctl to resize a loop device
Add the ability to 'resize' the loop device on the fly.
One practical application is a loop file with an XFS filesystem, already
mounted: you can enlarge the file (append some bytes) and then call
ioctl(fd, LOOP_SET_CAPACITY, new). The loop driver will learn about the
new size, and you can use xfs_growfs later on, which allows you to use the
full capacity of the loop file without the need to unmount.
Test app:
#include <linux/fs.h>
#include <linux/loop.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define _GNU_SOURCE
#include <getopt.h>
char *me;
void usage(FILE *f)
{
fprintf(f, "%s [options] loop_dev [backend_file]\n"
"-s, --set new_size_in_bytes\n"
"\twhen backend_file is given, "
"it will be expanded too while keeping the original contents\n",
me);
}
struct option opts[] = {
{
.name = "set",
.has_arg = 1,
.flag = NULL,
.val = 's'
},
{
.name = "help",
.has_arg = 0,
.flag = NULL,
.val = 'h'
}
};
void err_size(char *name, __u64 old)
{
fprintf(stderr, "size must be larger than current %s (%llu)\n",
name, old);
}
int main(int argc, char *argv[])
{
int fd, err, c, i, bfd;
ssize_t ssz;
size_t sz;
__u64 old, new, append;
char a[BUFSIZ];
struct stat st;
FILE *out;
char *backend, *dev;
err = EINVAL;
out = stderr;
me = argv[0];
new = 0;
while ((c = getopt_long(argc, argv, "s:h", opts, &i)) != -1) {
switch (c) {
case 's':
errno = 0;
new = strtoull(optarg, NULL, 0);
if (errno) {
err = errno;
perror(argv[i]);
goto out;
}
break;
case 'h':
err = 0;
out = stdout;
goto err;
default:
perror(argv[i]);
goto err;
}
}
if (optind < argc)
dev = argv[optind++];
else
goto err;
fd = open(dev, O_RDONLY);
if (fd < 0) {
err = errno;
perror(dev);
goto out;
}
err = ioctl(fd, BLKGETSIZE64, &old);
if (err) {
err = errno;
perror("ioctl BLKGETSIZE64");
goto out;
}
if (!new) {
printf("%llu\n", old);
goto out;
}
if (new < old) {
err = EINVAL;
err_size(dev, old);
goto out;
}
if (optind < argc) {
backend = argv[optind++];
bfd = open(backend, O_WRONLY|O_APPEND);
if (bfd < 0) {
err = errno;
perror(backend);
goto out;
}
err = fstat(bfd, &st);
if (err) {
err = errno;
perror(backend);
goto out;
}
if (new < st.st_size) {
err = EINVAL;
err_size(backend, st.st_size);
goto out;
}
append = new - st.st_size;
sz = sizeof(a);
while (append > 0) {
if (append < sz)
sz = append;
ssz = write(bfd, a, sz);
if (ssz != sz) {
err = errno;
perror(backend);
goto out;
}
append -= sz;
}
err = fsync(bfd);
if (err) {
err = errno;
perror(backend);
goto out;
}
}
err = ioctl(fd, LOOP_SET_CAPACITY, new);
if (err) {
err = errno;
perror("ioctl LOOP_SET_CAPACITY");
}
goto out;
err:
usage(out);
out:
return err;
}
Signed-off-by: J. R. Okajima <hooanon05@yahoo.co.jp>
Signed-off-by: Tomas Matejicek <tomas@slax.org>
Cc: <util-linux-ng@vger.kernel.org>
Cc: Karel Zak <kzak@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: <linux-api@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-03-31 22:23:43 +00:00
|
|
|
{
|
2020-05-13 13:38:40 +00:00
|
|
|
loff_t size;
|
|
|
|
|
2009-03-31 22:23:43 +00:00
|
|
|
if (unlikely(lo->lo_state != Lo_bound))
|
2013-02-21 23:16:47 +00:00
|
|
|
return -ENXIO;
|
2009-03-31 22:23:43 +00:00
|
|
|
|
2020-05-13 13:38:40 +00:00
|
|
|
size = get_loop_size(lo, lo->lo_backing_file);
|
|
|
|
loop_set_size(lo, size);
|
2020-05-13 13:38:36 +00:00
|
|
|
|
|
|
|
return 0;
|
2009-03-31 22:23:43 +00:00
|
|
|
}
|
|
|
|
|
2015-08-17 02:31:50 +00:00
|
|
|
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
|
|
|
|
{
|
|
|
|
int error = -ENXIO;
|
|
|
|
if (lo->lo_state != Lo_bound)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
__loop_update_dio(lo, !!arg);
|
|
|
|
if (lo->use_dio == !!arg)
|
|
|
|
return 0;
|
|
|
|
error = -EINVAL;
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2017-08-24 07:03:43 +00:00
|
|
|
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
|
|
|
|
{
|
2019-01-10 03:17:14 +00:00
|
|
|
int err = 0;
|
|
|
|
|
2017-08-24 07:03:43 +00:00
|
|
|
if (lo->lo_state != Lo_bound)
|
|
|
|
return -ENXIO;
|
|
|
|
|
2020-03-10 13:12:30 +00:00
|
|
|
if (lo->lo_queue->limits.logical_block_size == arg)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
sync_blockdev(lo->lo_device);
|
2020-06-18 04:21:37 +00:00
|
|
|
invalidate_bdev(lo->lo_device);
|
2019-01-10 03:17:14 +00:00
|
|
|
|
2017-08-24 07:03:43 +00:00
|
|
|
blk_mq_freeze_queue(lo->lo_queue);
|
2024-06-17 06:04:32 +00:00
|
|
|
err = loop_reconfigure_limits(lo, arg);
|
2017-08-24 07:03:43 +00:00
|
|
|
loop_update_dio(lo);
|
|
|
|
blk_mq_unfreeze_queue(lo->lo_queue);
|
|
|
|
|
2019-01-10 03:17:14 +00:00
|
|
|
return err;
|
2017-08-24 07:03:43 +00:00
|
|
|
}
|
|
|
|
|
2018-11-08 13:01:05 +00:00
|
|
|
static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
|
|
|
|
unsigned long arg)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2021-01-26 14:46:30 +00:00
|
|
|
err = mutex_lock_killable(&lo->lo_mutex);
|
2018-03-27 04:39:12 +00:00
|
|
|
if (err)
|
2018-11-08 13:01:05 +00:00
|
|
|
return err;
|
|
|
|
switch (cmd) {
|
|
|
|
case LOOP_SET_CAPACITY:
|
|
|
|
err = loop_set_capacity(lo);
|
|
|
|
break;
|
|
|
|
case LOOP_SET_DIRECT_IO:
|
|
|
|
err = loop_set_dio(lo, arg);
|
|
|
|
break;
|
|
|
|
case LOOP_SET_BLOCK_SIZE:
|
|
|
|
err = loop_set_block_size(lo, arg);
|
|
|
|
break;
|
|
|
|
default:
|
2021-10-19 07:56:39 +00:00
|
|
|
err = -EINVAL;
|
2018-11-08 13:01:05 +00:00
|
|
|
}
|
2021-01-26 14:46:30 +00:00
|
|
|
mutex_unlock(&lo->lo_mutex);
|
2018-11-08 13:01:05 +00:00
|
|
|
return err;
|
|
|
|
}
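For reference, each command handled by lo_simple_ioctl() above is reachable from userspace as a plain ioctl on the loop device node. A minimal sketch (assumes /dev/loop0 is already bound to a backing file; error handling elided):

#include <linux/loop.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static void simple_ioctl_demo(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	ioctl(fd, LOOP_SET_DIRECT_IO, 1UL);	/* switch I/O to O_DIRECT */
	ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL);	/* logical block size */
	ioctl(fd, LOOP_SET_CAPACITY, 0UL);	/* re-read backing file size */
	close(fd);
}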
|
|
|
|
|
2023-06-08 11:02:55 +00:00
|
|
|
static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
|
2018-11-08 13:01:05 +00:00
|
|
|
unsigned int cmd, unsigned long arg)
|
|
|
|
{
|
|
|
|
struct loop_device *lo = bdev->bd_disk->private_data;
|
2020-05-13 13:38:43 +00:00
|
|
|
void __user *argp = (void __user *) arg;
|
2018-11-08 13:01:05 +00:00
|
|
|
int err;
|
2018-03-27 04:39:12 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
switch (cmd) {
|
2020-05-13 13:38:45 +00:00
|
|
|
case LOOP_SET_FD: {
|
|
|
|
/*
|
|
|
|
* Legacy case - pass in a zeroed out struct loop_config with
|
|
|
|
* only the file descriptor set, which corresponds with the
|
|
|
|
* default parameters we'd have used otherwise.
|
|
|
|
*/
|
|
|
|
struct loop_config config;
|
|
|
|
|
|
|
|
memset(&config, 0, sizeof(config));
|
|
|
|
config.fd = arg;
|
|
|
|
|
|
|
|
return loop_configure(lo, mode, bdev, &config);
|
|
|
|
}
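	/*
	 * Userspace view, as a minimal sketch (backing_fd, loop_fd and the
	 * offset value are illustrative): LOOP_CONFIGURE takes the whole
	 * struct loop_config in one call, so the legacy two-step of
	 * LOOP_SET_FD followed by LOOP_SET_STATUS64 collapses into:
	 *
	 *	struct loop_config cfg = { .fd = backing_fd };
	 *
	 *	cfg.info.lo_offset = 32256;
	 *	ioctl(loop_fd, LOOP_CONFIGURE, &cfg);
	 */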
|
|
|
|
case LOOP_CONFIGURE: {
|
|
|
|
struct loop_config config;
|
|
|
|
|
|
|
|
if (copy_from_user(&config, argp, sizeof(config)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return loop_configure(lo, mode, bdev, &config);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
case LOOP_CHANGE_FD:
|
2018-11-08 13:01:11 +00:00
|
|
|
return loop_change_fd(lo, bdev, arg);
|
2005-04-16 22:20:36 +00:00
|
|
|
case LOOP_CLR_FD:
|
2018-11-08 13:01:07 +00:00
|
|
|
return loop_clr_fd(lo);
|
2005-04-16 22:20:36 +00:00
|
|
|
case LOOP_SET_STATUS:
|
2011-11-16 08:21:49 +00:00
|
|
|
err = -EPERM;
|
2023-06-08 11:02:55 +00:00
|
|
|
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
|
2020-05-13 13:38:43 +00:00
|
|
|
err = loop_set_status_old(lo, argp);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
case LOOP_GET_STATUS:
|
2020-05-13 13:38:43 +00:00
|
|
|
return loop_get_status_old(lo, argp);
|
2005-04-16 22:20:36 +00:00
|
|
|
case LOOP_SET_STATUS64:
|
2011-11-16 08:21:49 +00:00
|
|
|
err = -EPERM;
|
2023-06-08 11:02:55 +00:00
|
|
|
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
|
2020-05-13 13:38:43 +00:00
|
|
|
err = loop_set_status64(lo, argp);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
case LOOP_GET_STATUS64:
|
2020-05-13 13:38:43 +00:00
|
|
|
return loop_get_status64(lo, argp);
|
2018-11-08 13:01:05 +00:00
|
|
|
case LOOP_SET_CAPACITY:
|
2015-08-17 02:31:50 +00:00
|
|
|
case LOOP_SET_DIRECT_IO:
|
2017-08-24 07:03:43 +00:00
|
|
|
case LOOP_SET_BLOCK_SIZE:
|
2023-06-08 11:02:55 +00:00
|
|
|
if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
|
2018-11-08 13:01:05 +00:00
|
|
|
return -EPERM;
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2005-04-16 22:20:36 +00:00
|
|
|
default:
|
2018-11-08 13:01:05 +00:00
|
|
|
err = lo_simple_ioctl(lo, cmd, arg);
|
|
|
|
break;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
loop: fix circular locking in loop_clr_fd()
With CONFIG_PROVE_LOCKING enabled
$ losetup /dev/loop0 file
$ losetup -o 32256 /dev/loop1 /dev/loop0
$ losetup -d /dev/loop1
$ losetup -d /dev/loop0
triggers a [ INFO: possible circular locking dependency detected ]
I think this warning is a false positive.
Open/close on a loop device acquires bd_mutex of the device before
acquiring lo_ctl_mutex of the same device. For ioctl(LOOP_CLR_FD) after
acquiring lo_ctl_mutex, fput on the backing_file might acquire the bd_mutex of
a device, if the backing file is a device and this is the last reference to the
file being dropped. But it is guaranteed that it is impossible to have a
circular chain of backing devices (say, loop2->loop1->loop0->loop2 is not
possible), which guarantees that this can never deadlock.
So this warning should be suppressed. It is very difficult to annotate lockdep
not to warn here in the correct way. A simple way to silence lockdep could be
to mark the lo_ctl_mutex in ioctl as a subclass, but this might mask some
other real bugs.
@@ -1164,7 +1164,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock_nested(&lo->lo_ctl_mutex, 1);
switch (cmd) {
case LOOP_SET_FD:
err = loop_set_fd(lo, mode, bdev, arg);
Or actually marking the bd_mutex after lo_ctl_mutex as a subclass could be
a better solution.
Luckily it is easy to avoid calling fput on backing file with lo_ctl_mutex
held, so no lockdep annotation is required.
If you do not like the special handling of the lo_ctl_mutex just for the
LOOP_CLR_FD ioctl in lo_ioctl(), the mutex handling could be moved inside
each of the individual ioctl handlers and I could send you another patch.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
2009-03-24 11:33:41 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return err;
|
|
|
|
}
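For illustration, the subclass annotation discussed in the locking commit message above would look roughly as follows. This is a sketch only (the mutex name is illustrative), and the actual fix avoided the annotation by not dropping the file reference while the mutex is held:

#include <linux/mutex.h>
#include <linux/lockdep.h>

static DEFINE_MUTEX(demo_ctl_mutex);

static void demo_nested_lock(void)
{
	/*
	 * SINGLE_DEPTH_NESTING places this acquisition in a separate
	 * lockdep subclass, which silences the false positive but could
	 * also mask a genuine recursive-lock bug on the same class.
	 */
	mutex_lock_nested(&demo_ctl_mutex, SINGLE_DEPTH_NESTING);
	mutex_unlock(&demo_ctl_mutex);
}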
|
|
|
|
|
2006-08-29 18:06:14 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
struct compat_loop_info {
|
|
|
|
compat_int_t lo_number; /* ioctl r/o */
|
|
|
|
compat_dev_t lo_device; /* ioctl r/o */
|
|
|
|
compat_ulong_t lo_inode; /* ioctl r/o */
|
|
|
|
compat_dev_t lo_rdevice; /* ioctl r/o */
|
|
|
|
compat_int_t lo_offset;
|
2022-03-29 20:18:15 +00:00
|
|
|
compat_int_t lo_encrypt_type; /* obsolete, ignored */
|
2006-08-29 18:06:14 +00:00
|
|
|
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
|
|
|
|
compat_int_t lo_flags; /* ioctl r/o */
|
|
|
|
char lo_name[LO_NAME_SIZE];
|
|
|
|
unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
|
|
|
|
compat_ulong_t lo_init[2];
|
|
|
|
char reserved[4];
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Transfer 32-bit compatibility structure in userspace to 64-bit loop info
|
|
|
|
* - noinlined to reduce stack space usage in main part of driver
|
|
|
|
*/
|
|
|
|
static noinline int
|
2006-10-10 21:48:27 +00:00
|
|
|
loop_info64_from_compat(const struct compat_loop_info __user *arg,
|
2006-08-29 18:06:14 +00:00
|
|
|
struct loop_info64 *info64)
|
|
|
|
{
|
|
|
|
struct compat_loop_info info;
|
|
|
|
|
|
|
|
if (copy_from_user(&info, arg, sizeof(info)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
memset(info64, 0, sizeof(*info64));
|
|
|
|
info64->lo_number = info.lo_number;
|
|
|
|
info64->lo_device = info.lo_device;
|
|
|
|
info64->lo_inode = info.lo_inode;
|
|
|
|
info64->lo_rdevice = info.lo_rdevice;
|
|
|
|
info64->lo_offset = info.lo_offset;
|
|
|
|
info64->lo_sizelimit = 0;
|
|
|
|
info64->lo_flags = info.lo_flags;
|
2021-10-19 07:56:39 +00:00
|
|
|
memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
|
2006-08-29 18:06:14 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Transfer 64-bit loop info to 32-bit compatibility structure in userspace
|
|
|
|
* - noinlined to reduce stack space usage in main part of driver
|
|
|
|
*/
|
|
|
|
static noinline int
|
|
|
|
loop_info64_to_compat(const struct loop_info64 *info64,
|
|
|
|
struct compat_loop_info __user *arg)
|
|
|
|
{
|
|
|
|
struct compat_loop_info info;
|
|
|
|
|
|
|
|
memset(&info, 0, sizeof(info));
|
|
|
|
info.lo_number = info64->lo_number;
|
|
|
|
info.lo_device = info64->lo_device;
|
|
|
|
info.lo_inode = info64->lo_inode;
|
|
|
|
info.lo_rdevice = info64->lo_rdevice;
|
|
|
|
info.lo_offset = info64->lo_offset;
|
|
|
|
info.lo_flags = info64->lo_flags;
|
2021-10-19 07:56:39 +00:00
|
|
|
memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
|
2006-08-29 18:06:14 +00:00
|
|
|
|
|
|
|
/* error in case values were truncated */
|
|
|
|
if (info.lo_device != info64->lo_device ||
|
|
|
|
info.lo_rdevice != info64->lo_rdevice ||
|
|
|
|
info.lo_inode != info64->lo_inode ||
|
2021-10-19 07:56:39 +00:00
|
|
|
info.lo_offset != info64->lo_offset)
|
2006-08-29 18:06:14 +00:00
|
|
|
return -EOVERFLOW;
|
|
|
|
|
|
|
|
if (copy_to_user(arg, &info, sizeof(info)))
|
|
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_set_status_compat(struct loop_device *lo,
|
|
|
|
const struct compat_loop_info __user *arg)
|
|
|
|
{
|
|
|
|
struct loop_info64 info64;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = loop_info64_from_compat(arg, &info64);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
return loop_set_status(lo, &info64);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
loop_get_status_compat(struct loop_device *lo,
|
|
|
|
struct compat_loop_info __user *arg)
|
|
|
|
{
|
|
|
|
struct loop_info64 info64;
|
2018-04-06 16:57:03 +00:00
|
|
|
int err;
|
2006-08-29 18:06:14 +00:00
|
|
|
|
2018-11-08 13:01:08 +00:00
|
|
|
if (!arg)
|
2018-04-06 16:57:03 +00:00
|
|
|
return -EINVAL;
|
|
|
|
err = loop_get_status(lo, &info64);
|
2006-08-29 18:06:14 +00:00
|
|
|
if (!err)
|
|
|
|
err = loop_info64_to_compat(&info64, arg);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2023-06-08 11:02:55 +00:00
|
|
|
static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
|
2008-03-02 14:29:48 +00:00
|
|
|
unsigned int cmd, unsigned long arg)
|
2006-08-29 18:06:14 +00:00
|
|
|
{
|
2008-03-02 14:29:48 +00:00
|
|
|
struct loop_device *lo = bdev->bd_disk->private_data;
|
2006-08-29 18:06:14 +00:00
|
|
|
int err;
|
|
|
|
|
|
|
|
switch(cmd) {
|
|
|
|
case LOOP_SET_STATUS:
|
2018-11-08 13:01:09 +00:00
|
|
|
err = loop_set_status_compat(lo,
|
|
|
|
(const struct compat_loop_info __user *)arg);
|
2006-08-29 18:06:14 +00:00
|
|
|
break;
|
|
|
|
case LOOP_GET_STATUS:
|
2018-11-08 13:01:08 +00:00
|
|
|
err = loop_get_status_compat(lo,
|
|
|
|
(struct compat_loop_info __user *)arg);
|
2006-08-29 18:06:14 +00:00
|
|
|
break;
|
2009-03-31 22:23:43 +00:00
|
|
|
case LOOP_SET_CAPACITY:
|
2006-08-29 18:06:14 +00:00
|
|
|
case LOOP_CLR_FD:
|
|
|
|
case LOOP_GET_STATUS64:
|
|
|
|
case LOOP_SET_STATUS64:
|
2020-05-13 13:38:45 +00:00
|
|
|
case LOOP_CONFIGURE:
|
2006-08-29 18:06:14 +00:00
|
|
|
arg = (unsigned long) compat_ptr(arg);
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2006-08-29 18:06:14 +00:00
|
|
|
case LOOP_SET_FD:
|
|
|
|
case LOOP_CHANGE_FD:
|
2018-07-02 23:03:46 +00:00
|
|
|
case LOOP_SET_BLOCK_SIZE:
|
2019-08-07 00:48:28 +00:00
|
|
|
case LOOP_SET_DIRECT_IO:
|
2008-03-02 14:29:48 +00:00
|
|
|
err = lo_ioctl(bdev, mode, cmd, arg);
|
2006-08-29 18:06:14 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
err = -ENOIOCTLCMD;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
loop: Fix a race between loop detach and loop open
1. Userspace sends the command "losetup -d" which uses the open() call
to open the device
2. Kernel receives the ioctl command "LOOP_CLR_FD" which calls the
function loop_clr_fd()
3. If LOOP_CLR_FD is the first command received at the time, then the
AUTOCLEAR flag is not set and deletion of the
loop device proceeds ahead and scans the partitions (drop/add
partitions)
if (disk_openers(lo->lo_disk) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
loop_global_unlock(lo, true);
return 0;
}
4. Before scanning partitions, it will check to see if any partition of
the loop device is currently opened
5. If any partition is opened, then it will return EBUSY:
if (disk->open_partitions)
return -EBUSY;
6. So, after receiving the "LOOP_CLR_FD" command and just before the above
check for open_partitions, if any other command
(like blkid) opens any partition of the loop device, then the partition
scan will not proceed and EBUSY is returned, as shown in the code above
7. But in "__loop_clr_fd()", this EBUSY error is not propagated
8. We have noticed that this causes the partitions of the loop device to
remain stale even after the loop device is detached, resulting in
I/O errors on the partitions
Fix:
Defer the detach of the loop device to the release function, which is called
when the last close happens, by setting LO_FLAGS_AUTOCLEAR in lo_flags at
detach time, i.e. in the loop_clr_fd() function.
Test case involves the following two scripts:
script1.sh:
while [ 1 ];
do
losetup -P -f /home/opt/looptest/test10.img
blkid /dev/loop0p1
done
script2.sh:
while [ 1 ];
do
losetup -d /dev/loop0
done
Without fix, the following IO errors have been observed:
kernel: __loop_clr_fd: partition scan of loop0 failed (rc=-16)
kernel: I/O error, dev loop0, sector 20971392 op 0x0:(READ) flags 0x80700
phys_seg 1 prio class 0
kernel: I/O error, dev loop0, sector 108868 op 0x0:(READ) flags 0x0
phys_seg 1 prio class 0
kernel: Buffer I/O error on dev loop0p1, logical block 27201, async page
read
Signed-off-by: Gulam Mohamed <gulam.mohamed@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240618164042.343777-1-gulam.mohamed@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2024-06-18 16:40:42 +00:00
|
|
|
static int lo_open(struct gendisk *disk, blk_mode_t mode)
|
|
|
|
{
|
|
|
|
struct loop_device *lo = disk->private_data;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = mutex_lock_killable(&lo->lo_mutex);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (lo->lo_state == Lo_deleting || lo->lo_state == Lo_rundown)
|
|
|
|
err = -ENXIO;
|
|
|
|
mutex_unlock(&lo->lo_mutex);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2023-06-08 11:02:37 +00:00
|
|
|
static void lo_release(struct gendisk *disk)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2021-01-26 14:46:30 +00:00
|
|
|
struct loop_device *lo = disk->private_data;
|
2024-06-18 16:40:42 +00:00
|
|
|
bool need_clear = false;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2022-03-30 05:29:16 +00:00
|
|
|
if (disk_openers(disk) > 0)
|
|
|
|
return;
|
2024-06-18 16:40:42 +00:00
|
|
|
/*
|
|
|
|
* Clear the backing device information if this is the last close of
|
|
|
|
* a device that's been marked for auto clear, or on which LOOP_CLR_FD
|
|
|
|
* has been called.
|
|
|
|
*/
|
2008-12-12 13:48:27 +00:00
|
|
|
|
2022-03-30 05:29:16 +00:00
|
|
|
mutex_lock(&lo->lo_mutex);
|
2024-06-18 16:40:42 +00:00
|
|
|
if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR))
|
2018-11-08 13:01:06 +00:00
|
|
|
lo->lo_state = Lo_rundown;
|
2024-06-18 16:40:42 +00:00
|
|
|
|
|
|
|
need_clear = (lo->lo_state == Lo_rundown);
|
2021-01-26 14:46:30 +00:00
|
|
|
mutex_unlock(&lo->lo_mutex);
|
2024-06-18 16:40:42 +00:00
|
|
|
|
|
|
|
if (need_clear)
|
|
|
|
__loop_clr_fd(lo);
|
2018-01-06 00:26:00 +00:00
|
|
|
}
|
|
|
|
|
2022-03-30 05:29:13 +00:00
|
|
|
static void lo_free_disk(struct gendisk *disk)
|
|
|
|
{
|
|
|
|
struct loop_device *lo = disk->private_data;
|
|
|
|
|
2022-03-30 05:29:17 +00:00
|
|
|
if (lo->workqueue)
|
|
|
|
destroy_workqueue(lo->workqueue);
|
|
|
|
loop_free_idle_workers(lo, true);
|
2022-12-20 18:45:19 +00:00
|
|
|
timer_shutdown_sync(&lo->timer);
|
2022-03-30 05:29:13 +00:00
|
|
|
mutex_destroy(&lo->lo_mutex);
|
|
|
|
kfree(lo);
|
|
|
|
}
|
|
|
|
|
2009-09-22 00:01:13 +00:00
|
|
|
static const struct block_device_operations lo_fops = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.owner = THIS_MODULE,
|
2024-06-18 16:40:42 +00:00
|
|
|
.open = lo_open,
|
2008-03-02 14:29:48 +00:00
|
|
|
.release = lo_release,
|
|
|
|
.ioctl = lo_ioctl,
|
2006-08-29 18:06:14 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
2008-03-02 14:29:48 +00:00
|
|
|
.compat_ioctl = lo_compat_ioctl,
|
2006-08-29 18:06:14 +00:00
|
|
|
#endif
|
2022-03-30 05:29:13 +00:00
|
|
|
.free_disk = lo_free_disk,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* And now the modules code and kernel interface.
|
|
|
|
*/
|
2022-12-08 21:29:01 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If max_loop is specified, create that many devices upfront.
|
|
|
|
* This also becomes a hard limit. If max_loop is not specified,
|
loop: do not enforce max_loop hard limit by (new) default
Problem:
The max_loop parameter is used for 2 different purposes:
1) initial number of loop devices to pre-create on init
2) maximum number of loop devices to add on access/open()
Historically, its default value (zero) caused 1) to create non-zero
number of devices (CONFIG_BLK_DEV_LOOP_MIN_COUNT), and no hard limit on
2) to add devices with autoloading.
However, the default value changed in commit 85c50197716c ("loop: Fix
the max_loop commandline argument treatment when it is set to 0") to
CONFIG_BLK_DEV_LOOP_MIN_COUNT, for max_loop=0 not to pre-create devices.
That does improve 1), but unfortunately it breaks 2), as the default
behavior changed from no-limit to hard-limit.
Example:
For example, this userspace code broke for N >= CONFIG, if the user
relied on the default value 0 for max_loop:
mknod("/dev/loopN");
open("/dev/loopN"); // now fails with ENXIO
Though affected users may "fix" it with (loop.)max_loop=0, this means
requiring a kernel parameter change on a stable kernel update (that commit
Fixes: an old commit in stable).
Solution:
The original semantics for the default value in 2) can be applied if the
parameter is not set (i.e., default behavior).
This still keeps the intended function in 1) and 2) if set, and that
commit's intended improvement in 1) if max_loop=0.
Before 85c50197716c:
- default: 1) CONFIG devices 2) no limit
- max_loop=0: 1) CONFIG devices 2) no limit
- max_loop=X: 1) X devices 2) X limit
After 85c50197716c:
- default: 1) CONFIG devices 2) CONFIG limit (*)
- max_loop=0: 1) 0 devices (*) 2) no limit
- max_loop=X: 1) X devices 2) X limit
This commit:
- default: 1) CONFIG devices 2) no limit (*)
- max_loop=0: 1) 0 devices 2) no limit
- max_loop=X: 1) X devices 2) X limit
Future:
The issue/regression from that commit only affects code under the
CONFIG_BLOCK_LEGACY_AUTOLOAD deprecation guard, thus the fix too is
contained under it.
Once that deprecated functionality/code is removed, the purpose 2) of
max_loop (hard limit) is no longer in use, so the module parameter
description can be changed then.
Tests:
Linux 6.4-rc7
CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
CONFIG_BLOCK_LEGACY_AUTOLOAD=y
- default (original)
# ls -1 /dev/loop*
/dev/loop-control
/dev/loop0
...
/dev/loop7
# ./test-loop
open: /dev/loop8: No such device or address
- default (patched)
# ls -1 /dev/loop*
/dev/loop-control
/dev/loop0
...
/dev/loop7
# ./test-loop
#
- max_loop=0 (original & patched):
# ls -1 /dev/loop*
/dev/loop-control
# ./test-loop
#
- max_loop=8 (original & patched):
# ls -1 /dev/loop*
/dev/loop-control
/dev/loop0
...
/dev/loop7
# ./test-loop
open: /dev/loop8: No such device or address
- max_loop=0 (patched; CONFIG_BLOCK_LEGACY_AUTOLOAD is not set)
# ls -1 /dev/loop*
/dev/loop-control
# ./test-loop
open: /dev/loop8: No such device or address
Fixes: 85c50197716c ("loop: Fix the max_loop commandline argument treatment when it is set to 0")
Signed-off-by: Mauricio Faria de Oliveira <mfo@canonical.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230720143033.841001-3-mfo@canonical.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2023-07-20 14:30:33 +00:00
|
|
|
* the default isn't a hard limit (as before commit 85c50197716c
|
|
|
|
* changed the default value from 0 for max_loop=0 reasons), just
|
2022-12-08 21:29:01 +00:00
|
|
|
* create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
|
|
|
|
* init time. Loop devices can be requested on-demand with the
|
|
|
|
* /dev/loop-control interface, or be instantiated by accessing
|
|
|
|
* a 'dead' device node.
|
|
|
|
*/
|
|
|
|
static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
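The "./test-loop" program referenced in the commit message above is only sketched there as mknod()+open(); a plausible reconstruction (an assumption, not the original source) that exercises the legacy autoload path:

#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *node = "/dev/loop8";	/* beyond the 8 pre-created devices */
	int fd;

	/* With the default max_part=0, /dev/loopN is minor N on major 7. */
	if (mknod(node, S_IFBLK | 0600, makedev(7, 8)) && errno != EEXIST) {
		perror("mknod");
		return 1;
	}
	fd = open(node, O_RDWR);	/* opening the dead node triggers autoload */
	if (fd < 0) {
		fprintf(stderr, "open: %s: %s\n", node, strerror(errno));
		return 1;
	}
	close(fd);
	return 0;
}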
|
2023-07-20 14:30:33 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
|
|
|
|
static bool max_loop_specified;
|
|
|
|
|
|
|
|
static int max_loop_param_set_int(const char *val,
|
|
|
|
const struct kernel_param *kp)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = param_set_int(val, kp);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
max_loop_specified = true;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct kernel_param_ops max_loop_param_ops = {
|
|
|
|
.set = max_loop_param_set_int,
|
|
|
|
.get = param_get_int,
|
|
|
|
};
|
|
|
|
|
|
|
|
module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
|
2007-06-08 20:46:44 +00:00
|
|
|
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
|
2023-07-20 14:30:33 +00:00
|
|
|
#else
|
|
|
|
module_param(max_loop, int, 0444);
|
|
|
|
MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
|
|
|
|
#endif
|
|
|
|
|
2018-05-24 19:38:59 +00:00
|
|
|
module_param(max_part, int, 0444);
|
loop: manage partitions in disk image
This patch allows to use loop device with partitionned disk image.
Original behavior of loop is not modified.
A new parameter is introduced to define how many partition we want to be
able to manage per loop device. This parameter is "max_part".
For instance, to manage 63 partitions / loop device, we will do:
# modprobe loop max_part=63
# ls -l /dev/loop?*
brw-rw---- 1 root disk 7, 0 2008-03-05 14:55 /dev/loop0
brw-rw---- 1 root disk 7, 64 2008-03-05 14:55 /dev/loop1
brw-rw---- 1 root disk 7, 128 2008-03-05 14:55 /dev/loop2
brw-rw---- 1 root disk 7, 192 2008-03-05 14:55 /dev/loop3
brw-rw---- 1 root disk 7, 256 2008-03-05 14:55 /dev/loop4
brw-rw---- 1 root disk 7, 320 2008-03-05 14:55 /dev/loop5
brw-rw---- 1 root disk 7, 384 2008-03-05 14:55 /dev/loop6
brw-rw---- 1 root disk 7, 448 2008-03-05 14:55 /dev/loop7
And to attach a raw partitioned disk image, the original losetup is used:
# losetup -f etch.img
# ls -l /dev/loop?*
brw-rw---- 1 root disk 7, 0 2008-03-05 14:55 /dev/loop0
brw-rw---- 1 root disk 7, 1 2008-03-05 14:57 /dev/loop0p1
brw-rw---- 1 root disk 7, 2 2008-03-05 14:57 /dev/loop0p2
brw-rw---- 1 root disk 7, 5 2008-03-05 14:57 /dev/loop0p5
brw-rw---- 1 root disk 7, 64 2008-03-05 14:55 /dev/loop1
brw-rw---- 1 root disk 7, 128 2008-03-05 14:55 /dev/loop2
brw-rw---- 1 root disk 7, 192 2008-03-05 14:55 /dev/loop3
brw-rw---- 1 root disk 7, 256 2008-03-05 14:55 /dev/loop4
brw-rw---- 1 root disk 7, 320 2008-03-05 14:55 /dev/loop5
brw-rw---- 1 root disk 7, 384 2008-03-05 14:55 /dev/loop6
brw-rw---- 1 root disk 7, 448 2008-03-05 14:55 /dev/loop7
# mount /dev/loop0p1 /mnt
# ls /mnt
bench cdrom home lib mnt root srv usr
bin dev initrd lost+found opt sbin sys var
boot etc initrd.img media proc selinux tmp vmlinuz
# umount /mnt
# losetup -d /dev/loop0
Of course, the same behavior can be achieved using kpartx on a loop device,
but modifying loop avoids stacking several layers of block devices (loop +
device mapper), and this is a very light modification (40% of the changes
just manage the new parameter).
Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
2008-03-26 11:11:53 +00:00
|
|
|
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
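The minor-number layout in the listing above follows from max_part: the driver rounds it up to the next power of two and shifts each device's first minor accordingly. A small userspace sketch of that arithmetic (an illustration of the scheme, not the driver code):

#include <stdio.h>

int main(void)
{
	int max_part = 63;
	int part_shift = 0;

	/* Smallest shift such that (1 << shift) > max_part. */
	while ((1 << part_shift) <= max_part)
		part_shift++;	/* max_part=63 -> part_shift=6, 64 minors */

	for (int i = 0; i < 4; i++)
		printf("loop%d -> minor %d\n", i, i << part_shift);
	return 0;
}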
|
2022-02-15 21:33:10 +00:00
|
|
|
|
|
|
|
static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
|
|
|
|
|
|
|
|
static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
|
|
|
|
{
|
2023-01-30 21:13:47 +00:00
|
|
|
int qd, ret;
|
2022-02-15 21:33:10 +00:00
|
|
|
|
2023-01-30 21:13:47 +00:00
|
|
|
ret = kstrtoint(s, 0, &qd);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
if (qd < 1)
|
|
|
|
return -EINVAL;
|
|
|
|
hw_queue_depth = qd;
|
|
|
|
return 0;
|
2022-02-15 21:33:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
|
|
|
|
.set = loop_set_hw_queue_depth,
|
|
|
|
.get = param_get_int,
|
|
|
|
};
|
|
|
|
|
|
|
|
device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
|
2023-01-30 21:13:47 +00:00
|
|
|
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));
|
2022-02-15 21:33:10 +00:00
|
|
|
|
2024-06-03 00:15:09 +00:00
|
|
|
MODULE_DESCRIPTION("Loopback device support");
|
2005-04-16 22:20:36 +00:00
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
|
|
|
|
|
2017-06-03 07:38:05 +00:00
|
|
|
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
|
block: loop: improve performance via blk-mq
The conversion is fairly straightforward: a workqueue is used to
dispatch loop block requests, and one big change is that requests
are submitted to the backing file/device concurrently through the
workqueue, so throughput may improve a lot. Since write requests over
the same file often run exclusively, they are not handled concurrently,
which avoids extra context-switch cost, possible lock contention, and
work-scheduling cost. Also, with blk-mq there is an opportunity to get
loop I/O merged before it is submitted to the backing file/device.
In the following test:
- base: v3.19-rc2-2041231
- loop over file in ext4 file system on SSD disk
- bs: 4k, libaio, io depth: 64, O_DIRECT, num of jobs: 1
- throughput: IOPS
------------------------------------------------------
| | base | base with loop-mq | delta |
------------------------------------------------------
| randread | 1740 | 25318 | +1355%|
------------------------------------------------------
| read | 42196 | 51771 | +22.6%|
-----------------------------------------------------
| randwrite | 35709 | 34624 | -3% |
-----------------------------------------------------
| write | 39137 | 40326 | +3% |
-----------------------------------------------------
So loop-mq can improve throughput for both read and randread; meanwhile,
write and randwrite performance is basically unhurt.
Another benefit is that the loop driver code is much simpler
after the blk-mq conversion, so the patch can be regarded as a
cleanup too.
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
2014-12-31 13:22:57 +00:00
|
|
|
const struct blk_mq_queue_data *bd)
|
|
|
|
{
|
2018-04-13 22:24:29 +00:00
|
|
|
struct request *rq = bd->rq;
|
|
|
|
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
|
|
|
|
struct loop_device *lo = rq->q->queuedata;
|
2014-12-31 13:22:57 +00:00
|
|
|
|
2018-04-13 22:24:29 +00:00
|
|
|
blk_mq_start_request(rq);
|
2014-12-31 13:22:57 +00:00
|
|
|
|
2015-05-05 11:49:54 +00:00
|
|
|
if (lo->lo_state != Lo_bound)
|
2017-06-03 07:38:05 +00:00
|
|
|
return BLK_STS_IOERR;
|
2015-05-05 11:49:54 +00:00
|
|
|
|
2018-04-13 22:24:29 +00:00
|
|
|
switch (req_op(rq)) {
|
2016-08-04 14:10:00 +00:00
|
|
|
case REQ_OP_FLUSH:
|
|
|
|
case REQ_OP_DISCARD:
|
2017-04-05 17:21:15 +00:00
|
|
|
case REQ_OP_WRITE_ZEROES:
|
block: loop: support DIO & AIO
There are at least 3 advantages to using direct I/O and AIO on the
loop device's backing file for reads/writes:
1) double caching is avoided, so memory usage drops a lot
2) unlike user-space direct I/O, there is no page-pinning cost
3) context switches are avoided, which helps throughput
- in buffered file reads, top random-I/O throughput is often reached
only when requests are submitted concurrently from lots of tasks; but
for sequential I/O, most of the time requests hit the page cache, so
concurrent submissions often introduce unnecessary context switches
and can't improve throughput much. There was a discussion[1] about
using non-blocking I/O to improve this for applications.
- with direct I/O and AIO, concurrent submissions can be avoided while
random-read throughput is not affected
xfstests(-g auto, ext4) basically passes when running with direct
I/O(aio); the one exception is generic/232, but that also fails with
loop buffered I/O(4.2-rc6-next-20150814).
The fio test results for performance follow:
4-job fio test inside an ext4 file system over a loop block device
1) How to run
- KVM: 4 VCPUs, 2G RAM
- linux kernel: 4.2-rc6-next-20150814(base) with the patchset
- the loop block device is over one image on SSD
- linux psync, 4 jobs, size 1500M, ext4 over loop block device
- test result: IOPS from fio output
2) Throughput(IOPS) becomes a bit better with direct I/O(aio)
-------------------------------------------------------------
test cases    |randread |read   |randwrite |write  |
-------------------------------------------------------------
base          |8015     |113811 |67442     |106978 |
base+loop aio |8136     |125040 |67811     |111376 |
-------------------------------------------------------------
- this is likely caused by more page cache being available to the
application, or by one extra page copy being avoided with direct I/O
3) context switches
- context switches decreased by ~50% with loop direct I/O(aio)
compared with loop buffered I/O(4.2-rc6-next-20150814)
4) memory usage from /proc/meminfo
-------------------------------------------------------------
                          | Buffers | Cached |
-------------------------------------------------------------
base                      | > 760MB | ~950MB |
base+loop direct I/O(aio) | < 5MB   | ~1.6GB |
-------------------------------------------------------------
- so much more page cache is available to the application with
direct I/O
[1] https://lwn.net/Articles/612483/
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
2015-08-17 02:31:51 +00:00
|
|
|
cmd->use_aio = false;
|
2016-08-04 14:10:00 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
cmd->use_aio = lo->use_dio;
|
|
|
|
break;
|
|
|
|
}
|
2015-08-17 02:31:51 +00:00
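For illustration of the direct I/O support described above: userspace
can toggle it per device with the LOOP_SET_DIRECT_IO ioctl from
<linux/loop.h>. A hedged sketch, assuming /dev/loop0 is already bound
to a backing file (the ioctl may fail if the backing file or block
size cannot support direct I/O):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/loop0");
		return 1;
	}
	/* request direct I/O (and hence AIO) against the backing file */
	if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
		perror("LOOP_SET_DIRECT_IO");
	close(fd);
	return 0;
}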
|
|
|
|
2017-09-25 19:07:22 +00:00
|
|
|
/* always use the first bio's css */
|
2021-06-29 02:38:21 +00:00
|
|
|
cmd->blkcg_css = NULL;
|
|
|
|
cmd->memcg_css = NULL;
|
2017-09-26 18:02:12 +00:00
|
|
|
#ifdef CONFIG_BLK_CGROUP
|
2022-04-20 04:27:17 +00:00
|
|
|
if (rq->bio) {
|
|
|
|
cmd->blkcg_css = bio_blkcg_css(rq->bio);
|
2021-06-29 02:38:21 +00:00
|
|
|
#ifdef CONFIG_MEMCG
|
2022-04-20 04:27:17 +00:00
|
|
|
if (cmd->blkcg_css) {
|
|
|
|
cmd->memcg_css =
|
|
|
|
cgroup_get_e_css(cmd->blkcg_css->cgroup,
|
|
|
|
&memory_cgrp_subsys);
|
|
|
|
}
|
2021-06-29 02:38:21 +00:00
|
|
|
#endif
|
|
|
|
}
|
2017-09-25 19:07:22 +00:00
|
|
|
#endif
|
2021-06-29 02:38:15 +00:00
|
|
|
loop_queue_work(lo, cmd);
|
2014-12-31 13:22:57 +00:00
|
|
|
|
2017-06-03 07:38:05 +00:00
|
|
|
return BLK_STS_OK;
|
2014-12-31 13:22:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void loop_handle_cmd(struct loop_cmd *cmd)
|
|
|
|
{
|
2023-03-14 18:21:54 +00:00
|
|
|
struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
|
|
|
|
struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
|
2018-04-13 22:24:29 +00:00
|
|
|
struct request *rq = blk_mq_rq_from_pdu(cmd);
|
|
|
|
const bool write = op_is_write(req_op(rq));
|
|
|
|
struct loop_device *lo = rq->q->queuedata;
|
2015-09-27 19:01:50 +00:00
|
|
|
int ret = 0;
|
2021-06-29 02:38:21 +00:00
|
|
|
struct mem_cgroup *old_memcg = NULL;
|
2023-03-14 18:21:54 +00:00
|
|
|
const bool use_aio = cmd->use_aio;
|
2014-12-31 13:22:57 +00:00
|
|
|
|
2015-09-27 19:01:50 +00:00
|
|
|
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
|
|
|
|
ret = -EIO;
|
2014-12-31 13:22:57 +00:00
|
|
|
goto failed;
|
2015-09-27 19:01:50 +00:00
|
|
|
}
|
2014-12-31 13:22:57 +00:00
|
|
|
|
2023-03-14 18:21:54 +00:00
|
|
|
if (cmd_blkcg_css)
|
|
|
|
kthread_associate_blkcg(cmd_blkcg_css);
|
|
|
|
if (cmd_memcg_css)
|
2021-06-29 02:38:21 +00:00
|
|
|
old_memcg = set_active_memcg(
|
2023-03-14 18:21:54 +00:00
|
|
|
mem_cgroup_from_css(cmd_memcg_css));
|
2021-06-29 02:38:21 +00:00
|
|
|
|
2023-03-14 18:21:54 +00:00
|
|
|
/*
|
|
|
|
* do_req_filebacked() may call blk_mq_complete_request() synchronously
|
|
|
|
* or asynchronously if using aio. Hence, do not touch 'cmd' after
|
|
|
|
* do_req_filebacked() has returned unless we are sure that 'cmd' has
|
|
|
|
* not yet been completed.
|
|
|
|
*/
|
2018-04-13 22:24:29 +00:00
|
|
|
ret = do_req_filebacked(lo, rq);
|
2021-06-29 02:38:21 +00:00
|
|
|
|
2023-03-14 18:21:54 +00:00
|
|
|
if (cmd_blkcg_css)
|
2021-06-29 02:38:21 +00:00
|
|
|
kthread_associate_blkcg(NULL);
|
|
|
|
|
2023-03-14 18:21:54 +00:00
|
|
|
if (cmd_memcg_css) {
|
2021-06-29 02:38:21 +00:00
|
|
|
set_active_memcg(old_memcg);
|
2023-03-14 18:21:54 +00:00
|
|
|
css_put(cmd_memcg_css);
|
2021-06-29 02:38:21 +00:00
|
|
|
}
|
2014-12-31 13:22:57 +00:00
|
|
|
failed:
|
2015-08-17 02:31:51 +00:00
|
|
|
/* complete non-aio request */
|
2023-03-14 18:21:54 +00:00
|
|
|
if (!use_aio || ret) {
|
2020-04-03 14:43:03 +00:00
|
|
|
if (ret == -EOPNOTSUPP)
|
|
|
|
cmd->ret = ret;
|
|
|
|
else
|
|
|
|
cmd->ret = ret ? -EIO : 0;
|
2020-06-11 06:44:47 +00:00
|
|
|
if (likely(!blk_should_fake_timeout(rq->q)))
|
|
|
|
blk_mq_complete_request(rq);
|
2017-04-20 14:03:02 +00:00
|
|
|
}
|
2014-12-31 13:22:57 +00:00
|
|
|
}
|
|
|
|
|
2021-06-29 02:38:15 +00:00
|
|
|
static void loop_process_work(struct loop_worker *worker,
|
|
|
|
struct list_head *cmd_list, struct loop_device *lo)
|
2014-12-31 13:22:57 +00:00
|
|
|
{
|
2021-06-29 02:38:15 +00:00
|
|
|
int orig_flags = current->flags;
|
|
|
|
struct loop_cmd *cmd;
|
2014-12-31 13:22:57 +00:00
|
|
|
|
2021-06-29 02:38:15 +00:00
|
|
|
current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
|
|
|
|
spin_lock_irq(&lo->lo_work_lock);
|
|
|
|
while (!list_empty(cmd_list)) {
|
|
|
|
cmd = container_of(
|
|
|
|
cmd_list->next, struct loop_cmd, list_entry);
|
|
|
|
list_del(cmd_list->next);
|
|
|
|
spin_unlock_irq(&lo->lo_work_lock);
|
|
|
|
|
|
|
|
loop_handle_cmd(cmd);
|
|
|
|
cond_resched();
|
|
|
|
|
|
|
|
spin_lock_irq(&lo->lo_work_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We only add to the idle list if there are no pending cmds
|
|
|
|
 * *and* the worker will not run again, which ensures that it
|
|
|
|
* is safe to free any worker on the idle list
|
|
|
|
*/
|
|
|
|
if (worker && !work_pending(&worker->work)) {
|
|
|
|
worker->last_ran_at = jiffies;
|
|
|
|
list_add_tail(&worker->idle_list, &lo->idle_worker_list);
|
|
|
|
loop_set_timer(lo);
|
|
|
|
}
|
|
|
|
spin_unlock_irq(&lo->lo_work_lock);
|
|
|
|
current->flags = orig_flags;
|
2014-12-31 13:22:57 +00:00
|
|
|
}
|
|
|
|
|
2021-06-29 02:38:15 +00:00
|
|
|
static void loop_workfn(struct work_struct *work)
|
2014-12-31 13:22:57 +00:00
|
|
|
{
|
2021-06-29 02:38:15 +00:00
|
|
|
struct loop_worker *worker =
|
|
|
|
container_of(work, struct loop_worker, work);
|
|
|
|
loop_process_work(worker, &worker->cmd_list, worker->lo);
|
|
|
|
}
|
2014-12-31 13:22:57 +00:00
|
|
|
|
2021-06-29 02:38:15 +00:00
|
|
|
static void loop_rootcg_workfn(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct loop_device *lo =
|
|
|
|
container_of(work, struct loop_device, rootcg_work);
|
|
|
|
loop_process_work(NULL, &lo->rootcg_cmd_list, lo);
|
|
|
|
}
|
|
|
|
|
2017-03-30 20:39:16 +00:00
|
|
|
static const struct blk_mq_ops loop_mq_ops = {
|
2014-12-31 13:22:57 +00:00
|
|
|
.queue_rq = loop_queue_rq,
|
2017-04-20 14:03:02 +00:00
|
|
|
.complete = lo_complete_rq,
|
2014-12-31 13:22:57 +00:00
|
|
|
};
|
|
|
|
|
2021-06-23 14:59:02 +00:00
|
|
|
static int loop_add(int i)
|
2007-05-08 07:28:20 +00:00
|
|
|
{
|
2024-02-13 07:34:24 +00:00
|
|
|
struct queue_limits lim = {
|
|
|
|
/*
|
|
|
|
* Random number picked from the historic block max_sectors cap.
|
|
|
|
*/
|
|
|
|
.max_hw_sectors = 2560u,
|
|
|
|
};
|
2007-05-08 07:28:20 +00:00
|
|
|
struct loop_device *lo;
|
|
|
|
struct gendisk *disk;
|
2011-07-31 20:08:04 +00:00
|
|
|
int err;
|
2007-05-08 07:28:20 +00:00
|
|
|
|
2012-07-14 22:39:58 +00:00
|
|
|
err = -ENOMEM;
|
2007-05-08 07:28:20 +00:00
|
|
|
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
|
2012-07-14 22:39:58 +00:00
|
|
|
if (!lo)
|
2007-05-08 07:28:20 +00:00
|
|
|
goto out;
|
2022-03-30 05:29:09 +00:00
|
|
|
lo->worker_tree = RB_ROOT;
|
|
|
|
INIT_LIST_HEAD(&lo->idle_worker_list);
|
|
|
|
timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE);
|
2013-10-15 20:14:38 +00:00
|
|
|
lo->lo_state = Lo_unbound;
|
|
|
|
|
2021-06-23 14:59:05 +00:00
|
|
|
err = mutex_lock_killable(&loop_ctl_mutex);
|
|
|
|
if (err)
|
|
|
|
goto out_free_dev;
|
|
|
|
|
2013-02-28 01:03:58 +00:00
|
|
|
/* allocate id; if @i >= 0, we're requesting that specific id */
|
2011-07-31 20:08:04 +00:00
|
|
|
if (i >= 0) {
|
2013-02-28 01:03:58 +00:00
|
|
|
err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
|
|
|
|
if (err == -ENOSPC)
|
2011-07-31 20:08:04 +00:00
|
|
|
err = -EEXIST;
|
|
|
|
} else {
|
2013-02-28 01:03:58 +00:00
|
|
|
err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
|
2011-07-31 20:08:04 +00:00
|
|
|
}
|
2021-09-02 00:07:35 +00:00
|
|
|
mutex_unlock(&loop_ctl_mutex);
|
2011-07-31 20:08:04 +00:00
|
|
|
if (err < 0)
|
2021-09-02 00:07:35 +00:00
|
|
|
goto out_free_dev;
|
2013-02-28 01:03:58 +00:00
|
|
|
i = err;
|
2007-05-08 07:28:20 +00:00
|
|
|
|
2014-12-31 13:22:57 +00:00
|
|
|
lo->tag_set.ops = &loop_mq_ops;
|
|
|
|
lo->tag_set.nr_hw_queues = 1;
|
2022-02-15 21:33:10 +00:00
|
|
|
lo->tag_set.queue_depth = hw_queue_depth;
|
2014-12-31 13:22:57 +00:00
|
|
|
lo->tag_set.numa_node = NUMA_NO_NODE;
|
|
|
|
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
|
2021-08-05 17:42:00 +00:00
|
|
|
lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
|
|
|
|
BLK_MQ_F_NO_SCHED_BY_DEFAULT;
|
2014-12-31 13:22:57 +00:00
|
|
|
lo->tag_set.driver_data = lo;
|
|
|
|
|
|
|
|
err = blk_mq_alloc_tag_set(&lo->tag_set);
|
|
|
|
if (err)
|
2013-10-14 16:12:24 +00:00
|
|
|
goto out_free_idr;
|
2007-05-08 07:28:20 +00:00
|
|
|
|
2024-02-13 07:34:24 +00:00
|
|
|
disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo);
|
2021-06-02 06:53:33 +00:00
|
|
|
if (IS_ERR(disk)) {
|
|
|
|
err = PTR_ERR(disk);
|
2014-12-31 13:22:57 +00:00
|
|
|
goto out_cleanup_tags;
|
|
|
|
}
|
2021-06-02 06:53:33 +00:00
|
|
|
lo->lo_queue = lo->lo_disk->queue;
|
2013-10-15 20:14:38 +00:00
|
|
|
|
loop: always allow userspace partitions and optionally support automatic scanning
Automatic partition scanning can be requested individually per loop
device during its setup by setting LO_FLAGS_PARTSCAN. By default, no
partition tables are scanned.
Userspace can now always add and remove partitions from all loop
devices, regardless of whether the in-kernel partition scanner is enabled or
not.
The needed partition minor numbers are allocated from the extended
minors space, the main loop device numbers will continue to match the
loop minors, regardless of the number of partitions used.
# grep . /sys/class/block/loop1/loop/*
/sys/block/loop1/loop/autoclear:0
/sys/block/loop1/loop/backing_file:/home/kay/data/stuff/part.img
/sys/block/loop1/loop/offset:0
/sys/block/loop1/loop/partscan:1
/sys/block/loop1/loop/sizelimit:0
# ls -l /dev/loop*
brw-rw---- 1 root disk 7, 0 Aug 14 20:22 /dev/loop0
brw-rw---- 1 root disk 7, 1 Aug 14 20:23 /dev/loop1
brw-rw---- 1 root disk 259, 0 Aug 14 20:23 /dev/loop1p1
brw-rw---- 1 root disk 259, 1 Aug 14 20:23 /dev/loop1p2
brw-rw---- 1 root disk 7, 99 Aug 14 20:23 /dev/loop99
brw-rw---- 1 root disk 259, 2 Aug 14 20:23 /dev/loop99p1
brw-rw---- 1 root disk 259, 3 Aug 14 20:23 /dev/loop99p2
crw------T 1 root root 10, 237 Aug 14 20:22 /dev/loop-control
Cc: Karel Zak <kzak@redhat.com>
Cc: Davidlohr Bueso <dave@gnu.org>
Acked-By: Tejun Heo <tj@kernel.org>
Signed-off-by: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
2011-08-23 18:12:04 +00:00
|
|
|
/*
|
|
|
|
* Disable partition scanning by default. The in-kernel partition
|
|
|
|
* scanning can be requested individually per-device during its
|
|
|
|
* setup. Userspace can always add and remove partitions from all
|
|
|
|
* devices. The needed partition minors are allocated from the
|
|
|
|
* extended minor space, the main loop device numbers will continue
|
|
|
|
* to match the loop minors, regardless of the number of partitions
|
|
|
|
* used.
|
|
|
|
*
|
|
|
|
* If max_part is given, partition scanning is globally enabled for
|
|
|
|
* all loop devices. The minors for the main loop devices will be
|
|
|
|
* multiples of max_part.
|
|
|
|
*
|
|
|
|
* Note: Global-for-all-devices, set-only-at-init, read-only module
|
|
|
|
 * parameters like 'max_loop' and 'max_part' make things needlessly
|
|
|
|
* complicated, are too static, inflexible and may surprise
|
|
|
|
* userspace tools. Parameters like this in general should be avoided.
|
|
|
|
*/
|
|
|
|
if (!part_shift)
|
2022-05-27 05:58:06 +00:00
|
|
|
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
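To illustrate the per-device scanning mentioned in the comment above,
userspace can set LO_FLAGS_PARTSCAN through LOOP_SET_STATUS64; a
sketch, assuming /dev/loop0 is already bound to an image that carries
a partition table:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	struct loop_info64 info;
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/loop0");
		return 1;
	}
	if (ioctl(fd, LOOP_GET_STATUS64, &info) < 0) {
		perror("LOOP_GET_STATUS64");
		return 1;
	}
	/* ask the kernel to scan this device's partition table */
	info.lo_flags |= LO_FLAGS_PARTSCAN;
	if (ioctl(fd, LOOP_SET_STATUS64, &info) < 0)
		perror("LOOP_SET_STATUS64");
	close(fd);
	return 0;
}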
|
2021-01-26 14:46:30 +00:00
|
|
|
mutex_init(&lo->lo_mutex);
|
2007-05-08 07:28:20 +00:00
|
|
|
lo->lo_number = i;
|
|
|
|
spin_lock_init(&lo->lo_lock);
|
2021-06-29 02:38:15 +00:00
|
|
|
spin_lock_init(&lo->lo_work_lock);
|
2022-03-30 05:29:17 +00:00
|
|
|
INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn);
|
|
|
|
INIT_LIST_HEAD(&lo->rootcg_cmd_list);
|
2007-05-08 07:28:20 +00:00
|
|
|
disk->major = LOOP_MAJOR;
|
loop: manage partitions in disk image
This patch allows a loop device to be used with a partitioned disk image.
The original behavior of loop is not modified.
A new parameter is introduced to define how many partitions we want to be
able to manage per loop device. This parameter is "max_part".
For instance, to manage 63 partitions per loop device, we will do:
# modprobe loop max_part=63
# ls -l /dev/loop?*
brw-rw---- 1 root disk 7, 0 2008-03-05 14:55 /dev/loop0
brw-rw---- 1 root disk 7, 64 2008-03-05 14:55 /dev/loop1
brw-rw---- 1 root disk 7, 128 2008-03-05 14:55 /dev/loop2
brw-rw---- 1 root disk 7, 192 2008-03-05 14:55 /dev/loop3
brw-rw---- 1 root disk 7, 256 2008-03-05 14:55 /dev/loop4
brw-rw---- 1 root disk 7, 320 2008-03-05 14:55 /dev/loop5
brw-rw---- 1 root disk 7, 384 2008-03-05 14:55 /dev/loop6
brw-rw---- 1 root disk 7, 448 2008-03-05 14:55 /dev/loop7
And to attach a raw partitioned disk image, the original losetup is used:
# losetup -f etch.img
# ls -l /dev/loop?*
brw-rw---- 1 root disk 7, 0 2008-03-05 14:55 /dev/loop0
brw-rw---- 1 root disk 7, 1 2008-03-05 14:57 /dev/loop0p1
brw-rw---- 1 root disk 7, 2 2008-03-05 14:57 /dev/loop0p2
brw-rw---- 1 root disk 7, 5 2008-03-05 14:57 /dev/loop0p5
brw-rw---- 1 root disk 7, 64 2008-03-05 14:55 /dev/loop1
brw-rw---- 1 root disk 7, 128 2008-03-05 14:55 /dev/loop2
brw-rw---- 1 root disk 7, 192 2008-03-05 14:55 /dev/loop3
brw-rw---- 1 root disk 7, 256 2008-03-05 14:55 /dev/loop4
brw-rw---- 1 root disk 7, 320 2008-03-05 14:55 /dev/loop5
brw-rw---- 1 root disk 7, 384 2008-03-05 14:55 /dev/loop6
brw-rw---- 1 root disk 7, 448 2008-03-05 14:55 /dev/loop7
# mount /dev/loop0p1 /mnt
# ls /mnt
bench cdrom home lib mnt root srv usr
bin dev initrd lost+found opt sbin sys var
boot etc initrd.img media proc selinux tmp vmlinuz
# umount /mnt
# losetup -d /dev/loop0
Of course, the same behavior can be achieved using kpartx on a loop device,
but modifying loop avoids stacking several layers of block devices (loop +
device mapper), and this is a very light modification (40% of the changes
are to manage the new parameter).
Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
2008-03-26 11:11:53 +00:00
|
|
|
disk->first_minor = i << part_shift;
|
2021-06-02 06:53:33 +00:00
|
|
|
disk->minors = 1 << part_shift;
|
2007-05-08 07:28:20 +00:00
|
|
|
disk->fops = &lo_fops;
|
|
|
|
disk->private_data = lo;
|
|
|
|
disk->queue = lo->lo_queue;
|
2021-07-12 23:05:30 +00:00
|
|
|
disk->events = DISK_EVENT_MEDIA_CHANGE;
|
|
|
|
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
|
2007-05-08 07:28:20 +00:00
|
|
|
sprintf(disk->disk_name, "loop%d", i);
|
2021-09-02 00:07:35 +00:00
|
|
|
/* Make this loop device reachable from pathname. */
|
2021-09-27 21:59:57 +00:00
|
|
|
err = add_disk(disk);
|
|
|
|
if (err)
|
|
|
|
goto out_cleanup_disk;
|
|
|
|
|
2021-09-02 00:07:35 +00:00
|
|
|
/* Show this loop device. */
|
|
|
|
mutex_lock(&loop_ctl_mutex);
|
|
|
|
lo->idr_visible = true;
|
2021-06-23 14:59:05 +00:00
|
|
|
mutex_unlock(&loop_ctl_mutex);
|
2021-09-27 21:59:57 +00:00
|
|
|
|
2021-06-23 14:59:05 +00:00
|
|
|
return i;
|
2007-05-08 07:28:20 +00:00
|
|
|
|
2021-09-27 21:59:57 +00:00
|
|
|
out_cleanup_disk:
|
2022-06-19 06:05:52 +00:00
|
|
|
put_disk(disk);
|
2014-12-31 13:22:57 +00:00
|
|
|
out_cleanup_tags:
|
|
|
|
blk_mq_free_tag_set(&lo->tag_set);
|
2013-10-14 16:12:24 +00:00
|
|
|
out_free_idr:
|
2021-09-02 00:07:35 +00:00
|
|
|
mutex_lock(&loop_ctl_mutex);
|
2013-10-14 16:12:24 +00:00
|
|
|
idr_remove(&loop_index_idr, i);
|
2021-06-23 14:59:05 +00:00
|
|
|
mutex_unlock(&loop_ctl_mutex);
|
2007-05-08 07:28:20 +00:00
|
|
|
out_free_dev:
|
|
|
|
kfree(lo);
|
|
|
|
out:
|
2011-07-31 20:08:04 +00:00
|
|
|
return err;
|
2007-05-08 07:28:20 +00:00
|
|
|
}
|
|
|
|
|
2011-07-31 20:08:04 +00:00
|
|
|
static void loop_remove(struct loop_device *lo)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2021-09-02 00:07:35 +00:00
|
|
|
/* Make this loop device unreachable from pathname. */
|
2015-04-27 04:12:22 +00:00
|
|
|
del_gendisk(lo->lo_disk);
|
2014-12-31 13:22:57 +00:00
|
|
|
blk_mq_free_tag_set(&lo->tag_set);
|
2022-03-15 12:27:07 +00:00
|
|
|
|
2021-09-02 00:07:35 +00:00
|
|
|
mutex_lock(&loop_ctl_mutex);
|
|
|
|
idr_remove(&loop_index_idr, lo->lo_number);
|
|
|
|
mutex_unlock(&loop_ctl_mutex);
|
2022-03-30 05:29:13 +00:00
|
|
|
|
|
|
|
put_disk(lo->lo_disk);
|
2007-05-08 07:28:20 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2023-07-20 14:30:32 +00:00
|
|
|
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
|
2020-10-29 14:58:33 +00:00
|
|
|
static void loop_probe(dev_t dev)
|
2007-05-08 07:28:20 +00:00
|
|
|
{
|
2020-10-29 14:58:33 +00:00
|
|
|
int idx = MINOR(dev) >> part_shift;
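/*
 * e.g. with max_part=63 (part_shift == 6), opening minor 130
 * (/dev/loop2p2) yields idx = 130 >> 6 = 2, so loop2 is probed.
 */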
|
|
|
|
|
loop: do not enforce max_loop hard limit by (new) default
Problem:
The max_loop parameter is used for 2 different purposes:
1) initial number of loop devices to pre-create on init
2) maximum number of loop devices to add on access/open()
Historically, its default value (zero) caused 1) to create a non-zero
number of devices (CONFIG_BLK_DEV_LOOP_MIN_COUNT), and placed no hard
limit on 2), so devices could be added via autoloading.
However, the default value changed in commit 85c50197716c ("loop: Fix
the max_loop commandline argument treatment when it is set to 0") to
CONFIG_BLK_DEV_LOOP_MIN_COUNT, for max_loop=0 not to pre-create devices.
That does improve 1), but unfortunately it breaks 2), as the default
behavior changed from no-limit to hard-limit.
Example:
This userspace code broke for N >= CONFIG if the user relied on the
default value (0) of max_loop:
mknod("/dev/loopN");
open("/dev/loopN"); // now fails with ENXIO
Though affected users may "fix" it with (loop.)max_loop=0, this would
require a kernel parameter change on a stable kernel update (that
commit carries a Fixes: tag for an old commit in stable).
Solution:
The original semantics for the default value in 2) can be applied when
the parameter is not set (i.e., the default behavior).
This still keeps the intended function of 1) and 2) when the parameter
is set, and that commit's intended improvement to 1) when max_loop=0.
Before 85c50197716c:
- default: 1) CONFIG devices 2) no limit
- max_loop=0: 1) CONFIG devices 2) no limit
- max_loop=X: 1) X devices 2) X limit
After 85c50197716c:
- default: 1) CONFIG devices 2) CONFIG limit (*)
- max_loop=0: 1) 0 devices (*) 2) no limit
- max_loop=X: 1) X devices 2) X limit
This commit:
- default: 1) CONFIG devices 2) no limit (*)
- max_loop=0: 1) 0 devices 2) no limit
- max_loop=X: 1) X devices 2) X limit
Future:
The issue/regression from that commit only affects code under the
CONFIG_BLOCK_LEGACY_AUTOLOAD deprecation guard, so the fix is likewise
contained under it.
Once that deprecated functionality/code is removed, the purpose 2) of
max_loop (hard limit) is no longer in use, so the module parameter
description can be changed then.
Tests:
Linux 6.4-rc7
CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
CONFIG_BLOCK_LEGACY_AUTOLOAD=y
- default (original)
# ls -1 /dev/loop*
/dev/loop-control
/dev/loop0
...
/dev/loop7
# ./test-loop
open: /dev/loop8: No such device or address
- default (patched)
# ls -1 /dev/loop*
/dev/loop-control
/dev/loop0
...
/dev/loop7
# ./test-loop
#
- max_loop=0 (original & patched):
# ls -1 /dev/loop*
/dev/loop-control
# ./test-loop
#
- max_loop=8 (original & patched):
# ls -1 /dev/loop*
/dev/loop-control
/dev/loop0
...
/dev/loop7
# ./test-loop
open: /dev/loop8: No such device or address
- max_loop=0 (patched; CONFIG_BLOCK_LEGACY_AUTOLOAD is not set)
# ls -1 /dev/loop*
/dev/loop-control
# ./test-loop
open: /dev/loop8: No such device or address
Fixes: 85c50197716c ("loop: Fix the max_loop commandline argument treatment when it is set to 0")
Signed-off-by: Mauricio Faria de Oliveira <mfo@canonical.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230720143033.841001-3-mfo@canonical.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2023-07-20 14:30:33 +00:00
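The ./test-loop helper is referenced but not included in the commit; a
minimal reconstruction under the stated assumptions (LOOP_MAJOR 7,
default part_shift of 0, first non-pre-created index 8) might be:
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/dev/loop8";	/* one past loop0..loop7 */
	int fd;

	/* create the node if udev has not; minor 8 maps to index 8 */
	if (mknod(path, S_IFBLK | 0660, makedev(7, 8)) < 0 && errno != EEXIST) {
		perror("mknod");
		return 1;
	}
	/* open() reaches loop_probe() on CONFIG_BLOCK_LEGACY_AUTOLOAD kernels */
	fd = open(path, O_RDWR);
	if (fd < 0) {
		fprintf(stderr, "open: %s: %s\n", path, strerror(errno));
		return 1;
	}
	close(fd);
	return 0;
}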
|
|
|
if (max_loop_specified && max_loop && idx >= max_loop)
|
2020-10-29 14:58:33 +00:00
|
|
|
return;
|
2021-06-23 14:59:03 +00:00
|
|
|
loop_add(idx);
|
2021-06-23 14:59:04 +00:00
|
|
|
}
|
2023-07-20 14:30:32 +00:00
|
|
|
#else
|
|
|
|
#define loop_probe NULL
|
|
|
|
#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
|
2021-06-23 14:59:04 +00:00
|
|
|
|
|
|
|
static int loop_control_remove(int idx)
|
|
|
|
{
|
|
|
|
struct loop_device *lo;
|
|
|
|
int ret;
|
2021-06-23 14:59:06 +00:00
|
|
|
|
|
|
|
if (idx < 0) {
|
2021-11-29 10:00:43 +00:00
|
|
|
pr_warn_once("deleting an unspecified loop device is not supported.\n");
|
2021-06-23 14:59:06 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2021-06-23 14:59:04 +00:00
|
|
|
|
2021-09-02 00:07:35 +00:00
|
|
|
/* Hide this loop device for serialization. */
|
2021-06-23 14:59:04 +00:00
|
|
|
ret = mutex_lock_killable(&loop_ctl_mutex);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2021-06-23 14:59:07 +00:00
|
|
|
lo = idr_find(&loop_index_idr, idx);
|
2021-09-02 00:07:35 +00:00
|
|
|
if (!lo || !lo->idr_visible)
|
2021-06-23 14:59:07 +00:00
|
|
|
ret = -ENODEV;
|
2021-09-02 00:07:35 +00:00
|
|
|
else
|
|
|
|
lo->idr_visible = false;
|
|
|
|
mutex_unlock(&loop_ctl_mutex);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2021-06-23 14:59:04 +00:00
|
|
|
|
2021-09-02 00:07:35 +00:00
|
|
|
/* Check whether this loop device can be removed. */
|
2021-06-23 14:59:04 +00:00
|
|
|
ret = mutex_lock_killable(&lo->lo_mutex);
|
|
|
|
if (ret)
|
2021-09-02 00:07:35 +00:00
|
|
|
goto mark_visible;
|
2022-03-30 05:29:16 +00:00
|
|
|
if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) {
|
2021-06-23 14:59:04 +00:00
|
|
|
mutex_unlock(&lo->lo_mutex);
|
|
|
|
ret = -EBUSY;
|
2021-09-02 00:07:35 +00:00
|
|
|
goto mark_visible;
|
2021-06-23 14:59:04 +00:00
|
|
|
}
|
2022-03-30 05:29:16 +00:00
|
|
|
/* Mark this loop device as no more bound, but not quite unbound yet */
|
2021-06-23 14:59:04 +00:00
|
|
|
lo->lo_state = Lo_deleting;
|
|
|
|
mutex_unlock(&lo->lo_mutex);
|
|
|
|
|
|
|
|
loop_remove(lo);
|
2021-09-02 00:07:35 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
mark_visible:
|
|
|
|
/* Show this loop device again. */
|
|
|
|
mutex_lock(&loop_ctl_mutex);
|
|
|
|
lo->idr_visible = true;
|
2021-06-23 14:59:04 +00:00
|
|
|
mutex_unlock(&loop_ctl_mutex);
|
|
|
|
return ret;
|
|
|
|
}
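From userspace the -EBUSY path above surfaces as errno; a hedged sketch
of handling it, reusing the names from the management-interface example:
cfd = open("/dev/loop-control", O_RDWR);
if (ioctl(cfd, LOOP_CTL_REMOVE, devnr) < 0 && errno == EBUSY)
	fprintf(stderr, "loop%d is still bound or open\n", devnr);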
|
|
|
|
|
|
|
|
static int loop_control_get_free(int idx)
|
2011-07-31 20:08:04 +00:00
|
|
|
{
|
|
|
|
struct loop_device *lo;
|
2021-06-23 14:59:07 +00:00
|
|
|
int id, ret;
|
2011-07-31 20:08:04 +00:00
|
|
|
|
2018-11-08 13:01:04 +00:00
|
|
|
ret = mutex_lock_killable(&loop_ctl_mutex);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2021-06-23 14:59:07 +00:00
|
|
|
idr_for_each_entry(&loop_index_idr, lo, id) {
|
2021-09-02 00:07:35 +00:00
|
|
|
/* Hitting a race results in creating a new loop device which is harmless. */
|
|
|
|
if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
|
2021-06-23 14:59:07 +00:00
|
|
|
goto found;
|
|
|
|
}
|
2021-06-23 14:59:04 +00:00
|
|
|
mutex_unlock(&loop_ctl_mutex);
|
2021-06-23 14:59:05 +00:00
|
|
|
return loop_add(-1);
|
2021-06-23 14:59:07 +00:00
|
|
|
found:
|
|
|
|
mutex_unlock(&loop_ctl_mutex);
|
|
|
|
return id;
|
2021-06-23 14:59:04 +00:00
|
|
|
}
|
2018-11-08 13:01:04 +00:00
|
|
|
|
2021-06-23 14:59:04 +00:00
|
|
|
static long loop_control_ioctl(struct file *file, unsigned int cmd,
|
|
|
|
unsigned long parm)
|
|
|
|
{
|
2011-07-31 20:08:04 +00:00
|
|
|
switch (cmd) {
|
|
|
|
case LOOP_CTL_ADD:
|
2021-06-23 14:59:05 +00:00
|
|
|
return loop_add(parm);
|
2011-07-31 20:08:04 +00:00
|
|
|
case LOOP_CTL_REMOVE:
|
2021-06-23 14:59:04 +00:00
|
|
|
return loop_control_remove(parm);
|
2011-07-31 20:08:04 +00:00
|
|
|
case LOOP_CTL_GET_FREE:
|
2021-06-23 14:59:04 +00:00
|
|
|
return loop_control_get_free(parm);
|
|
|
|
default:
|
|
|
|
return -ENOSYS;
|
2011-07-31 20:08:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations loop_ctl_fops = {
|
|
|
|
.open = nonseekable_open,
|
|
|
|
.unlocked_ioctl = loop_control_ioctl,
|
|
|
|
.compat_ioctl = loop_control_ioctl,
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.llseek = noop_llseek,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct miscdevice loop_misc = {
|
|
|
|
.minor = LOOP_CTRL_MINOR,
|
|
|
|
.name = "loop-control",
|
|
|
|
.fops = &loop_ctl_fops,
|
|
|
|
};
|
|
|
|
|
|
|
|
MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
|
|
|
|
MODULE_ALIAS("devname:loop-control");
|
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
static int __init loop_init(void)
|
|
|
|
{
|
2022-12-08 21:29:01 +00:00
|
|
|
int i;
|
2011-07-31 20:08:04 +00:00
|
|
|
int err;
|
2007-06-08 20:46:44 +00:00
|
|
|
|
2008-03-26 11:11:53 +00:00
|
|
|
part_shift = 0;
|
2011-05-27 05:59:25 +00:00
|
|
|
if (max_part > 0) {
|
2008-03-26 11:11:53 +00:00
|
|
|
part_shift = fls(max_part);
|
|
|
|
|
2011-05-27 05:59:25 +00:00
|
|
|
/*
|
|
|
|
* Adjust max_part according to part_shift as it is exported
|
|
|
|
* to user space so that user can decide correct minor number
|
|
|
|
* if [s]he want to create more devices.
|
|
|
|
*
|
|
|
|
* Note that -1 is required because partition 0 is reserved
|
|
|
|
* for the whole disk.
|
|
|
|
*/
|
|
|
|
max_part = (1UL << part_shift) - 1;
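/*
 * Worked example: max_part=63 -> fls(63) == 6, so max_part
 * becomes (1 << 6) - 1 = 63; a non-power-of-two request such
 * as max_part=100 -> fls(100) == 7 -> max_part = 127.
 */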
|
|
|
|
}
|
|
|
|
|
2013-02-21 23:16:49 +00:00
|
|
|
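/* DISK_MAX_PARTS is 256, so a part_shift greater than 8 is rejected */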
if ((1UL << part_shift) > DISK_MAX_PARTS) {
|
|
|
|
err = -EINVAL;
|
2017-08-07 12:37:50 +00:00
|
|
|
goto err_out;
|
2013-02-21 23:16:49 +00:00
|
|
|
}
|
2011-05-24 14:48:54 +00:00
|
|
|
|
2013-02-21 23:16:49 +00:00
|
|
|
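/* MINORBITS is 20, so e.g. part_shift == 6 caps max_loop at 1 << 14 */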
if (max_loop > 1UL << (MINORBITS - part_shift)) {
|
|
|
|
err = -EINVAL;
|
2017-08-07 12:37:50 +00:00
|
|
|
goto err_out;
|
2013-02-21 23:16:49 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2017-08-07 12:37:50 +00:00
|
|
|
err = misc_register(&loop_misc);
|
|
|
|
if (err < 0)
|
|
|
|
goto err_out;
|
|
|
|
|
|
|
|
|
2020-10-29 14:58:33 +00:00
|
|
|
if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
|
2013-02-21 23:16:49 +00:00
|
|
|
err = -EIO;
|
|
|
|
goto misc_out;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-31 20:08:04 +00:00
|
|
|
/* pre-create number of devices given by config or max_loop */
|
2022-12-08 21:29:01 +00:00
|
|
|
for (i = 0; i < max_loop; i++)
|
2021-06-23 14:59:02 +00:00
|
|
|
loop_add(i);
|
2011-07-31 20:08:04 +00:00
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
printk(KERN_INFO "loop: module loaded\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
2013-02-21 23:16:49 +00:00
|
|
|
|
|
|
|
misc_out:
|
|
|
|
misc_deregister(&loop_misc);
|
2017-08-07 12:37:50 +00:00
|
|
|
err_out:
|
2013-02-21 23:16:49 +00:00
|
|
|
return err;
|
2011-07-31 20:08:04 +00:00
|
|
|
}
|
2007-06-08 20:46:44 +00:00
|
|
|
|
2007-05-08 07:28:20 +00:00
|
|
|
static void __exit loop_exit(void)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2021-06-23 14:59:08 +00:00
|
|
|
struct loop_device *lo;
|
|
|
|
int id;
|
|
|
|
|
2007-07-17 11:03:46 +00:00
|
|
|
unregister_blkdev(LOOP_MAJOR, "loop");
|
2011-07-31 20:08:04 +00:00
|
|
|
misc_deregister(&loop_misc);
|
2020-06-19 20:47:27 +00:00
|
|
|
|
2021-09-02 00:07:35 +00:00
|
|
|
/*
|
|
|
|
* There is no need to use loop_ctl_mutex here, for nobody else can
|
|
|
|
* access loop_index_idr when this module is unloading (unless forced
|
|
|
|
* module unloading is requested). If this is not a clean unloading,
|
|
|
|
* we have no means to avoid kernel crash.
|
|
|
|
*/
|
2021-06-23 14:59:08 +00:00
|
|
|
idr_for_each_entry(&loop_index_idr, lo, id)
|
|
|
|
loop_remove(lo);
|
2021-06-23 14:59:01 +00:00
|
|
|
|
|
|
|
idr_destroy(&loop_index_idr);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
module_init(loop_init);
|
|
|
|
module_exit(loop_exit);
|
|
|
|
|
|
|
|
#ifndef MODULE
|
|
|
|
static int __init max_loop_setup(char *str)
|
|
|
|
{
|
|
|
|
max_loop = simple_strtol(str, NULL, 0);
|
2023-07-20 14:30:33 +00:00
|
|
|
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
|
|
|
|
max_loop_specified = true;
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
__setup("max_loop=", max_loop_setup);
|
|
|
|
#endif
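As a usage note: the __setup() hook above only runs when the driver is
built in, so the limit is then passed on the kernel command line; a
modular build takes it as a module parameter instead (values here are
illustrative):
max_loop=8                                (built-in, kernel command line)
# modprobe loop max_loop=8 max_part=63    (module)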
|