forked from Minki/linux
d35a878ae1
whether blocks should migrate to/from the cache. The bio-prison-v2 interface supports this improvement by enabling direct dispatch of work to workqueues rather than having to delay the actual work dispatch to the DM cache core. So the dm-cache policies are much more nimble by being able to drive IO as they see fit. One immediate benefit from the improved latency is a cache that should be much more adaptive to changing workloads. - Add a new DM integrity target that emulates a block device that has additional per-sector tags that can be used for storing integrity information. - Add a new authenticated encryption feature to the DM crypt target that builds on the capabilities provided by the DM integrity target. - Add MD interface for switching the raid4/5/6 journal mode and update the DM raid target to use it to enable raid4/5/6 journal write-back support. - Switch the DM verity target over to using the asynchronous hash crypto API (this helps work better with architectures that have access to off-CPU algorithm providers, which should reduce CPU utilization). - Various request-based DM and DM multipath fixes and improvements from Bart and Christoph. - A DM thinp target fix for a bio structure leak that occurs for each discard IFF discard passdown is enabled. - A fix for a possible deadlock in DM bufio and a fix to re-check the new buffer allocation watermark in the face of competing admin changes to the 'max_cache_size_bytes' tunable. - A couple DM core cleanups. 
-----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQEcBAABAgAGBQJZB6vtAAoJEMUj8QotnQNaoicIALuZTLElgAzxzA28cfk1+1Ea Gd09CfJ3M6cvk/YGUU7WwiSYIwu16yOJALG4sLcYnEmUCzvKfFPcl/RpeSJHPpYM 0aVXa6NIJw7K2r3C17toiK2DRMHYw6QU843WeWI93vBW13lDJklNJL9fM7GBEOLH NMSNw2mAq9ajtLlnJhM3ZfhloA7/u/jektvlBO1AA3RQ5Kx1cXVXFPqN7FdRfcqp 4RuEMe9faAadlXLsj3bia5IBmF/W0Qza6JilP+NLKLWB4fm7LZDjN/k+TsHWMa9e cGR73TgUGLMBJX+sDJy8R3oeBG9JZkFVkD7I30eCjzyhSOs/54XNYQ23EkqHJU0= =9Ryi -----END PGP SIGNATURE----- Merge tag 'for-4.12/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm Pull device mapper updates from Mike Snitzer: - A major update for DM cache that reduces the latency for deciding whether blocks should migrate to/from the cache. The bio-prison-v2 interface supports this improvement by enabling direct dispatch of work to workqueues rather than having to delay the actual work dispatch to the DM cache core. So the dm-cache policies are much more nimble by being able to drive IO as they see fit. One immediate benefit from the improved latency is a cache that should be much more adaptive to changing workloads. - Add a new DM integrity target that emulates a block device that has additional per-sector tags that can be used for storing integrity information. - Add a new authenticated encryption feature to the DM crypt target that builds on the capabilities provided by the DM integrity target. - Add MD interface for switching the raid4/5/6 journal mode and update the DM raid target to use it to enable raid4/5/6 journal write-back support. - Switch the DM verity target over to using the asynchronous hash crypto API (this helps work better with architectures that have access to off-CPU algorithm providers, which should reduce CPU utilization). - Various request-based DM and DM multipath fixes and improvements from Bart and Christoph. - A DM thinp target fix for a bio structure leak that occurs for each discard IFF discard passdown is enabled. 
- A fix for a possible deadlock in DM bufio and a fix to re-check the new buffer allocation watermark in the face of competing admin changes to the 'max_cache_size_bytes' tunable. - A couple DM core cleanups. * tag 'for-4.12/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (50 commits) dm bufio: check new buffer allocation watermark every 30 seconds dm bufio: avoid a possible ABBA deadlock dm mpath: make it easier to detect unintended I/O request flushes dm mpath: cleanup QUEUE_IF_NO_PATH bit manipulation by introducing assign_bit() dm mpath: micro-optimize the hot path relative to MPATHF_QUEUE_IF_NO_PATH dm: introduce enum dm_queue_mode to cleanup related code dm mpath: verify __pg_init_all_paths locking assumptions at runtime dm: verify suspend_locking assumptions at runtime dm block manager: remove an unused argument from dm_block_manager_create() dm rq: check blk_mq_register_dev() return value in dm_mq_init_request_queue() dm mpath: delay requeuing while path initialization is in progress dm mpath: avoid that path removal can trigger an infinite loop dm mpath: split and rename activate_path() to prepare for its expanded use dm ioctl: prevent stack leak in dm ioctl call dm integrity: use previously calculated log2 of sectors_per_block dm integrity: use hex2bin instead of open-coded variant dm crypt: replace custom implementation of hex2bin() dm crypt: remove obsolete references to per-CPU state dm verity: switch to using asynchronous hash crypto API dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues ...
191 lines
3.8 KiB
C
191 lines
3.8 KiB
C
/*
|
|
* Copyright (C) 2001-2003 Sistina Software (UK) Limited.
|
|
*
|
|
* This file is released under the GPL.
|
|
*/
|
|
|
|
#include "dm.h"
|
|
#include <linux/module.h>
|
|
#include <linux/init.h>
|
|
#include <linux/blkdev.h>
|
|
#include <linux/bio.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/device-mapper.h>
|
|
|
|
#define DM_MSG_PREFIX "linear"
|
|
|
|
/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
	struct dm_dev *dev;	/* underlying device this target maps onto */
	sector_t start;		/* sector offset into @dev where the range begins */
};
|
|
|
|
/*
|
|
* Construct a linear mapping: <dev_path> <offset>
|
|
*/
|
|
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
{
|
|
struct linear_c *lc;
|
|
unsigned long long tmp;
|
|
char dummy;
|
|
int ret;
|
|
|
|
if (argc != 2) {
|
|
ti->error = "Invalid argument count";
|
|
return -EINVAL;
|
|
}
|
|
|
|
lc = kmalloc(sizeof(*lc), GFP_KERNEL);
|
|
if (lc == NULL) {
|
|
ti->error = "Cannot allocate linear context";
|
|
return -ENOMEM;
|
|
}
|
|
|
|
ret = -EINVAL;
|
|
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
|
|
ti->error = "Invalid device sector";
|
|
goto bad;
|
|
}
|
|
lc->start = tmp;
|
|
|
|
ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
|
|
if (ret) {
|
|
ti->error = "Device lookup failed";
|
|
goto bad;
|
|
}
|
|
|
|
ti->num_flush_bios = 1;
|
|
ti->num_discard_bios = 1;
|
|
ti->num_write_same_bios = 1;
|
|
ti->num_write_zeroes_bios = 1;
|
|
ti->private = lc;
|
|
return 0;
|
|
|
|
bad:
|
|
kfree(lc);
|
|
return ret;
|
|
}
|
|
|
|
static void linear_dtr(struct dm_target *ti)
|
|
{
|
|
struct linear_c *lc = (struct linear_c *) ti->private;
|
|
|
|
dm_put_device(ti, lc->dev);
|
|
kfree(lc);
|
|
}
|
|
|
|
/*
 * Translate a sector as seen by the target into a sector on the
 * backing device: target-relative offset plus the configured start.
 */
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return dm_target_offset(ti, bi_sector) + lc->start;
}
|
|
|
|
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
|
|
{
|
|
struct linear_c *lc = ti->private;
|
|
|
|
bio->bi_bdev = lc->dev->bdev;
|
|
if (bio_sectors(bio))
|
|
bio->bi_iter.bi_sector =
|
|
linear_map_sector(ti, bio->bi_iter.bi_sector);
|
|
}
|
|
|
|
/*
 * .map hook: remap the bio onto the backing device and tell DM core to
 * resubmit it there.
 */
static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}
|
|
|
|
/*
 * Report target status.  INFO has nothing to say for a linear target;
 * TABLE echoes the constructor arguments back to userspace.
 */
static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		/* No runtime state worth reporting. */
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		/* <dev_path> <offset>, mirroring linear_ctr()'s inputs. */
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
			 (unsigned long long)lc->start);
		break;
	}
}
|
|
|
|
/*
 * Prepare for an ioctl on the underlying device.  Returns 0 to pass the
 * ioctl through, or 1 when the mapping does not cover the whole device
 * (DM core then restricts which ioctls are allowed).
 */
static int linear_prepare_ioctl(struct dm_target *ti,
		struct block_device **bdev, fmode_t *mode)
{
	struct linear_c *lc = ti->private;
	struct block_device *linear_dev = lc->dev->bdev;

	*bdev = linear_dev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!lc->start &&
	    ti->len == i_size_read(linear_dev->bd_inode) >> SECTOR_SHIFT)
		return 0;

	return 1;
}
|
|
|
|
/*
 * .iterate_devices hook: a linear target spans exactly one contiguous
 * range of one device, so invoke the callback once for it.
 */
static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}
|
|
|
|
/*
 * DAX entry point: translate @sector onto the backing device and ask it
 * for a direct mapping.  Returns bdev_direct_access()'s result (bytes
 * available, or a negative errno) and fills *kaddr/*pfn on success.
 */
static long linear_direct_access(struct dm_target *ti, sector_t sector,
				 void **kaddr, pfn_t *pfn, long size)
{
	struct linear_c *lc = ti->private;
	struct blk_dax_ctl dax = {
		.sector = linear_map_sector(ti, sector),
		.size = size,
	};
	long r = bdev_direct_access(lc->dev->bdev, &dax);

	*kaddr = dax.addr;
	*pfn = dax.pfn;

	return r;
}
|
|
|
|
/* Operations table registered with DM core for the "linear" target. */
static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 3, 0},
	/* Integrity metadata can pass straight through to the one device. */
	.features = DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_direct_access,
};
|
|
|
|
/*
 * Register the "linear" target with DM core.  Called from dm.c during
 * module init; returns 0 on success or a negative errno.
 */
int __init dm_linear_init(void)
{
	int r;

	r = dm_register_target(&linear_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}
|
|
|
|
/* Unregister the "linear" target; called from dm.c during module exit. */
void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}
|