Merge tag 'for-6.5/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Update DM crypt to allocate compound pages if possible

 - Fix DM crypt target's crypt_ctr_cipher_new return value on invalid
   AEAD cipher

 - Fix DM flakey testing target's write bio corruption feature to
   corrupt the data of a cloned bio instead of the original

 - Add random_read_corrupt and random_write_corrupt features to DM
   flakey target

 - Fix ABBA deadlock in DM thin metadata by resetting associated bufio
   client rather than destroying and recreating it

 - A couple other small DM thinp cleanups

 - Update DM core to support disabling block core IO stats accounting
   and optimize away code that isn't needed if stats are disabled

 - Other small DM core cleanups

 - Improve DM integrity target to not require so much memory on 32 bit
   systems. Also only allocate the recalculate buffer as needed (and
   increasingly reduce its size on allocation failure)

 - Update DM integrity to use %*ph for printing hexdump of a small
   buffer. Also update DM integrity documentation

 - Various DM core ioctl interface hardening. Now more careful about
   alignment of structures and processing of input passed to the kernel
   from userspace. Also disallow the creation of DM devices named
   "control", "." or ".."

 - Eliminate GFP_NOIO workarounds for __vmalloc and kvmalloc in DM
   core's ioctl and bufio code

* tag 'for-6.5/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (28 commits)
  dm: get rid of GFP_NOIO workarounds for __vmalloc and kvmalloc
  dm integrity: scale down the recalculate buffer if memory allocation fails
  dm integrity: only allocate recalculate buffer when needed
  dm integrity: reduce vmalloc space footprint on 32-bit architectures
  dm ioctl: Refuse to create device named "." or ".."
  dm ioctl: Refuse to create device named "control"
  dm ioctl: Avoid double-fetch of version
  dm ioctl: structs and parameter strings must not overlap
  dm ioctl: Avoid pointer arithmetic overflow
  dm ioctl: Check dm_target_spec is sufficiently aligned
  Documentation: dm-integrity: Document an example of how the tunables relate.
  Documentation: dm-integrity: Document default values.
  Documentation: dm-integrity: Document the meaning of "buffer".
  Documentation: dm-integrity: Fix minor grammatical error.
  dm integrity: Use %*ph for printing hexdump of a small buffer
  dm thin: disable discards for thin-pool if no_discard_passdown
  dm: remove stale/redundant dm_internal_{suspend,resume} prototypes in dm.h
  dm: skip dm-stats work in alloc_io() unless needed
  dm: avoid needless dm_io access if all IO accounting is disabled
  dm: support turning off block-core's io stats accounting
  ...
Documentation/admin-guide/device-mapper/dm-flakey.rst
@@ -67,6 +67,16 @@ Optional feature parameters:
 	Perform the replacement only if bio->bi_opf has all the
 	selected flags set.

+  random_read_corrupt <probability>
+	During <down interval>, replace random byte in a read bio
+	with a random value. probability is an integer between
+	0 and 1000000000 meaning 0% to 100% probability of corruption.
+
+  random_write_corrupt <probability>
+	During <down interval>, replace random byte in a write bio
+	with a random value. probability is an integer between
+	0 and 1000000000 meaning 0% to 100% probability of corruption.
+
 Examples:

 Replaces the 32nd byte of READ bios with the value 1::
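(Editor's note: the <probability> scale above maps onto a simple per-bio comparison against a base of 1000000000, the same PROBABILITY_BASE constant the target defines internally. The stand-alone C sketch below is illustrative only, not driver source: it substitutes the C library RNG for the kernel's get_random_u64()/do_div() pair, whose quality is irrelevant to the illustration.)

/*
 * Illustrative only: how a <probability> value in [0, 1000000000] becomes
 * a per-bio corrupt / don't-corrupt decision.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PROBABILITY_BASE 1000000000u	/* 100% */

static bool should_corrupt(uint32_t probability)
{
	/* Uniform-ish value reduced modulo the base, compared to the knob. */
	uint64_t rnd = ((uint64_t)rand() << 31) ^ (uint64_t)rand();

	return (rnd % PROBABILITY_BASE) < probability;
}

int main(void)
{
	unsigned int hits = 0, trials = 1000000;

	for (unsigned int i = 0; i < trials; i++)
		hits += should_corrupt(10000000);	/* 10000000 == 1% */

	printf("corrupted %u of %u bios (~1%% expected)\n", hits, trials);
	return 0;
}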
Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -25,7 +25,7 @@ mode it calculates and verifies the integrity tag internally. In this
 mode, the dm-integrity target can be used to detect silent data
 corruption on the disk or in the I/O path.

-There's an alternate mode of operation where dm-integrity uses bitmap
+There's an alternate mode of operation where dm-integrity uses a bitmap
 instead of a journal. If a bit in the bitmap is 1, the corresponding
 region's data and integrity tags are not synchronized - if the machine
 crashes, the unsynchronized regions will be recalculated. The bitmap mode
@@ -38,6 +38,15 @@ the device. But it will only format the device if the superblock contains
 zeroes. If the superblock is neither valid nor zeroed, the dm-integrity
 target can't be loaded.

+Accesses to the on-disk metadata area containing checksums (aka tags) are
+buffered using dm-bufio. When an access to any given metadata area
+occurs, each unique metadata area gets its own buffer(s). The buffer size
+is capped at the size of the metadata area, but may be smaller, thereby
+requiring multiple buffers to represent the full metadata area. A smaller
+buffer size will produce a smaller resulting read/write operation to the
+metadata area for small reads/writes. The metadata is still read even in
+a full write to the data covered by a single buffer.
+
 To use the target for the first time:

 1. overwrite the superblock with zeroes
@@ -93,7 +102,7 @@ journal_sectors:number
 	device. If the device is already formatted, the value from the
 	superblock is used.

-interleave_sectors:number
+interleave_sectors:number (default 32768)
 	The number of interleaved sectors. This values is rounded down to
 	a power of two. If the device is already formatted, the value from
 	the superblock is used.
@@ -102,20 +111,16 @@ meta_device:device
 	Don't interleave the data and metadata on the device. Use a
 	separate device for metadata.

-buffer_sectors:number
-	The number of sectors in one buffer. The value is rounded down to
-	a power of two.
+buffer_sectors:number (default 128)
+	The number of sectors in one metadata buffer. The value is rounded
+	down to a power of two.

-	The tag area is accessed using buffers, the buffer size is
-	configurable. The large buffer size means that the I/O size will
-	be larger, but there could be less I/Os issued.
-
-journal_watermark:number
+journal_watermark:number (default 50)
 	The journal watermark in percents. When the size of the journal
 	exceeds this watermark, the thread that flushes the journal will
 	be started.

-commit_time:number
+commit_time:number (default 10000)
 	Commit time in milliseconds. When this time passes, the journal is
 	written. The journal is also written immediately if the FLUSH
 	request is received.
@@ -163,11 +168,10 @@ journal_mac:algorithm(:key) (the key is optional)
 	the journal. Thus, modified sector number would be detected at
 	this stage.

-block_size:number
-	The size of a data block in bytes. The larger the block size the
+block_size:number (default 512)
+	The size of a data block in bytes. The larger the block size the
 	less overhead there is for per-block integrity metadata.
-	Supported values are 512, 1024, 2048 and 4096 bytes. If not
-	specified the default block size is 512 bytes.
+	Supported values are 512, 1024, 2048 and 4096 bytes.

 sectors_per_bit:number
 	In the bitmap mode, this parameter specifies the number of
@@ -209,6 +213,12 @@ table and swap the tables with suspend and resume). The other arguments
 should not be changed when reloading the target because the layout of disk
 data depend on them and the reloaded target would be non-functional.

+For example, on a device using the default interleave_sectors of 32768, a
+block_size of 512, and an internal_hash of crc32c with a tag size of 4
+bytes, it will take 128 KiB of tags to track a full data area, requiring
+256 sectors of metadata per data area. With the default buffer_sectors of
+128, that means there will be 2 buffers per metadata area, or 2 buffers
+per 16 MiB of data.

 Status line:

@@ -286,7 +296,8 @@ The layout of the formatted block device:
 Each run contains:

 	* tag area - it contains integrity tags. There is one tag for each
-	  sector in the data area
+	  sector in the data area. The size of this area is always 4KiB or
+	  greater.
 	* data area - it contains data sectors. The number of data sectors
 	  in one run must be a power of two. log2 of this value is stored
 	  in the superblock.
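(Editor's note: the figures in the documentation example above follow directly from the parameters. The stand-alone calculation below reproduces them, assuming the quoted defaults: interleave_sectors 32768, block_size 512, a 4-byte crc32c tag and buffer_sectors 128.)

#include <stdio.h>

int main(void)
{
	unsigned long interleave_sectors = 32768;	/* data-area size, in 512-byte sectors */
	unsigned long block_size = 512;			/* bytes per data block */
	unsigned long tag_size = 4;			/* crc32c internal_hash */
	unsigned long buffer_sectors = 128;		/* metadata buffer size, in sectors */

	unsigned long blocks_per_area = interleave_sectors * 512 / block_size;	/* 32768 */
	unsigned long tag_bytes = blocks_per_area * tag_size;			/* 131072 = 128 KiB */
	unsigned long tag_sectors = tag_bytes / 512;				/* 256 */
	unsigned long buffers_per_area = tag_sectors / buffer_sectors;		/* 2 */
	unsigned long data_bytes = interleave_sectors * 512;			/* 16 MiB */

	printf("%lu KiB of tags, %lu metadata sectors, %lu buffers per %lu MiB data area\n",
	       tag_bytes / 1024, tag_sectors, buffers_per_area, data_bytes >> 20);
	return 0;
}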
drivers/md/dm-bufio.c
@@ -1157,23 +1157,6 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,

 	*data_mode = DATA_MODE_VMALLOC;

-	/*
-	 * __vmalloc allocates the data pages and auxiliary structures with
-	 * gfp_flags that were specified, but pagetables are always allocated
-	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
-	 *
-	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
-	 * all allocations done by this process (including pagetables) are done
-	 * as if GFP_NOIO was specified.
-	 */
-	if (gfp_mask & __GFP_NORETRY) {
-		unsigned int noio_flag = memalloc_noio_save();
-		void *ptr = __vmalloc(c->block_size, gfp_mask);
-
-		memalloc_noio_restore(noio_flag);
-		return ptr;
-	}
-
 	return __vmalloc(c->block_size, gfp_mask);
 }

@@ -2592,6 +2575,13 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

+void dm_bufio_client_reset(struct dm_bufio_client *c)
+{
+	drop_buffers(c);
+	flush_work(&c->shrink_work);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
+
 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
 {
 	c->start = start;
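(Editor's note: the block removed above relied on the scoped PF_MEMALLOC_NOIO mechanism, which makes every allocation issued between save and restore, page tables included, behave as if GFP_NOIO had been passed. For context, the general shape of that pattern is sketched below; it is a kernel-style illustration, not code from this series.)

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

/* Scoped-NOIO sketch: all allocations inside the window act as GFP_NOIO. */
static void *vmalloc_noio_scoped(unsigned long size, gfp_t gfp_mask)
{
	unsigned int noio_flag = memalloc_noio_save();
	void *ptr = __vmalloc(size, gfp_mask);

	memalloc_noio_restore(noio_flag);
	return ptr;
}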
drivers/md/dm-core.h
@@ -306,7 +306,8 @@ struct dm_io {
  */
 enum {
 	DM_IO_ACCOUNTED,
-	DM_IO_WAS_SPLIT
+	DM_IO_WAS_SPLIT,
+	DM_IO_BLK_STAT
 };

 static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
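(Editor's note: the new DM_IO_BLK_STAT bit is set and consumed in the dm.c hunks further down this page. Condensed, and using a hypothetical helper name for the allocation-time half, the flow is roughly as follows; see the dm.c diff for the verbatim source.)

/* Latched once per dm_io when the device queue has block-core io stats enabled. */
static void dm_io_init_blk_stat(struct dm_io *io, struct mapped_device *md)
{
	io->flags = 0;
	if (blk_queue_io_stat(md->queue))
		dm_io_set_flag(io, DM_IO_BLK_STAT);
}

/* Accounting path: block-core accounting is skipped entirely when the bit is clear. */
static void dm_io_acct_sketch(struct dm_io *io, bool end)
{
	struct bio *bio = io->orig_bio;

	if (!dm_io_flagged(io, DM_IO_BLK_STAT))
		return;

	if (!end)
		bdev_start_io_acct(bio->bi_bdev, bio_op(bio), io->start_time);
	else
		bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
				 dm_io_sectors(io, bio), io->start_time);
}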
@ -1661,6 +1661,9 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
|
||||
* In order to not degrade performance with excessive locking, we try
|
||||
* non-blocking allocations without a mutex first but on failure we fallback
|
||||
* to blocking allocations with a mutex.
|
||||
*
|
||||
* In order to reduce allocation overhead, we try to allocate compound pages in
|
||||
* the first pass. If they are not available, we fall back to the mempool.
|
||||
*/
|
||||
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
|
||||
{
|
||||
@ -1668,8 +1671,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
|
||||
struct bio *clone;
|
||||
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
|
||||
unsigned int i, len, remaining_size;
|
||||
struct page *page;
|
||||
unsigned int remaining_size;
|
||||
unsigned int order = MAX_ORDER - 1;
|
||||
|
||||
retry:
|
||||
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
|
||||
@ -1682,19 +1685,34 @@ retry:
|
||||
|
||||
remaining_size = size;
|
||||
|
||||
for (i = 0; i < nr_iovecs; i++) {
|
||||
page = mempool_alloc(&cc->page_pool, gfp_mask);
|
||||
if (!page) {
|
||||
while (remaining_size) {
|
||||
struct page *pages;
|
||||
unsigned size_to_add;
|
||||
unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
|
||||
order = min(order, remaining_order);
|
||||
|
||||
while (order > 0) {
|
||||
pages = alloc_pages(gfp_mask
|
||||
| __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
|
||||
order);
|
||||
if (likely(pages != NULL))
|
||||
goto have_pages;
|
||||
order--;
|
||||
}
|
||||
|
||||
pages = mempool_alloc(&cc->page_pool, gfp_mask);
|
||||
if (!pages) {
|
||||
crypt_free_buffer_pages(cc, clone);
|
||||
bio_put(clone);
|
||||
gfp_mask |= __GFP_DIRECT_RECLAIM;
|
||||
order = 0;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
|
||||
|
||||
__bio_add_page(clone, page, len, 0);
|
||||
remaining_size -= len;
|
||||
have_pages:
|
||||
size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
|
||||
__bio_add_page(clone, pages, size_to_add, 0);
|
||||
remaining_size -= size_to_add;
|
||||
}
|
||||
|
||||
/* Allocate space for integrity tags */
|
||||
@ -1712,12 +1730,15 @@ retry:
|
||||
|
||||
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
|
||||
{
|
||||
struct bio_vec *bv;
|
||||
struct bvec_iter_all iter_all;
|
||||
struct folio_iter fi;
|
||||
|
||||
bio_for_each_segment_all(bv, clone, iter_all) {
|
||||
BUG_ON(!bv->bv_page);
|
||||
mempool_free(bv->bv_page, &cc->page_pool);
|
||||
if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
|
||||
bio_for_each_folio_all(fi, clone) {
|
||||
if (folio_test_large(fi.folio))
|
||||
folio_put(fi.folio);
|
||||
else
|
||||
mempool_free(&fi.folio->page, &cc->page_pool);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2887,7 +2908,7 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
|
||||
ret = crypt_ctr_auth_cipher(cc, cipher_api);
|
||||
if (ret < 0) {
|
||||
ti->error = "Invalid AEAD cipher spec";
|
||||
return -ENOMEM;
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -16,6 +16,8 @@
|
||||
|
||||
#define DM_MSG_PREFIX "flakey"
|
||||
|
||||
#define PROBABILITY_BASE 1000000000
|
||||
|
||||
#define all_corrupt_bio_flags_match(bio, fc) \
|
||||
(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
|
||||
|
||||
@ -34,6 +36,8 @@ struct flakey_c {
|
||||
unsigned int corrupt_bio_rw;
|
||||
unsigned int corrupt_bio_value;
|
||||
blk_opf_t corrupt_bio_flags;
|
||||
unsigned int random_read_corrupt;
|
||||
unsigned int random_write_corrupt;
|
||||
};
|
||||
|
||||
enum feature_flag_bits {
|
||||
@ -54,10 +58,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
|
||||
const char *arg_name;
|
||||
|
||||
static const struct dm_arg _args[] = {
|
||||
{0, 7, "Invalid number of feature args"},
|
||||
{0, 11, "Invalid number of feature args"},
|
||||
{1, UINT_MAX, "Invalid corrupt bio byte"},
|
||||
{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
|
||||
{0, UINT_MAX, "Invalid corrupt bio flags mask"},
|
||||
{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
|
||||
};
|
||||
|
||||
/* No feature arguments supplied. */
|
||||
@ -170,6 +175,32 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!strcasecmp(arg_name, "random_read_corrupt")) {
|
||||
if (!argc) {
|
||||
ti->error = "Feature random_read_corrupt requires a parameter";
|
||||
return -EINVAL;
|
||||
}
|
||||
r = dm_read_arg(_args + 4, as, &fc->random_read_corrupt, &ti->error);
|
||||
if (r)
|
||||
return r;
|
||||
argc--;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!strcasecmp(arg_name, "random_write_corrupt")) {
|
||||
if (!argc) {
|
||||
ti->error = "Feature random_write_corrupt requires a parameter";
|
||||
return -EINVAL;
|
||||
}
|
||||
r = dm_read_arg(_args + 4, as, &fc->random_write_corrupt, &ti->error);
|
||||
if (r)
|
||||
return r;
|
||||
argc--;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
ti->error = "Unrecognised flakey feature requested";
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -184,7 +215,8 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
|
||||
}
|
||||
|
||||
if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
|
||||
!test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags)) {
|
||||
!test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
|
||||
!fc->random_read_corrupt && !fc->random_write_corrupt) {
|
||||
set_bit(ERROR_WRITES, &fc->flags);
|
||||
set_bit(ERROR_READS, &fc->flags);
|
||||
}
|
||||
@ -306,40 +338,143 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
|
||||
bio->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
|
||||
}
|
||||
|
||||
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
|
||||
static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
|
||||
unsigned char corrupt_bio_value)
|
||||
{
|
||||
unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
|
||||
|
||||
struct bvec_iter iter;
|
||||
struct bio_vec bvec;
|
||||
|
||||
if (!bio_has_data(bio))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Overwrite the Nth byte of the bio's data, on whichever page
|
||||
* it falls.
|
||||
*/
|
||||
bio_for_each_segment(bvec, bio, iter) {
|
||||
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
|
||||
char *segment;
|
||||
struct page *page = bio_iter_page(bio, iter);
|
||||
if (unlikely(page == ZERO_PAGE(0)))
|
||||
break;
|
||||
segment = bvec_kmap_local(&bvec);
|
||||
segment[corrupt_bio_byte] = fc->corrupt_bio_value;
|
||||
unsigned char *segment = bvec_kmap_local(&bvec);
|
||||
segment[corrupt_bio_byte] = corrupt_bio_value;
|
||||
kunmap_local(segment);
|
||||
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
|
||||
"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
|
||||
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
|
||||
bio, corrupt_bio_value, corrupt_bio_byte,
|
||||
(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
|
||||
(unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);
|
||||
(unsigned long long)bio->bi_iter.bi_sector,
|
||||
bio->bi_iter.bi_size);
|
||||
break;
|
||||
}
|
||||
corrupt_bio_byte -= bio_iter_len(bio, iter);
|
||||
}
|
||||
}
|
||||
|
||||
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
|
||||
{
|
||||
unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
|
||||
|
||||
if (!bio_has_data(bio))
|
||||
return;
|
||||
|
||||
corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value);
|
||||
}
|
||||
|
||||
static void corrupt_bio_random(struct bio *bio)
|
||||
{
|
||||
unsigned int corrupt_byte;
|
||||
unsigned char corrupt_value;
|
||||
|
||||
if (!bio_has_data(bio))
|
||||
return;
|
||||
|
||||
corrupt_byte = get_random_u32() % bio->bi_iter.bi_size;
|
||||
corrupt_value = get_random_u8();
|
||||
|
||||
corrupt_bio_common(bio, corrupt_byte, corrupt_value);
|
||||
}
|
||||
|
||||
static void clone_free(struct bio *clone)
|
||||
{
|
||||
struct folio_iter fi;
|
||||
|
||||
if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
|
||||
bio_for_each_folio_all(fi, clone)
|
||||
folio_put(fi.folio);
|
||||
}
|
||||
|
||||
bio_uninit(clone);
|
||||
kfree(clone);
|
||||
}
|
||||
|
||||
static void clone_endio(struct bio *clone)
|
||||
{
|
||||
struct bio *bio = clone->bi_private;
|
||||
bio->bi_status = clone->bi_status;
|
||||
clone_free(clone);
|
||||
bio_endio(bio);
|
||||
}
|
||||
|
||||
static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct bio *bio)
|
||||
{
|
||||
struct bio *clone;
|
||||
unsigned size, remaining_size, nr_iovecs, order;
|
||||
struct bvec_iter iter = bio->bi_iter;
|
||||
|
||||
if (unlikely(bio->bi_iter.bi_size > UIO_MAXIOV << PAGE_SHIFT))
|
||||
dm_accept_partial_bio(bio, UIO_MAXIOV << PAGE_SHIFT >> SECTOR_SHIFT);
|
||||
|
||||
size = bio->bi_iter.bi_size;
|
||||
nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
|
||||
clone = bio_kmalloc(nr_iovecs, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
|
||||
if (!clone)
|
||||
return NULL;
|
||||
|
||||
bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);
|
||||
|
||||
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
|
||||
clone->bi_private = bio;
|
||||
clone->bi_end_io = clone_endio;
|
||||
|
||||
remaining_size = size;
|
||||
|
||||
order = MAX_ORDER - 1;
|
||||
while (remaining_size) {
|
||||
struct page *pages;
|
||||
unsigned size_to_add, to_copy;
|
||||
unsigned char *virt;
|
||||
unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
|
||||
order = min(order, remaining_order);
|
||||
|
||||
retry_alloc_pages:
|
||||
pages = alloc_pages(GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP, order);
|
||||
if (unlikely(!pages)) {
|
||||
if (order) {
|
||||
order--;
|
||||
goto retry_alloc_pages;
|
||||
}
|
||||
clone_free(clone);
|
||||
return NULL;
|
||||
}
|
||||
size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
|
||||
|
||||
virt = page_to_virt(pages);
|
||||
to_copy = size_to_add;
|
||||
do {
|
||||
struct bio_vec bvec = bvec_iter_bvec(bio->bi_io_vec, iter);
|
||||
unsigned this_step = min(bvec.bv_len, to_copy);
|
||||
void *map = bvec_kmap_local(&bvec);
|
||||
memcpy(virt, map, this_step);
|
||||
kunmap_local(map);
|
||||
|
||||
bvec_iter_advance(bio->bi_io_vec, &iter, this_step);
|
||||
to_copy -= this_step;
|
||||
virt += this_step;
|
||||
} while (to_copy);
|
||||
|
||||
__bio_add_page(clone, pages, size_to_add, 0);
|
||||
remaining_size -= size_to_add;
|
||||
}
|
||||
|
||||
return clone;
|
||||
}
|
||||
|
||||
static int flakey_map(struct dm_target *ti, struct bio *bio)
|
||||
{
|
||||
struct flakey_c *fc = ti->private;
|
||||
@ -354,6 +489,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
|
||||
/* Are we alive ? */
|
||||
elapsed = (jiffies - fc->start_time) / HZ;
|
||||
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
|
||||
bool corrupt_fixed, corrupt_random;
|
||||
/*
|
||||
* Flag this bio as submitted while down.
|
||||
*/
|
||||
@ -383,12 +519,28 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
|
||||
/*
|
||||
* Corrupt matching writes.
|
||||
*/
|
||||
if (fc->corrupt_bio_byte) {
|
||||
if (fc->corrupt_bio_rw == WRITE) {
|
||||
if (all_corrupt_bio_flags_match(bio, fc))
|
||||
corrupt_bio_data(bio, fc);
|
||||
corrupt_fixed = false;
|
||||
corrupt_random = false;
|
||||
if (fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) {
|
||||
if (all_corrupt_bio_flags_match(bio, fc))
|
||||
corrupt_fixed = true;
|
||||
}
|
||||
if (fc->random_write_corrupt) {
|
||||
u64 rnd = get_random_u64();
|
||||
u32 rem = do_div(rnd, PROBABILITY_BASE);
|
||||
if (rem < fc->random_write_corrupt)
|
||||
corrupt_random = true;
|
||||
}
|
||||
if (corrupt_fixed || corrupt_random) {
|
||||
struct bio *clone = clone_bio(ti, fc, bio);
|
||||
if (clone) {
|
||||
if (corrupt_fixed)
|
||||
corrupt_bio_data(clone, fc);
|
||||
if (corrupt_random)
|
||||
corrupt_bio_random(clone);
|
||||
submit_bio(clone);
|
||||
return DM_MAPIO_SUBMITTED;
|
||||
}
|
||||
goto map_bio;
|
||||
}
|
||||
}
|
||||
|
||||
@ -417,6 +569,12 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
|
||||
corrupt_bio_data(bio, fc);
|
||||
}
|
||||
}
|
||||
if (fc->random_read_corrupt) {
|
||||
u64 rnd = get_random_u64();
|
||||
u32 rem = do_div(rnd, PROBABILITY_BASE);
|
||||
if (rem < fc->random_read_corrupt)
|
||||
corrupt_bio_random(bio);
|
||||
}
|
||||
if (test_bit(ERROR_READS, &fc->flags)) {
|
||||
/*
|
||||
* Error read during the down_interval if drop_writes
|
||||
@ -449,7 +607,10 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
|
||||
error_reads = test_bit(ERROR_READS, &fc->flags);
|
||||
drop_writes = test_bit(DROP_WRITES, &fc->flags);
|
||||
error_writes = test_bit(ERROR_WRITES, &fc->flags);
|
||||
DMEMIT(" %u", error_reads + drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);
|
||||
DMEMIT(" %u", error_reads + drop_writes + error_writes +
|
||||
(fc->corrupt_bio_byte > 0) * 5 +
|
||||
(fc->random_read_corrupt > 0) * 2 +
|
||||
(fc->random_write_corrupt > 0) * 2);
|
||||
|
||||
if (error_reads)
|
||||
DMEMIT(" error_reads");
|
||||
@ -464,6 +625,11 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
|
||||
(fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
|
||||
fc->corrupt_bio_value, fc->corrupt_bio_flags);
|
||||
|
||||
if (fc->random_read_corrupt > 0)
|
||||
DMEMIT(" random_read_corrupt %u", fc->random_read_corrupt);
|
||||
if (fc->random_write_corrupt > 0)
|
||||
DMEMIT(" random_write_corrupt %u", fc->random_write_corrupt);
|
||||
|
||||
break;
|
||||
|
||||
case STATUSTYPE_IMA:
|
||||
|
@ -34,11 +34,11 @@
|
||||
#define DEFAULT_BUFFER_SECTORS 128
|
||||
#define DEFAULT_JOURNAL_WATERMARK 50
|
||||
#define DEFAULT_SYNC_MSEC 10000
|
||||
#define DEFAULT_MAX_JOURNAL_SECTORS 131072
|
||||
#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
|
||||
#define MIN_LOG2_INTERLEAVE_SECTORS 3
|
||||
#define MAX_LOG2_INTERLEAVE_SECTORS 31
|
||||
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
|
||||
#define RECALC_SECTORS 32768
|
||||
#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
|
||||
#define RECALC_WRITE_SUPER 16
|
||||
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
|
||||
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
|
||||
@ -251,8 +251,6 @@ struct dm_integrity_c {
|
||||
|
||||
struct workqueue_struct *recalc_wq;
|
||||
struct work_struct recalc_work;
|
||||
u8 *recalc_buffer;
|
||||
u8 *recalc_tags;
|
||||
|
||||
struct bio_list flush_bio_list;
|
||||
|
||||
@ -342,24 +340,9 @@ static struct kmem_cache *journal_io_cache;
|
||||
#define JOURNAL_IO_MEMPOOL 32
|
||||
|
||||
#ifdef DEBUG_PRINT
|
||||
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
|
||||
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
va_start(args, msg);
|
||||
vprintk(msg, args);
|
||||
va_end(args);
|
||||
if (len)
|
||||
pr_cont(":");
|
||||
while (len) {
|
||||
pr_cont(" %02x", *bytes);
|
||||
bytes++;
|
||||
len--;
|
||||
}
|
||||
pr_cont("\n");
|
||||
}
|
||||
#define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
|
||||
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
|
||||
#define DEBUG_bytes(bytes, len, msg, ...) printk(KERN_DEBUG msg "%s%*ph\n", ##__VA_ARGS__, \
|
||||
len ? ": " : "", len, bytes)
|
||||
#else
|
||||
#define DEBUG_print(x, ...) do { } while (0)
|
||||
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
|
||||
@ -2661,6 +2644,9 @@ static void recalc_write_super(struct dm_integrity_c *ic)
|
||||
static void integrity_recalc(struct work_struct *w)
|
||||
{
|
||||
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
|
||||
size_t recalc_tags_size;
|
||||
u8 *recalc_buffer = NULL;
|
||||
u8 *recalc_tags = NULL;
|
||||
struct dm_integrity_range range;
|
||||
struct dm_io_request io_req;
|
||||
struct dm_io_region io_loc;
|
||||
@ -2672,6 +2658,26 @@ static void integrity_recalc(struct work_struct *w)
|
||||
unsigned int i;
|
||||
int r;
|
||||
unsigned int super_counter = 0;
|
||||
unsigned recalc_sectors = RECALC_SECTORS;
|
||||
|
||||
retry:
|
||||
recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
|
||||
if (!recalc_buffer) {
|
||||
oom:
|
||||
recalc_sectors >>= 1;
|
||||
if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
|
||||
goto retry;
|
||||
DMCRIT("out of memory for recalculate buffer - recalculation disabled");
|
||||
goto free_ret;
|
||||
}
|
||||
recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
|
||||
if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
|
||||
recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
|
||||
recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
|
||||
if (!recalc_tags) {
|
||||
vfree(recalc_buffer);
|
||||
goto oom;
|
||||
}
|
||||
|
||||
DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
|
||||
|
||||
@ -2693,7 +2699,7 @@ next_chunk:
|
||||
}
|
||||
|
||||
get_area_and_offset(ic, range.logical_sector, &area, &offset);
|
||||
range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
|
||||
range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
|
||||
if (!ic->meta_dev)
|
||||
range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
|
||||
|
||||
@ -2735,7 +2741,7 @@ next_chunk:
|
||||
|
||||
io_req.bi_opf = REQ_OP_READ;
|
||||
io_req.mem.type = DM_IO_VMA;
|
||||
io_req.mem.ptr.addr = ic->recalc_buffer;
|
||||
io_req.mem.ptr.addr = recalc_buffer;
|
||||
io_req.notify.fn = NULL;
|
||||
io_req.client = ic->io;
|
||||
io_loc.bdev = ic->dev->bdev;
|
||||
@ -2748,15 +2754,15 @@ next_chunk:
|
||||
goto err;
|
||||
}
|
||||
|
||||
t = ic->recalc_tags;
|
||||
t = recalc_tags;
|
||||
for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
|
||||
integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
|
||||
integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
|
||||
t += ic->tag_size;
|
||||
}
|
||||
|
||||
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
|
||||
|
||||
r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
|
||||
r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE);
|
||||
if (unlikely(r)) {
|
||||
dm_integrity_io_error(ic, "writing tags", r);
|
||||
goto err;
|
||||
@ -2784,12 +2790,16 @@ advance_and_next:
|
||||
|
||||
err:
|
||||
remove_range(ic, &range);
|
||||
return;
|
||||
goto free_ret;
|
||||
|
||||
unlock_ret:
|
||||
spin_unlock_irq(&ic->endio_wait.lock);
|
||||
|
||||
recalc_write_super(ic);
|
||||
|
||||
free_ret:
|
||||
vfree(recalc_buffer);
|
||||
kvfree(recalc_tags);
|
||||
}
|
||||
|
||||
static void bitmap_block_work(struct work_struct *w)
|
||||
@ -4454,8 +4464,6 @@ try_smaller_buffer:
|
||||
}
|
||||
|
||||
if (ic->internal_hash) {
|
||||
size_t recalc_tags_size;
|
||||
|
||||
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
|
||||
if (!ic->recalc_wq) {
|
||||
ti->error = "Cannot allocate workqueue";
|
||||
@ -4463,21 +4471,6 @@ try_smaller_buffer:
|
||||
goto bad;
|
||||
}
|
||||
INIT_WORK(&ic->recalc_work, integrity_recalc);
|
||||
ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
|
||||
if (!ic->recalc_buffer) {
|
||||
ti->error = "Cannot allocate buffer for recalculating";
|
||||
r = -ENOMEM;
|
||||
goto bad;
|
||||
}
|
||||
recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
|
||||
if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
|
||||
recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
|
||||
ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
|
||||
if (!ic->recalc_tags) {
|
||||
ti->error = "Cannot allocate tags for recalculating";
|
||||
r = -ENOMEM;
|
||||
goto bad;
|
||||
}
|
||||
} else {
|
||||
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
|
||||
ti->error = "Recalculate can only be specified with internal_hash";
|
||||
@ -4621,8 +4614,6 @@ static void dm_integrity_dtr(struct dm_target *ti)
|
||||
destroy_workqueue(ic->writer_wq);
|
||||
if (ic->recalc_wq)
|
||||
destroy_workqueue(ic->recalc_wq);
|
||||
vfree(ic->recalc_buffer);
|
||||
kvfree(ic->recalc_tags);
|
||||
kvfree(ic->bbs);
|
||||
if (ic->bufio)
|
||||
dm_bufio_client_destroy(ic->bufio);
|
||||
|
@ -767,7 +767,14 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t
|
||||
static int check_name(const char *name)
|
||||
{
|
||||
if (strchr(name, '/')) {
|
||||
DMERR("invalid device name");
|
||||
DMERR("device name cannot contain '/'");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (strcmp(name, DM_CONTROL_NODE) == 0 ||
|
||||
strcmp(name, ".") == 0 ||
|
||||
strcmp(name, "..") == 0) {
|
||||
DMERR("device name cannot be \"%s\", \".\", or \"..\"", DM_CONTROL_NODE);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1388,16 +1395,38 @@ static inline blk_mode_t get_mode(struct dm_ioctl *param)
|
||||
return mode;
|
||||
}
|
||||
|
||||
static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
|
||||
static int next_target(struct dm_target_spec *last, uint32_t next, const char *end,
|
||||
struct dm_target_spec **spec, char **target_params)
|
||||
{
|
||||
static_assert(__alignof__(struct dm_target_spec) <= 8,
|
||||
"struct dm_target_spec must not require more than 8-byte alignment");
|
||||
|
||||
/*
|
||||
* Number of bytes remaining, starting with last. This is always
|
||||
* sizeof(struct dm_target_spec) or more, as otherwise *last was
|
||||
* out of bounds already.
|
||||
*/
|
||||
size_t remaining = end - (char *)last;
|
||||
|
||||
/*
|
||||
* There must be room for both the next target spec and the
|
||||
* NUL-terminator of the target itself.
|
||||
*/
|
||||
if (remaining - sizeof(struct dm_target_spec) <= next) {
|
||||
DMERR("Target spec extends beyond end of parameters");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (next % __alignof__(struct dm_target_spec)) {
|
||||
DMERR("Next dm_target_spec (offset %u) is not %zu-byte aligned",
|
||||
next, __alignof__(struct dm_target_spec));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*spec = (struct dm_target_spec *) ((unsigned char *) last + next);
|
||||
*target_params = (char *) (*spec + 1);
|
||||
|
||||
if (*spec < (last + 1))
|
||||
return -EINVAL;
|
||||
|
||||
return invalid_str(*target_params, end);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int populate_table(struct dm_table *table,
|
||||
@ -1407,8 +1436,9 @@ static int populate_table(struct dm_table *table,
|
||||
unsigned int i = 0;
|
||||
struct dm_target_spec *spec = (struct dm_target_spec *) param;
|
||||
uint32_t next = param->data_start;
|
||||
void *end = (void *) param + param_size;
|
||||
const char *const end = (const char *) param + param_size;
|
||||
char *target_params;
|
||||
size_t min_size = sizeof(struct dm_ioctl);
|
||||
|
||||
if (!param->target_count) {
|
||||
DMERR("%s: no targets specified", __func__);
|
||||
@ -1416,6 +1446,13 @@ static int populate_table(struct dm_table *table,
|
||||
}
|
||||
|
||||
for (i = 0; i < param->target_count; i++) {
|
||||
const char *nul_terminator;
|
||||
|
||||
if (next < min_size) {
|
||||
DMERR("%s: next target spec (offset %u) overlaps %s",
|
||||
__func__, next, i ? "previous target" : "'struct dm_ioctl'");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
r = next_target(spec, next, end, &spec, &target_params);
|
||||
if (r) {
|
||||
@ -1423,6 +1460,15 @@ static int populate_table(struct dm_table *table,
|
||||
return r;
|
||||
}
|
||||
|
||||
nul_terminator = memchr(target_params, 0, (size_t)(end - target_params));
|
||||
if (nul_terminator == NULL) {
|
||||
DMERR("%s: target parameters not NUL-terminated", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Add 1 for NUL terminator */
|
||||
min_size = (size_t)(nul_terminator - (const char *)spec) + 1;
|
||||
|
||||
r = dm_table_add_target(table, spec->target_type,
|
||||
(sector_t) spec->sector_start,
|
||||
(sector_t) spec->length,
|
||||
@ -1830,30 +1876,36 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
|
||||
* As well as checking the version compatibility this always
|
||||
* copies the kernel interface version out.
|
||||
*/
|
||||
static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
|
||||
static int check_version(unsigned int cmd, struct dm_ioctl __user *user,
|
||||
struct dm_ioctl *kernel_params)
|
||||
{
|
||||
uint32_t version[3];
|
||||
int r = 0;
|
||||
|
||||
if (copy_from_user(version, user->version, sizeof(version)))
|
||||
/* Make certain version is first member of dm_ioctl struct */
|
||||
BUILD_BUG_ON(offsetof(struct dm_ioctl, version) != 0);
|
||||
|
||||
if (copy_from_user(kernel_params->version, user->version, sizeof(kernel_params->version)))
|
||||
return -EFAULT;
|
||||
|
||||
if ((version[0] != DM_VERSION_MAJOR) ||
|
||||
(version[1] > DM_VERSION_MINOR)) {
|
||||
if ((kernel_params->version[0] != DM_VERSION_MAJOR) ||
|
||||
(kernel_params->version[1] > DM_VERSION_MINOR)) {
|
||||
DMERR("ioctl interface mismatch: kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
|
||||
DM_VERSION_MAJOR, DM_VERSION_MINOR,
|
||||
DM_VERSION_PATCHLEVEL,
|
||||
version[0], version[1], version[2], cmd);
|
||||
kernel_params->version[0],
|
||||
kernel_params->version[1],
|
||||
kernel_params->version[2],
|
||||
cmd);
|
||||
r = -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill in the kernel version.
|
||||
*/
|
||||
version[0] = DM_VERSION_MAJOR;
|
||||
version[1] = DM_VERSION_MINOR;
|
||||
version[2] = DM_VERSION_PATCHLEVEL;
|
||||
if (copy_to_user(user->version, version, sizeof(version)))
|
||||
kernel_params->version[0] = DM_VERSION_MAJOR;
|
||||
kernel_params->version[1] = DM_VERSION_MINOR;
|
||||
kernel_params->version[2] = DM_VERSION_PATCHLEVEL;
|
||||
if (copy_to_user(user->version, kernel_params->version, sizeof(kernel_params->version)))
|
||||
return -EFAULT;
|
||||
|
||||
return r;
|
||||
@ -1877,9 +1929,11 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
|
||||
struct dm_ioctl *dmi;
|
||||
int secure_data;
|
||||
const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
|
||||
unsigned int noio_flag;
|
||||
|
||||
if (copy_from_user(param_kernel, user, minimum_data_size))
|
||||
/* check_version() already copied version from userspace, avoid TOCTOU */
|
||||
if (copy_from_user((char *)param_kernel + sizeof(param_kernel->version),
|
||||
(char __user *)user + sizeof(param_kernel->version),
|
||||
minimum_data_size - sizeof(param_kernel->version)))
|
||||
return -EFAULT;
|
||||
|
||||
if (param_kernel->data_size < minimum_data_size) {
|
||||
@ -1904,9 +1958,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
|
||||
* Use kmalloc() rather than vmalloc() when we can.
|
||||
*/
|
||||
dmi = NULL;
|
||||
noio_flag = memalloc_noio_save();
|
||||
dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
|
||||
memalloc_noio_restore(noio_flag);
|
||||
dmi = kvmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH);
|
||||
|
||||
if (!dmi) {
|
||||
if (secure_data && clear_user(user, param_kernel->data_size))
|
||||
@ -1991,7 +2043,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
|
||||
* Check the interface version passed in. This also
|
||||
* writes out the kernel's interface version.
|
||||
*/
|
||||
r = check_version(cmd, user);
|
||||
r = check_version(cmd, user, ¶m_kernel);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -603,6 +603,8 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
|
||||
r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
|
||||
&pmd->tm, &pmd->metadata_sm);
|
||||
if (r < 0) {
|
||||
pmd->tm = NULL;
|
||||
pmd->metadata_sm = NULL;
|
||||
DMERR("tm_create_with_sm failed");
|
||||
return r;
|
||||
}
|
||||
@ -611,6 +613,7 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
|
||||
if (IS_ERR(pmd->data_sm)) {
|
||||
DMERR("sm_disk_create failed");
|
||||
r = PTR_ERR(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
goto bad_cleanup_tm;
|
||||
}
|
||||
|
||||
@ -641,11 +644,15 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
|
||||
|
||||
bad_cleanup_nb_tm:
|
||||
dm_tm_destroy(pmd->nb_tm);
|
||||
pmd->nb_tm = NULL;
|
||||
bad_cleanup_data_sm:
|
||||
dm_sm_destroy(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
bad_cleanup_tm:
|
||||
dm_tm_destroy(pmd->tm);
|
||||
pmd->tm = NULL;
|
||||
dm_sm_destroy(pmd->metadata_sm);
|
||||
pmd->metadata_sm = NULL;
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -711,6 +718,8 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
|
||||
sizeof(disk_super->metadata_space_map_root),
|
||||
&pmd->tm, &pmd->metadata_sm);
|
||||
if (r < 0) {
|
||||
pmd->tm = NULL;
|
||||
pmd->metadata_sm = NULL;
|
||||
DMERR("tm_open_with_sm failed");
|
||||
goto bad_unlock_sblock;
|
||||
}
|
||||
@ -720,6 +729,7 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
|
||||
if (IS_ERR(pmd->data_sm)) {
|
||||
DMERR("sm_disk_open failed");
|
||||
r = PTR_ERR(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
goto bad_cleanup_tm;
|
||||
}
|
||||
|
||||
@ -746,9 +756,12 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
|
||||
|
||||
bad_cleanup_data_sm:
|
||||
dm_sm_destroy(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
bad_cleanup_tm:
|
||||
dm_tm_destroy(pmd->tm);
|
||||
pmd->tm = NULL;
|
||||
dm_sm_destroy(pmd->metadata_sm);
|
||||
pmd->metadata_sm = NULL;
|
||||
bad_unlock_sblock:
|
||||
dm_bm_unlock(sblock);
|
||||
|
||||
@ -795,9 +808,13 @@ static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
|
||||
bool destroy_bm)
|
||||
{
|
||||
dm_sm_destroy(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
dm_sm_destroy(pmd->metadata_sm);
|
||||
pmd->metadata_sm = NULL;
|
||||
dm_tm_destroy(pmd->nb_tm);
|
||||
pmd->nb_tm = NULL;
|
||||
dm_tm_destroy(pmd->tm);
|
||||
pmd->tm = NULL;
|
||||
if (destroy_bm)
|
||||
dm_block_manager_destroy(pmd->bm);
|
||||
}
|
||||
@ -1005,8 +1022,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
|
||||
__func__, r);
|
||||
}
|
||||
pmd_write_unlock(pmd);
|
||||
if (!pmd->fail_io)
|
||||
__destroy_persistent_data_objects(pmd, true);
|
||||
__destroy_persistent_data_objects(pmd, true);
|
||||
|
||||
kfree(pmd);
|
||||
return 0;
|
||||
@ -1881,53 +1897,29 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
|
||||
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
|
||||
{
|
||||
int r = -EINVAL;
|
||||
struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
|
||||
|
||||
/* fail_io is double-checked with pmd->root_lock held below */
|
||||
if (unlikely(pmd->fail_io))
|
||||
return r;
|
||||
|
||||
/*
|
||||
* Replacement block manager (new_bm) is created and old_bm destroyed outside of
|
||||
* pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
|
||||
* shrinker associated with the block manager's bufio client vs pmd root_lock).
|
||||
* - must take shrinker_rwsem without holding pmd->root_lock
|
||||
*/
|
||||
new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
|
||||
THIN_MAX_CONCURRENT_LOCKS);
|
||||
|
||||
pmd_write_lock(pmd);
|
||||
if (pmd->fail_io) {
|
||||
pmd_write_unlock(pmd);
|
||||
goto out;
|
||||
return r;
|
||||
}
|
||||
|
||||
__set_abort_with_changes_flags(pmd);
|
||||
__destroy_persistent_data_objects(pmd, false);
|
||||
old_bm = pmd->bm;
|
||||
if (IS_ERR(new_bm)) {
|
||||
DMERR("could not create block manager during abort");
|
||||
pmd->bm = NULL;
|
||||
r = PTR_ERR(new_bm);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
pmd->bm = new_bm;
|
||||
/* destroy data_sm/metadata_sm/nb_tm/tm */
|
||||
__destroy_persistent_data_objects(pmd, false);
|
||||
|
||||
/* reset bm */
|
||||
dm_block_manager_reset(pmd->bm);
|
||||
|
||||
/* rebuild data_sm/metadata_sm/nb_tm/tm */
|
||||
r = __open_or_format_metadata(pmd, false);
|
||||
if (r) {
|
||||
pmd->bm = NULL;
|
||||
goto out_unlock;
|
||||
}
|
||||
new_bm = NULL;
|
||||
out_unlock:
|
||||
if (r)
|
||||
pmd->fail_io = true;
|
||||
pmd_write_unlock(pmd);
|
||||
dm_block_manager_destroy(old_bm);
|
||||
out:
|
||||
if (new_bm && !IS_ERR(new_bm))
|
||||
dm_block_manager_destroy(new_bm);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -2527,16 +2527,11 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
|
||||
|
||||
/*----------------------------------------------------------------*/
|
||||
|
||||
static bool passdown_enabled(struct pool_c *pt)
|
||||
{
|
||||
return pt->adjusted_pf.discard_passdown;
|
||||
}
|
||||
|
||||
static void set_discard_callbacks(struct pool *pool)
|
||||
{
|
||||
struct pool_c *pt = pool->ti->private;
|
||||
|
||||
if (passdown_enabled(pt)) {
|
||||
if (pt->adjusted_pf.discard_passdown) {
|
||||
pool->process_discard_cell = process_discard_cell_passdown;
|
||||
pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
|
||||
pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
|
||||
@ -2845,7 +2840,7 @@ static bool is_factor(sector_t block_size, uint32_t n)
|
||||
* If discard_passdown was enabled verify that the data device
|
||||
* supports discards. Disable discard_passdown if not.
|
||||
*/
|
||||
static void disable_passdown_if_not_supported(struct pool_c *pt)
|
||||
static void disable_discard_passdown_if_not_supported(struct pool_c *pt)
|
||||
{
|
||||
struct pool *pool = pt->pool;
|
||||
struct block_device *data_bdev = pt->data_dev->bdev;
|
||||
@ -3446,7 +3441,6 @@ out_unlock:
|
||||
|
||||
static int pool_map(struct dm_target *ti, struct bio *bio)
|
||||
{
|
||||
int r;
|
||||
struct pool_c *pt = ti->private;
|
||||
struct pool *pool = pt->pool;
|
||||
|
||||
@ -3455,10 +3449,9 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
|
||||
*/
|
||||
spin_lock_irq(&pool->lock);
|
||||
bio_set_dev(bio, pt->data_dev->bdev);
|
||||
r = DM_MAPIO_REMAPPED;
|
||||
spin_unlock_irq(&pool->lock);
|
||||
|
||||
return r;
|
||||
return DM_MAPIO_REMAPPED;
|
||||
}
|
||||
|
||||
static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
|
||||
@ -4099,21 +4092,22 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
|
||||
* They get transferred to the live pool in bind_control_target()
|
||||
* called from pool_preresume().
|
||||
*/
|
||||
if (!pt->adjusted_pf.discard_enabled) {
|
||||
|
||||
if (pt->adjusted_pf.discard_enabled) {
|
||||
disable_discard_passdown_if_not_supported(pt);
|
||||
if (!pt->adjusted_pf.discard_passdown)
|
||||
limits->max_discard_sectors = 0;
|
||||
/*
|
||||
* The pool uses the same discard limits as the underlying data
|
||||
* device. DM core has already set this up.
|
||||
*/
|
||||
} else {
|
||||
/*
|
||||
* Must explicitly disallow stacking discard limits otherwise the
|
||||
* block layer will stack them if pool's data device has support.
|
||||
*/
|
||||
limits->discard_granularity = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
disable_passdown_if_not_supported(pt);
|
||||
|
||||
/*
|
||||
* The pool uses the same discard limits as the underlying data
|
||||
* device. DM core has already set this up.
|
||||
*/
|
||||
}
|
||||
|
||||
static struct target_type pool_target = {
|
||||
@ -4497,11 +4491,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
|
||||
struct thin_c *tc = ti->private;
|
||||
struct pool *pool = tc->pool;
|
||||
|
||||
if (!pool->pf.discard_enabled)
|
||||
return;
|
||||
|
||||
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
|
||||
limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
|
||||
if (pool->pf.discard_enabled) {
|
||||
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
|
||||
limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
|
||||
}
|
||||
}
|
||||
|
||||
static struct target_type thin_target = {
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/bitmap.h>
|
||||
|
||||
#include "dm-core.h"
|
||||
|
||||
@ -140,9 +141,9 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
|
||||
void dm_cleanup_zoned_dev(struct mapped_device *md)
|
||||
{
|
||||
if (md->disk) {
|
||||
kfree(md->disk->conv_zones_bitmap);
|
||||
bitmap_free(md->disk->conv_zones_bitmap);
|
||||
md->disk->conv_zones_bitmap = NULL;
|
||||
kfree(md->disk->seq_zones_wlock);
|
||||
bitmap_free(md->disk->seq_zones_wlock);
|
||||
md->disk->seq_zones_wlock = NULL;
|
||||
}
|
||||
|
||||
@ -182,9 +183,8 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
|
||||
switch (zone->type) {
|
||||
case BLK_ZONE_TYPE_CONVENTIONAL:
|
||||
if (!disk->conv_zones_bitmap) {
|
||||
disk->conv_zones_bitmap =
|
||||
kcalloc(BITS_TO_LONGS(disk->nr_zones),
|
||||
sizeof(unsigned long), GFP_NOIO);
|
||||
disk->conv_zones_bitmap = bitmap_zalloc(disk->nr_zones,
|
||||
GFP_NOIO);
|
||||
if (!disk->conv_zones_bitmap)
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -193,9 +193,8 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
|
||||
case BLK_ZONE_TYPE_SEQWRITE_REQ:
|
||||
case BLK_ZONE_TYPE_SEQWRITE_PREF:
|
||||
if (!disk->seq_zones_wlock) {
|
||||
disk->seq_zones_wlock =
|
||||
kcalloc(BITS_TO_LONGS(disk->nr_zones),
|
||||
sizeof(unsigned long), GFP_NOIO);
|
||||
disk->seq_zones_wlock = bitmap_zalloc(disk->nr_zones,
|
||||
GFP_NOIO);
|
||||
if (!disk->seq_zones_wlock)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -487,48 +487,50 @@ u64 dm_start_time_ns_from_clone(struct bio *bio)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
|
||||
|
||||
static bool bio_is_flush_with_data(struct bio *bio)
|
||||
static inline bool bio_is_flush_with_data(struct bio *bio)
|
||||
{
|
||||
return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
|
||||
}
|
||||
|
||||
static void dm_io_acct(struct dm_io *io, bool end)
|
||||
static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
|
||||
{
|
||||
struct dm_stats_aux *stats_aux = &io->stats_aux;
|
||||
unsigned long start_time = io->start_time;
|
||||
struct mapped_device *md = io->md;
|
||||
struct bio *bio = io->orig_bio;
|
||||
unsigned int sectors;
|
||||
|
||||
/*
|
||||
* If REQ_PREFLUSH set, don't account payload, it will be
|
||||
* submitted (and accounted) after this flush completes.
|
||||
*/
|
||||
if (bio_is_flush_with_data(bio))
|
||||
sectors = 0;
|
||||
else if (likely(!(dm_io_flagged(io, DM_IO_WAS_SPLIT))))
|
||||
sectors = bio_sectors(bio);
|
||||
else
|
||||
sectors = io->sectors;
|
||||
return 0;
|
||||
if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
|
||||
return io->sectors;
|
||||
return bio_sectors(bio);
|
||||
}
|
||||
|
||||
if (!end)
|
||||
bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time);
|
||||
else
|
||||
bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors,
|
||||
start_time);
|
||||
static void dm_io_acct(struct dm_io *io, bool end)
|
||||
{
|
||||
struct bio *bio = io->orig_bio;
|
||||
|
||||
if (dm_io_flagged(io, DM_IO_BLK_STAT)) {
|
||||
if (!end)
|
||||
bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
|
||||
io->start_time);
|
||||
else
|
||||
bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
|
||||
dm_io_sectors(io, bio),
|
||||
io->start_time);
|
||||
}
|
||||
|
||||
if (static_branch_unlikely(&stats_enabled) &&
|
||||
unlikely(dm_stats_used(&md->stats))) {
|
||||
unlikely(dm_stats_used(&io->md->stats))) {
|
||||
sector_t sector;
|
||||
|
||||
if (likely(!dm_io_flagged(io, DM_IO_WAS_SPLIT)))
|
||||
sector = bio->bi_iter.bi_sector;
|
||||
else
|
||||
if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
|
||||
sector = bio_end_sector(bio) - io->sector_offset;
|
||||
else
|
||||
sector = bio->bi_iter.bi_sector;
|
||||
|
||||
dm_stats_account_io(&md->stats, bio_data_dir(bio),
|
||||
sector, sectors,
|
||||
end, start_time, stats_aux);
|
||||
dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
|
||||
sector, dm_io_sectors(io, bio),
|
||||
end, io->start_time, &io->stats_aux);
|
||||
}
|
||||
}
|
||||
|
||||
@ -592,8 +594,11 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
|
||||
spin_lock_init(&io->lock);
|
||||
io->start_time = jiffies;
|
||||
io->flags = 0;
|
||||
if (blk_queue_io_stat(md->queue))
|
||||
dm_io_set_flag(io, DM_IO_BLK_STAT);
|
||||
|
||||
if (static_branch_unlikely(&stats_enabled))
|
||||
if (static_branch_unlikely(&stats_enabled) &&
|
||||
unlikely(dm_stats_used(&md->stats)))
|
||||
dm_stats_record_start(&md->stats, &io->stats_aux);
|
||||
|
||||
return io;
|
||||
@ -2348,6 +2353,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
|
||||
break;
|
||||
case DM_TYPE_BIO_BASED:
|
||||
case DM_TYPE_DAX_BIO_BASED:
|
||||
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
|
||||
break;
|
||||
case DM_TYPE_NONE:
|
||||
WARN_ON_ONCE(true);
|
||||
|
drivers/md/dm.h
@@ -210,9 +210,6 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
 		      unsigned int cookie, bool need_resize_uevent);

-void dm_internal_suspend(struct mapped_device *md);
-void dm_internal_resume(struct mapped_device *md);
-
 int dm_io_init(void);
 void dm_io_exit(void);

drivers/md/persistent-data/dm-block-manager.c
@@ -421,6 +421,12 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
 }
 EXPORT_SYMBOL_GPL(dm_block_manager_destroy);

+void dm_block_manager_reset(struct dm_block_manager *bm)
+{
+	dm_bufio_client_reset(bm->bufio);
+}
+EXPORT_SYMBOL_GPL(dm_block_manager_reset);
+
 unsigned int dm_bm_block_size(struct dm_block_manager *bm)
 {
 	return dm_bufio_get_block_size(bm->bufio);
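(Editor's note: the intended caller of dm_block_manager_reset() is dm-thin's metadata abort path, changed earlier on this page. Instead of creating a replacement block manager outside pmd->root_lock, the new code destroys the space maps and transaction managers, resets the existing block manager in place, and re-opens the metadata. A condensed sketch of that flow, not the verbatim source:)

/* Condensed from dm_pool_abort_metadata() as changed in the dm-thin-metadata.c hunk above. */
static int pool_abort_metadata_sketch(struct dm_pool_metadata *pmd)
{
	int r;

	pmd_write_lock(pmd);
	if (pmd->fail_io) {
		pmd_write_unlock(pmd);
		return -EINVAL;
	}

	__set_abort_with_changes_flags(pmd);

	/* destroy data_sm/metadata_sm/nb_tm/tm, but keep the block manager */
	__destroy_persistent_data_objects(pmd, false);

	/* reset bm in place: drops all cached buffers via dm_bufio_client_reset() */
	dm_block_manager_reset(pmd->bm);

	/* rebuild data_sm/metadata_sm/nb_tm/tm on top of the same bm */
	r = __open_or_format_metadata(pmd, false);
	if (r)
		pmd->fail_io = true;
	pmd_write_unlock(pmd);

	return r;
}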
drivers/md/persistent-data/dm-block-manager.h
@@ -36,6 +36,7 @@ struct dm_block_manager *dm_block_manager_create(
 	struct block_device *bdev, unsigned int block_size,
 	unsigned int max_held_per_thread);
 void dm_block_manager_destroy(struct dm_block_manager *bm);
+void dm_block_manager_reset(struct dm_block_manager *bm);

 unsigned int dm_bm_block_size(struct dm_block_manager *bm);
 dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
drivers/md/persistent-data/dm-space-map.h
@@ -77,7 +77,8 @@ struct dm_space_map {

 static inline void dm_sm_destroy(struct dm_space_map *sm)
 {
-	sm->destroy(sm);
+	if (sm)
+		sm->destroy(sm);
 }

 static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
drivers/md/persistent-data/dm-transaction-manager.c
@@ -199,6 +199,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);

 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+	if (!tm)
+		return;
+
 	if (!tm->is_clone)
 		wipe_shadow_table(tm);

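(Editor's note: making dm_tm_destroy() and dm_sm_destroy() tolerate NULL is what lets the __format_metadata()/__open_metadata() error paths earlier in this diff destroy whatever was created so far and clear each pointer, without extra existence checks. A condensed sketch of that unwind idiom, with hypothetical helper names for the construction steps:)

static int open_objects_sketch(struct dm_pool_metadata *pmd)
{
	int r;

	r = create_tm_and_metadata_sm(pmd);	/* hypothetical helper */
	if (r)
		return r;

	r = create_data_sm(pmd);		/* hypothetical helper */
	if (r)
		goto bad_cleanup_tm;

	return 0;

bad_cleanup_tm:
	/* Safe even if some objects were never created: destroy(NULL) is a no-op. */
	dm_tm_destroy(pmd->tm);
	pmd->tm = NULL;
	dm_sm_destroy(pmd->metadata_sm);
	pmd->metadata_sm = NULL;
	return r;
}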
include/linux/dm-bufio.h
@@ -38,6 +38,8 @@ dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
  */
 void dm_bufio_client_destroy(struct dm_bufio_client *c);

+void dm_bufio_client_reset(struct dm_bufio_client *c);
+
 /*
  * Set the sector range.
  * When this function is called, there must be no I/O in progress on the bufio