Merge tag 'for-6.8/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm (commit 6897cea718)

Pull device mapper fixes from Mike Snitzer:

 - Fix the DM ioctl interface to avoid INT_MAX overflow warnings from
   kvmalloc by limiting the number of targets and the parameter size area.

 - Fix DM stats to avoid INT_MAX overflow warnings from kvmalloc by
   limiting the number of entries supported.

 - Fix DM writecache to support mapping devices larger than 1 TiB by
   switching from kvmalloc_array to vmalloc_array, which avoids the
   INT_MAX overflow in kvmalloc_node and the associated warnings.

 - Remove the (ab)use of tasklets from both the DM crypt and verity
   targets; they will be converted to use BH workqueues in the future.

* tag 'for-6.8/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm-crypt, dm-verity: disable tasklets
  dm writecache: allow allocations larger than 2GiB
  dm stats: limit the number of entries
  dm: limit the number of targets and parameter size area
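For background, kvmalloc() refuses and warns on any request above INT_MAX, so a user-controllable count multiplied by an element size must be bounded before the allocation is attempted. Below is a minimal sketch of the guard pattern these fixes share; bounded_alloc_array() is a hypothetical name for illustration, not a DM helper:

	#include <linux/vmalloc.h>

	/*
	 * Hypothetical helper: enforce an explicit ceiling (the role played
	 * by DM_MAX_TARGETS and DM_STAT_MAX_ENTRIES in the patches below)
	 * before allocating, so an oversized user-supplied count fails
	 * cleanly instead of tripping kvmalloc's INT_MAX warning.
	 */
	static void *bounded_alloc_array(size_t n, size_t elem_size, size_t max_n)
	{
		if (n > max_n)
			return NULL;
		/* vmalloc_array() checks n * elem_size for multiplication overflow */
		return vmalloc_array(n, elem_size);
	}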
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -22,6 +22,8 @@
 #include "dm-ima.h"
 
 #define DM_RESERVED_MAX_IOS		1024
+#define DM_MAX_TARGETS			1048576
+#define DM_MAX_TARGET_PARAMS		1024
 
 struct dm_io;
 
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -73,10 +73,8 @@ struct dm_crypt_io {
 	struct bio *base_bio;
 	u8 *integrity_metadata;
 	bool integrity_metadata_from_pool:1;
-	bool in_tasklet:1;
 
 	struct work_struct work;
-	struct tasklet_struct tasklet;
 
 	struct convert_context ctx;
 
@@ -1762,7 +1760,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	io->ctx.r.req = NULL;
 	io->integrity_metadata = NULL;
 	io->integrity_metadata_from_pool = false;
-	io->in_tasklet = false;
 	atomic_set(&io->io_pending, 0);
 }
 
@@ -1771,13 +1768,6 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 	atomic_inc(&io->io_pending);
 }
 
-static void kcryptd_io_bio_endio(struct work_struct *work)
-{
-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
-	bio_endio(io->base_bio);
-}
-
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1801,20 +1791,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 
 	base_bio->bi_status = error;
 
-	/*
-	 * If we are running this function from our tasklet,
-	 * we can't call bio_endio() here, because it will call
-	 * clone_endio() from dm.c, which in turn will
-	 * free the current struct dm_crypt_io structure with
-	 * our tasklet. In this case we need to delay bio_endio()
-	 * execution to after the tasklet is done and dequeued.
-	 */
-	if (io->in_tasklet) {
-		INIT_WORK(&io->work, kcryptd_io_bio_endio);
-		queue_work(cc->io_queue, &io->work);
-		return;
-	}
-
 	bio_endio(base_bio);
 }
 
@@ -2246,11 +2222,6 @@ static void kcryptd_crypt(struct work_struct *work)
 		kcryptd_crypt_write_convert(io);
 }
 
-static void kcryptd_crypt_tasklet(unsigned long work)
-{
-	kcryptd_crypt((struct work_struct *)work);
-}
-
 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
@@ -2262,15 +2233,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 		 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
 		 * it is being executed with irqs disabled.
 		 */
-		if (in_hardirq() || irqs_disabled()) {
-			io->in_tasklet = true;
-			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
-			tasklet_schedule(&io->tasklet);
+		if (!(in_hardirq() || irqs_disabled())) {
+			kcryptd_crypt(&io->work);
 			return;
 		}
-
-		kcryptd_crypt(&io->work);
-		return;
 	}
 
 	INIT_WORK(&io->work, kcryptd_crypt);
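With the tasklet path gone, kcryptd_queue_crypt() inverts the check: synchronous completion is attempted only outside hardirq/irqs-disabled context, and everything else falls through to the workqueue. The commit message says the tasklet users will later move to a BH workqueue; a rough sketch of that direction, assuming the WQ_BH flag merged in a later cycle (names here are illustrative and not part of this pull):

	#include <linux/init.h>
	#include <linux/workqueue.h>

	/*
	 * Illustrative only: a BH workqueue runs work items in softirq
	 * context, giving tasklet-like latency through the ordinary
	 * work_struct API, so no separate tasklet field is needed.
	 */
	static struct workqueue_struct *kcryptd_bh_wq;

	static int __init kcryptd_bh_wq_init(void)
	{
		kcryptd_bh_wq = alloc_workqueue("kcryptd_bh", WQ_BH | WQ_HIGHPRI, 0);
		return kcryptd_bh_wq ? 0 : -ENOMEM;
	}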
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1941,7 +1941,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
 			   minimum_data_size - sizeof(param_kernel->version)))
 		return -EFAULT;
 
-	if (param_kernel->data_size < minimum_data_size) {
+	if (unlikely(param_kernel->data_size < minimum_data_size) ||
+	    unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) {
 		DMERR("Invalid data size in the ioctl structure: %u",
 		      param_kernel->data_size);
 		return -EINVAL;
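The new ceiling works out to DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS = 1048576 * 1024 = 2^30 bytes (1 GiB), comfortably under kvmalloc's INT_MAX cap. A compile-time check along these lines (not part of the patch) would document the invariant:

	#include <linux/build_bug.h>
	#include <linux/limits.h>
	#include <linux/types.h>

	/* Not in the patch: the ioctl parameter-area bound stays below INT_MAX. */
	static_assert((u64)DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS == 1ULL << 30);
	static_assert((u64)DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS < INT_MAX);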
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -66,6 +66,9 @@ struct dm_stats_last_position {
 	unsigned int last_rw;
 };
 
+#define DM_STAT_MAX_ENTRIES		8388608
+#define DM_STAT_MAX_HISTOGRAM_ENTRIES	134217728
+
 /*
  * A typo on the command line could possibly make the kernel run out of memory
  * and crash. To prevent the crash we account all used memory. We fail if we
@@ -285,6 +288,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end
 	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
 		return -EOVERFLOW;
 
+	if (n_entries > DM_STAT_MAX_ENTRIES)
+		return -EOVERFLOW;
+
 	shared_alloc_size = struct_size(s, stat_shared, n_entries);
 	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
 		return -EOVERFLOW;
@@ -297,6 +303,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end
 	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
 		return -EOVERFLOW;
 
+	if ((n_histogram_entries + 1) * (size_t)n_entries > DM_STAT_MAX_HISTOGRAM_ENTRIES)
+		return -EOVERFLOW;
+
 	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
 				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
 		return -ENOMEM;
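For scale, DM_STAT_MAX_ENTRIES is 2^23 and DM_STAT_MAX_HISTOGRAM_ENTRIES is 2^27; since each histogram counter is an unsigned long long, the histogram cap bounds that allocation at 2^27 * 8 = 2^30 bytes (1 GiB), again under INT_MAX. Expressed as hypothetical compile-time checks (not in the patch):

	#include <linux/build_bug.h>
	#include <linux/types.h>

	/* Not in the patch: the stats caps decode to powers of two that keep
	 * each allocation below INT_MAX. */
	static_assert(DM_STAT_MAX_ENTRIES == 1 << 23);
	static_assert((u64)DM_STAT_MAX_HISTOGRAM_ENTRIES * sizeof(unsigned long long) == 1ULL << 30);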
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -129,7 +129,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 int dm_table_create(struct dm_table **result, blk_mode_t mode,
 		    unsigned int num_targets, struct mapped_device *md)
 {
-	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
+	struct dm_table *t;
+
+	if (num_targets > DM_MAX_TARGETS)
+		return -EOVERFLOW;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
 
 	if (!t)
 		return -ENOMEM;
@@ -144,7 +149,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
 
 	if (!num_targets) {
 		kfree(t);
-		return -ENOMEM;
+		return -EOVERFLOW;
 	}
 
 	if (alloc_targets(t, num_targets)) {
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -645,23 +645,6 @@ static void verity_work(struct work_struct *w)
 	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
 }
 
-static void verity_tasklet(unsigned long data)
-{
-	struct dm_verity_io *io = (struct dm_verity_io *)data;
-	int err;
-
-	io->in_tasklet = true;
-	err = verity_verify_io(io);
-	if (err == -EAGAIN || err == -ENOMEM) {
-		/* fallback to retrying with work-queue */
-		INIT_WORK(&io->work, verity_work);
-		queue_work(io->v->verify_wq, &io->work);
-		return;
-	}
-
-	verity_finish_io(io, errno_to_blk_status(err));
-}
-
 static void verity_end_io(struct bio *bio)
 {
 	struct dm_verity_io *io = bio->bi_private;
@@ -674,13 +657,8 @@ static void verity_end_io(struct bio *bio)
 		return;
 	}
 
-	if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
-		tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
-		tasklet_schedule(&io->tasklet);
-	} else {
-		INIT_WORK(&io->work, verity_work);
-		queue_work(io->v->verify_wq, &io->work);
-	}
+	INIT_WORK(&io->work, verity_work);
+	queue_work(io->v->verify_wq, &io->work);
 }
 
 /*
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -83,7 +83,6 @@ struct dm_verity_io {
 	struct bvec_iter iter;
 
 	struct work_struct work;
-	struct tasklet_struct tasklet;
 
 	/*
 	 * Three variably-sized fields follow this struct:
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -299,7 +299,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	long i;
 
 	wc->memory_map = NULL;
-	pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
+	pages = vmalloc_array(p, sizeof(struct page *));
 	if (!pages) {
 		r = -ENOMEM;
 		goto err2;
@@ -330,7 +330,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 		r = -ENOMEM;
 		goto err3;
 	}
-	kvfree(pages);
+	vfree(pages);
 	wc->memory_vmapped = true;
 }
 
@@ -341,7 +341,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 
 	return 0;
 err3:
-	kvfree(pages);
+	vfree(pages);
 err2:
 	dax_read_unlock(id);
 err1:
@@ -962,7 +962,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 
 	if (wc->entries)
 		return 0;
-	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
+	wc->entries = vmalloc_array(wc->n_blocks, sizeof(struct wc_entry));
 	if (!wc->entries)
 		return -ENOMEM;
 	for (b = 0; b < wc->n_blocks; b++) {
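The writecache arithmetic shows why the switch matters: mapping 1 TiB of persistent memory at a 4 KiB page size takes 2^28 struct page pointers, and 2^28 * 8 bytes is exactly 2 GiB, just past kvmalloc's INT_MAX limit, while vmalloc_array() has no such cap. A self-contained sketch of that calculation (the names and the 1 TiB figure are assumptions for illustration):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	#define EXAMPLE_PMEM_BYTES	(1ULL << 40)			/* assumed 1 TiB device */
	#define EXAMPLE_NR_PAGES	(EXAMPLE_PMEM_BYTES >> PAGE_SHIFT)	/* 2^28 at 4 KiB pages */

	/*
	 * 2^28 pointers * 8 bytes = 2 GiB: over kvmalloc's INT_MAX cap, but
	 * fine for vmalloc_array(), which only checks the multiplication
	 * itself for overflow.
	 */
	static struct page **claim_pages_example(void)
	{
		return vmalloc_array(EXAMPLE_NR_PAGES, sizeof(struct page *));
	}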