// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
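
/*
 * Example (added for illustration): with 64-byte cache lines and an
 * 8-byte sector_t, NODE_SIZE is 64, KEYS_PER_NODE is 8 and
 * CHILDREN_PER_NODE is 9, i.e. each btree node fills exactly one
 * cache line.
 */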

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}
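
/*
 * Worked example (illustrative, not from the original source): with
 * base 9 (CHILDREN_PER_NODE above), int_log(1000, 9) iterates
 * 1000 -> 112 -> 13 -> 2 -> 1 and returns 4: a 9-way btree needs four
 * levels to cover 1000 entries (9^3 = 729 < 1000 <= 9^4 = 6561).
 */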

/*
 * Calculate the index of the child node for the k'th key of the n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}
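
/*
 * Usage note (added for clarity): the flattened btree built here is what
 * the sector-to-target lookup (dm_table_find_target(), elsewhere in this
 * file) walks at I/O time; each level costs one scan of a single
 * cache-line-sized node of "high" keys, so a lookup touches t->depth
 * nodes instead of searching the whole target list.
 */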

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
			   GFP_KERNEL);
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	kvfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
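
/*
 * Layout note (added for clarity): the single kvcalloc() above yields
 * num sector_t "high" keys followed immediately by num struct dm_target
 * entries, so freeing t->highs later releases both arrays at once.
 */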

int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md)
{
	struct dm_table *t;

	if (num_targets > DM_MAX_TARGETS)
		return -EOVERFLOW;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	init_rwsem(&t->devices_lock);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -EOVERFLOW;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	t->flush_bypasses_map = true;
	*result = t;
	return 0;
}
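
/*
 * Typical caller sequence (a sketch, not part of the original source;
 * note that params must be writable because dm_split_args() rewrites it
 * in place):
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sda 0";
 *	int r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, len, params);
 *
 * followed by dm_table_complete() to build the btree index.
 */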

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);

		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

static void dm_table_destroy_crypto_profile(struct dm_table *t);

void dm_table_destroy(struct dm_table *t)
{
	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		kvfree(t->index[t->depth - 2]);

	/* free the targets */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->dtr)
			ti->type->dtr(ti);

		dm_put_target_type(ti->type);
	}

	kvfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	dm_table_destroy_crypto_profile(t);

	kfree(t);
}
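
/*
 * Note on dm_table_destroy() above (added for clarity; based on how the
 * index is built at table-complete time): all interior btree levels are
 * carved from one allocation whose base pointer is stored in
 * t->index[t->depth - 2], which is why that single kvfree() suffices;
 * the deepest level aliases t->highs and is freed separately.
 */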

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, this checks whether an area of the destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size = bdev_nr_sectors(bdev);
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
		      dm_device_name(ti->table->md), bdev,
		      (unsigned long long)start,
		      (unsigned long long)len,
		      (unsigned long long)dev_size);
		return 1;
	}

	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_is_zoned(bdev)) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);

		if (start & (zone_sectors - 1)) {
			DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
			      dm_device_name(ti->table->md),
			      (unsigned long long)start,
			      zone_sectors, bdev);
			return 1;
		}

		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones. So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned. We do not allow such last smaller zone to be part
		 * of the mapping here to ensure that mappings with multiple
		 * devices do not end up with a smaller zone in the middle of
		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
			      dm_device_name(ti->table->md),
			      (unsigned long long)len,
			      zone_sectors, bdev);
			return 1;
		}
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
		      dm_device_name(ti->table->md),
		      (unsigned long long)start,
		      limits->logical_block_size, bdev);
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
		      dm_device_name(ti->table->md),
		      (unsigned long long)len,
		      limits->logical_block_size, bdev);
		return 1;
	}

	return 0;
}
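
/*
 * Example (illustrative): on a zoned device with 256 MiB zones
 * (zone_sectors = 524288), a target with start = 524288 and
 * len = 1048576 maps zones 1 and 2 exactly and passes both checks,
 * whereas len = 1000000 would leave the second zone partially mapped
 * and be rejected.
 */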

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

/*
 * Note: the __ref annotation is because this function can call the __init
 * marked early_lookup_bdev when called during early boot code from dm-init.c.
 */
int __ref dm_devt_from_path(const char *path, dev_t *dev_p)
{
	int r;
	dev_t dev;
	unsigned int major, minor;
	char dummy;

	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		r = lookup_bdev(path, &dev);
#ifndef MODULE
		if (r && system_state < SYSTEM_RUNNING)
			r = early_lookup_bdev(path, &dev);
#endif
		if (r)
			return r;
	}
	*dev_p = dev;
	return 0;
}
EXPORT_SYMBOL(dm_devt_from_path);
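
/*
 * Example inputs (from the parsing above): "8:16" is decoded directly as
 * major 8, minor 16; anything else, e.g. "/dev/sda", goes through
 * lookup_bdev(), with early_lookup_bdev() as the fallback before the
 * system is fully booted.
 */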

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	r = dm_devt_from_path(path, &dev);
	if (r)
		return r;

	if (dev == disk_devt(t->md->disk))
		return -EINVAL;

	down_write(&t->devices_lock);

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd) {
			r = -ENOMEM;
			goto unlock_ret_r;
		}

		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
		if (r) {
			kfree(dd);
			goto unlock_ret_r;
		}

		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
		goto out;

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			goto unlock_ret_r;
	}
	refcount_inc(&dd->count);
out:
	up_write(&t->devices_lock);
	*result = dd->dm_dev;
	return 0;

unlock_ret_r:
	up_write(&t->devices_lock);
	return r;
}
EXPORT_SYMBOL(dm_get_device);
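
/*
 * Typical use (a sketch, not part of the original source): a target's
 * constructor resolves its backing device with
 *
 *	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
 *
 * and its destructor releases it with dm_put_device(ti, dev).
 */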

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %pg",
		       dm_device_name(ti->table->md), bdev);
		return 0;
	}

	if (blk_stack_limits(limits, &q->limits,
			get_start_sect(bdev) + start) < 0)
		DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdev,
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	/*
	 * Only stack the integrity profile if the target doesn't have native
	 * integrity support.
	 */
	if (!dm_target_has_integrity(ti->type))
		queue_limits_stack_integrity_bdev(limits, bdev);

	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct dm_table *t = ti->table;
	struct list_head *devices = &t->devices;
	struct dm_dev_internal *dd;

	down_write(&t->devices_lock);

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMERR("%s: device %s not in table devices list",
		      dm_device_name(t->md), d->name);
		goto unlock_ret;
	}
	if (refcount_dec_and_test(&dd->count)) {
		dm_put_table_device(t->md, d);
		list_del(&dd->list);
		kfree(dd);
	}

unlock_ret:
	up_write(&t->devices_lock);
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *t, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!t->num_targets)
		return !ti->begin;

	prev = &t->targets[t->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned int *size, char **old_argv)
{
	char **argv;
	unsigned int new_size;
	gfp_t gfp;

	if (*size) {
		new_size = *size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv && old_argv) {
		memcpy(argv, old_argv, *size * sizeof(*argv));
		*size = new_size;
	}

	kfree(old_argv);
	return argv;
}
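
/*
 * Growth pattern (for illustration): the array starts at 8 slots using
 * GFP_NOIO (so message parsing cannot recurse into I/O while a device
 * is suspended) and doubles with GFP_KERNEL thereafter: 8, 16, 32, ...
 */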

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned int array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
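
/*
 * Example (illustrative): params "/dev/sda 0" splits into argc = 2;
 * backslash quoting folds "two\ words" into the single argument
 * "two words". The input buffer is rewritten in place, hence
 * "destructively" above.
 */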

static void dm_set_stacking_limits(struct queue_limits *limits)
{
	blk_set_stacking_limits(limits);
	limits->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
}

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *t,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *ti;
	struct queue_limits ti_limits;
	unsigned int i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < t->num_targets; i++) {
		ti = dm_table_get_target(t, i);

		dm_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMERR("%s: table line %u (start sect %llu len %llu) "
		      "not aligned to h/w logical block size %u",
		      dm_device_name(t->md), i,
		      (unsigned long long) ti->begin,
		      (unsigned long long) ti->len,
		      limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
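
/*
 * Worked example (illustrative): with a 4096-byte logical block size
 * (8 sectors), a first target of len = 12 sectors leaves
 * next_target_start = 12 & 7 = 4 and remaining = 8 - 4 = 4; the next
 * target must be able to complete those 4 sectors, so its own
 * logical_block_size can be at most 4 * 512 = 2048 bytes.
 */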

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *ti;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	ti = t->targets + t->num_targets;
	memset(ti, 0, sizeof(*ti));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	ti->type = dm_get_target_type(type);
	if (!ti->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(ti->type)) {
		if (t->num_targets) {
			ti->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(ti->type) &&
	    !(t->mode & BLK_OPEN_WRITE)) {
		ti->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != ti->type) {
			ti->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(ti->type)) {
		if (t->num_targets) {
			ti->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = ti->type;
	}

	ti->table = t;
	ti->begin = start;
	ti->len = len;
	ti->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, ti)) {
		ti->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		ti->error = "couldn't split parameters";
		goto bad;
	}

	r = ti->type->ctr(ti, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = ti->begin + ti->len - 1;

	if (!ti->num_discard_bios && ti->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	if (ti->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
		static_branch_enable(&swap_bios_enabled);

	if (!ti->flush_bypasses_map)
		t->flush_bypasses_map = false;

	return 0;

 bad:
	DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, ti->error, ERR_PTR(r));
	dm_put_target_type(ti->type);
	return r;
}
|
|
|
|
|
2011-08-02 11:32:04 +00:00
|
|
|
/*
|
|
|
|
* Target argument parsing helpers.
|
|
|
|
*/
|
2023-01-25 20:14:58 +00:00
|
|
|
static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
|
|
|
|
unsigned int *value, char **error, unsigned int grouped)
|
2011-08-02 11:32:04 +00:00
|
|
|
{
|
|
|
|
const char *arg_str = dm_shift_arg(arg_set);
|
dm: reject trailing characters in sscanf input
Device mapper uses sscanf to convert arguments to numbers. The problem is that
the way we use it ignores additional unmatched characters in the scanned string.
For example, `if (sscanf(string, "%d", &number) == 1)' will match a number,
but it will also match a number with some garbage appended, like "123abc".
As a result, device mapper accepts garbage after some numbers. For example
the command `dmsetup create vg1-new --table "0 16384 linear 254:1bla 34816bla"'
will pass without an error.
This patch fixes all sscanf uses in device mapper. It appends "%c" with
a pointer to a dummy character variable to every sscanf statement.
The construct `if (sscanf(string, "%d%c", &number, &dummy) == 1)' succeeds
only if string is a null-terminated number (optionally preceded by some
whitespace characters). If there is some character appended after the number,
sscanf matches "%c", writes the character to the dummy variable and returns 2.
We check the return value for 1 and consequently reject numbers with some
garbage appended.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2012-03-28 17:41:26 +00:00
|
|
|
char dummy;
|
2011-08-02 11:32:04 +00:00
|
|
|
|
|
|
|
if (!arg_str ||
|
dm: reject trailing characters in sscanf input
2012-03-28 17:41:26 +00:00
|
|
|
(sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
|
2011-08-02 11:32:04 +00:00
|
|
|
(*value < arg->min) ||
|
|
|
|
(*value > arg->max) ||
|
|
|
|
(grouped && arg_set->argc < *value)) {
|
|
|
|
*error = arg->error;
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
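A minimal standalone sketch of the "%u%c" trick that validate_next_arg() relies on; parse_exact_uint() is illustrative and not part of dm-table.c:

#include <stdio.h>

/*
 * Return 0 and store the parsed value only when str is exactly one
 * unsigned number: trailing garbage also matches the "%c", bumping
 * sscanf's return value to 2, which is rejected here.
 */
static int parse_exact_uint(const char *str, unsigned int *value)
{
	char dummy;

	if (!str || sscanf(str, "%u%c", value, &dummy) != 1)
		return -1;
	return 0;
}

So parse_exact_uint("123", &v) succeeds, while parse_exact_uint("123abc", &v) is rejected.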
|
|
|
|
|
2017-06-22 18:32:45 +00:00
|
|
|
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int *value, char **error)
|
2011-08-02 11:32:04 +00:00
|
|
|
{
|
|
|
|
return validate_next_arg(arg, arg_set, value, error, 0);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dm_read_arg);
|
|
|
|
|
2017-06-22 18:32:45 +00:00
|
|
|
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int *value, char **error)
|
2011-08-02 11:32:04 +00:00
|
|
|
{
|
|
|
|
return validate_next_arg(arg, arg_set, value, error, 1);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dm_read_arg_group);
|
|
|
|
|
|
|
|
const char *dm_shift_arg(struct dm_arg_set *as)
|
|
|
|
{
|
|
|
|
char *r;
|
|
|
|
|
|
|
|
if (as->argc) {
|
|
|
|
as->argc--;
|
|
|
|
r = *as->argv;
|
|
|
|
as->argv++;
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dm_shift_arg);
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
|
2011-08-02 11:32:04 +00:00
|
|
|
{
|
|
|
|
BUG_ON(as->argc < num_args);
|
|
|
|
as->argc -= num_args;
|
|
|
|
as->argv += num_args;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dm_consume_args);
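A hedged usage sketch tying the helpers above together, the way a target constructor might call them; example_ctr and its argument layout are hypothetical, not taken from this file:

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* bounds and error message enforced by validate_next_arg() */
	static const struct dm_arg arg = { 0, 16, "invalid number of feature args" };
	struct dm_arg_set as = { .argc = argc, .argv = argv };
	unsigned int num_features;
	int r;

	/* grouped read: also verifies enough args remain for the group */
	r = dm_read_arg_group(&arg, &as, &num_features, &ti->error);
	if (r)
		return r;

	/* consume each feature word in turn */
	while (num_features--) {
		const char *feature = dm_shift_arg(&as);

		/* ... interpret feature here ... */
	}

	return 0;
}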
|
|
|
|
|
2017-04-27 17:11:23 +00:00
|
|
|
static bool __table_type_bio_based(enum dm_queue_mode table_type)
|
2016-06-22 23:54:53 +00:00
|
|
|
{
|
|
|
|
return (table_type == DM_TYPE_BIO_BASED ||
|
2020-10-07 19:15:08 +00:00
|
|
|
table_type == DM_TYPE_DAX_BIO_BASED);
|
2016-06-22 23:54:53 +00:00
|
|
|
}
|
|
|
|
|
2017-04-27 17:11:23 +00:00
|
|
|
static bool __table_type_request_based(enum dm_queue_mode table_type)
|
2015-05-29 08:51:03 +00:00
|
|
|
{
|
2018-10-11 15:06:29 +00:00
|
|
|
return table_type == DM_TYPE_REQUEST_BASED;
|
2015-05-29 08:51:03 +00:00
|
|
|
}
|
|
|
|
|
2017-04-27 17:11:23 +00:00
|
|
|
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
|
2016-05-25 01:16:51 +00:00
|
|
|
{
|
|
|
|
t->type = type;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dm_table_set_type);
|
|
|
|
|
2019-05-16 20:26:29 +00:00
|
|
|
/* validate the dax capability of the target device span */
|
2021-11-29 10:21:42 +00:00
|
|
|
static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
|
2019-07-30 18:39:43 +00:00
|
|
|
sector_t start, sector_t len, void *data)
|
2016-06-22 23:54:53 +00:00
|
|
|
{
|
2021-11-29 10:21:42 +00:00
|
|
|
if (dev->dax_dev)
|
|
|
|
return false;
|
2019-05-16 20:26:29 +00:00
|
|
|
|
2021-11-29 10:21:42 +00:00
|
|
|
DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
|
|
|
|
return true;
|
2016-06-22 23:54:53 +00:00
|
|
|
}
|
|
|
|
|
2019-07-05 14:03:25 +00:00
|
|
|
/* Check devices support synchronous DAX */
|
2021-02-09 03:34:36 +00:00
|
|
|
static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
|
|
|
|
sector_t start, sector_t len, void *data)
|
2019-07-05 14:03:25 +00:00
|
|
|
{
|
2021-02-09 03:34:36 +00:00
|
|
|
return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
|
2019-07-05 14:03:25 +00:00
|
|
|
}
|
|
|
|
|
2021-11-29 10:21:42 +00:00
|
|
|
static bool dm_table_supports_dax(struct dm_table *t,
|
2022-07-05 20:12:27 +00:00
|
|
|
iterate_devices_callout_fn iterate_fn)
|
2016-06-22 23:54:53 +00:00
|
|
|
{
|
|
|
|
/* Ensure that all targets support DAX. */
|
2022-07-05 20:12:27 +00:00
|
|
|
for (unsigned int i = 0; i < t->num_targets; i++) {
|
|
|
|
struct dm_target *ti = dm_table_get_target(t, i);
|
2016-06-22 23:54:53 +00:00
|
|
|
|
|
|
|
if (!ti->type->direct_access)
|
|
|
|
return false;
|
|
|
|
|
dm error: Add support for zoned block devices
dm-error is used in several test cases in the xfstests test suite to
check the handling of IO errors in file systems. However, with several
file systems getting native support for zoned block devices (e.g.
btrfs and f2fs), dm-error's lack of zoned block device support creates
problems as the file system attempts executing zone commands (e.g. a
zone append operation) against a dm-error non-zoned block device,
which causes various issues in the block layer (e.g. WARN_ON
triggers).
This commit adds support for zoned block devices to dm-error, allowing
a DM device table containing an error target to be exposed as a zoned
block device (if all targets have compatible zoned model support and
mapping). This is done as follows:
1) Allow passing 2 arguments to an error target, similar to dm-linear:
a backing device and a start sector. These arguments are optional and
dm-error retains its characteristics if the arguments are not
specified.
2) Implement the iterate_devices method so that dm-core can normally
check the zone support and restrictions (e.g. zone alignment of the
targets). When the backing device arguments are not specified, the
iterate_devices method never calls the fn() argument.
When no backing device is specified, as before, we assume that the DM
device is not zoned. When the backing device arguments are specified,
the zoned model of the DM device will depend on the backing device
type:
- If the backing device is zoned and its model and mapping is
compatible with other targets of the device, the resulting device
will be zoned, with the dm-error mapped portion always returning
errors (similar to the default non-zoned case).
- If the backing device is not zoned, then the DM device will not be
either.
This zone support for dm-error requires the definition of a functional
report_zones operation so that dm_revalidate_zones() can operate
correctly and resources for emulating zone append operations can be
initialized. This is necessary for cases where dm-error is used to
partially map a device and have an overall correct handling of zone
append. This means that dm-error does not fail report zones operations.
Two changes that are not obvious are included to avoid issues:
1) dm_table_supports_zoned_model() is changed to directly check if
the backing device of a wildcard target (= dm-error target) is
zoned. Otherwise, we wouldn't be able to catch the invalid setup of
dm-error without a backing device (non zoned case) being combined
with zoned targets.
2) dm_table_supports_dax() is modified to return false if the wildcard
target is found. Otherwise, when dm-error is set without a backing
device, we end up with a NULL pointer dereference in
set_dax_synchronous (dax_dev is NULL). This is consistent with the
current behavior because dm_table_supports_dax() always returned
false for targets that do not define the iterate_devices method.
Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Tested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
2023-10-26 05:12:05 +00:00
|
|
|
if (dm_target_is_wildcard(ti->type) ||
|
|
|
|
!ti->type->iterate_devices ||
|
2021-11-29 10:21:42 +00:00
|
|
|
ti->type->iterate_devices(ti, iterate_fn, NULL))
|
2016-06-22 23:54:53 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-11-05 15:43:44 +00:00
|
|
|
static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
|
|
|
|
sector_t start, sector_t len, void *data)
|
2018-01-13 19:33:30 +00:00
|
|
|
{
|
2019-11-05 15:43:44 +00:00
|
|
|
struct block_device *bdev = dev->bdev;
|
|
|
|
struct request_queue *q = bdev_get_queue(bdev);
|
2018-01-13 19:33:30 +00:00
|
|
|
|
2019-11-05 15:43:44 +00:00
|
|
|
/* request-based cannot stack on partitions! */
|
2020-09-03 05:40:57 +00:00
|
|
|
if (bdev_is_partition(bdev))
|
2019-11-05 15:43:44 +00:00
|
|
|
return false;
|
2018-01-13 19:33:30 +00:00
|
|
|
|
2018-11-15 19:22:51 +00:00
|
|
|
return queue_is_mq(q);
|
2018-01-13 19:33:30 +00:00
|
|
|
}
|
|
|
|
|
2016-05-25 01:16:51 +00:00
|
|
|
static int dm_table_determine_type(struct dm_table *t)
|
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
some target drivers which are better suited to bio-based dm.
Also, there are other bio-based devices in the kernel
(e.g. md, loop).
Since a bio-based device can't receive struct request,
there are some limitations on device stacking between
bio-based and request-based.
                 type of underlying device
                 bio-based      request-based
 ----------------------------------------------
 bio-based       OK             OK
 request-based   --             OK
The device type is recognized by the queue flag in the kernel,
so dm follows that.
o The type of a dm device is decided at the first table binding time.
Once the type of a dm device is decided, the type can't be changed.
o Mempool allocations are deferred to table loading time, since
mempools for request-based dm are different from those for bio-based
dm and needed mempool type is fixed by the type of table.
o Currently, request-based dm supports only tables that have a single
target. To support multiple targets, we need to support request
splitting or prevent bio/request from spanning multiple targets.
The former needs lots of changes in the block layer, and the latter
needs that all target drivers support merge() function.
Both will take time.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
2009-06-22 09:12:36 +00:00
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int bio_based = 0, request_based = 0, hybrid = 0;
|
2022-07-05 20:12:27 +00:00
|
|
|
struct dm_target *ti;
|
2016-05-25 01:16:51 +00:00
|
|
|
struct list_head *devices = dm_table_get_devices(t);
|
2017-04-27 17:11:23 +00:00
|
|
|
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
|
2016-05-25 01:16:51 +00:00
|
|
|
if (t->type != DM_TYPE_NONE) {
|
|
|
|
/* target already set the table's type */
|
2018-03-05 20:26:06 +00:00
|
|
|
if (t->type == DM_TYPE_BIO_BASED) {
|
|
|
|
/* possibly upgrade to a variant of bio-based */
|
|
|
|
goto verify_bio_based;
|
2017-12-05 02:07:37 +00:00
|
|
|
}
|
2016-06-22 23:54:53 +00:00
|
|
|
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
|
2016-05-25 01:16:51 +00:00
|
|
|
goto verify_rq_based;
|
|
|
|
}
|
|
|
|
|
2022-07-05 20:12:27 +00:00
|
|
|
for (unsigned int i = 0; i < t->num_targets; i++) {
|
|
|
|
ti = dm_table_get_target(t, i);
|
|
|
|
if (dm_target_hybrid(ti))
|
2013-08-22 22:21:38 +00:00
|
|
|
hybrid = 1;
|
2022-07-05 20:12:27 +00:00
|
|
|
else if (dm_target_request_based(ti))
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
request_based = 1;
|
|
|
|
else
|
|
|
|
bio_based = 1;
|
|
|
|
|
|
|
|
if (bio_based && request_based) {
|
2023-02-03 17:55:47 +00:00
|
|
|
DMERR("Inconsistent table: different target types can't be mixed up");
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-22 22:21:38 +00:00
|
|
|
if (hybrid && !bio_based && !request_based) {
|
|
|
|
/*
|
|
|
|
* The targets can work either way.
|
|
|
|
* Determine the type from the live device.
|
|
|
|
* Default to bio-based if device is new.
|
|
|
|
*/
|
2015-05-29 08:51:03 +00:00
|
|
|
if (__table_type_request_based(live_md_type))
|
2013-08-22 22:21:38 +00:00
|
|
|
request_based = 1;
|
|
|
|
else
|
|
|
|
bio_based = 1;
|
|
|
|
}
|
|
|
|
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
if (bio_based) {
|
2018-03-05 20:26:06 +00:00
|
|
|
verify_bio_based:
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
/* We must use this table as bio-based */
|
|
|
|
t->type = DM_TYPE_BIO_BASED;
|
2021-11-29 10:21:42 +00:00
|
|
|
if (dm_table_supports_dax(t, device_not_dax_capable) ||
|
2017-12-05 02:07:37 +00:00
|
|
|
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
|
2016-06-22 23:54:53 +00:00
|
|
|
t->type = DM_TYPE_DAX_BIO_BASED;
|
2017-12-05 02:07:37 +00:00
|
|
|
}
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
BUG_ON(!request_based); /* No targets in this table */
|
|
|
|
|
2016-05-25 01:16:51 +00:00
|
|
|
t->type = DM_TYPE_REQUEST_BASED;
|
|
|
|
|
|
|
|
verify_rq_based:
|
2014-12-18 21:26:47 +00:00
|
|
|
/*
|
|
|
|
* Request-based dm supports only tables that have a single target now.
|
|
|
|
* To support multiple targets, request splitting support is needed,
|
|
|
|
* and that needs lots of changes in the block layer.
|
|
|
|
* (e.g. request completion process for partial completion.)
|
|
|
|
*/
|
|
|
|
if (t->num_targets > 1) {
|
2020-10-07 19:15:08 +00:00
|
|
|
DMERR("request-based DM doesn't support multiple targets");
|
2014-12-18 21:26:47 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2016-11-23 18:51:09 +00:00
|
|
|
if (list_empty(devices)) {
|
|
|
|
int srcu_idx;
|
|
|
|
struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
|
|
|
|
|
2018-10-11 02:49:26 +00:00
|
|
|
/* inherit live table's type */
|
|
|
|
if (live_table)
|
2016-11-23 18:51:09 +00:00
|
|
|
t->type = live_table->type;
|
|
|
|
dm_put_live_table(t->md, srcu_idx);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-07-05 20:12:27 +00:00
|
|
|
ti = dm_table_get_immutable_target(t);
|
|
|
|
if (!ti) {
|
2017-12-05 02:07:37 +00:00
|
|
|
DMERR("table load rejected: immutable target is required");
|
|
|
|
return -EINVAL;
|
2022-07-05 20:12:27 +00:00
|
|
|
} else if (ti->max_io_len) {
|
2017-12-05 02:07:37 +00:00
|
|
|
DMERR("table load rejected: immutable target that splits IO is not supported");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
/* Non-request-stackable devices can't be used for request-based dm */
|
2022-07-05 20:12:27 +00:00
|
|
|
if (!ti->type->iterate_devices ||
|
|
|
|
!ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
|
2018-01-13 19:33:30 +00:00
|
|
|
DMERR("table load rejected: including non-request-stackable devices");
|
|
|
|
return -EINVAL;
|
2014-12-18 02:08:12 +00:00
|
|
|
}
|
2016-12-08 00:56:06 +00:00
|
|
|
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-04-27 17:11:23 +00:00
|
|
|
enum dm_queue_mode dm_table_get_type(struct dm_table *t)
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
{
|
|
|
|
return t->type;
|
|
|
|
}
|
|
|
|
|
2011-10-31 20:19:04 +00:00
|
|
|
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
|
|
|
|
{
|
|
|
|
return t->immutable_target_type;
|
|
|
|
}
|
|
|
|
|
2016-01-31 22:22:27 +00:00
|
|
|
struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
|
|
|
|
{
|
|
|
|
/* Immutable target is implicitly a singleton */
|
|
|
|
if (t->num_targets > 1 ||
|
|
|
|
!dm_target_is_immutable(t->targets[0].type))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return t->targets;
|
|
|
|
}
|
|
|
|
|
2016-02-06 23:38:46 +00:00
|
|
|
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
|
|
|
|
{
|
2022-07-05 20:12:27 +00:00
|
|
|
for (unsigned int i = 0; i < t->num_targets; i++) {
|
|
|
|
struct dm_target *ti = dm_table_get_target(t, i);
|
2016-02-06 23:38:46 +00:00
|
|
|
|
|
|
|
if (dm_target_is_wildcard(ti->type))
|
|
|
|
return ti;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-06-22 23:54:53 +00:00
|
|
|
bool dm_table_bio_based(struct dm_table *t)
|
|
|
|
{
|
|
|
|
return __table_type_bio_based(dm_table_get_type(t));
|
|
|
|
}
|
|
|
|
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
bool dm_table_request_based(struct dm_table *t)
|
|
|
|
{
|
2015-05-29 08:51:03 +00:00
|
|
|
return __table_type_request_based(dm_table_get_type(t));
|
2014-12-18 02:08:12 +00:00
|
|
|
}
|
|
|
|
|
2015-03-11 19:01:09 +00:00
|
|
|
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
{
|
2017-04-27 17:11:23 +00:00
|
|
|
enum dm_queue_mode type = dm_table_get_type(t);
|
2022-06-08 06:34:09 +00:00
|
|
|
unsigned int per_io_data_size = 0, front_pad, io_front_pad;
|
|
|
|
unsigned int min_pool_size = 0, pool_size;
|
|
|
|
struct dm_md_mempools *pools;
|
2024-06-17 06:04:48 +00:00
|
|
|
unsigned int bioset_flags = 0;
|
2024-07-10 18:53:12 +00:00
|
|
|
bool mempool_needs_integrity = t->integrity_supported;
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
|
2015-06-26 14:01:13 +00:00
|
|
|
if (unlikely(type == DM_TYPE_NONE)) {
|
2022-08-24 11:25:57 +00:00
|
|
|
DMERR("no table type is set, can't allocate mempools");
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2022-06-08 06:34:09 +00:00
|
|
|
pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
|
|
|
|
if (!pools)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (type == DM_TYPE_REQUEST_BASED) {
|
|
|
|
pool_size = dm_get_reserved_rq_based_ios();
|
|
|
|
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
|
|
|
|
goto init_bs;
|
2022-03-24 18:36:47 +00:00
|
|
|
}
|
2015-06-26 14:01:13 +00:00
|
|
|
|
2024-06-17 06:04:48 +00:00
|
|
|
if (md->queue->limits.features & BLK_FEAT_POLL)
|
|
|
|
bioset_flags |= BIOSET_PERCPU_CACHE;
|
|
|
|
|
2022-06-08 06:34:09 +00:00
|
|
|
for (unsigned int i = 0; i < t->num_targets; i++) {
|
2022-07-05 20:12:27 +00:00
|
|
|
struct dm_target *ti = dm_table_get_target(t, i);
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
|
2022-06-08 06:34:09 +00:00
|
|
|
per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
|
|
|
|
min_pool_size = max(min_pool_size, ti->num_flush_bios);
|
2024-07-10 18:53:12 +00:00
|
|
|
|
|
|
|
mempool_needs_integrity |= ti->mempool_needs_integrity;
|
2022-06-08 06:34:09 +00:00
|
|
|
}
|
|
|
|
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
|
|
|
|
front_pad = roundup(per_io_data_size,
|
|
|
|
__alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
|
|
|
|
|
|
|
|
io_front_pad = roundup(per_io_data_size,
|
|
|
|
__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
|
2024-06-17 06:04:48 +00:00
|
|
|
if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
|
2022-06-08 06:34:09 +00:00
|
|
|
goto out_free_pools;
|
2024-07-10 18:53:12 +00:00
|
|
|
if (mempool_needs_integrity &&
|
2022-06-08 06:34:09 +00:00
|
|
|
bioset_integrity_create(&pools->io_bs, pool_size))
|
|
|
|
goto out_free_pools;
|
|
|
|
init_bs:
|
|
|
|
if (bioset_init(&pools->bs, pool_size, front_pad, 0))
|
|
|
|
goto out_free_pools;
|
2024-07-10 18:53:12 +00:00
|
|
|
if (mempool_needs_integrity &&
|
2022-06-08 06:34:09 +00:00
|
|
|
bioset_integrity_create(&pools->bs, pool_size))
|
|
|
|
goto out_free_pools;
|
|
|
|
|
|
|
|
t->mempools = pools;
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
return 0;
|
2022-06-08 06:34:09 +00:00
|
|
|
|
|
|
|
out_free_pools:
|
|
|
|
dm_free_md_mempools(pools);
|
|
|
|
return -ENOMEM;
|
dm: enable request based option
2009-06-22 09:12:36 +00:00
|
|
|
}
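A hedged sketch of what the front_pad arithmetic above buys; the exact offset constants are defined elsewhere in dm core, so treat the layout as an approximation:

/*
 * Approximate layout of one allocation from pools->bs (pools->io_bs is
 * analogous, with struct dm_io in place of struct dm_target_io):
 *
 *   [ per_io_data_size bytes of per-target state, padded up ]
 *   [ to __alignof__(struct dm_target_io)                   ]   front_pad
 *   [ struct dm_target_io, up to its embedded clone bio     ]
 *   ---------------------------------------------------------
 *   [ struct bio returned by bio_alloc_bioset()             ]
 *
 * so the target's per-io state, dm's bookkeeping and the bio itself
 * all come from a single mempool-backed allocation.
 */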
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static int setup_indexes(struct dm_table *t)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
unsigned int total = 0;
|
|
|
|
sector_t *indexes;
|
|
|
|
|
|
|
|
/* allocate the space for *all* the indexes */
|
|
|
|
for (i = t->depth - 2; i >= 0; i--) {
|
|
|
|
t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
|
|
|
|
total += t->counts[i];
|
|
|
|
}
|
|
|
|
|
2021-04-07 13:25:22 +00:00
|
|
|
indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!indexes)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
/* set up internal nodes, bottom-up */
|
2008-02-08 02:10:04 +00:00
|
|
|
for (i = t->depth - 2; i >= 0; i--) {
|
2005-04-16 22:20:36 +00:00
|
|
|
t->index[i] = indexes;
|
|
|
|
indexes += (KEYS_PER_NODE * t->counts[i]);
|
|
|
|
setup_btree_index(i, t);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Builds the btree to index the map.
|
|
|
|
*/
|
2010-08-12 03:14:03 +00:00
|
|
|
static int dm_table_build_index(struct dm_table *t)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int r = 0;
|
|
|
|
unsigned int leaf_nodes;
|
|
|
|
|
|
|
|
/* how many indexes will the btree have? */
|
|
|
|
leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
|
|
|
|
t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
|
|
|
|
|
|
|
|
/* leaf layer has already been set up */
|
|
|
|
t->counts[t->depth - 1] = leaf_nodes;
|
|
|
|
t->index[t->depth - 1] = t->highs;
|
|
|
|
|
|
|
|
if (t->depth >= 2)
|
|
|
|
r = setup_indexes(t);
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
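A worked sizing example for the index btree built above; KEYS_PER_NODE == 7 is an assumption (the real value derives from the cache-line size earlier in this file), so treat the numbers as illustrative:

/*
 * Assume KEYS_PER_NODE == 7, so CHILDREN_PER_NODE == 8, and a table
 * with 100 targets:
 *
 *   leaf_nodes = dm_div_up(100, 7)  = 15
 *   depth      = 1 + int_log(15, 8) = 1 + 2 = 3
 *
 * setup_indexes() then sizes the internal levels bottom-up:
 *
 *   counts[2] = 15 leaves (already set from t->highs)
 *   counts[1] = dm_div_up(15, 8) = 2
 *   counts[0] = dm_div_up(2, 8)  = 1 root node
 */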
|
|
|
|
|
2021-02-01 05:10:17 +00:00
|
|
|
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
|
|
|
|
|
blk-crypto: rename blk_keyslot_manager to blk_crypto_profile
blk_keyslot_manager is misnamed because it doesn't necessarily manage
keyslots. It actually does several different things:
- Contains the crypto capabilities of the device.
- Provides functions to control the inline encryption hardware.
Originally these were just for programming/evicting keyslots;
however, new functionality (hardware-wrapped keys) will require new
functions here which are unrelated to keyslots. Moreover,
device-mapper devices already (ab)use "keyslot_evict" to pass key
eviction requests to their underlying devices even though
device-mapper devices don't have any keyslots themselves (so it
really should be "evict_key", not "keyslot_evict").
- Sometimes (but not always!) it manages keyslots. Originally it
always did, but device-mapper devices don't have keyslots
themselves, so they use a "passthrough keyslot manager" which
doesn't actually manage keyslots. This hack works, but the
terminology is unnatural. Also, some hardware doesn't have keyslots
and thus also uses a "passthrough keyslot manager" (support for such
hardware is yet to be upstreamed, but it will happen eventually).
Let's stop having keyslot managers which don't actually manage keyslots.
Instead, rename blk_keyslot_manager to blk_crypto_profile.
This is a fairly big change, since for consistency it also has to update
keyslot manager-related function names, variable names, and comments --
not just the actual struct name. However it's still a fairly
straightforward change, as it doesn't change any actual functionality.
Acked-by: Ulf Hansson <ulf.hansson@linaro.org> # For MMC
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20211018180453.40441-4-ebiggers@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2021-10-18 18:04:52 +00:00
|
|
|
struct dm_crypto_profile {
|
|
|
|
struct blk_crypto_profile profile;
|
2021-02-01 05:10:17 +00:00
|
|
|
struct mapped_device *md;
|
|
|
|
};
|
|
|
|
|
2021-02-01 05:10:18 +00:00
|
|
|
static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
|
|
|
|
sector_t start, sector_t len, void *data)
|
|
|
|
{
|
2023-03-15 18:39:03 +00:00
|
|
|
const struct blk_crypto_key *key = data;
|
2021-02-01 05:10:18 +00:00
|
|
|
|
2023-03-15 18:39:03 +00:00
|
|
|
blk_crypto_evict_key(dev->bdev, key);
|
2021-02-01 05:10:18 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When an inline encryption key is evicted from a device-mapper device, evict
|
|
|
|
* it from all the underlying devices.
|
|
|
|
*/
|
blk-crypto: rename blk_keyslot_manager to blk_crypto_profile
2021-10-18 18:04:52 +00:00
|
|
|
static int dm_keyslot_evict(struct blk_crypto_profile *profile,
|
2021-02-01 05:10:18 +00:00
|
|
|
const struct blk_crypto_key *key, unsigned int slot)
|
|
|
|
{
|
blk-crypto: rename blk_keyslot_manager to blk_crypto_profile
2021-10-18 18:04:52 +00:00
|
|
|
struct mapped_device *md =
|
|
|
|
container_of(profile, struct dm_crypto_profile, profile)->md;
|
2021-02-01 05:10:18 +00:00
|
|
|
struct dm_table *t;
|
|
|
|
int srcu_idx;
|
|
|
|
|
|
|
|
t = dm_get_live_table(md, &srcu_idx);
|
|
|
|
if (!t)
|
|
|
|
return 0;
|
2022-07-05 20:12:27 +00:00
|
|
|
|
|
|
|
for (unsigned int i = 0; i < t->num_targets; i++) {
|
|
|
|
struct dm_target *ti = dm_table_get_target(t, i);
|
|
|
|
|
2021-02-01 05:10:18 +00:00
|
|
|
if (!ti->type->iterate_devices)
|
|
|
|
continue;
|
2023-03-15 18:39:03 +00:00
|
|
|
ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
|
|
|
|
(void *)key);
|
2021-02-01 05:10:18 +00:00
|
|
|
}
|
2022-07-05 20:12:27 +00:00
|
|
|
|
2021-02-01 05:10:18 +00:00
|
|
|
dm_put_live_table(md, srcu_idx);
|
2023-03-15 18:39:03 +00:00
|
|
|
return 0;
|
2021-02-01 05:10:18 +00:00
|
|
|
}
|
|
|
|
|
blk-crypto: rename blk_keyslot_manager to blk_crypto_profile
2021-10-18 18:04:52 +00:00
|
|
|
static int
|
|
|
|
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
|
|
|
|
sector_t start, sector_t len, void *data)
|
2021-02-01 05:10:17 +00:00
|
|
|
{
|
blk-crypto: rename blk_keyslot_manager to blk_crypto_profile
2021-10-18 18:04:52 +00:00
|
|
|
struct blk_crypto_profile *parent = data;
|
|
|
|
struct blk_crypto_profile *child =
|
|
|
|
bdev_get_queue(dev->bdev)->crypto_profile;
|
2021-02-01 05:10:17 +00:00
|
|
|
|
blk-crypto: rename blk_keyslot_manager to blk_crypto_profile
blk_keyslot_manager is misnamed because it doesn't necessarily manage
keyslots. It actually does several different things:
- Contains the crypto capabilities of the device.
- Provides functions to control the inline encryption hardware.
Originally these were just for programming/evicting keyslots;
however, new functionality (hardware-wrapped keys) will require new
functions here which are unrelated to keyslots. Moreover,
device-mapper devices already (ab)use "keyslot_evict" to pass key
eviction requests to their underlying devices even though
device-mapper devices don't have any keyslots themselves (so it
really should be "evict_key", not "keyslot_evict").
- Sometimes (but not always!) it manages keyslots. Originally it
always did, but device-mapper devices don't have keyslots
themselves, so they use a "passthrough keyslot manager" which
doesn't actually manage keyslots. This hack works, but the
terminology is unnatural. Also, some hardware doesn't have keyslots
and thus also uses a "passthrough keyslot manager" (support for such
hardware is yet to be upstreamed, but it will happen eventually).
Let's stop having keyslot managers which don't actually manage keyslots.
Instead, rename blk_keyslot_manager to blk_crypto_profile.
This is a fairly big change, since for consistency it also has to update
keyslot manager-related function names, variable names, and comments --
not just the actual struct name. However it's still a fairly
straightforward change, as it doesn't change any actual functionality.
Acked-by: Ulf Hansson <ulf.hansson@linaro.org> # For MMC
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20211018180453.40441-4-ebiggers@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2021-10-18 18:04:52 +00:00
|
|
|
blk_crypto_intersect_capabilities(parent, child);
|
2021-02-01 05:10:17 +00:00
|
|
|
return 0;
|
|
|
|
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
	struct dm_crypto_profile *dmcp = container_of(profile,
						      struct dm_crypto_profile,
						      profile);
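
	/*
	 * container_of() is pure pointer arithmetic and never dereferences
	 * @profile, so computing dmcp before the NULL check below is safe.
	 */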
	if (!profile)
		return;

	blk_crypto_profile_destroy(profile);
	kfree(dmcp);
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
	dm_destroy_crypto_profile(t->crypto_profile);
	t->crypto_profile = NULL;
}

/*
 * Constructs and initializes t->crypto_profile with a crypto profile that
 * represents the common set of crypto capabilities of the devices described by
 * the dm_table. However, if the constructed crypto profile doesn't support all
 * crypto capabilities that are supported by the current mapped_device, it
 * returns an error instead, since we don't support removing crypto capabilities
 * on table changes. Finally, if the constructed crypto profile is "empty" (has
 * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
 */
static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	struct dm_crypto_profile *dmcp;
	struct blk_crypto_profile *profile;
	unsigned int i;
	bool empty_profile = true;

	dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
	if (!dmcp)
		return -ENOMEM;
	dmcp->md = t->md;
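
	/*
	 * Start from a maximally-capable profile (all modes, any DUN size)
	 * and intersect it down to what every underlying device supports.
	 */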
	profile = &dmcp->profile;
	blk_crypto_profile_init(profile, 0);
	profile->ll_ops.keyslot_evict = dm_keyslot_evict;
	profile->max_dun_bytes_supported = UINT_MAX;
	memset(profile->modes_supported, 0xFF,
	       sizeof(profile->modes_supported));

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

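		/*
		 * A target that doesn't pass through inline crypto empties
		 * the profile: intersecting with a NULL child clears every
		 * capability.
		 */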
		if (!dm_target_passes_crypto(ti->type)) {
			blk_crypto_intersect_capabilities(profile, NULL);
			break;
		}
		if (!ti->type->iterate_devices)
			continue;
		ti->type->iterate_devices(ti,
					  device_intersect_crypto_capabilities,
					  profile);
	}

	if (t->md->queue &&
	    !blk_crypto_has_capabilities(profile,
					 t->md->queue->crypto_profile)) {
		DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
		dm_destroy_crypto_profile(profile);
		return -EINVAL;
	}

	/*
	 * If the new profile doesn't actually support any crypto capabilities,
	 * we may as well represent it with a NULL profile.
	 */
	for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
		if (profile->modes_supported[i]) {
			empty_profile = false;
			break;
		}
	}

	if (empty_profile) {
		dm_destroy_crypto_profile(profile);
		profile = NULL;
	}

	/*
	 * t->crypto_profile is only set temporarily while the table is being
	 * set up, and it gets set to NULL after the profile has been
	 * transferred to the request_queue.
	 */
	t->crypto_profile = profile;

	return 0;
}
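
/*
 * Transfers ownership of the profile constructed above to the request_queue:
 * either the queue has no profile yet and simply takes this one, or its
 * existing capabilities are widened to match. Either way t->crypto_profile
 * ends up NULL, per the comment at the end of
 * dm_table_construct_crypto_profile().
 */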
static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
	if (!t->crypto_profile)
		return;

	/* Make the crypto profile less restrictive. */
	if (!q->crypto_profile) {
		blk_crypto_register(t->crypto_profile, q);
	} else {
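		/*
		 * The queue already owns a profile; since the new table's
		 * capabilities are a superset (enforced at construct time),
		 * widen the queue's profile and free the table's copy.
		 */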
|
		blk_crypto_update_capabilities(q->crypto_profile,
					       t->crypto_profile);
		dm_destroy_crypto_profile(t->crypto_profile);
	}
	t->crypto_profile = NULL;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static int dm_table_construct_crypto_profile(struct dm_table *t)
{
	return 0;
}

void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}

static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}

static void dm_update_crypto_profile(struct request_queue *q,
				     struct dm_table *t)
{
}

#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */

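The #else block above is the standard kernel idiom for compiling a feature out: the same entry points stay defined as empty stubs so callers never need their own #ifdef. A tiny standalone sketch of the idiom, with a made-up FEATURE_X and do_feature_x():

#include <stdio.h>

#ifdef FEATURE_X
static int do_feature_x(int arg)
{
	return 2 * arg;	/* real work when the feature is built in */
}
#else
static int do_feature_x(int arg)
{
	(void)arg;
	return 0;	/* stub: callers compile and behave sanely */
}
#endif

int main(void)
{
	printf("do_feature_x(21) = %d\n", do_feature_x(21));
	return 0;
}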
/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_construct_crypto_profile(t);
	if (r) {
		DMERR("could not construct crypto profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

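The function above is a classic staged initialization: each step runs only if every earlier step succeeded, and the first failure is returned unchanged. The same shape in a standalone sketch, where the three stage_*() helpers are hypothetical stand-ins for the real steps:

#include <stdio.h>

static int stage_type(void)  { return 0; }	/* pretend each stage succeeds */
static int stage_index(void) { return 0; }
static int stage_pools(void) { return 0; }

static int setup_all(void)
{
	int r;

	r = stage_type();	/* later stages depend on earlier ones */
	if (r)
		return r;

	r = stage_index();
	if (r)
		return r;

	return stage_pools();	/* last stage's result is the overall result */
}

int main(void)
{
	printf("setup_all() = %d\n", setup_all());
	return 0;
}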
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

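Both registration and delivery above take the same mutex, so the callback and its context can never be swapped out while an event is being delivered. A user-space sketch of the identical pattern with pthreads (the names event_fn/event_ctx mirror the fields above but are otherwise illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*event_fn)(void *);
static void *event_ctx;

/* Register (or clear, with fn == NULL) the callback under the lock. */
static void set_event_callback(void (*fn)(void *), void *ctx)
{
	pthread_mutex_lock(&event_lock);
	event_fn = fn;
	event_ctx = ctx;
	pthread_mutex_unlock(&event_lock);
}

/* Deliver an event; the lock keeps fn/ctx coherent while it runs. */
static void fire_event(void)
{
	pthread_mutex_lock(&event_lock);
	if (event_fn)
		event_fn(event_ctx);
	pthread_mutex_unlock(&event_lock);
}

static void on_event(void *ctx)
{
	printf("event: %s\n", (const char *)ctx);
}

int main(void)
{
	set_event_callback(on_event, "table changed");
	fire_event();
	return 0;
}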
inline sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer for NULL
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	if (unlikely(sector >= dm_table_get_size(t)))
		return NULL;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

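The walk above descends a small n-ary index whose nodes each hold KEYS_PER_NODE "highest mapped sector" keys, and the final (n, k) position indexes straight into the target array. Functionally it answers one question: which is the first target whose high sector is >= the lookup sector? A standalone sketch of that question over a flat sorted array (the two-target layout is invented for the demo):

#include <stdio.h>

typedef unsigned long long sector_t;

/* highs[i] is the last sector mapped by target i; the array is sorted. */
static int find_target(const sector_t *highs, int num_targets, sector_t sector)
{
	int i;

	if (num_targets == 0 || sector > highs[num_targets - 1])
		return -1;	/* beyond end of device */

	for (i = 0; i < num_targets; i++)
		if (highs[i] >= sector)
			break;
	return i;
}

int main(void)
{
	sector_t highs[] = { 1023, 4095 };	/* target 0: 0..1023, target 1: 1024..4095 */

	printf("sector 100  -> target %d\n", find_target(highs, 2, 100));
	printf("sector 2048 -> target %d\n", find_target(highs, 2, 2048));
	printf("sector 9999 -> target %d\n", find_target(highs, 2, 9999));
	return 0;
}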
/*
 * type->iterate_devices() should be called when the sanity check needs to
 * iterate and check all underlying data devices. iterate_devices() will
 * iterate all underlying data devices until it encounters a non-zero return
 * code, returned either by the input iterate_devices_callout_fn or by
 * iterate_devices() itself internally.
 *
 * For some target types (e.g. dm-stripe), one call of iterate_devices() may
 * iterate multiple underlying devices internally, in which case a non-zero
 * return code from iterate_devices_callout_fn will stop the iteration
 * early.
 *
 * Cases requiring _any_ underlying device to support some kind of attribute
 * should use an iteration structure like dm_table_any_dev_attr(), or call
 * it directly. @func should handle the semantics of positive examples, e.g.
 * capable of something.
 *
 * Cases requiring _all_ underlying devices to support some kind of attribute
 * should use an iteration structure like dm_table_supports_nowait() or
 * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
 * uses an @anti_func handling the semantics of counter examples, e.g. not
 * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
 */
static bool dm_table_any_dev_attr(struct dm_table *t,
				  iterate_devices_callout_fn func, void *data)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, func, data))
			return true;
	}

	return false;
}

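The any/all convention in the comment above deserves a concrete rendering: an "all devices have property P" check is implemented as "no device is a counter-example to P". A standalone sketch with an invented device list and predicate:

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool supports_discard;
};

typedef bool (*dev_pred)(const struct dev *d);

static bool any_dev(const struct dev *devs, int n, dev_pred pred)
{
	for (int i = 0; i < n; i++)
		if (pred(&devs[i]))
			return true;
	return false;
}

/* The counter-example predicate: "not capable". */
static bool not_discard_capable(const struct dev *d)
{
	return !d->supports_discard;
}

/* All devices support discard iff no device is a counter-example. */
static bool all_support_discard(const struct dev *devs, int n)
{
	return !any_dev(devs, n, not_discard_capable);
}

int main(void)
{
	struct dev devs[] = { { true }, { false } };

	printf("all support discard: %d\n", all_support_discard(devs, 2));
	printf("first alone: %d\n", all_support_discard(devs, 1));
	return 0;
}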
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned int *num_devices = data;

	(*num_devices)++;

	return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		unsigned int num_devices = 0;

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	bool *zoned = data;

	return bdev_is_zoned(dev->bdev) != *zoned;
}

dm error: Add support for zoned block devices
dm-error is used in several test cases in the xfstests test suite to
check the handling of IO errors in file systems. However, with several
file systems getting native support for zoned block devices (e.g.
btrfs and f2fs), dm-error's lack of zoned block device support creates
problems as the file system attempts executing zone commands (e.g. a
zone append operation) against a dm-error non-zoned block device,
which causes various issues in the block layer (e.g. WARN_ON
triggers).
This commit adds support for zoned block devices to dm-error, allowing
a DM device table containing an error target to be exposed as a zoned
block device (if all targets have a compatible zoned model support and
mapping). This is done as follows:
1) Allow passing 2 arguments to an error target, similar to dm-linear:
a backing device and a start sector. These arguments are optional and
dm-error retains its characteristics if the arguments are not
specified.
2) Implement the iterate_devices method so that dm-core can normally
check the zone support and restrictions (e.g. zone alignment of the
targets). When the backing device arguments are not specified, the
iterate_devices method never calls the fn() argument.
When no backing device is specified, as before, we assume that the DM
device is not zoned. When the backing device arguments are specified,
the zoned model of the DM device will depend on the backing device
type:
- If the backing device is zoned and its model and mapping is
compatible with other targets of the device, the resulting device
will be zoned, with the dm-error mapped portion always returning
errors (similar to the default non-zoned case).
- If the backing device is not zoned, then the DM device will not be
either.
This zone support for dm-error requires the definition of a functional
report_zones operation so that dm_revalidate_zones() can operate
correctly and resources for emulating zone append operations
initialized. This is necessary for cases where dm-error is used to
partially map a device and have an overall correct handling of zone
append. This means that dm-error does not fail report zones operations.
Two changes that are not obvious are included to avoid issues:
1) dm_table_supports_zoned_model() is changed to directly check if
the backing device of a wildcard target (= dm-error target) is
zoned. Otherwise, we wouldn't be able to catch the invalid setup of
dm-error without a backing device (non zoned case) being combined
with zoned targets.
2) dm_table_supports_dax() is modified to return false if the wildcard
target is found. Otherwise, when dm-error is set without a backing
device, we end up with a NULL pointer dereference in
set_dax_synchronous (dax_dev is NULL). This is consistent with the
current behavior because dm_table_supports_dax() always returned
false for targets that do not define the iterate_devices method.
Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Tested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
2023-10-26 05:12:05 +00:00
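Point 2 above turns on one subtlety: when dm-error has no backing device, its iterate_devices method never invokes the callout at all, so generic "check every device" logic sees an empty set. A standalone sketch of such a conditional iterator (all types and names here are illustrative, not the dm-error implementation):

#include <stdio.h>

struct dev {
	const char *name;
};

typedef int (*callout_fn)(struct dev *d, void *data);

struct target {
	struct dev *backing;	/* NULL when no backing device was given */
};

/* Invoke fn only when a backing device exists. */
static int iterate_devices(struct target *t, callout_fn fn, void *data)
{
	if (!t->backing)
		return 0;	/* fn is never called: nothing to report */
	return fn(t->backing, data);
}

static int count_cb(struct dev *d, void *data)
{
	(void)d;
	(*(int *)data)++;
	return 0;
}

int main(void)
{
	struct dev sda = { "sda" };
	struct target with = { &sda }, without = { NULL };
	int n = 0;

	iterate_devices(&with, count_cb, &n);
	iterate_devices(&without, count_cb, &n);
	printf("devices seen: %d\n", n);	/* prints 1 */
	return 0;
}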
static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	return bdev_is_zoned(dev->bdev);
}

/*
 * Check the device zoned model based on the target feature flag. If the target
 * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
 * also accepted but all devices must have the same zoned model. If the target
 * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
 * zoned model with all zoned devices having the same zone size.
 */
static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/*
		 * For the wildcard target (dm-error), if we do not have a
		 * backing device, we must always return false. If we have a
		 * backing device, the result must depend on checking zoned
		 * model, like for any other target. So for this, check directly
		 * if the target backing device is zoned as we get "false" when
		 * dm-error was set without a backing device.
		 */
		if (dm_target_is_wildcard(ti->type) &&
		    !ti->type->iterate_devices(ti, device_is_zoned_model, NULL))
			return false;

		if (dm_target_supports_zoned_hm(ti->type)) {
			if (!ti->type->iterate_devices ||
			    ti->type->iterate_devices(ti, device_not_zoned,
						      &zoned))
				return false;
		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
			if (zoned)
				return false;
		}
	}

	return true;
}

static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	unsigned int *zone_sectors = data;

	if (!bdev_is_zoned(dev->bdev))
		return 0;
	return bdev_zone_sectors(dev->bdev) != *zone_sectors;
}

/*
 * Check consistency of zoned model and zone sectors across all targets. For
 * zone sectors, if the destination device is a zoned block device, it shall
 * have the specified zone_sectors.
 */
static int validate_hardware_zoned(struct dm_table *t, bool zoned,
				   unsigned int zone_sectors)
{
	if (!zoned)
		return 0;

	if (!dm_table_supports_zoned(t, zoned)) {
		DMERR("%s: zoned model is not consistent across all devices",
		      dm_device_name(t->md));
		return -EINVAL;
	}

	/* Check zone size validity and compatibility */
	if (!zone_sectors || !is_power_of_2(zone_sectors))
		return -EINVAL;

	if (dm_table_any_dev_attr(t, device_not_matches_zone_sectors, &zone_sectors)) {
		DMERR("%s: zone sectors is not consistent across all zoned devices",
		      dm_device_name(t->md));
		return -EINVAL;
	}

	return 0;
}

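The zone-size test above rejects anything that is zero or not a power of two; is_power_of_2() is the kernel's version of the classic single-bit check. The same check as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

/* A power of two has exactly one set bit, so x & (x - 1) must be 0. */
static bool is_pow2(unsigned int x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
	printf("%d %d %d\n", is_pow2(524288), is_pow2(0), is_pow2(3));	/* 1 0 0 */
	return 0;
}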
/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *t,
			      struct queue_limits *limits)
{
	struct queue_limits ti_limits;
	unsigned int zone_sectors = 0;
	bool zoned = false;

	dm_set_stacking_limits(limits);

	t->integrity_supported = true;
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_passes_integrity(ti->type))
			t->integrity_supported = false;
	}

	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		dm_set_stacking_limits(&ti_limits);

		if (!ti->type->iterate_devices) {
			/* Set I/O hints portion of queue limits */
			if (ti->type->io_hints)
				ti->type->io_hints(ti, &ti_limits);
			goto combine_limits;
		}

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		if (!zoned && (ti_limits.features & BLK_FEAT_ZONED)) {
			/*
			 * After stacking all limits, validate all devices
			 * in table support this zoned model and zone sectors.
			 */
			zoned = (ti_limits.features & BLK_FEAT_ZONED);
			zone_sectors = ti_limits.chunk_sectors;
		}

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device (start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(t->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);

		if (t->integrity_supported ||
		    dm_target_has_integrity(ti->type)) {
			if (!queue_limits_stack_integrity(limits, &ti_limits)) {
				DMWARN("%s: adding target device (start sect %llu len %llu) "
				       "disabled integrity support due to incompatibility",
				       dm_device_name(t->md),
				       (unsigned long long) ti->begin,
				       (unsigned long long) ti->len);
				t->integrity_supported = false;
			}
		}
	}

	/*
	 * Verify that the zoned model and zone sectors, as determined before
	 * any .io_hints override, are the same across all devices in the table.
	 * - this is especially relevant if .io_hints is emulating a disk-managed
	 *   zoned model on host-managed zoned block devices.
	 * BUT...
	 */
	if (limits->features & BLK_FEAT_ZONED) {
		/*
		 * ...IF the above limits stacking determined a zoned model
		 * validate that all of the table's devices conform to it.
		 */
		zoned = limits->features & BLK_FEAT_ZONED;
		zone_sectors = limits->chunk_sectors;
	}
	if (validate_hardware_zoned(t, zoned, zone_sectors))
		return -EINVAL;

	return validate_hardware_logical_block_alignment(t, limits);
}

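Limit stacking starts from the most permissive values and lets every component only tighten them, so the table never advertises more than its weakest device can do. A deliberately simplified standalone sketch of that merge over two invented fields (the real blk_stack_limits() covers many more fields plus alignment rules):

#include <stdio.h>

struct limits {
	unsigned int max_sectors;		/* upper bound per I/O */
	unsigned int logical_block_size;	/* addressing granularity */
};

/* Start permissive so real devices can only restrict the result. */
static void set_stacking_limits(struct limits *l)
{
	l->max_sectors = ~0u;
	l->logical_block_size = 512;
}

static void stack_limits(struct limits *top, const struct limits *dev)
{
	if (dev->max_sectors < top->max_sectors)
		top->max_sectors = dev->max_sectors;	/* keep the smallest cap */
	if (dev->logical_block_size > top->logical_block_size)
		top->logical_block_size = dev->logical_block_size;	/* coarsest wins */
}

int main(void)
{
	struct limits t, a = { 2048, 512 }, b = { 1024, 4096 };

	set_stacking_limits(&t);
	stack_limits(&t, &a);
	stack_limits(&t, &b);
	printf("max_sectors=%u lbs=%u\n", t.max_sectors, t.logical_block_size);
	return 0;
}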
/*
 * Check if a target requires flush support even if none of the underlying
 * devices need it (e.g. to persist target-specific metadata).
 */
static bool dm_table_supports_flush(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->num_flush_bios && ti->flush_supported)
			return true;
	}

	return false;
}

static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	struct dax_device *dax_dev = dev->dax_dev;

	if (!dax_dev)
		return false;

	if (dax_write_cache_enabled(dax_dev))
		return true;
	return false;
}

static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !q->limits.max_write_zeroes_sectors;
}

static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}

static bool dm_table_supports_nowait(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!dm_target_supports_nowait(ti->type))
			return false;
	}

	return true;
}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	return !bdev_max_discard_sectors(dev->bdev);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}

static int device_not_secure_erase_capable(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	return !bdev_max_secure_erase_sectors(dev->bdev);
}

static bool dm_table_supports_secure_erase(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->num_secure_erase_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL))
			return false;
	}

	return true;
}

dm: introduce zone append emulation
For zoned targets that cannot support zone append operations, implement
an emulation using regular write operations. If the original BIO
submitted by the user is a zone append operation, change its clone into
a regular write operation directed at the target zone write pointer
position.
To do so, an array of write pointer offsets (write pointer position
relative to the start of a zone) is added to struct mapped_device. All
operations that modify a sequential zone write pointer (writes, zone
reset, zone finish and zone append) are intercepted in __map_bio() and
processed using the new function dm_zone_map_bio().
Detection of the target ability to natively support zone append
operations is done from dm_table_set_restrictions() by calling the
function dm_set_zones_restrictions(). A target that does not support
zone append operation, either by explicitly declaring it using the new
struct dm_target field zone_append_not_supported, or because the device
table contains a non-zoned device, has its mapped device marked with the
new flag DMF_ZONE_APPEND_EMULATED. The helper function
dm_emulate_zone_append() is introduced to test a mapped device for this
new flag.
Atomicity of the zones write pointer tracking and updates is done using
a zone write locking mechanism based on a bitmap. This is similar to
the block layer method but based on BIOs rather than struct request.
A zone write lock is taken in dm_zone_map_bio() for any clone BIO with
an operation type that changes the BIO target zone write pointer
position. The zone write lock is released if the clone BIO is failed
before submission or when dm_zone_endio() is called when the clone BIO
completes.
The zone write lock bitmap of the mapped device, together with a bitmap
indicating zone types (conv_zones_bitmap) and the write pointer offset
array (zwp_offset) are allocated and initialized with a full device zone
report in dm_set_zones_restrictions() using the function
dm_revalidate_zones().
For failed operations that may have modified a zone write pointer, the
zone write pointer offset is marked as invalid in dm_zone_endio().
Zones with an invalid write pointer offset are checked and the write
pointer updated using an internal report zone operation when the
faulty zone is accessed again by the user.
All functions added for this emulation have a minimal overhead for
zoned targets natively supporting zone append operations. Regular
device targets are also not affected. The added code also does not
impact builds with CONFIG_BLK_DEV_ZONED disabled by stubbing out all
dm zone related functions.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
2021-05-25 21:25:00 +00:00
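The core of the emulation is per-zone bookkeeping: remember each zone's write pointer offset, rewrite an incoming append as a plain write at that offset, and advance the pointer afterwards. A standalone sketch reduced to that single mechanism (one array stands in for the zwp_offset state; locking and error recovery are omitted):

#include <stdio.h>

typedef unsigned long long sector_t;

#define ZONE_SECTORS 256ULL
#define NR_ZONES 4

/* Write pointer offset, relative to the zone start, for each zone. */
static sector_t zwp_offset[NR_ZONES];

/*
 * Emulate a zone append of len sectors: issue it as a regular write at
 * the current write pointer and return the sector actually written,
 * which is what a native zone append reports back to the submitter.
 */
static long long emulated_zone_append(unsigned int zone, sector_t len)
{
	sector_t wp;

	if (zone >= NR_ZONES)
		return -1;	/* invalid zone */

	wp = zwp_offset[zone];
	if (wp + len > ZONE_SECTORS)
		return -1;	/* zone full: append would overflow it */

	zwp_offset[zone] += len;	/* advance after the "write" */
	return (long long)(zone * ZONE_SECTORS + wp);
}

int main(void)
{
	printf("append -> sector %lld\n", emulated_zone_append(1, 8));	/* 256 */
	printf("append -> sector %lld\n", emulated_zone_append(1, 8));	/* 264 */
	return 0;
}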
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits)
{
	int r;

	if (!dm_table_supports_nowait(t))
		limits->features &= ~BLK_FEAT_NOWAIT;

	/*
	 * The current polling implementation does not support request based
	 * stacking.
	 */
	if (!__table_type_bio_based(t->type))
		limits->features &= ~BLK_FEAT_POLL;

	if (!dm_table_supports_discards(t)) {
		limits->max_hw_discard_sectors = 0;
		limits->discard_granularity = 0;
		limits->discard_alignment = 0;
	}

	if (!dm_table_supports_write_zeroes(t))
		limits->max_write_zeroes_sectors = 0;

	if (!dm_table_supports_secure_erase(t))
		limits->max_secure_erase_sectors = 0;

	if (dm_table_supports_flush(t))
		limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;

	if (dm_table_supports_dax(t, device_not_dax_capable)) {
		limits->features |= BLK_FEAT_DAX;
		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
			set_dax_synchronous(t->md->dax_dev);
	} else
		limits->features &= ~BLK_FEAT_DAX;

	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
		dax_write_cache(t->md->dax_dev, true);

	/* For a zoned table, setup the zone related queue attributes. */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    (limits->features & BLK_FEAT_ZONED)) {
		r = dm_set_zones_restrictions(t, q, limits);
		if (r)
			return r;
	}

	r = queue_limits_set(q, limits);
	if (r)
		return r;

	/*
	 * Now that the limits are set, check the zones mapped by the table
	 * and setup the resources for zone append emulation if necessary.
	 */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    (limits->features & BLK_FEAT_ZONED)) {
		r = dm_revalidate_zones(t, q);
		if (r)
			return r;
	}

blk-crypto: rename blk_keyslot_manager to blk_crypto_profile
blk_keyslot_manager is misnamed because it doesn't necessarily manage
keyslots. It actually does several different things:
- Contains the crypto capabilities of the device.
- Provides functions to control the inline encryption hardware.
Originally these were just for programming/evicting keyslots;
however, new functionality (hardware-wrapped keys) will require new
functions here which are unrelated to keyslots. Moreover,
device-mapper devices already (ab)use "keyslot_evict" to pass key
eviction requests to their underlying devices even though
device-mapper devices don't have any keyslots themselves (so it
really should be "evict_key", not "keyslot_evict").
- Sometimes (but not always!) it manages keyslots. Originally it
always did, but device-mapper devices don't have keyslots
themselves, so they use a "passthrough keyslot manager" which
doesn't actually manage keyslots. This hack works, but the
terminology is unnatural. Also, some hardware doesn't have keyslots
and thus also uses a "passthrough keyslot manager" (support for such
hardware is yet to be upstreamed, but it will happen eventually).
Let's stop having keyslot managers which don't actually manage keyslots.
Instead, rename blk_keyslot_manager to blk_crypto_profile.
This is a fairly big change, since for consistency it also has to update
keyslot-manager-related function names, variable names, and comments,
not just the actual struct name. However, it is still a straightforward
change, as it doesn't alter any actual functionality.
Acked-by: Ulf Hansson <ulf.hansson@linaro.org> # For MMC
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20211018180453.40441-4-ebiggers@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2021-10-18 18:04:52 +00:00
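A minimal standalone sketch of the renamed abstraction (field and
function names here are illustrative, not the exact kernel definitions):
the profile carries the crypto capabilities and low-level operations,
and a dm-style passthrough profile simply has zero keyslots while still
providing an evict hook:

#include <stdio.h>

struct crypto_profile; /* was: struct blk_keyslot_manager */

struct crypto_ll_ops {
	int (*keyslot_program)(struct crypto_profile *p, int slot);
	int (*keyslot_evict)(struct crypto_profile *p, int slot);
};

struct crypto_profile {
	unsigned int num_slots;             /* 0 => passthrough profile */
	const struct crypto_ll_ops *ll_ops; /* low-level operations     */
};

/* A dm-like passthrough device forwards key eviction downward. */
static int dm_keyslot_evict(struct crypto_profile *p, int slot)
{
	(void)p;
	(void)slot;
	return 0;
}

static const struct crypto_ll_ops dm_crypto_ops = {
	.keyslot_evict = dm_keyslot_evict,
};

int main(void)
{
	struct crypto_profile dm_profile = {
		.num_slots = 0, /* no keyslots to manage */
		.ll_ops = &dm_crypto_ops,
	};

	printf("passthrough profile: %u keyslots\n", dm_profile.num_slots);
	return 0;
}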
|
|
|
dm_update_crypto_profile(q, t);
|
dm: introduce zone append emulation
For zoned targets that cannot support zone append operations, implement
an emulation using regular write operations. If the original BIO
submitted by the user is a zone append operation, change its clone into
a regular write operation directed at the target zone write pointer
position.
2021-05-25 21:25:00 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
struct list_head *dm_table_get_devices(struct dm_table *t)
|
|
|
|
{
|
|
|
|
return &t->devices;
|
|
|
|
}
|
|
|
|
|
2023-06-08 11:02:55 +00:00
|
|
|
blk_mode_t dm_table_get_mode(struct dm_table *t)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
return t->mode;
|
|
|
|
}
|
2011-08-02 11:32:04 +00:00
|
|
|
EXPORT_SYMBOL(dm_table_get_mode);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2014-10-29 00:13:31 +00:00
|
|
|
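/* Selects which per-target suspend hook suspend_targets() invokes. */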
enum suspend_mode {
|
|
|
|
PRESUSPEND,
|
|
|
|
PRESUSPEND_UNDO,
|
|
|
|
POSTSUSPEND,
|
|
|
|
};
|
|
|
|
|
|
|
|
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2017-04-27 17:11:21 +00:00
|
|
|
lockdep_assert_held(&t->md->suspend_lock);
|
|
|
|
|
2022-07-05 20:12:27 +00:00
|
|
|
for (unsigned int i = 0; i < t->num_targets; i++) {
|
|
|
|
struct dm_target *ti = dm_table_get_target(t, i);
|
|
|
|
|
2014-10-29 00:13:31 +00:00
|
|
|
switch (mode) {
|
|
|
|
case PRESUSPEND:
|
|
|
|
if (ti->type->presuspend)
|
|
|
|
ti->type->presuspend(ti);
|
|
|
|
break;
|
|
|
|
case PRESUSPEND_UNDO:
|
|
|
|
if (ti->type->presuspend_undo)
|
|
|
|
ti->type->presuspend_undo(ti);
|
|
|
|
break;
|
|
|
|
case POSTSUSPEND:
|
2005-04-16 22:20:36 +00:00
|
|
|
if (ti->type->postsuspend)
|
|
|
|
ti->type->postsuspend(ti);
|
2014-10-29 00:13:31 +00:00
|
|
|
break;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void dm_table_presuspend_targets(struct dm_table *t)
|
|
|
|
{
|
2005-07-29 04:15:57 +00:00
|
|
|
if (!t)
|
|
|
|
return;
|
|
|
|
|
2014-10-29 00:13:31 +00:00
|
|
|
suspend_targets(t, PRESUSPEND);
|
|
|
|
}
|
|
|
|
|
|
|
|
void dm_table_presuspend_undo_targets(struct dm_table *t)
|
|
|
|
{
|
|
|
|
if (!t)
|
|
|
|
return;
|
|
|
|
|
|
|
|
suspend_targets(t, PRESUSPEND_UNDO);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void dm_table_postsuspend_targets(struct dm_table *t)
|
|
|
|
{
|
2005-07-29 04:15:57 +00:00
|
|
|
if (!t)
|
|
|
|
return;
|
|
|
|
|
2014-10-29 00:13:31 +00:00
|
|
|
suspend_targets(t, POSTSUSPEND);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-10-03 08:15:36 +00:00
|
|
|
int dm_table_resume_targets(struct dm_table *t)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2022-07-05 20:12:27 +00:00
|
|
|
unsigned int i;
|
|
|
|
int r = 0;
|
2006-10-03 08:15:36 +00:00
|
|
|
|
2017-04-27 17:11:21 +00:00
|
|
|
lockdep_assert_held(&t->md->suspend_lock);
|
|
|
|
|
2006-10-03 08:15:36 +00:00
|
|
|
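/* First pass: every target's preresume hook must succeed before any resume. */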
for (i = 0; i < t->num_targets; i++) {
|
2022-07-05 20:12:27 +00:00
|
|
|
struct dm_target *ti = dm_table_get_target(t, i);
|
2006-10-03 08:15:36 +00:00
|
|
|
|
|
|
|
if (!ti->type->preresume)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
r = ti->type->preresume(ti);
|
2013-10-24 18:10:29 +00:00
|
|
|
if (r) {
|
|
|
|
DMERR("%s: %s: preresume failed, error = %d",
|
|
|
|
dm_device_name(t->md), ti->type->name, r);
|
2006-10-03 08:15:36 +00:00
|
|
|
return r;
|
2013-10-24 18:10:29 +00:00
|
|
|
}
|
2006-10-03 08:15:36 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
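/* All preresume hooks succeeded; now resume each target. */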
for (i = 0; i < t->num_targets; i++) {
|
2022-07-05 20:12:27 +00:00
|
|
|
struct dm_target *ti = dm_table_get_target(t, i);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (ti->type->resume)
|
|
|
|
ti->type->resume(ti);
|
|
|
|
}
|
2006-10-03 08:15:36 +00:00
|
|
|
|
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
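Taken together, the helpers above give the DM core a fixed hook ordering
around a suspend/resume cycle. A minimal standalone model of that
ordering (the hook table and its names are illustrative, not the kernel
API):

struct target_hooks {
	void (*presuspend)(void);
	void (*presuspend_undo)(void);
	void (*postsuspend)(void);
	int  (*preresume)(void);
	void (*resume)(void);
};

/*
 * Mirror dm_table_resume_targets(): preresume for every target must
 * succeed before any target's resume hook runs.
 */
static int resume_all(struct target_hooks *t, int n)
{
	for (int i = 0; i < n; i++) {
		if (t[i].preresume && t[i].preresume())
			return -1;
	}
	for (int i = 0; i < n; i++) {
		if (t[i].resume)
			t[i].resume();
	}
	return 0;
}

static void noop(void) { }
static int ok(void) { return 0; }

int main(void)
{
	struct target_hooks targets[2] = {
		{ .presuspend = noop, .postsuspend = noop,
		  .preresume = ok, .resume = noop },
		{ .resume = noop },
	};

	/* Suspend: presuspend all targets, then postsuspend all targets. */
	for (int i = 0; i < 2; i++)
		if (targets[i].presuspend)
			targets[i].presuspend();
	for (int i = 0; i < 2; i++)
		if (targets[i].postsuspend)
			targets[i].postsuspend();

	return resume_all(targets, 2);
}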
|
|
|
|
|
2006-03-27 09:17:54 +00:00
|
|
|
struct mapped_device *dm_table_get_md(struct dm_table *t)
|
|
|
|
{
|
|
|
|
return t->md;
|
|
|
|
}
|
2011-08-02 11:32:04 +00:00
|
|
|
EXPORT_SYMBOL(dm_table_get_md);
|
2006-03-27 09:17:54 +00:00
|
|
|
|
2018-10-09 20:13:42 +00:00
|
|
|
const char *dm_table_device_name(struct dm_table *t)
|
|
|
|
{
|
|
|
|
return dm_device_name(t->md);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dm_table_device_name);
|
|
|
|
|
2014-02-28 14:33:43 +00:00
|
|
|
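/*
 * Kick the mapped device's blk-mq queue: only request-based DM has
 * hardware queues to run; for bio-based tables this is a no-op.
 */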
void dm_table_run_md_queue_async(struct dm_table *t)
|
|
|
|
{
|
|
|
|
if (!dm_table_request_based(t))
|
|
|
|
return;
|
|
|
|
|
2020-09-19 17:09:11 +00:00
|
|
|
if (t->md->queue)
|
|
|
|
blk_mq_run_hw_queues(t->md->queue, true);
|
2014-02-28 14:33:43 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dm_table_run_md_queue_async);
|
|
|
|
|