Merge tag 'for-5.10/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM's bio splitting changes that were made during v5.9. This
   restores splitting in terms of varied per-target ti->max_io_len
   rather than using block core's single stacked 'chunk_sectors' limit.

 - Like DM crypt, update DM integrity to not use crypto drivers that
   have CRYPTO_ALG_ALLOCATES_MEMORY set.

 - Fix DM writecache target's argument parsing and status display.

 - Remove needless BUG() from dm writecache's persistent_memory_claim().

 - Remove old gcc workaround in DM cache target's block_div() for ARM
   link errors now that gcc >= 4.9 is required.

 - Fix RCU locking in dm_blk_report_zones and dm_dax_zero_page_range.

 - Remove old, and now frowned upon, BUG_ON(in_interrupt()) in
   dm_table_event().

 - Remove invalid sparse annotations from dm_prepare_ioctl() and
   dm_unprepare_ioctl().

* tag 'for-5.10/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: remove invalid sparse __acquires and __releases annotations
  dm: fix double RCU unlock in dm_dax_zero_page_range() error path
  dm: fix IO splitting
  dm writecache: remove BUG() and fail gracefully instead
  dm table: Remove BUG_ON(in_interrupt())
  dm: fix bug with RCU locking in dm_blk_report_zones
  Revert "dm cache: fix arm link errors with inline"
  dm writecache: fix the maximum number of arguments
  dm writecache: advance the number of arguments when reporting max_age
  dm integrity: don't use drivers that have CRYPTO_ALG_ALLOCATES_MEMORY
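The first item is the heart of the series: DM once again splits bios at each target's own ti->max_io_len boundary instead of at a single queue-wide chunk_sectors value. As a standalone illustration of the boundary arithmetic involved (ordinary userspace C, not kernel code; max_sectors_at() is a made-up helper name), the power-of-2 case reduces to "sectors remaining until the next chunk boundary", which is also what blk_max_size_offset() computes on its fast path:

#include <assert.h>

typedef unsigned long long sector_t;

/* Sectors allowed at 'offset' before the next chunk_sectors-aligned
 * boundary, for a power-of-2 chunk size. */
static unsigned int max_sectors_at(sector_t offset, unsigned int chunk_sectors)
{
	return chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
}

int main(void)
{
	/* hypothetical target that wants splits every 8 sectors */
	assert(max_sectors_at(0, 8) == 8);	/* aligned: whole chunk fits */
	assert(max_sectors_at(5, 8) == 3);	/* mid-chunk: split at sector 8 */
	assert(max_sectors_at(13, 8) == 3);	/* boundary at sector 16 */
	return 0;
}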
@@ -144,7 +144,7 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
 static inline unsigned get_max_io_size(struct request_queue *q,
 					struct bio *bio)
 {
-	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
 	unsigned max_sectors = sectors;
 	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
 	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
@@ -712,10 +712,6 @@ static bool block_size_is_power_of_two(struct cache *cache)
 	return cache->sectors_per_block_shift >= 0;
 }

-/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
-#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
-__always_inline
-#endif
 static dm_block_t block_div(dm_block_t b, uint32_t n)
 {
 	do_div(b, n);
@@ -3462,7 +3462,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
 	int r;

 	if (a->alg_string) {
-		*hash = crypto_alloc_shash(a->alg_string, 0, 0);
+		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
 		if (IS_ERR(*hash)) {
 			*error = error_alg;
 			r = PTR_ERR(*hash);
@@ -3519,7 +3519,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 	struct journal_completion comp;

 	comp.ic = ic;
-	ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
+	ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
 	if (IS_ERR(ic->journal_crypt)) {
 		*error = "Invalid journal cipher";
 		r = PTR_ERR(ic->journal_crypt);
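A note on the two dm-integrity hunks above: crypto_alloc_shash() and crypto_alloc_skcipher() take (name, type, mask), and a flag that is set in 'mask' but clear in 'type' must also be clear in the chosen implementation. Passing CRYPTO_ALG_ALLOCATES_MEMORY as the mask therefore filters out drivers that may allocate memory in the I/O path. A minimal kernel-style sketch of that calling convention ("hmac(sha256)" is just an example algorithm and the helper name is made up; the hunks above use the user-supplied alg_string instead):

#include <crypto/hash.h>	/* crypto_alloc_shash(), struct crypto_shash */
#include <linux/crypto.h>	/* CRYPTO_ALG_ALLOCATES_MEMORY */

/* Sketch: request an HMAC transform while excluding any implementation
 * flagged CRYPTO_ALG_ALLOCATES_MEMORY; the caller still checks IS_ERR()
 * on the result, as the hunks above do. */
static struct crypto_shash *alloc_non_allocating_hmac(void)
{
	return crypto_alloc_shash("hmac(sha256)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}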
@@ -18,7 +18,6 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
-#include <linux/lcm.h>
 #include <linux/blk-mq.h>
 #include <linux/mount.h>
 #include <linux/dax.h>
@@ -1247,12 +1246,6 @@ void dm_table_event_callback(struct dm_table *t,

 void dm_table_event(struct dm_table *t)
 {
-	/*
-	 * You can no longer call dm_table_event() from interrupt
-	 * context, use a bottom half instead.
-	 */
-	BUG_ON(in_interrupt());
-
 	mutex_lock(&_event_lock);
 	if (t->event_fn)
 		t->event_fn(t->event_context);
@@ -1455,10 +1448,6 @@ int dm_calculate_queue_limits(struct dm_table *table,
 			zone_sectors = ti_limits.chunk_sectors;
 		}

-		/* Stack chunk_sectors if target-specific splitting is required */
-		if (ti->max_io_len)
-			ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len,
-							       ti_limits.chunk_sectors);
 		/* Set I/O hints portion of queue limits */
 		if (ti->type->io_hints)
 			ti->type->io_hints(ti, &ti_limits);
@@ -319,7 +319,7 @@ err1:
 #else
 static int persistent_memory_claim(struct dm_writecache *wc)
 {
-	BUG();
+	return -EOPNOTSUPP;
 }
 #endif

@@ -2041,7 +2041,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	struct wc_memory_superblock s;

 	static struct dm_arg _args[] = {
-		{0, 10, "Invalid number of feature args"},
+		{0, 16, "Invalid number of feature args"},
 	};

 	as.argc = argc;
@@ -2479,6 +2479,8 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
 			extra_args += 2;
 		if (wc->autocommit_time_set)
 			extra_args += 2;
+		if (wc->max_age != MAX_AGE_UNSPECIFIED)
+			extra_args += 2;
 		if (wc->cleaner)
 			extra_args++;
 		if (wc->writeback_fua_set)
@@ -476,8 +476,10 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
 		return -EAGAIN;

 	map = dm_get_live_table(md, &srcu_idx);
-	if (!map)
-		return -EIO;
+	if (!map) {
+		ret = -EIO;
+		goto out;
+	}

 	do {
 		struct dm_target *tgt;
@@ -507,7 +509,6 @@ out:

 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
 			    struct block_device **bdev)
-	__acquires(md->io_barrier)
 {
 	struct dm_target *tgt;
 	struct dm_table *map;
@@ -541,7 +542,6 @@ retry:
 }

 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
-	__releases(md->io_barrier)
 {
 	dm_put_live_table(md, srcu_idx);
 }
@@ -1037,15 +1037,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 	sector_t max_len;

 	/*
-	 * Does the target need to split even further?
-	 * - q->limits.chunk_sectors reflects ti->max_io_len so
-	 *   blk_max_size_offset() provides required splitting.
-	 * - blk_max_size_offset() also respects q->limits.max_sectors
+	 * Does the target need to split IO even further?
+	 * - varied (per target) IO splitting is a tenet of DM; this
+	 *   explains why stacked chunk_sectors based splitting via
+	 *   blk_max_size_offset() isn't possible here. So pass in
+	 *   ti->max_io_len to override stacked chunk_sectors.
 	 */
-	max_len = blk_max_size_offset(ti->table->md->queue,
-				      target_offset);
-	if (len > max_len)
-		len = max_len;
+	if (ti->max_io_len) {
+		max_len = blk_max_size_offset(ti->table->md->queue,
+					      target_offset, ti->max_io_len);
+		if (len > max_len)
+			len = max_len;
+	}

 	return len;
 }
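For reference, the ti->max_io_len consulted above is declared by each target at construction time through the existing DM helper dm_set_target_max_io_len(). A sketch of a target ->ctr() fragment (the target and its 8-sector chunk size are invented for illustration):

#include <linux/device-mapper.h>

/* Sketch of a ->ctr() fragment: the target declares that its I/O must
 * be split on 8-sector boundaries.  dm_set_target_max_io_len() stores
 * the value in ti->max_io_len, which max_io_len() above now passes to
 * blk_max_size_offset() as the chunk_sectors override. */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;

	r = dm_set_target_max_io_len(ti, 8);	/* hypothetical chunk size */
	if (r)
		return r;

	/* ... normal target setup would continue here ... */
	return 0;
}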
@@ -1196,11 +1199,9 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 		 * ->zero_page_range() is mandatory dax operation. If we are
 		 * here, something is wrong.
 		 */
-		dm_put_live_table(md, srcu_idx);
 		goto out;
 	}
 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
-
  out:
 	dm_put_live_table(md, srcu_idx);

@@ -1073,11 +1073,12 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
  * file system requests.
  */
 static inline unsigned int blk_max_size_offset(struct request_queue *q,
-					       sector_t offset)
+					       sector_t offset,
+					       unsigned int chunk_sectors)
 {
-	unsigned int chunk_sectors = q->limits.chunk_sectors;
-
-	if (!chunk_sectors)
+	if (!chunk_sectors && q->limits.chunk_sectors)
+		chunk_sectors = q->limits.chunk_sectors;
+	else
 		return q->limits.max_sectors;

 	if (likely(is_power_of_2(chunk_sectors)))
@@ -1101,7 +1102,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 	    req_op(rq) == REQ_OP_SECURE_ERASE)
 		return blk_queue_get_max_sectors(q, req_op(rq));

-	return min(blk_max_size_offset(q, offset),
+	return min(blk_max_size_offset(q, offset, 0),
 		   blk_queue_get_max_sectors(q, req_op(rq)));
 }
