dm: avoid indirect call in __dm_make_request
Indirect calls are inefficient because of the retpolines used as a Spectre workaround. This patch replaces an indirect call with a condition that can be predicted by the branch predictor.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent cd19181bf9
commit 24113d4878
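The pattern being removed and the one replacing it can be shown in a few lines of ordinary C. This is only an illustrative sketch, not the actual drivers/md/dm.c code: the types and names below (process_fn, make_request_indirect, make_request_direct, TYPE_NVME_BIO_BASED, process_nvme) are invented for the example. The point is that the "before" version calls through a function pointer, which becomes a retpoline thunk when Spectre mitigations are enabled, while the "after" version uses an ordinary conditional branch that the CPU's branch predictor can handle.

/* Standalone sketch of the change; names here are hypothetical, not from dm.c. */
#include <stdio.h>

enum md_type { TYPE_BIO_BASED, TYPE_NVME_BIO_BASED };

static int process_normal(int bio) { return bio + 1; }
static int process_nvme(int bio)   { return bio + 2; }

/* Before: the caller passes a function pointer, so the call is indirect
 * and goes through a retpoline on mitigated x86 kernels. */
typedef int (process_fn)(int);
static int make_request_indirect(process_fn *process, int bio)
{
	return process(bio);
}

/* After: a direct, predictable branch on the device type picks the
 * implementation, and both calls are ordinary direct calls. */
static int make_request_direct(enum md_type type, int bio)
{
	if (type == TYPE_NVME_BIO_BASED)
		return process_nvme(bio);
	else
		return process_normal(bio);
}

int main(void)
{
	printf("%d\n", make_request_indirect(process_normal, 41));
	printf("%d\n", make_request_direct(TYPE_NVME_BIO_BASED, 40));
	return 0;
}

In the patch itself the condition is dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED, checked directly inside dm_make_request; the md pointer already comes from q->queuedata, so no extra state is threaded through, and dm_make_request_nvme plus the process_bio_fn typedef can be deleted.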
drivers/md/dm.c
@@ -1696,10 +1696,7 @@ out:
 	return ret;
 }
 
-typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);
-
-static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
-				  process_bio_fn process_bio)
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 	blk_qc_t ret = BLK_QC_T_NONE;
@@ -1719,26 +1716,15 @@ static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
 		return ret;
 	}
 
-	ret = process_bio(md, map, bio);
+	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+		ret = __process_bio(md, map, bio);
+	else
+		ret = __split_and_process_bio(md, map, bio);
 
 	dm_put_live_table(md, srcu_idx);
 	return ret;
 }
 
-/*
- * The request function that remaps the bio to one target and
- * splits off any remainder.
- */
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
-{
-	return __dm_make_request(q, bio, __split_and_process_bio);
-}
-
-static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
-{
-	return __dm_make_request(q, bio, __process_bio);
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;
@@ -2229,12 +2215,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 		break;
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-		dm_init_normal_md_queue(md);
-		blk_queue_make_request(md->queue, dm_make_request);
-		break;
 	case DM_TYPE_NVME_BIO_BASED:
 		dm_init_normal_md_queue(md);
-		blk_queue_make_request(md->queue, dm_make_request_nvme);
+		blk_queue_make_request(md->queue, dm_make_request);
 		break;
 	case DM_TYPE_NONE:
 		WARN_ON_ONCE(true);