lightnvm: refactor end_io functions for sync
To implement sync I/O support within the LightNVM core, the end_io
functions are refactored to take an end_io function pointer instead of
testing for an initialized media manager and then calling its end_io
function.

Sync I/O can then be implemented with a callback that signals I/O
completion. This is similar to the logic found in blk_to_execute_io().
By implementing it this way, the underlying device I/O submission logic
is abstracted away from the core, targets, and media managers.

Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 91276162de
parent abd805ec9f
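For illustration only (not part of this patch): with the rqd->wait completion and rqd->end_io pointer added below, a synchronous submission helper in the core could look roughly like the sketch that follows. The helper name nvm_submit_ppa_sync and the direct call into dev->ops->submit_io() are assumptions for the example; only nvm_end_io_sync(), nvm_end_io() and the new nvm_rq fields come from this commit.

/* Sketch of a synchronous submit built on the new fields; the helper
 * name and its direct use of dev->ops->submit_io() are hypothetical.
 */
static int nvm_submit_ppa_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret;

	rqd->dev = dev;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;	/* signals the completion below */

	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		return ret;

	/* The device driver completes the request through nvm_end_io(),
	 * which dispatches to rqd->end_io == nvm_end_io_sync() and wakes
	 * us up here.
	 */
	wait_for_completion_io(&wait);

	return 0;
}

The <linux/sched/sysctl.h> include added below suggests the eventual sync path also consults the hung-task timeout while waiting; that part is omitted from this sketch.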
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -436,9 +436,8 @@ static void null_del_dev(struct nullb *nullb)
 static void null_lnvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
-	struct nvm_dev *dev = rqd->dev;
 
-	dev->mt->end_io(rqd, error);
+	nvm_end_io(rqd, error);
 
 	blk_put_request(rq);
 }
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/miscdevice.h>
 #include <linux/lightnvm.h>
+#include <linux/sched/sysctl.h>
 #include <uapi/linux/lightnvm.h>
 
 static LIST_HEAD(nvm_targets);
@@ -288,6 +289,21 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
 }
 EXPORT_SYMBOL(nvm_erase_ppa);
 
+void nvm_end_io(struct nvm_rq *rqd, int error)
+{
+	rqd->end_io(rqd, error);
+}
+EXPORT_SYMBOL(nvm_end_io);
+
+static void nvm_end_io_sync(struct nvm_rq *rqd, int errors)
+{
+	struct completion *waiting = rqd->wait;
+
+	rqd->wait = NULL;
+
+	complete(waiting);
+}
+
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -317,18 +317,6 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
 	spin_unlock(&vlun->lock);
 }
 
-static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	if (!dev->ops->submit_io)
-		return -ENODEV;
-
-	/* Convert address space */
-	nvm_generic_to_addr_mode(dev, rqd);
-
-	rqd->dev = dev;
-	return dev->ops->submit_io(dev, rqd);
-}
-
 static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
 								int type)
 {
@@ -375,25 +363,32 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 	gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
 }
 
-static int gennvm_end_io(struct nvm_rq *rqd, int error)
+static void gennvm_end_io(struct nvm_rq *rqd, int error)
 {
 	struct nvm_tgt_instance *ins = rqd->ins;
-	int ret = 0;
 
 	switch (error) {
 	case NVM_RSP_SUCCESS:
-		break;
 	case NVM_RSP_ERR_EMPTYPAGE:
 		break;
 	case NVM_RSP_ERR_FAILWRITE:
 		gennvm_mark_blk_bad(rqd->dev, rqd);
-	default:
-		ret++;
 	}
 
-	ret += ins->tt->end_io(rqd, error);
+	ins->tt->end_io(rqd, error);
+}
 
-	return ret;
+static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	if (!dev->ops->submit_io)
+		return -ENODEV;
+
+	/* Convert address space */
+	nvm_generic_to_addr_mode(dev, rqd);
+
+	rqd->dev = dev;
+	rqd->end_io = gennvm_end_io;
+	return dev->ops->submit_io(dev, rqd);
 }
 
 static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
@@ -442,7 +437,6 @@ static struct nvmm_type gennvm = {
 	.put_blk		= gennvm_put_blk,
 
 	.submit_io		= gennvm_submit_io,
-	.end_io			= gennvm_end_io,
 	.erase_blk		= gennvm_erase_blk,
 
 	.get_lun		= gennvm_get_lun,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -642,7 +642,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 	}
 }
 
-static int rrpc_end_io(struct nvm_rq *rqd, int error)
+static void rrpc_end_io(struct nvm_rq *rqd, int error)
 {
 	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
 	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
@@ -655,7 +655,7 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
 	bio_put(rqd->bio);
 
 	if (rrqd->flags & NVM_IOTYPE_GC)
-		return 0;
+		return;
 
 	rrpc_unlock_rq(rrpc, rqd);
 
@@ -665,8 +665,6 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
 		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
 
 	mempool_free(rqd, rrpc->rq_pool);
-
-	return 0;
 }
 
 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -453,11 +453,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
 static void nvme_nvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
-	struct nvm_dev *dev = rqd->dev;
 
-	if (dev->mt && dev->mt->end_io(rqd, error))
-		pr_err("nvme: err status: %x result: %lx\n",
-				rq->errors, (unsigned long)rq->special);
+	nvm_end_io(rqd, error);
 
 	kfree(rq->cmd);
 	blk_mq_free_request(rq);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -148,6 +148,9 @@ struct ppa_addr {
 	};
 };
 
+struct nvm_rq;
+typedef void (nvm_end_io_fn)(struct nvm_rq *, int);
+
 struct nvm_rq {
 	struct nvm_tgt_instance *ins;
 	struct nvm_dev *dev;
@@ -164,6 +167,9 @@ struct nvm_rq {
 	void *metadata;
 	dma_addr_t dma_metadata;
 
+	struct completion *wait;
+	nvm_end_io_fn *end_io;
+
 	uint8_t opcode;
 	uint16_t nr_pages;
 	uint16_t flags;
@@ -347,7 +353,6 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
 
 typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
 typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
 typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
 typedef void (nvm_tgt_exit_fn)(void *);
 
@@ -358,7 +363,7 @@ struct nvm_tgt_type {
 	/* target entry points */
 	nvm_tgt_make_rq_fn *make_rq;
 	nvm_tgt_capacity_fn *capacity;
-	nvm_tgt_end_io_fn *end_io;
+	nvm_end_io_fn *end_io;
 
 	/* module-specific init/teardown */
 	nvm_tgt_init_fn *init;
@@ -383,7 +388,6 @@ typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
 typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
 typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
 typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
 								unsigned long);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
@@ -404,7 +408,6 @@ struct nvmm_type {
 	nvmm_flush_blk_fn *flush_blk;
 
 	nvmm_submit_io_fn *submit_io;
-	nvmm_end_io_fn *end_io;
 	nvmm_erase_blk_fn *erase_blk;
 
 	/* Configuration management */
@@ -434,6 +437,7 @@ extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
 extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
 extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr);
 extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
+extern void nvm_end_io(struct nvm_rq *, int);
 #else /* CONFIG_NVM */
 struct nvm_dev_ops;
 
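As a usage note (sketch, not from the patch): with nvm_tgt_end_io_fn replaced by nvm_end_io_fn, a target's completion handler now returns void. The names my_tgt_end_io and example_tt below are made up for illustration.

#include <linux/lightnvm.h>

/* Hypothetical target completion handler under the new typedef:
 * nvm_end_io_fn returns void instead of int.
 */
static void my_tgt_end_io(struct nvm_rq *rqd, int error)
{
	/* target-specific cleanup of bio, metadata, rqd, ... */
}

static struct nvm_tgt_type example_tt = {
	.name	= "example",
	.end_io	= my_tgt_end_io,	/* nvm_end_io_fn *, was nvm_tgt_end_io_fn * */
};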