lightnvm: implement get log report chunk helpers
The 2.0 spec provides a report chunk log page that can be retrieved using the standard nvme get log page. This replaces the dedicated get/put bad block table in 1.2. This patch implements the helper functions to allow targets to retrieve the chunk metadata using get log page. It makes nvme_get_log_ext available outside of nvme core so that we can use it from lightnvm. Signed-off-by: Javier González <javier@cnexlabs.com> Signed-off-by: Matias Bjørling <mb@lightnvm.io> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
7100d50a7e
commit
a294c19945
@ -712,6 +712,17 @@ static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
|
|||||||
nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
|
nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta,
|
||||||
|
struct ppa_addr ppa, int nchks)
|
||||||
|
{
|
||||||
|
struct nvm_dev *dev = tgt_dev->parent;
|
||||||
|
|
||||||
|
nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
|
||||||
|
|
||||||
|
return dev->ops->get_chk_meta(tgt_dev->parent, meta,
|
||||||
|
(sector_t)ppa.ppa, nchks);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(nvm_get_chunk_meta);
|
||||||
|
|
||||||
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
|
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
|
||||||
int nr_ppas, int type)
|
int nr_ppas, int type)
|
||||||
|
@ -2219,8 +2219,8 @@ out_unlock:
|
|||||||
}
|
}
|
||||||
|
|
||||||
int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
|
int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
|
||||||
u8 log_page, void *log,
|
u8 log_page, void *log,
|
||||||
size_t size, size_t offset)
|
size_t size, size_t offset)
|
||||||
{
|
{
|
||||||
struct nvme_command c = { };
|
struct nvme_command c = { };
|
||||||
unsigned long dwlen = size / 4 - 1;
|
unsigned long dwlen = size / 4 - 1;
|
||||||
|
@ -35,6 +35,10 @@ enum nvme_nvm_admin_opcode {
|
|||||||
nvme_nvm_admin_set_bb_tbl = 0xf1,
|
nvme_nvm_admin_set_bb_tbl = 0xf1,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* Log page identifiers defined by the 2.0 spec and used by lightnvm */
enum nvme_nvm_log_page {
	NVME_NVM_LOG_REPORT_CHUNK	= 0xca,	/* report chunk log page */
};
|
||||||
|
|
||||||
struct nvme_nvm_ph_rw {
|
struct nvme_nvm_ph_rw {
|
||||||
__u8 opcode;
|
__u8 opcode;
|
||||||
__u8 flags;
|
__u8 flags;
|
||||||
@ -236,6 +240,16 @@ struct nvme_nvm_id20 {
|
|||||||
__u8 vs[1024];
|
__u8 vs[1024];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct nvme_nvm_chk_meta {
|
||||||
|
__u8 state;
|
||||||
|
__u8 type;
|
||||||
|
__u8 wi;
|
||||||
|
__u8 rsvd[5];
|
||||||
|
__le64 slba;
|
||||||
|
__le64 cnlb;
|
||||||
|
__le64 wp;
|
||||||
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check we didn't inadvertently grow the command struct
|
* Check we didn't inadvertently grow the command struct
|
||||||
*/
|
*/
|
||||||
@ -252,6 +266,9 @@ static inline void _nvme_nvm_check_size(void)
|
|||||||
BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
|
||||||
BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
|
||||||
BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
|
||||||
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
|
||||||
|
BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
|
||||||
|
sizeof(struct nvm_chk_meta));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
|
static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
|
||||||
@ -552,6 +569,61 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Expect the lba in device format
|
||||||
|
*/
|
||||||
|
static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
|
||||||
|
struct nvm_chk_meta *meta,
|
||||||
|
sector_t slba, int nchks)
|
||||||
|
{
|
||||||
|
struct nvm_geo *geo = &ndev->geo;
|
||||||
|
struct nvme_ns *ns = ndev->q->queuedata;
|
||||||
|
struct nvme_ctrl *ctrl = ns->ctrl;
|
||||||
|
struct nvme_nvm_chk_meta *dev_meta = (struct nvme_nvm_chk_meta *)meta;
|
||||||
|
struct ppa_addr ppa;
|
||||||
|
size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
|
||||||
|
size_t log_pos, offset, len;
|
||||||
|
int ret, i;
|
||||||
|
|
||||||
|
/* Normalize lba address space to obtain log offset */
|
||||||
|
ppa.ppa = slba;
|
||||||
|
ppa = dev_to_generic_addr(ndev, ppa);
|
||||||
|
|
||||||
|
log_pos = ppa.m.chk;
|
||||||
|
log_pos += ppa.m.pu * geo->num_chk;
|
||||||
|
log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;
|
||||||
|
|
||||||
|
offset = log_pos * sizeof(struct nvme_nvm_chk_meta);
|
||||||
|
|
||||||
|
while (left) {
|
||||||
|
len = min_t(unsigned int, left, ctrl->max_hw_sectors << 9);
|
||||||
|
|
||||||
|
ret = nvme_get_log_ext(ctrl, ns, NVME_NVM_LOG_REPORT_CHUNK,
|
||||||
|
dev_meta, len, offset);
|
||||||
|
if (ret) {
|
||||||
|
dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
|
||||||
|
meta->state = dev_meta->state;
|
||||||
|
meta->type = dev_meta->type;
|
||||||
|
meta->wi = dev_meta->wi;
|
||||||
|
meta->slba = le64_to_cpu(dev_meta->slba);
|
||||||
|
meta->cnlb = le64_to_cpu(dev_meta->cnlb);
|
||||||
|
meta->wp = le64_to_cpu(dev_meta->wp);
|
||||||
|
|
||||||
|
meta++;
|
||||||
|
dev_meta++;
|
||||||
|
}
|
||||||
|
|
||||||
|
offset += len;
|
||||||
|
left -= len;
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
|
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
|
||||||
struct nvme_nvm_command *c)
|
struct nvme_nvm_command *c)
|
||||||
{
|
{
|
||||||
@ -683,6 +755,8 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
|
|||||||
.get_bb_tbl = nvme_nvm_get_bb_tbl,
|
.get_bb_tbl = nvme_nvm_get_bb_tbl,
|
||||||
.set_bb_tbl = nvme_nvm_set_bb_tbl,
|
.set_bb_tbl = nvme_nvm_set_bb_tbl,
|
||||||
|
|
||||||
|
.get_chk_meta = nvme_nvm_get_chk_meta,
|
||||||
|
|
||||||
.submit_io = nvme_nvm_submit_io,
|
.submit_io = nvme_nvm_submit_io,
|
||||||
.submit_io_sync = nvme_nvm_submit_io_sync,
|
.submit_io_sync = nvme_nvm_submit_io_sync,
|
||||||
|
|
||||||
|
@ -81,10 +81,13 @@ struct nvm_rq;
|
|||||||
struct nvm_id;
|
struct nvm_id;
|
||||||
struct nvm_dev;
|
struct nvm_dev;
|
||||||
struct nvm_tgt_dev;
|
struct nvm_tgt_dev;
|
||||||
|
struct nvm_chk_meta;
|
||||||
|
|
||||||
typedef int (nvm_id_fn)(struct nvm_dev *);
|
typedef int (nvm_id_fn)(struct nvm_dev *);
|
||||||
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
|
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
|
||||||
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
|
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
|
||||||
|
typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *,
|
||||||
|
sector_t, int);
|
||||||
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
|
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
|
||||||
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
|
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
|
||||||
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
|
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
|
||||||
@ -98,6 +101,8 @@ struct nvm_dev_ops {
|
|||||||
nvm_op_bb_tbl_fn *get_bb_tbl;
|
nvm_op_bb_tbl_fn *get_bb_tbl;
|
||||||
nvm_op_set_bb_fn *set_bb_tbl;
|
nvm_op_set_bb_fn *set_bb_tbl;
|
||||||
|
|
||||||
|
nvm_get_chk_meta_fn *get_chk_meta;
|
||||||
|
|
||||||
nvm_submit_io_fn *submit_io;
|
nvm_submit_io_fn *submit_io;
|
||||||
nvm_submit_io_sync_fn *submit_io_sync;
|
nvm_submit_io_sync_fn *submit_io_sync;
|
||||||
|
|
||||||
@ -227,6 +232,20 @@ struct nvm_addrf {
|
|||||||
u64 rsv_mask[2];
|
u64 rsv_mask[2];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Note: The structure size is linked to nvme_nvm_chk_meta such that the same
|
||||||
|
* buffer can be used when converting from little endian to cpu addressing.
|
||||||
|
*/
|
||||||
|
struct nvm_chk_meta {
|
||||||
|
u8 state;
|
||||||
|
u8 type;
|
||||||
|
u8 wi;
|
||||||
|
u8 rsvd[5];
|
||||||
|
u64 slba;
|
||||||
|
u64 cnlb;
|
||||||
|
u64 wp;
|
||||||
|
};
|
||||||
|
|
||||||
struct nvm_target {
|
struct nvm_target {
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
struct nvm_tgt_dev *dev;
|
struct nvm_tgt_dev *dev;
|
||||||
@ -492,6 +511,11 @@ extern struct nvm_dev *nvm_alloc_dev(int);
|
|||||||
extern int nvm_register(struct nvm_dev *);
|
extern int nvm_register(struct nvm_dev *);
|
||||||
extern void nvm_unregister(struct nvm_dev *);
|
extern void nvm_unregister(struct nvm_dev *);
|
||||||
|
|
||||||
|
|
||||||
|
extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev,
|
||||||
|
struct nvm_chk_meta *meta, struct ppa_addr ppa,
|
||||||
|
int nchks);
|
||||||
|
|
||||||
extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
|
extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
|
||||||
int, int);
|
int, int);
|
||||||
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
|
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
|
||||||
|
Loading…
Reference in New Issue
Block a user