Merge tag 'nvme-5.16-2021-10-21' of git://git.infradead.org/nvme into for-5.16/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.16

 - fix a multipath partition scanning deadlock (Hannes Reinecke)
 - generate uevent once a multipath namespace is operational again
   (Hannes Reinecke)
 - support unique discovery controller NQNs (Hannes Reinecke)
 - fix use-after-free when a port is removed (Israel Rukshin)
 - clear shadow doorbell memory on resets (Keith Busch)
 - use struct_size (Len Baker)
 - add error handling support for add_disk (Luis Chamberlain)
 - limit the maximal queue size for RDMA controllers (Max Gurtovoy)
 - use a few more symbolic names (Max Gurtovoy)
 - fix error code in nvme_rdma_setup_ctrl (Max Gurtovoy)
 - add support for ->map_queues on FC (Saurav Kashyap)"

* tag 'nvme-5.16-2021-10-21' of git://git.infradead.org/nvme: (23 commits)
  nvmet: use struct_size over open coded arithmetic
  nvme: drop scan_lock and always kick requeue list when removing namespaces
  nvme-pci: clear shadow doorbell memory on resets
  nvme-rdma: fix error code in nvme_rdma_setup_ctrl
  nvme-multipath: add error handling support for add_disk()
  nvmet: use macro definitions for setting cmic value
  nvmet: use macro definition for setting nmic value
  nvme: display correct subsystem NQN
  nvme: Add connect option 'discovery'
  nvme: expose subsystem type in sysfs attribute 'subsystype'
  nvmet: set 'CNTRLTYPE' in the identify controller data
  nvmet: add nvmet_is_disc_subsys() helper
  nvme: add CNTRLTYPE definitions for 'identify controller'
  nvmet: make discovery NQN configurable
  nvmet-rdma: implement get_max_queue_size controller op
  nvmet: add get_max_queue_size op for controllers
  nvme-rdma: limit the maximal queue size for RDMA controllers
  nvmet-tcp: fix use-after-free when a port is removed
  nvmet-rdma: fix use-after-free when a port is removed
  nvmet: fix use-after-free when a port is removed
  ...
Jens Axboe 2021-10-21 08:25:54 -06:00
commit cbab6ae0d0
21 changed files with 266 additions and 29 deletions

View File

@ -222,7 +222,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
dev_info(ctrl->device,
"Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
"Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
flush_work(&ctrl->reset_work);
nvme_stop_ctrl(ctrl);
@ -2620,6 +2620,24 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
static ssize_t nvme_subsys_show_type(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
switch (subsys->subtype) {
case NVME_NQN_DISC:
return sysfs_emit(buf, "discovery\n");
case NVME_NQN_NVME:
return sysfs_emit(buf, "nvm\n");
default:
return sysfs_emit(buf, "reserved\n");
}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
#define nvme_subsys_show_str_function(field) \
static ssize_t subsys_##field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@ -2640,6 +2658,7 @@ static struct attribute *nvme_subsys_attrs[] = {
&subsys_attr_serial.attr,
&subsys_attr_firmware_rev.attr,
&subsys_attr_subsysnqn.attr,
&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
&subsys_attr_iopolicy.attr,
#endif
@ -2710,6 +2729,21 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
/* Versions prior to 1.4 don't necessarily report a valid type */
if (id->cntrltype == NVME_CTRL_DISC ||
!strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
subsys->subtype = NVME_NQN_DISC;
else
subsys->subtype = NVME_NQN_NVME;
if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
dev_err(ctrl->device,
"Subsystem %s is not a discovery controller",
subsys->subnqn);
kfree(subsys);
return -EINVAL;
}
subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
subsys->iopolicy = NVME_IOPOLICY_NUMA;
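
For reference, the new attribute can be read like any other subsystem attribute in sysfs. A minimal userspace sketch, assuming an illustrative nvme-subsys0 instance (enumerate /sys/class/nvme-subsystem/ on a real system):

#include <stdio.h>

int main(void)
{
        /* Illustrative instance name; enumerate /sys/class/nvme-subsystem/ */
        const char *path = "/sys/class/nvme-subsystem/nvme-subsys0/subsystype";
        char buf[32];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                /* prints "discovery", "nvm" or "reserved", matching
                 * nvme_subsys_show_type() above */
                printf("subsystem type: %s", buf);
        fclose(f);
        return 0;
}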

View File

@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
{ NVMF_OPT_ERR, NULL }
};
@ -823,6 +824,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tos = token;
break;
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@ -949,7 +953,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
NVMF_OPT_DISABLE_SQFLOW |\
NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
NVMF_OPT_FAIL_FAST_TMO)
static struct nvme_ctrl *
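
For context, the new token is consumed from the option string that userspace (normally nvme-cli) writes to /dev/nvme-fabrics. A minimal sketch, with placeholder transport address and NQN values:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Placeholder address and (unique) discovery NQN; the trailing
         * "discovery" token sets opts->discovery_nqn even though the NQN
         * is not the well-known discovery NQN. */
        const char *opts =
                "transport=tcp,traddr=192.168.0.10,trsvcid=8009,"
                "nqn=nqn.2021-10.io.example:disc-1,discovery";
        int fd = open("/dev/nvme-fabrics", O_RDWR);

        if (fd < 0) {
                perror("/dev/nvme-fabrics");
                return 1;
        }
        if (write(fd, opts, strlen(opts)) < 0)
                perror("connect");
        close(fd);
        return 0;
}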

View File

@ -67,6 +67,7 @@ enum {
NVMF_OPT_TOS = 1 << 19,
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
NVMF_OPT_DISCOVERY = 1 << 22,
};
/**
@ -178,6 +179,13 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
return true;
}
static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
{
if (!ctrl->subsys)
return ctrl->opts->subsysnqn;
return ctrl->subsys->subnqn;
}
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);

View File

@ -16,6 +16,7 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
@ -2841,6 +2842,28 @@ nvme_fc_complete_rq(struct request *rq)
nvme_fc_ctrl_put(ctrl);
}
static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
int i;
for (i = 0; i < set->nr_maps; i++) {
struct blk_mq_queue_map *map = &set->map[i];
if (!map->nr_queues) {
WARN_ON(i == HCTX_TYPE_DEFAULT);
continue;
}
/* Call LLDD map queue functionality if defined */
if (ctrl->lport->ops->map_queues)
ctrl->lport->ops->map_queues(&ctrl->lport->localport,
map);
else
blk_mq_map_queues(map);
}
return 0;
}
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@ -2849,6 +2872,7 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
.exit_request = nvme_fc_exit_request,
.init_hctx = nvme_fc_init_hctx,
.timeout = nvme_fc_timeout,
.map_queues = nvme_fc_map_queues,
};
static int
@ -3572,7 +3596,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
return &ctrl->ctrl;

View File

@ -105,8 +105,11 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
if (ns->head->disk)
kblockd_schedule_work(&ns->head->requeue_work);
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
if (ctrl->state == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
}
@ -143,13 +146,12 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
mutex_lock(&ctrl->scan_lock);
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
if (nvme_mpath_clear_current_path(ns))
kblockd_schedule_work(&ns->head->requeue_work);
list_for_each_entry(ns, &ctrl->namespaces, list) {
nvme_mpath_clear_current_path(ns);
kblockd_schedule_work(&ns->head->requeue_work);
}
up_read(&ctrl->namespaces_rwsem);
mutex_unlock(&ctrl->scan_lock);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@ -506,13 +508,23 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
int rc;
if (!head->disk)
return;
/*
* test_and_set_bit() is used because it is protecting against two nvme
* paths simultaneously calling device_add_disk() on the same namespace
* head.
*/
if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_id_attr_groups);
rc = device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_id_attr_groups);
if (rc) {
clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags);
return;
}
nvme_add_ns_head_cdev(head);
}
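
The KOBJ_CHANGE uevent added to nvme_kick_requeue_lists() above is what lets udev re-trigger partition scanning once the shared namespace is reachable again. A minimal libudev listener sketch (link with -ludev); the block/disk filter is only an assumption about how a consumer would typically watch for it:

#include <libudev.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct udev *udev = udev_new();
        struct udev_monitor *mon;
        struct pollfd pfd;

        if (!udev)
                return 1;
        mon = udev_monitor_new_from_netlink(udev, "udev");
        if (!mon)
                return 1;
        udev_monitor_filter_add_match_subsystem_devtype(mon, "block", "disk");
        udev_monitor_enable_receiving(mon);
        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;

        while (poll(&pfd, 1, -1) > 0) {
                struct udev_device *dev = udev_monitor_receive_device(mon);

                if (!dev)
                        continue;
                if (!strcmp(udev_device_get_action(dev), "change"))
                        printf("change uevent on %s\n",
                               udev_device_get_sysname(dev));
                udev_device_unref(dev);
        }
        udev_monitor_unref(mon);
        udev_unref(udev);
        return 0;
}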

View File

@ -372,6 +372,7 @@ struct nvme_subsystem {
char model[40];
char firmware_rev[8];
u8 cmic;
enum nvme_subsys_type subtype;
u16 vendor_id;
u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;

View File

@ -245,8 +245,15 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
unsigned int mem_size = nvme_dbbuf_size(dev);
if (dev->dbbuf_dbs)
if (dev->dbbuf_dbs) {
/*
* Clear the dbbuf memory so the driver doesn't observe stale
* values from the previous instantiation.
*/
memset(dev->dbbuf_dbs, 0, mem_size);
memset(dev->dbbuf_eis, 0, mem_size);
return 0;
}
dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
&dev->dbbuf_dbs_dma_addr,

View File

@ -1096,11 +1096,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (ctrl->ctrl.icdoff) {
ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
goto destroy_admin;
}
if (!(ctrl->ctrl.sgls & (1 << 2))) {
ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device,
"Mandatory keyed sgls are not supported!\n");
goto destroy_admin;
@ -1112,6 +1114,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
dev_warn(ctrl->ctrl.device,
"ctrl sqsize %u > max queue size %u, clamping down\n",
ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
}
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
dev_warn(ctrl->ctrl.device,
"sqsize %u > ctrl maxcmd %u, clamping down\n",
@ -2386,7 +2395,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);

View File

@ -2582,7 +2582,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);

View File

@ -278,8 +278,8 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
u16 status;
status = NVME_SC_INTERNAL;
desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
GFP_KERNEL);
if (!desc)
goto out;
@ -374,13 +374,19 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->rab = 6;
if (nvmet_is_disc_subsys(ctrl->subsys))
id->cntrltype = NVME_CTRL_DISC;
else
id->cntrltype = NVME_CTRL_IO;
/*
* XXX: figure out how we can assign a IEEE OUI, but until then
* the safest is to leave it as zeroes.
*/
/* we support multiple ports, multiples hosts and ANA: */
id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
NVME_CTRL_CMIC_ANA;
/* Limit MDTS according to transport capability */
if (ctrl->ops->get_mdts)
@ -536,7 +542,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* Our namespace might always be shared. Not just with other
* controllers, but also with any other user of the block device.
*/
id->nmic = (1 << 0);
id->nmic = NVME_NS_NMIC_SHARED;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
@ -1008,7 +1014,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req);

View File

@ -1233,6 +1233,44 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n",
nvmet_disc_subsys->subsysnqn);
}
static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_subsys *subsys = to_subsys(item);
char *subsysnqn;
int len;
len = strcspn(page, "\n");
if (!len)
return -EINVAL;
subsysnqn = kmemdup_nul(page, len, GFP_KERNEL);
if (!subsysnqn)
return -ENOMEM;
/*
* The discovery NQN must be different from subsystem NQN.
*/
if (!strcmp(subsysnqn, subsys->subsysnqn)) {
kfree(subsysnqn);
return -EBUSY;
}
down_write(&nvmet_config_sem);
kfree(nvmet_disc_subsys->subsysnqn);
nvmet_disc_subsys->subsysnqn = subsysnqn;
up_write(&nvmet_config_sem);
return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn);
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
char *page)
@ -1262,6 +1300,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
&nvmet_subsys_attr_attr_discovery_nqn,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
@ -1553,6 +1592,8 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
/* Let inflight controllers teardown complete */
flush_scheduled_work();
list_del(&port->global_entry);
kfree(port->ana_state);
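
Administratively, the unique discovery NQN is written through the new configfs attribute. A minimal sketch, assuming a placeholder subsystem directory name (the attribute appears under every nvmet subsystem directory but updates the single global discovery subsystem):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Placeholder subsystem directory and discovery NQN. */
        const char *path = "/sys/kernel/config/nvmet/subsystems/"
                           "testnqn/attr_discovery_nqn";
        const char *nqn = "nqn.2021-10.io.example:unique-discovery\n";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror(path);
                return 1;
        }
        /* the store handler above returns -EBUSY if the NQN matches the
         * subsystem's own NQN */
        if (write(fd, nqn, strlen(nqn)) < 0)
                perror("write");
        close(fd);
        return 0;
}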

View File

@ -1140,7 +1140,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* should verify iosqes,iocqes are zeroed, however that
* would break backwards compatibility, so don't enforce it.
*/
if (ctrl->subsys->type != NVME_NQN_DISC &&
if (!nvmet_is_disc_subsys(ctrl->subsys) &&
(nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
ctrl->csts = NVME_CSTS_CFS;
@ -1205,7 +1205,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
if (ctrl->ops->get_max_queue_size)
ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
else
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
if (nvmet_is_passthru_subsys(ctrl->subsys))
nvmet_passthrough_override_cap(ctrl);
@ -1278,7 +1281,7 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
if (subsys->allow_any_host)
return true;
if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
return true;
list_for_each_entry(p, &subsys->hosts, entry) {
@ -1367,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_init(&ctrl->lock);
ctrl->port = req->port;
ctrl->ops = req->ops;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
@ -1405,13 +1409,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
}
ctrl->cntlid = ret;
ctrl->ops = req->ops;
/*
* Discovery controllers may use some arbitrary high value
* in order to cleanup stale discovery sessions
*/
if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
kato = NVMET_DISC_KATO_MS;
/* keep-alive timeout in seconds */
@ -1491,7 +1493,8 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) ||
!strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;

View File

@ -268,6 +268,8 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
id->cntrltype = NVME_CTRL_DISC;
/* no limit on data transfer sizes for now */
id->mdts = 0;
id->cntlid = cpu_to_le16(ctrl->cntlid);

View File

@ -221,7 +221,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

View File

@ -309,6 +309,7 @@ struct nvmet_fabrics_ops {
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@ -576,6 +577,11 @@ static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
return req->sq->ctrl->subsys;
}
static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
return subsys->type == NVME_NQN_DISC;
}
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);

View File

@ -1819,12 +1819,36 @@ restart:
mutex_unlock(&nvmet_rdma_queue_mutex);
}
static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
{
struct nvmet_rdma_queue *queue, *tmp;
struct nvmet_port *nport = port->nport;
mutex_lock(&nvmet_rdma_queue_mutex);
list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
queue_list) {
if (queue->port != nport)
continue;
list_del_init(&queue->queue_list);
__nvmet_rdma_queue_disconnect(queue);
}
mutex_unlock(&nvmet_rdma_queue_mutex);
}
static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
if (cm_id)
rdma_destroy_id(cm_id);
/*
* Destroy the remaining queues, which are not belong to any
* controller yet. Do it here after the RDMA-CM was destroyed
* guarantees that no new queue will be created.
*/
nvmet_rdma_destroy_port_queues(port);
}
static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
@ -1976,6 +2000,11 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
return NVMET_RDMA_MAX_MDTS;
}
static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
{
return NVME_RDMA_MAX_QUEUE_SIZE;
}
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
@ -1987,6 +2016,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
.get_mdts = nvmet_rdma_get_mdts,
.get_max_queue_size = nvmet_rdma_get_max_queue_size,
};
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)

View File

@ -1737,6 +1737,17 @@ err_port:
return ret;
}
static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
struct nvmet_tcp_queue *queue;
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
if (queue->port == port)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
mutex_unlock(&nvmet_tcp_queue_mutex);
}
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port = nport->priv;
@ -1746,6 +1757,11 @@ static void nvmet_tcp_remove_port(struct nvmet_port *nport)
port->sock->sk->sk_user_data = NULL;
write_unlock_bh(&port->sock->sk->sk_callback_lock);
cancel_work_sync(&port->accept_work);
/*
* Destroy the remaining queues, which are not belong to any
* controller yet.
*/
nvmet_tcp_destroy_port_queues(port);
sock_release(port->sock);
kfree(port);

View File

@ -8,6 +8,8 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@ -642,6 +644,18 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return rval;
}
static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
struct blk_mq_queue_map *map)
{
struct scsi_qla_host *vha = lport->private;
int rc;
rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
if (rc)
ql_log(ql_log_warn, vha, 0x21de,
"pci map queue failed 0x%x", rc);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
struct scsi_qla_host *vha = lport->private;
@ -676,6 +690,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
.map_queues = qla_nvme_map_queues,
.max_hw_queues = 8,
.max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,

View File

@ -7,6 +7,7 @@
#define _NVME_FC_DRIVER_H 1
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
/*
@ -497,6 +498,8 @@ struct nvme_fc_port_template {
int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *rport,
struct nvmefc_ls_rsp *ls_rsp);
void (*map_queues)(struct nvme_fc_local_port *localport,
struct blk_mq_queue_map *map);
u32 max_hw_queues;
u16 max_sgl_segments;
@ -779,6 +782,10 @@ struct nvmet_fc_target_port {
* LS received.
* Entrypoint is Mandatory.
*
* @map_queues: This functions lets the driver expose the queue mapping
* to the block layer.
* Entrypoint is Optional.
*
* @fcp_op: Called to perform a data transfer or transmit a response.
* The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
* exchange structure specified in the nvmet_fc_rcv_fcp_req() call

View File

@ -6,6 +6,8 @@
#ifndef _LINUX_NVME_RDMA_H
#define _LINUX_NVME_RDMA_H
#define NVME_RDMA_MAX_QUEUE_SIZE 128
enum nvme_rdma_cm_fmt {
NVME_RDMA_CM_FMT_1_0 = 0x0,
};

View File

@ -31,6 +31,12 @@ enum nvme_subsys_type {
NVME_NQN_NVME = 2, /* NVME type target subsystem */
};
enum nvme_ctrl_type {
NVME_CTRL_IO = 1, /* I/O controller */
NVME_CTRL_DISC = 2, /* Discovery controller */
NVME_CTRL_ADMIN = 3, /* Administrative controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */
@ -244,7 +250,9 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
__u8 rsvd100[28];
__u8 rsvd100[11];
__u8 cntrltype;
__u8 fguid[16];
__le16 crdt1;
__le16 crdt2;
__le16 crdt3;
@ -312,6 +320,7 @@ struct nvme_id_ctrl {
};
enum {
NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
NVME_CTRL_CMIC_ANA = 1 << 3,
NVME_CTRL_ONCS_COMPARE = 1 << 0,
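
With CNTRLTYPE now carried at byte 111 of the Identify Controller data, userspace can tell I/O, discovery and administrative controllers apart via a plain admin passthrough. A minimal sketch, assuming an illustrative /dev/nvme0 device node:

#include <fcntl.h>
#include <linux/nvme_ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        uint8_t id[4096];
        struct nvme_admin_cmd cmd;
        int fd = open("/dev/nvme0", O_RDONLY);  /* illustrative device node */

        if (fd < 0) {
                perror("/dev/nvme0");
                return 1;
        }
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = 0x06;              /* Identify */
        cmd.addr = (uintptr_t)id;
        cmd.data_len = sizeof(id);
        cmd.cdw10 = 0x01;               /* CNS 01h: Identify Controller */
        if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0) {
                perror("identify");
                close(fd);
                return 1;
        }
        /* byte 111 is CNTRLTYPE: 1 = I/O, 2 = discovery, 3 = admin,
         * 0 = not reported (pre-1.4 controllers) */
        printf("cntrltype: %u\n", id[111]);
        close(fd);
        return 0;
}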