Mirror of https://github.com/torvalds/linux.git
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add target_alloc_session() w/ callback helper for doing se_session
     allocation + tag + se_node_acl lookup. (HCH + nab)

   - Tree-wide fabric driver conversion to use target_alloc_session()

   - Convert sbp-target to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Chris Boot + nab)

   - Convert usb-gadget to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Andrzej Pietrasiewicz + nab)

   - Convert xen-scsiback to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Juergen Gross + nab)

   - Convert tcm_fc to use TARGET_SCF_ACK_KREF I/O + TMR krefs

   - Convert ib_srpt to use percpu_ida tag pre-allocation

   - Add DebugFS node for qla2xxx target sess list (Quinn)

   - Rework iser-target connection termination (Jenny + Sagi)

   - Convert iser-target to new CQ API (HCH)

   - Add pass-through WRITE_SAME support for IBLOCK (Mike Christie)

   - Introduce data_bitmap for asynchronous access of data area (Sheng
     Yang + Andy)

   - Fix target_release_cmd_kref shutdown comp leak (Himanshu Madhani)

  Also, there is a separate PULL request coming for cxgb4 NIC driver
  prerequisites for supporting hw iscsi segmentation offload (ISO), that
  will be the base for a number of v4.7 developments involving
  iscsi-target hw offloads"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (36 commits)
  target: Fix target_release_cmd_kref shutdown comp leak
  target: Avoid DataIN transfers for non-GOOD SAM status
  target/user: Report capability of handling out-of-order completions to userspace
  target/user: Fix size_t format-spec build warning
  target/user: Don't free expired command when time out
  target/user: Introduce data_bitmap, replace data_length/data_head/data_tail
  target/user: Free data ring in unified function
  target/user: Use iovec[] to describe continuous area
  target: Remove enum transport_lunflags_table
  target/iblock: pass WRITE_SAME to device if possible
  iser-target: Kill the ->isert_cmd back pointer in struct iser_tx_desc
  iser-target: Kill struct isert_rdma_wr
  iser-target: Convert to new CQ API
  iser-target: Split and properly type the login buffer
  iser-target: Remove ISER_RECV_DATA_SEG_LEN
  iser-target: Remove impossible condition from isert_wait_conn
  iser-target: Remove redundant wait in release_conn
  iser-target: Rework connection termination
  iser-target: Separate flows for np listeners and connections cma events
  iser-target: Add new state ISER_CONN_BOUND to isert_conn
  ...
commit 5266e5b12c
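The changelog's headline item is target_alloc_session(), whose implementation appears in the target core hunks further below. It bundles se_session allocation, optional percpu_ida tag-pool setup, NodeACL lookup, an optional fabric callback, and session registration into one call. Here is a minimal sketch of a caller; the my_* names are illustrative, not part of this series, and only target_alloc_session() itself is taken from the diff:

	/* Hypothetical fabric driver using target_alloc_session(). */
	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>

	struct my_cmd { struct se_cmd se_cmd; };
	struct my_nexus { struct se_session *se_sess; };

	static int my_session_cb(struct se_portal_group *se_tpg,
				 struct se_session *se_sess, void *p)
	{
		struct my_nexus *nexus = p;	/* private data passed through */

		nexus->se_sess = se_sess;	/* fabric-side bookkeeping */
		return 0;			/* non-zero frees the session */
	}

	static int my_make_nexus(struct se_portal_group *se_tpg,
				 const char *initiator_wwn, struct my_nexus *nexus)
	{
		struct se_session *se_sess;

		/* 128 pre-allocated tags, each backed by a struct my_cmd slot */
		se_sess = target_alloc_session(se_tpg, 128, sizeof(struct my_cmd),
					       TARGET_PROT_NORMAL, initiator_wwn,
					       nexus, my_session_cb);
		return IS_ERR(se_sess) ? PTR_ERR(se_sess) : 0;
	}

The callback runs after ACL lookup but before transport_register_session(), which is exactly where the qla2xxx and tcm_loop conversions below hook in their driver-private pointer setup.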
@@ -117,7 +117,9 @@ userspace (respectively) to put commands on the ring, and indicate
 when the commands are completed.

 version - 1 (userspace should abort if otherwise)
-flags - none yet defined.
+flags:
+- TCMU_MAILBOX_FLAG_CAP_OOOC: indicates out-of-order completion is
+  supported. See "The Command Ring" for details.
 cmdr_off - The offset of the start of the command ring from the start
 of the memory region, to account for the mailbox size.
 cmdr_size - The size of the command ring. This does *not* need to be a
@@ -162,6 +164,13 @@ rsp.sense_buffer if necessary. Userspace then increments
 mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the
 kernel via the UIO method, a 4-byte write to the file descriptor.

+If TCMU_MAILBOX_FLAG_CAP_OOOC is set in mailbox->flags, the kernel is
+capable of handling out-of-order completions. In this case, userspace may
+complete commands in a different order than they were issued. Since the
+kernel still processes the commands in the order they appeared in the
+command ring, userspace needs to update cmd->id when completing a
+command (i.e. steal the original command's entry).
+
 When the opcode is PAD, userspace only updates cmd_tail as above --
 it's a no-op. (The kernel inserts PAD entries to ensure each CMD entry
 is contiguous within the command ring.)
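To make the documented flow concrete, here is a minimal sketch of a userspace completion loop honoring TCMU_MAILBOX_FLAG_CAP_OOOC. It assumes the tcmu UAPI types and helpers from <linux/target_core_user.h> (struct tcmu_mailbox, struct tcmu_cmd_entry, tcmu_hdr_get_op(), tcmu_hdr_get_len()) as they stood at the time of this series; handle_scsi_cmd() is a stand-in for the real emulation:

	#include <stdint.h>
	#include <unistd.h>
	#include <linux/target_core_user.h>

	/* Placeholder: a real handler parses the CDB referenced by ent->req
	 * and fills in ent->rsp before completion. */
	static void handle_scsi_cmd(struct tcmu_cmd_entry *ent)
	{
	}

	/* map is the mmap()ed UIO region, uio_fd the open UIO device. */
	static void process_ring(void *map, int uio_fd)
	{
		struct tcmu_mailbox *mb = map;
		char *cmdr = (char *)map + mb->cmdr_off;
		uint32_t buf = 0;

		while (mb->cmd_tail != mb->cmd_head) {
			struct tcmu_cmd_entry *ent =
				(struct tcmu_cmd_entry *)(cmdr + mb->cmd_tail);

			if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
				handle_scsi_cmd(ent);
				/*
				 * With TCMU_MAILBOX_FLAG_CAP_OOOC set in
				 * mb->flags, whichever command finishes first
				 * may be completed here instead: overwrite
				 * ent->hdr.cmd_id with the finished command's
				 * id ("steal" this ring entry).
				 */
			}
			/* PAD entries are a no-op; advancing cmd_tail is enough. */
			mb->cmd_tail = (mb->cmd_tail +
					tcmu_hdr_get_len(ent->hdr.len_op)) % mb->cmdr_size;
		}
		/* Signal the kernel via the UIO method: a 4-byte write. */
		write(uio_fd, &buf, 4);
	}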
(File diff suppressed because it is too large)
@@ -36,9 +36,7 @@
 /* Constant PDU lengths calculations */
 #define ISER_HEADERS_LEN	(sizeof(struct iser_ctrl) + \
				 sizeof(struct iscsi_hdr))
-#define ISER_RECV_DATA_SEG_LEN	8192
-#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
-#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)

 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
@@ -62,12 +60,11 @@
		ISERT_MAX_TX_MISC_PDUS	+ \
		ISERT_MAX_RX_MISC_PDUS)

-#define ISER_RX_PAD_SIZE	(ISER_RECV_DATA_SEG_LEN + 4096 - \
-		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
+#define ISER_RX_PAD_SIZE	(ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
+		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
+		 sizeof(struct ib_cqe)))

 #define ISCSI_ISER_SG_TABLESIZE		256
-#define ISER_FASTREG_LI_WRID		0xffffffffffffffffULL
-#define ISER_BEACON_WRID		0xfffffffffffffffeULL

 enum isert_desc_type {
	ISCSI_TX_CONTROL,
@@ -84,6 +81,7 @@ enum iser_ib_op_code {
 enum iser_conn_state {
	ISER_CONN_INIT,
	ISER_CONN_UP,
+	ISER_CONN_BOUND,
	ISER_CONN_FULL_FEATURE,
	ISER_CONN_TERMINATING,
	ISER_CONN_DOWN,
@@ -92,23 +90,35 @@ enum iser_conn_state {
 struct iser_rx_desc {
	struct iser_ctrl iser_header;
	struct iscsi_hdr iscsi_header;
-	char		data[ISER_RECV_DATA_SEG_LEN];
+	char		data[ISCSI_DEF_MAX_RECV_SEG_LEN];
	u64		dma_addr;
	struct ib_sge	rx_sg;
+	struct ib_cqe	rx_cqe;
	char		pad[ISER_RX_PAD_SIZE];
 } __packed;

+static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
+{
+	return container_of(cqe, struct iser_rx_desc, rx_cqe);
+}
+
 struct iser_tx_desc {
	struct iser_ctrl iser_header;
	struct iscsi_hdr iscsi_header;
	enum isert_desc_type type;
	u64		dma_addr;
	struct ib_sge	tx_sg[2];
+	struct ib_cqe	tx_cqe;
	int		num_sge;
-	struct isert_cmd *isert_cmd;
	struct ib_send_wr send_wr;
 } __packed;

+static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
+{
+	return container_of(cqe, struct iser_tx_desc, tx_cqe);
+}
+

 enum isert_indicator {
	ISERT_PROTECTED		= 1 << 0,
	ISERT_DATA_KEY_VALID	= 1 << 1,
@@ -144,20 +154,6 @@ enum {
	SIG = 2,
 };

-struct isert_rdma_wr {
-	struct isert_cmd	*isert_cmd;
-	enum iser_ib_op_code	iser_ib_op;
-	struct ib_sge		*ib_sge;
-	struct ib_sge		s_ib_sge;
-	int			rdma_wr_num;
-	struct ib_rdma_wr	*rdma_wr;
-	struct ib_rdma_wr	s_rdma_wr;
-	struct ib_sge		ib_sg[3];
-	struct isert_data_buf	data;
-	struct isert_data_buf	prot;
-	struct fast_reg_descriptor *fr_desc;
-};
-
 struct isert_cmd {
	uint32_t		read_stag;
	uint32_t		write_stag;
@@ -170,22 +166,34 @@ struct isert_cmd {
	struct iscsi_cmd	*iscsi_cmd;
	struct iser_tx_desc	tx_desc;
	struct iser_rx_desc	*rx_desc;
-	struct isert_rdma_wr	rdma_wr;
+	enum iser_ib_op_code	iser_ib_op;
+	struct ib_sge		*ib_sge;
+	struct ib_sge		s_ib_sge;
+	int			rdma_wr_num;
+	struct ib_rdma_wr	*rdma_wr;
+	struct ib_rdma_wr	s_rdma_wr;
+	struct ib_sge		ib_sg[3];
+	struct isert_data_buf	data;
+	struct isert_data_buf	prot;
+	struct fast_reg_descriptor *fr_desc;
	struct work_struct	comp_work;
	struct scatterlist	sg;
 };

+static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
+{
+	return container_of(desc, struct isert_cmd, tx_desc);
+}
+
 struct isert_device;

 struct isert_conn {
	enum iser_conn_state	state;
	int			post_recv_buf_count;
	u32			responder_resources;
	u32			initiator_depth;
	bool			pi_support;
	u32			max_sge;
-	char			*login_buf;
-	char			*login_req_buf;
+	struct iser_rx_desc	*login_req_buf;
	char			*login_rsp_buf;
	u64			login_req_dma;
	int			login_req_len;
@@ -201,7 +209,6 @@ struct isert_conn {
	struct ib_qp		*qp;
	struct isert_device	*device;
	struct mutex		mutex;
-	struct completion	wait;
	struct completion	wait_comp_err;
	struct kref		kref;
	struct list_head	fr_pool;
@@ -221,17 +228,13 @@ struct isert_conn {
  *
  * @device:     pointer to device handle
  * @cq:         completion queue
- * @wcs:        work completion array
  * @active_qps: Number of active QPs attached
  *              to completion context
- * @work:       completion work handle
  */
 struct isert_comp {
	struct isert_device     *device;
	struct ib_cq		*cq;
-	struct ib_wc		 wcs[16];
	int                      active_qps;
-	struct work_struct	 work;
 };

 struct isert_device {
@@ -243,9 +246,8 @@ struct isert_device {
	struct isert_comp	*comps;
	int                     comps_used;
	struct list_head	dev_node;
-	int			(*reg_rdma_mem)(struct iscsi_conn *conn,
-						    struct iscsi_cmd *cmd,
-						    struct isert_rdma_wr *wr);
+	int			(*reg_rdma_mem)(struct isert_cmd *isert_cmd,
+						struct iscsi_conn *conn);
	void			(*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
						  struct isert_conn *isert_conn);
 };
@@ -1264,40 +1264,26 @@ free_mem:
  */
 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 {
+	struct se_session *se_sess;
	struct srpt_send_ioctx *ioctx;
-	unsigned long flags;
+	int tag;

	BUG_ON(!ch);
+	se_sess = ch->sess;

-	ioctx = NULL;
-	spin_lock_irqsave(&ch->spinlock, flags);
-	if (!list_empty(&ch->free_list)) {
-		ioctx = list_first_entry(&ch->free_list,
-					 struct srpt_send_ioctx, free_list);
-		list_del(&ioctx->free_list);
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0) {
+		pr_err("Unable to obtain tag for srpt_send_ioctx\n");
+		return NULL;
	}
-	spin_unlock_irqrestore(&ch->spinlock, flags);
-
-	if (!ioctx)
-		return ioctx;
-
-	BUG_ON(ioctx->ch != ch);
+	ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
+	memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
+	ioctx->ch = ch;
	spin_lock_init(&ioctx->spinlock);
	ioctx->state = SRPT_STATE_NEW;
-	ioctx->n_rbuf = 0;
-	ioctx->rbufs = NULL;
-	ioctx->n_rdma = 0;
-	ioctx->n_rdma_wrs = 0;
-	ioctx->rdma_wrs = NULL;
-	ioctx->mapped_sg_count = 0;
	init_completion(&ioctx->tx_done);
-	ioctx->queue_status_only = false;
-	/*
-	 * transport_init_se_cmd() does not initialize all fields, so do it
-	 * here.
-	 */
-	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
-	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+
+	ioctx->cmd.map_tag = tag;

	return ioctx;
 }
@@ -2034,9 +2020,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
	struct srp_login_rej *rej;
	struct ib_cm_rep_param *rep_param;
	struct srpt_rdma_ch *ch, *tmp_ch;
-	struct se_node_acl *se_acl;
	u32 it_iu_len;
-	int i, ret = 0;
+	int ret = 0;
	unsigned char *p;

	WARN_ON_ONCE(irqs_disabled());
@@ -2158,12 +2143,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
	if (!ch->ioctx_ring)
		goto free_ch;

-	INIT_LIST_HEAD(&ch->free_list);
-	for (i = 0; i < ch->rq_size; i++) {
-		ch->ioctx_ring[i]->ch = ch;
-		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
-	}
-
	ret = srpt_create_ch_ib(ch);
	if (ret) {
		rej->reason = cpu_to_be32(
@@ -2193,19 +2172,13 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
	pr_debug("registering session %s\n", ch->sess_name);
	p = &ch->sess_name[0];

-	ch->sess = transport_init_session(TARGET_PROT_NORMAL);
-	if (IS_ERR(ch->sess)) {
-		rej->reason = cpu_to_be32(
-				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-		pr_debug("Failed to create session\n");
-		goto destroy_ib;
-	}
-
 try_again:
-	se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
-	if (!se_acl) {
+	ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
+					sizeof(struct srpt_send_ioctx),
+					TARGET_PROT_NORMAL, p, ch, NULL);
+	if (IS_ERR(ch->sess)) {
		pr_info("Rejected login because no ACL has been"
-			" configured yet for initiator %s.\n", ch->sess_name);
+			" configured yet for initiator %s.\n", p);
		/*
		 * XXX: Hack to retry of ch->i_port_id without leading '0x'
		 */
@@ -2213,14 +2186,11 @@ try_again:
			p += 2;
			goto try_again;
		}
-		rej->reason = cpu_to_be32(
+		rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
-		transport_free_session(ch->sess);
		goto destroy_ib;
	}
-	ch->sess->se_node_acl = se_acl;
-
-	transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);

	pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
		 ch->sess_name, ch->cm_id);
@@ -2911,7 +2881,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
				struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
-	unsigned long flags;
+	struct se_session *se_sess = ch->sess;

	WARN_ON(ioctx->state != SRPT_STATE_DONE);
	WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2922,9 +2892,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
		ioctx->n_rbuf = 0;
	}

-	spin_lock_irqsave(&ch->spinlock, flags);
-	list_add(&ioctx->free_list, &ch->free_list);
-	spin_unlock_irqrestore(&ch->spinlock, flags);
+	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }

 /**
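The ib_srpt conversion above and the sbp-target conversion below share one shape: the tag pool and command map are sized at session creation, and the driver's private free-list plus kmalloc/kfree descriptor management is replaced by percpu_ida_alloc()/percpu_ida_free() against se_session. A hedged sketch of that pattern for a hypothetical fabric driver (my_cmd/my_get_cmd/my_release_cmd are illustrative names, not from this diff; the percpu_ida calls match the API at the time of this series):

	#include <linux/percpu_ida.h>
	#include <linux/sched.h>
	#include <linux/string.h>
	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>

	struct my_cmd {
		struct se_cmd se_cmd;
		/* driver-private per-command state goes here */
	};

	static struct my_cmd *my_get_cmd(struct se_session *se_sess)
	{
		struct my_cmd *cmd;
		int tag;

		/* May sleep; pass GFP_ATOMIC-style state in atomic contexts,
		 * as the sbp-target hunk below does. */
		tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
		if (tag < 0)
			return NULL;

		/* sess_cmd_map was sized by target_alloc_session(tpg, tag_num,
		 * sizeof(struct my_cmd), ...), so every tag indexes one slot. */
		cmd = &((struct my_cmd *)se_sess->sess_cmd_map)[tag];
		memset(cmd, 0, sizeof(*cmd));
		cmd->se_cmd.map_tag = tag;
		return cmd;
	}

	static void my_release_cmd(struct se_cmd *se_cmd)
	{
		struct se_session *se_sess = se_cmd->se_sess;

		percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
	}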
@@ -179,7 +179,6 @@ struct srpt_recv_ioctx {
  * struct srpt_send_ioctx - SRPT send I/O context.
  * @ioctx:       See above.
  * @ch:          Channel pointer.
- * @free_list:   Node in srpt_rdma_ch.free_list.
  * @n_rbuf:      Number of data buffers in the received SRP command.
  * @rbufs:       Pointer to SRP data buffer array.
  * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -202,7 +201,6 @@ struct srpt_send_ioctx {
	struct srp_direct_buf	*rbufs;
	struct srp_direct_buf	single_rbuf;
	struct scatterlist	*sg;
-	struct list_head	free_list;
	spinlock_t		spinlock;
	enum srpt_command_state	state;
	struct se_cmd		cmd;
@@ -2963,6 +2963,7 @@ struct qlt_hw_data {

	uint8_t tgt_node_name[WWN_SIZE];

+	struct dentry *dfs_tgt_sess;
	struct list_head q_full_list;
	uint32_t num_pend_cmds;
	uint32_t num_qfull_cmds_alloc;
@@ -12,6 +12,47 @@
 static struct dentry *qla2x00_dfs_root;
 static atomic_t qla2x00_dfs_root_count;

+static int
+qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
+{
+	scsi_qla_host_t *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	struct qla_tgt_sess *sess = NULL;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+	seq_printf(s, "%s\n", vha->host_str);
+	if (tgt) {
+		seq_printf(s, "Port ID   Port Name                Handle\n");
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
+				   sess->s_id.b.domain, sess->s_id.b.area,
+				   sess->s_id.b.al_pa, sess->port_name,
+				   sess->loop_id);
+		}
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	}
+
+	return 0;
+}
+
+static int
+qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
+{
+	scsi_qla_host_t *vha = inode->i_private;
+
+	return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
+}
+
+static const struct file_operations dfs_tgt_sess_ops = {
+	.open		= qla2x00_dfs_tgt_sess_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 {
@@ -248,6 +289,15 @@ create_nodes:
		    "Unable to create debugfs fce node.\n");
		goto out;
	}
+
+	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
+		S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+	if (!ha->tgt.dfs_tgt_sess) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Unable to create debugFS tgt_sess node.\n");
+		goto out;
+	}
+
 out:
	return 0;
 }
@@ -257,6 +307,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
 {
	struct qla_hw_data *ha = vha->hw;

+	if (ha->tgt.dfs_tgt_sess) {
+		debugfs_remove(ha->tgt.dfs_tgt_sess);
+		ha->tgt.dfs_tgt_sess = NULL;
+	}
+
	if (ha->dfs_fw_resource_cnt) {
		debugfs_remove(ha->dfs_fw_resource_cnt);
		ha->dfs_fw_resource_cnt = NULL;
@@ -641,7 +641,8 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
 {
	struct scsi_qla_host *vha = sess->vha;

-	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+	if (sess->se_sess)
+		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	if (!list_empty(&sess->del_list_entry))
		list_del_init(&sess->del_list_entry);
@@ -856,8 +857,12 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
-			ha->tgt.tgt_ops->shutdown_sess(sess);
-			ha->tgt.tgt_ops->put_sess(sess);
+			if (sess->se_sess) {
+				ha->tgt.tgt_ops->shutdown_sess(sess);
+				ha->tgt.tgt_ops->put_sess(sess);
+			} else {
+				qlt_unreg_sess(sess);
+			}
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
@@ -879,7 +884,6 @@ static struct qla_tgt_sess *qlt_create_sess(
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
-	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -905,6 +909,14 @@ static struct qla_tgt_sess *qlt_create_sess(
			if (sess->deleted)
				qlt_undelete_sess(sess);

+			if (!sess->se_sess) {
+				if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+				    &sess->port_name[0], sess) < 0) {
+					spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+					return NULL;
+				}
+			}
+
			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
						(fcport->flags & FCF_CONF_COMP_SUPPORTED));
@@ -948,26 +960,6 @@ static struct qla_tgt_sess *qlt_create_sess(
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

-	be_sid[0] = sess->s_id.b.domain;
-	be_sid[1] = sess->s_id.b.area;
-	be_sid[2] = sess->s_id.b.al_pa;
-	/*
-	 * Determine if this fc_port->port_name is allowed to access
-	 * target mode using explict NodeACLs+MappedLUNs, or using
-	 * TPG demo mode.  If this is successful a target mode FC nexus
-	 * is created.
-	 */
-	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
-	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
-		kfree(sess);
-		return NULL;
-	}
-	/*
-	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
-	 * access across ->tgt.sess_lock reaquire.
-	 */
-	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
@@ -985,6 +977,23 @@ static struct qla_tgt_sess *qlt_create_sess(
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");

+	/*
+	 * Determine if this fc_port->port_name is allowed to access
+	 * target mode using explict NodeACLs+MappedLUNs, or using
+	 * TPG demo mode.  If this is successful a target mode FC nexus
+	 * is created.
+	 */
+	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+	    &fcport->port_name[0], sess) < 0) {
+		return NULL;
+	} else {
+		/*
+		 * Take an extra reference to ->sess_kref here to handle
+		 * qla_tgt_sess access across ->tgt.sess_lock reaquire.
+		 */
+		kref_get(&sess->se_sess->sess_kref);
+	}
+
	return sess;
 }
@@ -731,7 +731,7 @@ struct qla_tgt_func_tmpl {
	void (*free_session)(struct qla_tgt_sess *);

	int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
-					void *, uint8_t *, uint16_t);
+					struct qla_tgt_sess *);
	void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
	struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
						const uint16_t);
@@ -1406,6 +1406,39 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
	transport_deregister_session(sess->se_sess);
 }

+static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
+				  struct se_session *se_sess, void *p)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	struct qla_hw_data *ha = lport->qla_vha->hw;
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+				struct tcm_qla2xxx_nacl, se_node_acl);
+	struct qla_tgt_sess *qlat_sess = p;
+	uint16_t loop_id = qlat_sess->loop_id;
+	unsigned long flags;
+	unsigned char be_sid[3];
+
+	be_sid[0] = qlat_sess->s_id.b.domain;
+	be_sid[1] = qlat_sess->s_id.b.area;
+	be_sid[2] = qlat_sess->s_id.b.al_pa;
+
+	/*
+	 * And now setup se_nacl and session pointers into HW lport internal
+	 * mappings for fabric S_ID and LOOP_ID.
+	 */
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl,
+				     se_sess, qlat_sess, be_sid);
+	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
+					se_sess, qlat_sess, loop_id);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	return 0;
+}
+
 /*
  * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
  * to locate struct se_node_acl
@@ -1413,20 +1446,13 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
 static int tcm_qla2xxx_check_initiator_node_acl(
	scsi_qla_host_t *vha,
	unsigned char *fc_wwpn,
-	void *qla_tgt_sess,
-	uint8_t *s_id,
-	uint16_t loop_id)
+	struct qla_tgt_sess *qlat_sess)
 {
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport;
	struct tcm_qla2xxx_tpg *tpg;
-	struct tcm_qla2xxx_nacl *nacl;
-	struct se_portal_group *se_tpg;
-	struct se_node_acl *se_nacl;
	struct se_session *se_sess;
-	struct qla_tgt_sess *sess = qla_tgt_sess;
	unsigned char port_name[36];
-	unsigned long flags;
	int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
		       TCM_QLA2XXX_DEFAULT_TAGS;

@@ -1444,15 +1470,6 @@ static int tcm_qla2xxx_check_initiator_node_acl(
		pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
		return -EINVAL;
	}
-	se_tpg = &tpg->se_tpg;
-
-	se_sess = transport_init_session_tags(num_tags,
-					      sizeof(struct qla_tgt_cmd),
-					      TARGET_PROT_ALL);
-	if (IS_ERR(se_sess)) {
-		pr_err("Unable to initialize struct se_session\n");
-		return PTR_ERR(se_sess);
-	}
	/*
	 * Format the FCP Initiator port_name into colon seperated values to
	 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
@@ -1463,28 +1480,12 @@ static int tcm_qla2xxx_check_initiator_node_acl(
	 * Locate our struct se_node_acl either from an explict NodeACL created
	 * via ConfigFS, or via running in TPG demo mode.
	 */
-	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
-					port_name);
-	if (!se_sess->se_node_acl) {
-		transport_free_session(se_sess);
-		return -EINVAL;
-	}
-	se_nacl = se_sess->se_node_acl;
-	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
-	/*
-	 * And now setup the new se_nacl and session pointers into our HW lport
-	 * mappings for fabric S_ID and LOOP_ID.
-	 */
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
-			qla_tgt_sess, s_id);
-	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
-			qla_tgt_sess, loop_id);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-	/*
-	 * Finally register the new FC Nexus with TCM
-	 */
-	transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+	se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
+				       sizeof(struct qla_tgt_cmd),
+				       TARGET_PROT_ALL, port_name,
+				       qlat_sess, tcm_qla2xxx_session_cb);
+	if (IS_ERR(se_sess))
+		return PTR_ERR(se_sess);

	return 0;
 }
@@ -802,58 +802,48 @@ static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {

 /* Start items for tcm_loop_nexus_cit */

+static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
+				  struct se_session *se_sess, void *p)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+					struct tcm_loop_tpg, tl_se_tpg);
+
+	tl_tpg->tl_nexus = p;
+	return 0;
+}
+
 static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
 {
-	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
-	int ret = -ENOMEM;
+	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}
-	se_tpg = &tl_tpg->tl_se_tpg;

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}
-	/*
-	 * Initialize the struct se_session pointer
-	 */
-	tl_nexus->se_sess = transport_init_session(
-				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
+
+	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
+					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
-		goto out;
+		kfree(tl_nexus);
+		return ret;
	}
-	/*
-	 * Since we are running in 'demo mode' this call with generate a
-	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
-	 * Initiator port name of the passed configfs group 'name'.
-	 */
-	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-				se_tpg, (unsigned char *)name);
-	if (!tl_nexus->se_sess->se_node_acl) {
-		transport_free_session(tl_nexus->se_sess);
-		goto out;
-	}
-	/* Now, register the I_T Nexus as active. */
-	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-			tl_nexus->se_sess, tl_nexus);
-	tl_tpg->tl_nexus = tl_nexus;

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;
-
-out:
-	kfree(tl_nexus);
-	return ret;
 }

 static int tcm_loop_drop_nexus(
@@ -196,45 +196,30 @@ static struct sbp_session *sbp_session_create(
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
-	struct se_node_acl *se_nacl;
+
+	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}
+	spin_lock_init(&sess->lock);
+	INIT_LIST_HEAD(&sess->login_list);
+	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+	sess->guid = guid;

-	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+	sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+				sizeof(struct sbp_target_request),
+				TARGET_PROT_NORMAL, guid_str,
+				sess, NULL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");
-
		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

-	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
-
-	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
-	if (!se_nacl) {
-		pr_warn("Node ACL not found for %s\n", guid_str);
-
-		transport_free_session(sess->se_sess);
-		kfree(sess);
-
-		return ERR_PTR(-EPERM);
-	}
-
-	sess->se_sess->se_node_acl = se_nacl;
-
-	spin_lock_init(&sess->lock);
-	INIT_LIST_HEAD(&sess->login_list);
-	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
-
-	sess->guid = guid;
-
-	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
-
	return sess;
 }

@@ -908,7 +893,6 @@ static void tgt_agent_process_work(struct work_struct *work)
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
-		sbp_free_request(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
@@ -919,7 +903,6 @@ static void tgt_agent_process_work(struct work_struct *work)
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
-		sbp_free_request(req);
		return;
	default:
		BUG();
@@ -938,6 +921,25 @@ static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
	return active;
 }

+static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
+	struct fw_card *card, u64 next_orb)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct sbp_target_request *req;
+	int tag;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+	if (tag < 0)
+		return ERR_PTR(-ENOMEM);
+
+	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
+	memset(req, 0, sizeof(*req));
+	req->se_cmd.map_tag = tag;
+	req->se_cmd.tag = next_orb;
+
+	return req;
+}
+
 static void tgt_agent_fetch_work(struct work_struct *work)
 {
	struct sbp_target_agent *agent =
@@ -949,8 +951,8 @@ static void tgt_agent_fetch_work(struct work_struct *work)
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
-		req = kzalloc(sizeof(*req), GFP_KERNEL);
-		if (!req) {
+		req = sbp_mgt_get_req(sess, sess->card, next_orb);
+		if (IS_ERR(req)) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
@@ -985,7 +987,6 @@ static void tgt_agent_fetch_work(struct work_struct *work)
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
-			sbp_free_request(req);
			return;
		}

@@ -1232,7 +1233,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
-			      TCM_SIMPLE_TAG, data_dir, 0))
+			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
		goto err;

	return;
@@ -1244,7 +1245,6 @@ err:
	    STATUS_BLOCK_LEN(1) |
	    STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
-	sbp_free_request(req);
 }

 /*
@@ -1343,22 +1343,29 @@ static int sbp_rw_data(struct sbp_target_request *req)

 static int sbp_send_status(struct sbp_target_request *req)
 {
-	int ret, length;
+	int rc, ret = 0, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

-	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
-	if (ret != RCODE_COMPLETE) {
-		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
-		return -EIO;
+	if (rc != RCODE_COMPLETE) {
+		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
+		ret = -EIO;
+		goto put_ref;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

-	return 0;
+	/*
+	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
+	 * final se_cmd->cmd_kref put.
+	 */
+put_ref:
+	target_put_sess_cmd(&req->se_cmd);
+	return ret;
 }

 static void sbp_sense_mangle(struct sbp_target_request *req)
@@ -1447,9 +1454,13 @@ static int sbp_send_sense(struct sbp_target_request *req)

 static void sbp_free_request(struct sbp_target_request *req)
 {
+	struct se_cmd *se_cmd = &req->se_cmd;
+	struct se_session *se_sess = se_cmd->se_sess;
+
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
-	kfree(req);
+
+	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }

 static void sbp_mgt_agent_process(struct work_struct *work)
@@ -1609,7 +1620,6 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
		rcode = RCODE_CONFLICT_ERROR;
		goto out;
	}
-
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		rcode = RCODE_CONFLICT_ERROR;
@@ -1815,8 +1825,7 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd)
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

-	transport_generic_free_cmd(&req->se_cmd, 0);
-	return 1;
+	return transport_generic_free_cmd(&req->se_cmd, 0);
 }

 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
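The TARGET_SCF_ACK_KREF change above follows a fixed reference-counting contract: target_submit_cmd() takes an extra se_cmd->cmd_kref on the fabric driver's behalf, and the driver drops it with target_put_sess_cmd() after its last touch of the command (here, after the status write). A hedged sketch of that contract for a hypothetical driver; my_req/my_submit/my_send_status are illustrative names, only the target core calls are from the diff:

	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>

	struct my_req {
		struct se_cmd se_cmd;
		unsigned char cmd_buf[16];
		unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
	};

	static int my_submit(struct my_req *req, struct se_session *se_sess,
			     u64 unpacked_lun, u32 len, int dir)
	{
		/* TARGET_SCF_ACK_KREF: target core holds an extra cmd_kref
		 * reference for the fabric driver until we put it back. */
		return target_submit_cmd(&req->se_cmd, se_sess, req->cmd_buf,
					 req->sense_buf, unpacked_lun, len,
					 TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
	}

	static void my_send_status(struct my_req *req)
	{
		/* ... transmit the final status on the wire ... */

		/* Drop the ACK_KREF reference; the final cmd_kref put then
		 * happens via check_stop_free -> transport_generic_free_cmd(). */
		target_put_sess_cmd(&req->se_cmd);
	}

This is why the sbp conversion could delete the explicit sbp_free_request() calls on the completion paths: the release now rides on the kref instead of manual pairing.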
@@ -86,7 +86,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
-		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
@@ -199,7 +199,7 @@ bool target_lun_is_rdonly(struct se_cmd *cmd)

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
-	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
+	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
@@ -258,22 +258,15 @@ void core_free_device_list_for_node(

 void core_update_device_list_access(
	u64 mapped_lun,
-	u32 lun_access,
+	bool lun_access_ro,
	struct se_node_acl *nacl)
 {
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
-	if (deve) {
-		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
-			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
-			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-		} else {
-			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
-			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-		}
-	}
+	if (deve)
+		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
 }

@@ -319,7 +312,7 @@ int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
-	u32 lun_access,
+	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
 {
@@ -340,11 +333,7 @@ int core_enable_device_list_for_node(
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

-	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
-		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-	else
-		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-
+	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

@@ -433,7 +422,7 @@ void core_disable_device_list_for_node(

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
-	orig->lun_flags = 0;
+	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
@@ -558,8 +547,7 @@ int core_dev_add_lun(
 {
	int rc;

-	rc = core_tpg_add_lun(tpg, lun,
-			      TRANSPORT_LUNFLAGS_READ_WRITE, dev);
+	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

@@ -635,7 +623,7 @@ int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
-	u32 lun_access)
+	bool lun_access_ro)
 {
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
@@ -647,20 +635,19 @@ int core_dev_add_initiator_node_lun_acl(
	if (!nacl)
		return -EINVAL;

-	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
-	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
-		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	if (lun->lun_access_ro)
+		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
-			lun_access, nacl, tpg) < 0)
+			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
-		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
@@ -78,7 +78,7 @@ static int target_fabric_mappedlun_link(
			struct se_lun_acl, se_lun_group);
	struct se_portal_group *se_tpg;
	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
-	int lun_access;
+	bool lun_access_ro;

	if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
		pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
@@ -115,19 +115,18 @@ static int target_fabric_mappedlun_link(
	}
	/*
	 * If this struct se_node_acl was dynamically generated with
-	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
-	 * which be will write protected (READ-ONLY) when
+	 * tpg_1/attrib/generate_node_acls=1, use the existing
+	 * deve->lun_access_ro value, which will be true when
	 * tpg_1/attrib/demo_mode_write_protect=1
	 */
	rcu_read_lock();
	deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
	if (deve)
-		lun_access = deve->lun_flags;
+		lun_access_ro = deve->lun_access_ro;
	else
-		lun_access =
+		lun_access_ro =
			(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
-				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
-					   TRANSPORT_LUNFLAGS_READ_WRITE;
+				se_tpg)) ? true : false;
	rcu_read_unlock();
	/*
	 * Determine the actual mapped LUN value user wants..
@@ -135,7 +134,7 @@ static int target_fabric_mappedlun_link(
	 * This value is what the SCSI Initiator actually sees the
	 * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
	 */
-	return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
+	return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
 }

 static int target_fabric_mappedlun_unlink(
@@ -167,8 +166,7 @@ static ssize_t target_fabric_mappedlun_write_protect_show(
	rcu_read_lock();
	deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
	if (deve) {
-		len = sprintf(page, "%d\n",
-			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+		len = sprintf(page, "%d\n", deve->lun_access_ro);
	}
	rcu_read_unlock();

@@ -181,25 +179,23 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
	struct se_lun_acl *lacl = item_to_lun_acl(item);
	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
-	unsigned long op;
+	unsigned long wp;
	int ret;

-	ret = kstrtoul(page, 0, &op);
+	ret = kstrtoul(page, 0, &wp);
	if (ret)
		return ret;

-	if ((op != 1) && (op != 0))
+	if ((wp != 1) && (wp != 0))
		return -EINVAL;

-	core_update_device_list_access(lacl->mapped_lun, (op) ?
-				       TRANSPORT_LUNFLAGS_READ_ONLY :
-				       TRANSPORT_LUNFLAGS_READ_WRITE,
-				       lacl->se_lun_nacl);
+	/* wp=1 means lun_access_ro=true */
+	core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);

	pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
		" Mapped LUN: %llu Write Protect bit to %s\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
-		se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+		se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");

	return count;
@@ -412,9 +412,40 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
	return 0;
 }

+static sense_reason_t
+iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct scatterlist *sg = &cmd->t_data_sg[0];
+	struct page *page = NULL;
+	int ret;
+
+	if (sg->offset) {
+		page = alloc_page(GFP_KERNEL);
+		if (!page)
+			return TCM_OUT_OF_RESOURCES;
+		sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
+				  dev->dev_attrib.block_size);
+	}
+
+	ret = blkdev_issue_write_same(bdev,
+				target_to_linux_sector(dev, cmd->t_task_lba),
+				target_to_linux_sector(dev,
+					sbc_get_write_same_sectors(cmd)),
+				GFP_KERNEL, page ? page : sg_page(sg));
+	if (page)
+		__free_page(page);
+	if (ret)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
 static sense_reason_t
 iblock_execute_write_same(struct se_cmd *cmd)
 {
+	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
@@ -439,6 +470,9 @@ iblock_execute_write_same(struct se_cmd *cmd)
		return TCM_INVALID_CDB_FIELD;
	}

+	if (bdev_write_same(bdev))
+		return iblock_execute_write_same_direct(bdev, cmd);
+
	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
@@ -59,10 +59,10 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 void	target_pr_kref_release(struct kref *);
 void	core_free_device_list_for_node(struct se_node_acl *,
		struct se_portal_group *);
-void	core_update_device_list_access(u64, u32, struct se_node_acl *);
+void	core_update_device_list_access(u64, bool, struct se_node_acl *);
 struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
 int	core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
-		u64, u32, struct se_node_acl *, struct se_portal_group *);
+		u64, bool, struct se_node_acl *, struct se_portal_group *);
 void	core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
		struct se_node_acl *, struct se_portal_group *);
 void	core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
@@ -72,7 +72,7 @@ void	core_dev_del_lun(struct se_portal_group *, struct se_lun *);
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
		struct se_node_acl *, u64, int *);
 int	core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
-		struct se_lun_acl *, struct se_lun *lun, u32);
+		struct se_lun_acl *, struct se_lun *lun, bool);
 int	core_dev_del_initiator_node_lun_acl(struct se_lun *,
		struct se_lun_acl *);
 void	core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
@@ -118,7 +118,7 @@ void	core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
 void	core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
 struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
 int	core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
-		u32, struct se_device *);
+		bool, struct se_device *);
 void	core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
 struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
		const char *initiatorname);
@@ -997,7 +997,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
	int length = 0;
	int ret;
	int i;
-	bool read_only = target_lun_is_rdonly(cmd);;

	memset(buf, 0, SE_MODE_PAGE_BUF);

@@ -1008,7 +1007,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
-	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
+	if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
		spc_modesense_write_protect(&buf[length], type);

	/*
@@ -121,7 +121,7 @@ void core_tpg_add_node_to_devs(
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
 {
-	u32 lun_access = 0;
+	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

@@ -137,27 +137,26 @@ void core_tpg_add_node_to_devs(
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
-			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
-				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+				lun_access_ro = true;
			else
-				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
-			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
-			"READ-WRITE" : "READ-ONLY");
+			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
-						 lun_access, acl, tpg);
+						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this dynamic
@@ -522,7 +521,7 @@ int core_tpg_register(
		return PTR_ERR(se_tpg->tpg_virt_lun0);

	ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
-			TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+			true, g_lun0_dev);
	if (ret < 0) {
		kfree(se_tpg->tpg_virt_lun0);
		return ret;
@@ -616,7 +615,7 @@ struct se_lun *core_tpg_alloc_lun(
 int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
-	u32 lun_access,
+	bool lun_access_ro,
	struct se_device *dev)
 {
	int ret;
@@ -644,9 +643,9 @@ int core_tpg_add_lun(
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
-		lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+		lun->lun_access_ro = true;
	else
-		lun->lun_access = lun_access;
+		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);
@@ -281,6 +281,17 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
	struct se_session *se_sess;
	int rc;

+	if (tag_num != 0 && !tag_size) {
+		pr_err("init_session_tags called with percpu-ida tag_num:"
+		       " %u, but zero tag_size\n", tag_num);
+		return ERR_PTR(-EINVAL);
+	}
+	if (!tag_num && tag_size) {
+		pr_err("init_session_tags called with percpu-ida tag_size:"
+		       " %u, but zero tag_num\n", tag_size);
+		return ERR_PTR(-EINVAL);
+	}
+
	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;
@@ -374,6 +385,51 @@ void transport_register_session(
 }
 EXPORT_SYMBOL(transport_register_session);

+struct se_session *
+target_alloc_session(struct se_portal_group *tpg,
+		     unsigned int tag_num, unsigned int tag_size,
+		     enum target_prot_op prot_op,
+		     const char *initiatorname, void *private,
+		     int (*callback)(struct se_portal_group *,
+				     struct se_session *, void *))
+{
+	struct se_session *sess;
+
+	/*
+	 * If the fabric driver is using percpu-ida based pre allocation
+	 * of I/O descriptor tags, go ahead and perform that setup now..
+	 */
+	if (tag_num != 0)
+		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
+	else
+		sess = transport_init_session(prot_op);
+
+	if (IS_ERR(sess))
+		return sess;
+
+	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
+					(unsigned char *)initiatorname);
+	if (!sess->se_node_acl) {
+		transport_free_session(sess);
+		return ERR_PTR(-EACCES);
+	}
+	/*
+	 * Go ahead and perform any remaining fabric setup that is
+	 * required before transport_register_session().
+	 */
+	if (callback != NULL) {
+		int rc = callback(tpg, sess, private);
+
+		if (rc) {
+			transport_free_session(sess);
+			return ERR_PTR(rc);
+		}
+	}
+
+	transport_register_session(tpg, sess->se_node_acl, sess, private);
+	return sess;
+}
+EXPORT_SYMBOL(target_alloc_session);
+
 static void target_release_session(struct kref *kref)
 {
	struct se_session *se_sess = container_of(kref,
@@ -1941,6 +1997,9 @@ static void transport_complete_qf(struct se_cmd *cmd)

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
+		if (cmd->scsi_status)
+			goto queue_status;
+
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
@@ -1951,6 +2010,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
	}
	/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
+queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
@@ -2072,6 +2132,9 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_rsp:
	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
+		if (cmd->scsi_status)
+			goto queue_status;
+
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.tx_data_octets);
		/*
@@ -2111,6 +2174,7 @@ queue_rsp:
	}
	/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
+queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
@@ -2596,8 +2660,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
-		list_del_init(&se_cmd->se_cmd_list);
-
		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -63,8 +64,11 @@

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

#define DATA_BLOCK_BITS 256
#define DATA_BLOCK_SIZE 4096

#define CMDR_SIZE (16 * 4096)
#define DATA_SIZE (257 * 4096)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)

#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

@@ -93,12 +97,11 @@ struct tcmu_dev {
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data ring from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
size_t data_size;
/* Ring head + tail values. */
/* Must add data_off and mb_addr to get the address */
size_t data_head;
size_t data_tail;

DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);

wait_queue_head_t wait_cmdr;
/* TODO should this be a mutex? */
@@ -122,9 +125,9 @@ struct tcmu_cmd {

uint16_t cmd_id;

/* Can't use se_cmd->data_length when cleaning up expired cmds, because if
/* Can't use se_cmd when cleaning up expired cmds, because if
cmd has been completed then accessing se_cmd is off limits */
size_t data_length;
DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);

unsigned long deadline;

@@ -168,13 +171,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)

tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
tcmu_cmd->data_length = se_cmd->data_length;

if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
}

tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);

idr_preload(GFP_KERNEL);
@@ -231,105 +227,126 @@ static inline size_t head_to_end(size_t head, size_t size)
return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt,
struct tcmu_dev *udev)
{
struct iovec *iovec;

if (*iov_cnt != 0)
(*iov)++;
(*iov_cnt)++;

iovec = *iov;
memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

/* offset is relative to mb_addr */
static inline size_t get_block_offset(struct tcmu_dev *dev,
int block, int remaining)
{
return dev->data_off + block * DATA_BLOCK_SIZE +
DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
{
return (size_t)iov->iov_base + iov->iov_len;
}

static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
struct scatterlist *data_sg, unsigned int data_nents,
struct iovec **iov, int *iov_cnt, bool copy_data)
{
int i;
int i, block;
int block_remaining = 0;
void *from, *to;
size_t copy_bytes;
size_t copy_bytes, to_offset;
struct scatterlist *sg;

for_each_sg(data_sg, sg, data_nents, i) {
copy_bytes = min_t(size_t, sg->length,
head_to_end(udev->data_head, udev->data_size));
int sg_remaining = sg->length;
from = kmap_atomic(sg_page(sg)) + sg->offset;
to = (void *) udev->mb_addr + udev->data_off + udev->data_head;

if (copy_data) {
memcpy(to, from, copy_bytes);
tcmu_flush_dcache_range(to, copy_bytes);
}

/* Even iov_base is relative to mb_addr */
(*iov)->iov_len = copy_bytes;
(*iov)->iov_base = (void __user *) udev->data_off +
udev->data_head;
(*iov_cnt)++;
(*iov)++;

UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);

/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
if (sg->length != copy_bytes) {
void *from_skip = from + copy_bytes;

copy_bytes = sg->length - copy_bytes;

(*iov)->iov_len = copy_bytes;
(*iov)->iov_base = (void __user *) udev->data_off +
udev->data_head;

while (sg_remaining > 0) {
if (block_remaining == 0) {
block = find_first_zero_bit(udev->data_bitmap,
DATA_BLOCK_BITS);
block_remaining = DATA_BLOCK_SIZE;
set_bit(block, udev->data_bitmap);
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
to_offset = get_block_offset(udev, block,
block_remaining);
to = (void *)udev->mb_addr + to_offset;
if (*iov_cnt != 0 &&
to_offset == iov_tail(udev, *iov)) {
(*iov)->iov_len += copy_bytes;
} else {
new_iov(iov, iov_cnt, udev);
(*iov)->iov_base = (void __user *) to_offset;
(*iov)->iov_len = copy_bytes;
}
if (copy_data) {
to = (void *) udev->mb_addr +
udev->data_off + udev->data_head;
memcpy(to, from_skip, copy_bytes);
memcpy(to, from + sg->length - sg_remaining,
copy_bytes);
tcmu_flush_dcache_range(to, copy_bytes);
}

(*iov_cnt)++;
(*iov)++;

UPDATE_HEAD(udev->data_head,
copy_bytes, udev->data_size);
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
}

kunmap_atomic(from - sg->offset);
}
}

static void gather_and_free_data_area(struct tcmu_dev *udev,
struct scatterlist *data_sg, unsigned int data_nents)
static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
{
int i;
bitmap_xor(udev->data_bitmap, udev->data_bitmap, cmd->data_bitmap,
DATA_BLOCK_BITS);
}

static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
struct scatterlist *data_sg, unsigned int data_nents)
{
int i, block;
int block_remaining = 0;
void *from, *to;
size_t copy_bytes;
size_t copy_bytes, from_offset;
struct scatterlist *sg;

/* It'd be easier to look at entry's iovec again, but UAM */
for_each_sg(data_sg, sg, data_nents, i) {
copy_bytes = min_t(size_t, sg->length,
head_to_end(udev->data_tail, udev->data_size));

int sg_remaining = sg->length;
to = kmap_atomic(sg_page(sg)) + sg->offset;
WARN_ON(sg->length + sg->offset > PAGE_SIZE);
from = (void *) udev->mb_addr +
udev->data_off + udev->data_tail;
tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to, from, copy_bytes);

UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);

/* Uh oh, wrapped the data buffer for this sg's data */
if (sg->length != copy_bytes) {
void *to_skip = to + copy_bytes;

from = (void *) udev->mb_addr +
udev->data_off + udev->data_tail;
WARN_ON(udev->data_tail);
copy_bytes = sg->length - copy_bytes;
while (sg_remaining > 0) {
if (block_remaining == 0) {
block = find_first_bit(cmd_bitmap,
DATA_BLOCK_BITS);
block_remaining = DATA_BLOCK_SIZE;
clear_bit(block, cmd_bitmap);
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
from_offset = get_block_offset(udev, block,
block_remaining);
from = (void *) udev->mb_addr + from_offset;
tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to_skip, from, copy_bytes);
memcpy(to + sg->length - sg_remaining, from,
copy_bytes);

UPDATE_HEAD(udev->data_tail,
copy_bytes, udev->data_size);
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
}
kunmap_atomic(to - sg->offset);
}
}

static inline size_t spc_bitmap_free(unsigned long *bitmap)
{
return DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
bitmap_weight(bitmap, DATA_BLOCK_BITS));
}

/*
* We can't queue a command until we have space available on the cmd ring *and*
* space available on the data ring.
@@ -339,9 +356,8 @@ static void gather_and_free_data_area(struct tcmu_dev *udev,
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
struct tcmu_mailbox *mb = udev->mb_addr;
size_t space;
size_t space, cmd_needed;
u32 cmd_head;
size_t cmd_needed;

tcmu_flush_dcache_range(mb, sizeof(*mb));

@@ -363,10 +379,10 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
return false;
}

space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
space = spc_bitmap_free(udev->data_bitmap);
if (space < data_needed) {
pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
udev->data_tail, udev->data_size);
pr_debug("no data space: only %zu available, but ask for %zu\n",
space, data_needed);
return false;
}

@@ -385,6 +401,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
size_t data_length;
DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);

if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
return -EINVAL;
@@ -393,12 +411,12 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
* Must be a certain minimum size for response sense info, but
* also may be larger if the iov array is large.
*
* iovs = sgl_nents+1, for end-of-ring case, plus another 1
* b/c size == offsetof one-past-element.
* We prepare way too many iovs for potential uses here, because it's
* expensive to tell how many regions are freed in the bitmap
*/
base_command_size = max(offsetof(struct tcmu_cmd_entry,
req.iov[se_cmd->t_bidi_data_nents +
se_cmd->t_data_nents + 2]),
req.iov[se_cmd->t_bidi_data_nents +
se_cmd->t_data_nents]),
sizeof(struct tcmu_cmd_entry));
command_size = base_command_size
+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -409,13 +427,18 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)

mb = udev->mb_addr;
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
data_length = se_cmd->data_length;
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
data_length += se_cmd->t_bidi_data_sg->length;
}
if ((command_size > (udev->cmdr_size / 2))
|| tcmu_cmd->data_length > (udev->data_size - 1))
|| data_length > udev->data_size)
pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
"cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
"cmd/data ring buffers\n", command_size, data_length,
udev->cmdr_size, udev->data_size);

while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
while (!is_ring_space_avail(udev, command_size, data_length)) {
int ret;
DEFINE_WAIT(__wait);

@@ -462,6 +485,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry->hdr.kflags = 0;
entry->hdr.uflags = 0;

bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);

/*
* Fix up iovecs, and handle if allocation in data ring wrapped.
*/
@@ -480,6 +505,10 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
entry->req.iov_bidi_cnt = iov_cnt;

/* cmd's data_bitmap is what changed in process */
bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
DATA_BLOCK_BITS);

/* All offsets relative to mb_addr, not start of entry! */
cdb_off = CMDR_OFF + cmd_head + base_command_size;
memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -530,35 +559,42 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
struct tcmu_dev *udev = cmd->tcmu_dev;

if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
/* cmd has been completed already from timeout, just reclaim data
ring space */
UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
/*
* cmd has been completed already from timeout, just reclaim
* data ring space and free cmd
*/
free_data_area(udev, cmd);

kmem_cache_free(tcmu_cmd_cache, cmd);
return;
}

if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
free_data_area(udev, cmd);
pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
cmd->se_cmd);
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
se_cmd->scsi_sense_length);

UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
free_data_area(udev, cmd);
} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
/* Discard data_out buffer */
UPDATE_HEAD(udev->data_tail,
(size_t)se_cmd->t_data_sg->length, udev->data_size);
DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);

/* Get Data-In buffer */
gather_and_free_data_area(udev,
/* Get Data-In buffer before clean up */
bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
gather_data_area(udev, bitmap,
se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
free_data_area(udev, cmd);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
gather_and_free_data_area(udev,
DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);

bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
gather_data_area(udev, bitmap,
se_cmd->t_data_sg, se_cmd->t_data_nents);
free_data_area(udev, cmd);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
free_data_area(udev, cmd);
} else if (se_cmd->data_direction != DMA_NONE) {
pr_warn("TCMU: data direction was %d!\n",
se_cmd->data_direction);
@@ -894,11 +930,13 @@ static int tcmu_configure_device(struct se_device *dev)

mb = udev->mb_addr;
mb->version = TCMU_MAILBOX_VERSION;
mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;

WARN_ON(!PAGE_ALIGNED(udev->data_off));
WARN_ON(udev->data_size % PAGE_SIZE);
WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

info->version = __stringify(TCMU_MAILBOX_VERSION);

@@ -942,12 +980,12 @@ err_vzalloc:
return ret;
}

static int tcmu_check_pending_cmd(int id, void *p, void *data)
static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
struct tcmu_cmd *cmd = p;

if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
kmem_cache_free(tcmu_cmd_cache, cmd);
return 0;
}
return -EINVAL;
}

@@ -962,6 +1000,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
struct tcmu_cmd *cmd;
bool all_expired = true;
int i;

del_timer_sync(&udev->timeout);
@@ -970,10 +1010,13 @@ static void tcmu_free_device(struct se_device *dev)

/* Upper layer should drain all requests before calling this */
spin_lock_irq(&udev->commands_lock);
i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
idr_for_each_entry(&udev->commands, cmd, i) {
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
idr_destroy(&udev->commands);
spin_unlock_irq(&udev->commands_lock);
WARN_ON(i);
WARN_ON(!all_expired);

/* Device was configured */
if (udev->uio_info.uio_dev) {

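The tcmu hunks above (drivers/target/target_core_user.c) replace head/tail ring accounting for the data area with a 256-bit block bitmap: each 4 KiB block is claimed with find_first_zero_bit() + set_bit(), the per-command block set is recovered by XOR-ing a before/after snapshot of the device bitmap, and freeing is one more XOR. A self-contained userspace demonstration of that lifecycle, with minimal bit helpers standing in for the kernel's bitmap API:

#include <stdio.h>
#include <string.h>

#define DATA_BLOCK_BITS 256
#define DATA_BLOCK_SIZE 4096
#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define NLONGS          (DATA_BLOCK_BITS / BITS_PER_LONG)

static void set_bit(int n, unsigned long *map)
{
        map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static int test_bit(int n, const unsigned long *map)
{
        return !!(map[n / BITS_PER_LONG] & (1UL << (n % BITS_PER_LONG)));
}

static int find_first_zero_bit(const unsigned long *map)
{
        for (int n = 0; n < DATA_BLOCK_BITS; n++)
                if (!test_bit(n, map))
                        return n;
        return DATA_BLOCK_BITS;
}

int main(void)
{
        unsigned long dev[NLONGS] = { 0 }, old[NLONGS], cmd[NLONGS];

        memcpy(old, dev, sizeof(dev));          /* bitmap_copy(old_bitmap, ...) */

        /* "scatter": claim three 4 KiB blocks for one command */
        for (int i = 0; i < 3; i++)
                set_bit(find_first_zero_bit(dev), dev);

        /* cmd's data_bitmap is what changed in the process (bitmap_xor) */
        for (int i = 0; i < NLONGS; i++)
                cmd[i] = old[i] ^ dev[i];

        /* free_data_area(): XOR the command's blocks back out */
        for (int i = 0; i < NLONGS; i++)
                dev[i] ^= cmd[i];

        int empty = 1;
        for (int i = 0; i < NLONGS; i++)
                if (dev[i])
                        empty = 0;
        printf("reserved %d bytes, bitmap empty after free: %s\n",
               3 * DATA_BLOCK_SIZE, empty ? "yes" : "no");
        return 0;
}

Because a command's footprint is just a set of bits, blocks no longer have to be released in submission order, which is what makes the out-of-order completion capability advertised via the mailbox flag possible.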
@@ -107,8 +107,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)

int ft_check_stop_free(struct se_cmd *se_cmd)
{
transport_generic_free_cmd(se_cmd, 0);
return 1;
return transport_generic_free_cmd(se_cmd, 0);
}

/*
@@ -179,6 +178,12 @@ int ft_queue_status(struct se_cmd *se_cmd)
return -ENOMEM;
}
lport->tt.exch_done(cmd->seq);
/*
* Drop the extra ACK_KREF reference taken by target_submit_cmd()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
* final se_cmd->cmd_kref put.
*/
target_put_sess_cmd(&cmd->se_cmd);
return 0;
}

@@ -387,7 +392,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
/* FIXME: Add referenced task tag for ABORT_TASK */
rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
cmd, tm_func, GFP_KERNEL, 0, 0);
cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
if (rc < 0)
ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}
@@ -422,6 +427,12 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
/*
* Drop the extra ACK_KREF reference taken by target_submit_tmr()
* ahead of ft_check_stop_free() -> transport_generic_free_cmd()
* final se_cmd->cmd_kref put.
*/
target_put_sess_cmd(&cmd->se_cmd);
}

void ft_aborted_task(struct se_cmd *se_cmd)
@@ -560,7 +571,8 @@ static void ft_send_work(struct work_struct *work)
*/
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir, 0))
ntohl(fcp->fc_dl), task_attr, data_dir,
TARGET_SCF_ACK_KREF))
goto err;

pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);

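The tcm_fc hunks above adopt the TARGET_SCF_ACK_KREF convention for both I/O and TMRs: submission takes an extra cmd_kref so the se_cmd cannot vanish while the fabric is still building its response, and the fabric drops that reference explicitly once the response is on the wire. The shape of the pattern, as a kernel-context sketch (compiles only in-tree; names mirror the diff above):

/* at submit time: the extra ACK_KREF reference keeps cmd alive */
rc = target_submit_cmd(&cmd->se_cmd, sess, cdb, sense, lun,
                       data_len, task_attr, data_dir,
                       TARGET_SCF_ACK_KREF);
if (rc < 0)
        goto err;

/* later, in the queue_status / queue_tm_rsp completion path: */
send_response(cmd);                     /* fabric-specific, e.g. ft_send_resp_code() */
target_put_sess_cmd(&cmd->se_cmd);      /* drop the ACK_KREF reference */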
@@ -186,6 +186,20 @@ out:
return NULL;
}

static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
struct se_session *se_sess, void *p)
{
struct ft_sess *sess = p;
struct ft_tport *tport = sess->tport;
struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];

pr_debug("port_id %x sess %p\n", sess->port_id, sess);
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;

return 0;
}

/*
* Allocate session and enter it in the hash for the local port.
* Caller holds ft_lport_lock.
@@ -194,7 +208,6 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
struct fc_rport_priv *rdata)
{
struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
struct se_node_acl *se_acl;
struct ft_sess *sess;
struct hlist_head *head;
unsigned char initiatorname[TRANSPORT_IQN_LEN];
@@ -210,31 +223,18 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
if (!sess)
return NULL;

sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd),
TARGET_PROT_NORMAL);
kref_init(&sess->kref); /* ref for table entry */
sess->tport = tport;
sess->port_id = port_id;

sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd),
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
}

se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]);
if (!se_acl) {
transport_free_session(sess->se_sess);
kfree(sess);
return NULL;
}
sess->se_sess->se_node_acl = se_acl;
sess->tport = tport;
sess->port_id = port_id;
kref_init(&sess->kref); /* ref for table entry */
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;

pr_debug("port_id %x sess %p\n", port_id, sess);

transport_register_session(&tport->tpg->se_tpg, se_acl,
sess->se_sess, sess);
return sess;
}

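The ft_sess_create() conversion above is the template for the tree-wide target_alloc_session() changeover: session allocation, tag-pool setup, node ACL lookup by initiator name, and registration all collapse into one call, and any fabric-private bookkeeping moves into the callback. The callback contract, sketched from the code above (kernel-context; my reading of the ordering is an assumption, hedged in the comments):

static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
                            struct se_session *se_sess, void *p)
{
        struct ft_sess *sess = p;       /* the void * cookie passed in */
        struct ft_tport *tport = sess->tport;

        /*
         * Presumably runs after the tag pool and se_node_acl are set up
         * but before registration completes, so this is the safe point
         * to publish the session in fabric-private tables.
         */
        hlist_add_head_rcu(&sess->hash,
                           &tport->hash[ft_sess_hash(sess->port_id)]);
        tport->sess_count++;
        return 0;       /* a non-zero return should unwind the allocation */
}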
@@ -41,13 +41,6 @@ static inline struct f_uas *to_f_uas(struct usb_function *f)
return container_of(f, struct f_uas, function);
}

static void usbg_cmd_release(struct kref *);

static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
{
kref_put(&cmd->ref, usbg_cmd_release);
}

/* Start bot.c code */

static int bot_enqueue_cmd_cbw(struct f_uas *fu)
@@ -68,7 +61,7 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct f_uas *fu = cmd->fu;

usbg_cleanup_cmd(cmd);
transport_generic_free_cmd(&cmd->se_cmd, 0);
if (req->status < 0) {
pr_err("ERR %s(%d)\n", __func__, __LINE__);
return;
@@ -605,7 +598,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
break;

case UASP_QUEUE_COMMAND:
usbg_cleanup_cmd(cmd);
transport_generic_free_cmd(&cmd->se_cmd, 0);
usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
break;

@@ -615,7 +608,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
return;

cleanup:
usbg_cleanup_cmd(cmd);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}

static int uasp_send_status_response(struct usbg_cmd *cmd)
@@ -977,7 +970,7 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
return;

cleanup:
usbg_cleanup_cmd(cmd);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}

static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
@@ -1046,7 +1039,7 @@ static void usbg_cmd_work(struct work_struct *work)
struct se_cmd *se_cmd;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg;
int dir;
int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);

se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
@@ -1060,9 +1053,9 @@ static void usbg_cmd_work(struct work_struct *work)
goto out;
}

if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
cmd->sense_iu.sense, cmd->unpacked_lun, 0,
cmd->prio_attr, dir, flags) < 0)
goto out;

return;
@@ -1070,42 +1063,64 @@ static void usbg_cmd_work(struct work_struct *work)
out:
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
usbg_cleanup_cmd(cmd);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}

static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
{
struct se_session *se_sess = tv_nexus->tvn_se_sess;
struct usbg_cmd *cmd;
int tag;

tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
if (tag < 0)
return ERR_PTR(-ENOMEM);

cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(*cmd));
cmd->se_cmd.map_tag = tag;
cmd->se_cmd.tag = cmd->tag = scsi_tag;
cmd->fu = fu;

return cmd;
}

static void usbg_release_cmd(struct se_cmd *);

static int usbg_submit_command(struct f_uas *fu,
void *cmdbuf, unsigned int len)
{
struct command_iu *cmd_iu = cmdbuf;
struct usbg_cmd *cmd;
struct usbg_tpg *tpg;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg = fu->tpg;
struct tcm_usbg_nexus *tv_nexus = tpg->tpg_nexus;
u32 cmd_len;
u16 scsi_tag;

if (cmd_iu->iu_id != IU_ID_COMMAND) {
pr_err("Unsupported type %d\n", cmd_iu->iu_id);
return -EINVAL;
}

cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Missing nexus, ignoring command\n");
return -EINVAL;
}

cmd->fu = fu;

/* XXX until I figure out why I can't free in on complete */
kref_init(&cmd->ref);
kref_get(&cmd->ref);

tpg = fu->tpg;
cmd_len = (cmd_iu->len & ~0x3) + 16;
if (cmd_len > USBG_MAX_CMD)
goto err;
return -EINVAL;

scsi_tag = be16_to_cpup(&cmd_iu->tag);
cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
if (IS_ERR(cmd)) {
pr_err("usbg_get_cmd failed\n");
return -ENOMEM;
}
memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);

cmd->tag = be16_to_cpup(&cmd_iu->tag);
cmd->se_cmd.tag = cmd->tag;
if (fu->flags & USBG_USE_STREAMS) {
if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
goto err;
@@ -1117,12 +1132,6 @@ static int usbg_submit_command(struct f_uas *fu,
cmd->stream = &fu->stream[0];
}

tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Missing nexus, ignoring command\n");
goto err;
}

switch (cmd_iu->prio_attr & 0x7) {
case UAS_HEAD_TAG:
cmd->prio_attr = TCM_HEAD_TAG;
@@ -1148,7 +1157,7 @@ static int usbg_submit_command(struct f_uas *fu,

return 0;
err:
kfree(cmd);
usbg_release_cmd(&cmd->se_cmd);
return -EINVAL;
}

@@ -1182,7 +1191,7 @@ static void bot_cmd_work(struct work_struct *work)
out:
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
usbg_cleanup_cmd(cmd);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}

static int bot_submit_command(struct f_uas *fu,
@@ -1190,7 +1199,7 @@ static int bot_submit_command(struct f_uas *fu,
{
struct bulk_cb_wrap *cbw = cmdbuf;
struct usbg_cmd *cmd;
struct usbg_tpg *tpg;
struct usbg_tpg *tpg = fu->tpg;
struct tcm_usbg_nexus *tv_nexus;
u32 cmd_len;

@@ -1207,28 +1216,20 @@ static int bot_submit_command(struct f_uas *fu,
if (cmd_len < 1 || cmd_len > 16)
return -EINVAL;

cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;

cmd->fu = fu;

/* XXX until I figure out why I can't free in on complete */
kref_init(&cmd->ref);
kref_get(&cmd->ref);

tpg = fu->tpg;

memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);

cmd->bot_tag = cbw->Tag;

tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Missing nexus, ignoring command\n");
goto err;
return -ENODEV;
}

cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
if (IS_ERR(cmd)) {
pr_err("usbg_get_cmd failed\n");
return -ENOMEM;
}
memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);

cmd->bot_tag = cbw->Tag;
cmd->prio_attr = TCM_SIMPLE_TAG;
cmd->unpacked_lun = cbw->Lun;
cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
@@ -1239,9 +1240,6 @@ static int bot_submit_command(struct f_uas *fu,
queue_work(tpg->workqueue, &cmd->work);

return 0;
err:
kfree(cmd);
return -EINVAL;
}

/* Start fabric.c code */
@@ -1282,20 +1280,14 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}

static void usbg_cmd_release(struct kref *ref)
{
struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
ref);

transport_generic_free_cmd(&cmd->se_cmd, 0);
}

static void usbg_release_cmd(struct se_cmd *se_cmd)
{
struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
se_cmd);
struct se_session *se_sess = se_cmd->se_sess;

kfree(cmd->data_buf);
kfree(cmd);
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int usbg_shutdown_session(struct se_session *se_sess)
@@ -1579,55 +1571,48 @@ out:
return ret;
}

static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
struct se_session *se_sess, void *p)
{
struct usbg_tpg *tpg = container_of(se_tpg,
struct usbg_tpg, se_tpg);

tpg->tpg_nexus = p;
return 0;
}

static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
{
struct se_portal_group *se_tpg;
struct tcm_usbg_nexus *tv_nexus;
int ret;
int ret = 0;

mutex_lock(&tpg->tpg_mutex);
if (tpg->tpg_nexus) {
ret = -EEXIST;
pr_debug("tpg->tpg_nexus already exists\n");
goto err_unlock;
goto out_unlock;
}
se_tpg = &tpg->se_tpg;

ret = -ENOMEM;
tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
if (!tv_nexus)
goto err_unlock;
tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(tv_nexus->tvn_se_sess))
goto err_free;
if (!tv_nexus) {
ret = -ENOMEM;
goto out_unlock;
}

/*
* Since we are running in 'demo mode' this call with generate a
* struct se_node_acl for the tcm_vhost struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
se_tpg, name);
if (!tv_nexus->tvn_se_sess->se_node_acl) {
tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
USB_G_DEFAULT_SESSION_TAGS,
sizeof(struct usbg_cmd),
TARGET_PROT_NORMAL, name,
tv_nexus, usbg_alloc_sess_cb);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
#define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
pr_debug(MAKE_NEXUS_MSG, name);
#undef MAKE_NEXUS_MSG
goto err_session;
ret = PTR_ERR(tv_nexus->tvn_se_sess);
kfree(tv_nexus);
}
/*
* Now register the TCM vHost virtual I_T Nexus as active.
*/
transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
tv_nexus->tvn_se_sess, tv_nexus);
tpg->tpg_nexus = tv_nexus;
mutex_unlock(&tpg->tpg_mutex);
return 0;

err_session:
transport_free_session(tv_nexus->tvn_se_sess);
err_free:
kfree(tv_nexus);
err_unlock:
out_unlock:
mutex_unlock(&tpg->tpg_mutex);
return ret;
}
@@ -1735,11 +1720,7 @@ static void usbg_port_unlink(struct se_portal_group *se_tpg,

static int usbg_check_stop_free(struct se_cmd *se_cmd)
{
struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
se_cmd);

kref_put(&cmd->ref, usbg_cmd_release);
return 1;
return target_put_sess_cmd(se_cmd);
}

static const struct target_core_fabric_ops usbg_ops = {

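The usb-gadget hunks above replace per-command kzalloc()/kref bookkeeping with tags drawn from the session's pre-allocated sess_cmd_map. The core lookup pattern, lifted from usbg_get_cmd() above and reused by xen-scsiback further below (kernel-context sketch; error handling trimmed):

/* allocate: a tag indexes directly into the preallocated command array */
int tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
if (tag < 0)
        return ERR_PTR(-ENOMEM);

cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(*cmd));
cmd->se_cmd.map_tag = tag;      /* remembered for the free side */

/* release (usbg_release_cmd()): no kfree, just return the tag */
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);

This removes an allocation/free pair from the I/O fast path and bounds command memory at nexus setup time rather than per request.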
@@ -23,6 +23,8 @@ enum {
#define USB_G_ALT_INT_BBB 0
#define USB_G_ALT_INT_UAS 1

#define USB_G_DEFAULT_SESSION_TAGS 128

struct tcm_usbg_nexus {
struct se_session *tvn_se_sess;
};

@@ -1664,8 +1664,7 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
mutex_unlock(&vhost_scsi_mutex);
}

static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
struct se_session *se_sess)
static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
{
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;
@@ -1721,14 +1720,47 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
NULL,
};

static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
struct se_session *se_sess, void *p)
{
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;

for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
if (!tv_cmd->tvc_sgl) {
pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
goto out;
}

tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
if (!tv_cmd->tvc_upages) {
pr_err("Unable to allocate tv_cmd->tvc_upages\n");
goto out;
}

tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
if (!tv_cmd->tvc_prot_sgl) {
pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
goto out;
}
}
return 0;
out:
vhost_scsi_free_cmd_map_res(se_sess);
return -ENOMEM;
}

static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
struct se_portal_group *se_tpg;
struct se_session *se_sess;
struct vhost_scsi_nexus *tv_nexus;
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;

mutex_lock(&tpg->tv_tpg_mutex);
if (tpg->tpg_nexus) {
@@ -1745,74 +1777,25 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
return -ENOMEM;
}
/*
* Initialize the struct se_session pointer and setup tagpool
* for struct vhost_scsi_cmd descriptors
* Since we are running in 'demo mode' this call with generate a
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
tv_nexus->tvn_se_sess = transport_init_session_tags(
tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
VHOST_SCSI_DEFAULT_TAGS,
sizeof(struct vhost_scsi_cmd),
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
(unsigned char *)name, tv_nexus,
vhost_scsi_nexus_cb);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
return -ENOMEM;
}
se_sess = tv_nexus->tvn_se_sess;
for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
if (!tv_cmd->tvc_sgl) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
goto out;
}

tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
if (!tv_cmd->tvc_upages) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_upages\n");
goto out;
}

tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
if (!tv_cmd->tvc_prot_sgl) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
goto out;
}
}
/*
* Since we are running in 'demo mode' this call with generate a
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
se_tpg, (unsigned char *)name);
if (!tv_nexus->tvn_se_sess->se_node_acl) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_debug("core_tpg_check_initiator_node_acl() failed"
" for %s\n", name);
goto out;
}
/*
* Now register the TCM vhost virtual I_T Nexus as active.
*/
transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
tv_nexus->tvn_se_sess, tv_nexus);
tpg->tpg_nexus = tv_nexus;

mutex_unlock(&tpg->tv_tpg_mutex);
return 0;

out:
vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
transport_free_session(se_sess);
kfree(tv_nexus);
return -ENOMEM;
}

static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
@@ -1853,7 +1836,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
vhost_scsi_free_cmd_map_res(se_sess);
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/

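In the vhost conversion above, the per-command buffer pre-allocation moves out of vhost_scsi_make_nexus() and into vhost_scsi_nexus_cb(), so the unwind helper no longer needs the nexus argument: everything it must free hangs off se_sess->sess_cmd_map. A plausible shape of the trimmed helper, inferred from the allocation loop and call sites shown above (sketch, not quoted from the commit):

static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
{
        struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;

        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
                /* kfree(NULL) is a no-op, so partial setup unwinds safely */
                kfree(tv_cmd->tvc_sgl);
                kfree(tv_cmd->tvc_upages);
                kfree(tv_cmd->tvc_prot_sgl);
        }
}

Keying cleanup on the session alone is what lets both the nexus-callback failure path and vhost_scsi_drop_nexus() share one helper.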
@@ -141,6 +141,8 @@ struct scsiback_tmr {
wait_queue_head_t tmr_wait;
};

#define VSCSI_DEFAULT_SESSION_TAGS 128

struct scsiback_nexus {
/* Pointer to TCM session for I_T Nexus */
struct se_session *tvn_se_sess;
@@ -190,7 +192,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in backend buffer");

static struct kmem_cache *scsiback_cachep;
static DEFINE_SPINLOCK(free_pages_lock);
static int free_pages_num;
static LIST_HEAD(scsiback_free_pages);
@@ -321,11 +322,11 @@ static void scsiback_free_translation_entry(struct kref *kref)
kfree(entry);
}

static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
uint32_t resid, struct vscsibk_pend *pending_req)
static void scsiback_send_response(struct vscsibk_info *info,
char *sense_buffer, int32_t result, uint32_t resid,
uint16_t rqid)
{
struct vscsiif_response *ring_res;
struct vscsibk_info *info = pending_req->info;
int notify;
struct scsi_sense_hdr sshdr;
unsigned long flags;
@@ -337,7 +338,7 @@ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
info->ring.rsp_prod_pvt++;

ring_res->rslt = result;
ring_res->rqid = pending_req->rqid;
ring_res->rqid = rqid;

if (sense_buffer != NULL &&
scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
@@ -357,6 +358,13 @@ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,

if (notify)
notify_remote_via_irq(info->irq);
}

static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
uint32_t resid, struct vscsibk_pend *pending_req)
{
scsiback_send_response(pending_req->info, sense_buffer, result,
resid, pending_req->rqid);

if (pending_req->v2p)
kref_put(&pending_req->v2p->kref,
@@ -380,6 +388,12 @@ static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
scsiback_fast_flush_area(pending_req);
scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
scsiback_put(info);
/*
* Drop the extra KREF_ACK reference taken by target_submit_cmd_map_sgls()
* ahead of scsiback_check_stop_free() -> transport_generic_free_cmd()
* final se_cmd->cmd_kref put.
*/
target_put_sess_cmd(&pending_req->se_cmd);
}

static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
@@ -388,16 +402,12 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
int rc;

memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);

memset(se_cmd, 0, sizeof(*se_cmd));

scsiback_get(pending_req->info);
se_cmd->tag = pending_req->rqid;
rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
pending_req->sense_buffer, pending_req->v2p->lun,
pending_req->data_len, 0,
pending_req->sc_data_direction, 0,
pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
pending_req->sgl, pending_req->n_sg,
NULL, 0, NULL, 0);
if (rc < 0) {
@@ -586,45 +596,40 @@ static void scsiback_disconnect(struct vscsibk_info *info)
static void scsiback_device_action(struct vscsibk_pend *pending_req,
enum tcm_tmreq_table act, int tag)
{
int rc, err = FAILED;
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_cmd *se_cmd = &pending_req->se_cmd;
struct scsiback_tmr *tmr;
u64 unpacked_lun = pending_req->v2p->lun;
int rc, err = FAILED;

tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
if (!tmr)
goto out;
if (!tmr) {
target_put_sess_cmd(se_cmd);
goto err;
}

init_waitqueue_head(&tmr->tmr_wait);

transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
&pending_req->sense_buffer[0]);
rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
&pending_req->sense_buffer[0],
unpacked_lun, tmr, act, GFP_KERNEL,
tag, TARGET_SCF_ACK_KREF);
if (rc)
goto err;

rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
if (rc < 0)
goto out;

se_cmd->se_tmr_req->ref_task_tag = tag;

if (transport_lookup_tmr_lun(se_cmd, pending_req->v2p->lun) < 0)
goto out;

transport_generic_handle_tmr(se_cmd);
wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));

err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
SUCCESS : FAILED;

out:
if (tmr) {
transport_generic_free_cmd(&pending_req->se_cmd, 1);
kfree(tmr);
}

scsiback_do_resp_with_sense(NULL, err, 0, pending_req);

kmem_cache_free(scsiback_cachep, pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 1);
return;
err:
if (tmr)
kfree(tmr);
scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
}

/*
@@ -653,15 +658,53 @@ out:
return entry;
}

static int prepare_pending_reqs(struct vscsibk_info *info,
struct vscsiif_request *ring_req,
struct vscsibk_pend *pending_req)
static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
struct v2p_entry *v2p)
{
struct scsiback_tpg *tpg = v2p->tpg;
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_session *se_sess = nexus->tvn_se_sess;
struct vscsibk_pend *req;
int tag, i;

tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0) {
pr_err("Unable to obtain tag for vscsiif_request\n");
return ERR_PTR(-ENOMEM);
}

req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;

for (i = 0; i < VSCSI_MAX_GRANTS; i++)
req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;

return req;
}

static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
struct vscsiif_back_ring *ring,
struct vscsiif_request *ring_req)
{
struct vscsibk_pend *pending_req;
struct v2p_entry *v2p;
struct ids_tuple vir;

pending_req->rqid = ring_req->rqid;
pending_req->info = info;
/* request range check from frontend */
if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
(ring_req->sc_data_direction != DMA_TO_DEVICE) &&
(ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
(ring_req->sc_data_direction != DMA_NONE)) {
pr_debug("invalid parameter data_dir = %d\n",
ring_req->sc_data_direction);
return ERR_PTR(-EINVAL);
}
if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
pr_debug("invalid parameter cmd_len = %d\n",
ring_req->cmd_len);
return ERR_PTR(-EINVAL);
}

vir.chn = ring_req->channel;
vir.tgt = ring_req->id;
@@ -669,33 +712,24 @@ static int prepare_pending_reqs(struct vscsibk_info *info,

v2p = scsiback_do_translation(info, &vir);
if (!v2p) {
pending_req->v2p = NULL;
pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
vir.chn, vir.tgt, vir.lun);
return -ENODEV;
vir.chn, vir.tgt, vir.lun);
return ERR_PTR(-ENODEV);
}

pending_req = scsiback_get_pend_req(ring, v2p);
if (IS_ERR(pending_req)) {
kref_put(&v2p->kref, scsiback_free_translation_entry);
return ERR_PTR(-ENOMEM);
}
pending_req->rqid = ring_req->rqid;
pending_req->info = info;
pending_req->v2p = v2p;

/* request range check from frontend */
pending_req->sc_data_direction = ring_req->sc_data_direction;
if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
(pending_req->sc_data_direction != DMA_NONE)) {
pr_debug("invalid parameter data_dir = %d\n",
pending_req->sc_data_direction);
return -EINVAL;
}

pending_req->cmd_len = ring_req->cmd_len;
if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
pr_debug("invalid parameter cmd_len = %d\n",
pending_req->cmd_len);
return -EINVAL;
}
memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

return 0;
return pending_req;
}

static int scsiback_do_cmd_fn(struct vscsibk_info *info)
@@ -704,7 +738,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
struct vscsiif_request ring_req;
struct vscsibk_pend *pending_req;
RING_IDX rc, rp;
int err, more_to_do;
int more_to_do;
uint32_t result;

rc = ring->req_cons;
@@ -722,16 +756,13 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
while ((rc != rp)) {
if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
break;
pending_req = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
if (!pending_req)
return 1;

RING_COPY_REQUEST(ring, rc, &ring_req);
ring->req_cons = ++rc;

err = prepare_pending_reqs(info, &ring_req, pending_req);
if (err) {
switch (err) {
pending_req = prepare_pending_reqs(info, ring, &ring_req);
if (IS_ERR(pending_req)) {
switch (PTR_ERR(pending_req)) {
case -ENODEV:
result = DID_NO_CONNECT;
break;
@@ -739,9 +770,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
result = DRIVER_ERROR;
break;
}
scsiback_do_resp_with_sense(NULL, result << 24, 0,
pending_req);
kmem_cache_free(scsiback_cachep, pending_req);
scsiback_send_response(info, NULL, result << 24, 0,
ring_req.rqid);
return 1;
}

@@ -750,8 +780,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
scsiback_fast_flush_area(pending_req);
scsiback_do_resp_with_sense(NULL,
DRIVER_ERROR << 24, 0, pending_req);
kmem_cache_free(scsiback_cachep, pending_req);
DRIVER_ERROR << 24, 0, pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
} else {
scsiback_cmd_exec(pending_req);
}
@@ -765,9 +795,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
break;
default:
pr_err_ratelimited("invalid request\n");
scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
0, pending_req);
kmem_cache_free(scsiback_cachep, pending_req);
scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
break;
}

@@ -1353,24 +1383,20 @@ static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)

static int scsiback_check_stop_free(struct se_cmd *se_cmd)
{
/*
* Do not release struct se_cmd's containing a valid TMR pointer.
* These will be released directly in scsiback_device_action()
* with transport_generic_free_cmd().
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
return 0;

transport_generic_free_cmd(se_cmd, 0);
return 1;
return transport_generic_free_cmd(se_cmd, 0);
}

static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
struct vscsibk_pend *pending_req = container_of(se_cmd,
struct vscsibk_pend, se_cmd);
struct se_session *se_sess = se_cmd->se_sess;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

kmem_cache_free(scsiback_cachep, pending_req);
if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
kfree(tmr);
}

percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int scsiback_shutdown_session(struct se_session *se_sess)
@@ -1494,61 +1520,49 @@ static struct configfs_attribute *scsiback_param_attrs[] = {
NULL,
};

static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
struct se_session *se_sess, void *p)
{
struct scsiback_tpg *tpg = container_of(se_tpg,
struct scsiback_tpg, se_tpg);

tpg->tpg_nexus = p;
return 0;
}

static int scsiback_make_nexus(struct scsiback_tpg *tpg,
const char *name)
{
struct se_portal_group *se_tpg;
struct se_session *se_sess;
struct scsiback_nexus *tv_nexus;
int ret = 0;

mutex_lock(&tpg->tv_tpg_mutex);
if (tpg->tpg_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_debug("tpg->tpg_nexus already exists\n");
return -EEXIST;
ret = -EEXIST;
goto out_unlock;
}
se_tpg = &tpg->se_tpg;

tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
if (!tv_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
return -ENOMEM;
ret = -ENOMEM;
goto out_unlock;
}
/*
* Initialize the struct se_session pointer
*/
tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);

tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
VSCSI_DEFAULT_SESSION_TAGS,
sizeof(struct vscsibk_pend),
TARGET_PROT_NORMAL, name,
tv_nexus, scsiback_alloc_sess_cb);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
return -ENOMEM;
ret = -ENOMEM;
goto out_unlock;
}
se_sess = tv_nexus->tvn_se_sess;
/*
* Since we are running in 'demo mode' this call with generate a
* struct se_node_acl for the scsiback struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
se_tpg, (unsigned char *)name);
if (!tv_nexus->tvn_se_sess->se_node_acl) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_debug("core_tpg_check_initiator_node_acl() failed for %s\n",
name);
goto out;
}
/* Now register the TCM pvscsi virtual I_T Nexus as active. */
transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
tv_nexus->tvn_se_sess, tv_nexus);
tpg->tpg_nexus = tv_nexus;

out_unlock:
mutex_unlock(&tpg->tv_tpg_mutex);
return 0;

out:
transport_free_session(se_sess);
kfree(tv_nexus);
return -ENOMEM;
return ret;
}

static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
@@ -1866,16 +1880,6 @@ static struct xenbus_driver scsiback_driver = {
.otherend_changed = scsiback_frontend_changed
};

static void scsiback_init_pend(void *p)
{
struct vscsibk_pend *pend = p;
int i;

memset(pend, 0, sizeof(*pend));
for (i = 0; i < VSCSI_MAX_GRANTS; i++)
pend->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
}

static int __init scsiback_init(void)
{
int ret;
@@ -1886,14 +1890,9 @@ static int __init scsiback_init(void)
pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
VSCSI_VERSION, utsname()->sysname, utsname()->machine);

scsiback_cachep = kmem_cache_create("vscsiif_cache",
sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
if (!scsiback_cachep)
return -ENOMEM;

ret = xenbus_register_backend(&scsiback_driver);
if (ret)
goto out_cache_destroy;
goto out;

ret = target_register_template(&scsiback_ops);
if (ret)
@@ -1903,8 +1902,7 @@ static int __init scsiback_init(void)

out_unregister_xenbus:
xenbus_unregister_driver(&scsiback_driver);
out_cache_destroy:
kmem_cache_destroy(scsiback_cachep);
out:
pr_err("%s: error %d\n", __func__, ret);
return ret;
}
@@ -1920,7 +1918,6 @@ static void __exit scsiback_exit(void)
}
target_unregister_template(&scsiback_ops);
xenbus_unregister_driver(&scsiback_driver);
kmem_cache_destroy(scsiback_cachep);
}

module_init(scsiback_init);

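Besides the percpu_ida conversion, the xen-scsiback rework above changes prepare_pending_reqs() to return the request itself, encoding failures with ERR_PTR() so the caller can recover the errno with PTR_ERR(). The calling convention, restated as a compact kernel-context sketch of the loop body above:

pending_req = prepare_pending_reqs(info, ring, &ring_req);
if (IS_ERR(pending_req)) {
        switch (PTR_ERR(pending_req)) {        /* decode the embedded errno */
        case -ENODEV:
                result = DID_NO_CONNECT;
                break;
        default:
                result = DRIVER_ERROR;
                break;
        }
        /* no pending_req exists yet, so respond using the ring entry's rqid */
        scsiback_send_response(info, NULL, result << 24, 0, ring_req.rqid);
        return 1;
}

Splitting scsiback_send_response() out of scsiback_do_resp_with_sense() is what makes this error path possible: the frontend can be answered before any vscsibk_pend has been allocated.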
@@ -144,12 +144,6 @@ enum se_cmd_flags_table {
SCF_USE_CPUID = 0x00800000,
};

/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
enum transport_lunflags_table {
TRANSPORT_LUNFLAGS_READ_ONLY = 0x01,
TRANSPORT_LUNFLAGS_READ_WRITE = 0x02,
};

/*
* Used by transport_send_check_condition_and_sense()
* to signal which ASC/ASCQ sense payload should be built.
@@ -633,11 +627,10 @@ struct se_lun_acl {
};

struct se_dev_entry {
/* See transport_lunflags_table */
u64 mapped_lun;
u64 pr_res_key;
u64 creation_time;
u32 lun_flags;
bool lun_access_ro;
u32 attach_count;
atomic_long_t total_cmds;
atomic_long_t read_bytes;
@@ -711,7 +704,7 @@ struct se_lun {
u64 unpacked_lun;
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
u32 lun_access;
bool lun_access_ro;
u32 lun_index;

/* RELATIVE TARGET PORT IDENTIFER */

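With transport_lunflags_table gone, the two-flag u32 fields on se_dev_entry and se_lun collapse into a single bool lun_access_ro, so a write-protection test reduces to a direct flag check. Roughly (illustrative only; cmd_is_write() is a hypothetical helper, and the real check lives elsewhere in target core):

/* hypothetical shape of a post-conversion access check */
if (deve->lun_access_ro && cmd_is_write(se_cmd))
        return TCM_WRITE_PROTECTED;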
@@ -108,6 +108,12 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);

struct se_session *target_alloc_session(struct se_portal_group *,
unsigned int, unsigned int, enum target_prot_op prot_op,
const char *, void *,
int (*callback)(struct se_portal_group *,
struct se_session *, void *));

struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);

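Given the declaration above, a typical call from a fabric driver, modeled on the conversions in this merge, looks like the following. The struct name, callback, and tag depth are illustrative; the conversions above suggest a tag count of 0 falls back to a session without a preallocated tag pool, though that behavior should be confirmed against the implementation:

/* hypothetical fabric driver setting up its I_T nexus session */
sess->se_sess = target_alloc_session(se_tpg,
                                     128,                          /* tags */
                                     sizeof(struct my_fabric_cmd), /* per-tag size */
                                     TARGET_PROT_NORMAL,
                                     initiator_name,               /* for ACL lookup */
                                     sess,                         /* callback cookie */
                                     my_fabric_alloc_cb);
if (IS_ERR(sess->se_sess))
        return PTR_ERR(sess->se_sess);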
@@ -41,6 +41,7 @@

#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */

struct tcmu_mailbox {
__u16 version;
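A userspace handler can test the new capability flag before it starts completing commands out of order. A minimal sketch, assuming the conventional TCMU/UIO setup where the mailbox sits at the start of the mmap'd region; the /dev/uio0 path is configuration-specific, and only the two leading mailbox fields are declared here (full layout is in the uapi header above):

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0)

struct tcmu_mailbox_head {      /* leading fields only, per the uapi header */
        uint16_t version;
        uint16_t flags;
};

int main(void)
{
        int fd = open("/dev/uio0", O_RDWR);     /* device node is setup-specific */
        if (fd < 0)
                return 1;
        void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;

        struct tcmu_mailbox_head *mb = map;
        if (mb->version == 2 && (mb->flags & TCMU_MAILBOX_FLAG_CAP_OOOC))
                puts("kernel accepts out-of-order completions");
        else
                puts("complete commands strictly in order");

        munmap(map, 4096);
        close(fd);
        return 0;
}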