Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
"This includes the long awaited series to address a set of bugs around
active I/O remote-port LUN_RESET, as well as properly handling this
same case with concurrent fabric driver session disconnect ->
reconnect.
Note this set of LUN_RESET bug-fixes has been surviving extended
testing on both v4.5-rc1 and v3.14.y code over the last few weeks, and is
CC'ed for stable as it's something folks using multiple ESX-connected
hosts with slow backends can certainly trigger.
The highlights also include:
- Fix WRITE_SAME/DISCARD emulation 4k sector conversion in
target/iblock (Mike Christie)
- Fix TMR abort interaction and ATIO type TMR response in qla2xxx
target (Quinn Tran + Swapnil Nagle)
- Fix >= v3.17 stale descriptor pointer regression in qla2xxx target
(Quinn Tran)
- Fix >= v4.5-rc1 return regression with the new unmap_zeroes_data_store
configfs store handler (nab)
- Add CPU affinity flag + convert qla2xxx to use it (Quinn + HCH +
Bart)"
* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
qla2xxx: use TARGET_SCF_USE_CPUID flag to indicate CPU Affinity
target/transport: add flag to indicate CPU Affinity is observed
target: Fix incorrect unmap_zeroes_data_store return
qla2xxx: Use ATIO type to send correct tmr response
qla2xxx: Fix stale pointer access.
target/user: Fix cast from pointer to phys_addr_t
target: Drop legacy se_cmd->task_stop_comp + REQUEST_STOP usage
target: Fix race with SCF_SEND_DELAYED_TAS handling
target: Fix remote-port TMR ABORT + se_cmd fabric stop
target: Fix TAS handling for multi-session se_node_acls
target: Fix LUN_RESET active TMR descriptor handling
target: Fix LUN_RESET active I/O handling for ACK_KREF
qla2xxx: Fix TMR ABORT interaction issue between qla2xxx and TCM
qla2xxx: Fix warning reported by static checker
target: Fix WRITE_SAME/DISCARD conversion to linux 512b sectors
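For context on the WRITE_SAME/DISCARD conversion fix: blkdev_issue_discard() and the
rest of the Linux block layer always count in 512-byte sectors, while the target
advertises its own logical block size to the initiator, so LBAs and block counts must
be scaled before submission. A minimal userspace sketch of the conversion done by the
new target_to_linux_sector() helper in the diff below (the demo main() and its values
are illustrative only, not part of the patch):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Scale an LBA counted in device blocks to 512-byte Linux sectors. */
    static sector_t to_linux_sector(unsigned int block_size, sector_t lb)
    {
            switch (block_size) {
            case 4096:
                    return lb << 3;     /* 8 sectors per block */
            case 2048:
                    return lb << 2;
            case 1024:
                    return lb << 1;
            default:
                    return lb;          /* already 512b */
            }
    }

    int main(void)
    {
            /* A discard of 16 blocks at LBA 100 on a 4k device must be
             * issued as 128 sectors starting at sector 800. */
            printf("lba=%llu nolb=%llu\n",
                   to_linux_sector(4096, 100), to_linux_sector(4096, 16));
            return 0;
    }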
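Likewise, for the unmap_zeroes_data_store regression: a configfs ->store() method is
expected to return the number of bytes it consumed, and the fix below turns a
"return 0" into "return count". A self-contained sketch of why returning 0 misbehaves,
assuming a caller that keeps writing until its buffer is drained (all names here are
illustrative stand-ins, not the configfs API):

    #include <stdio.h>
    #include <string.h>

    /* Models the store contract: return how many bytes were consumed.
     * The old "return 0" meant the writer saw no progress at all. */
    static long store_flag(const char *page, size_t count, int *flag)
    {
            *flag = (page[0] == '1');
            return count;               /* was: return 0 */
    }

    int main(void)
    {
            const char buf[] = "1\n";
            size_t off = 0, len = strlen(buf);
            int flag = 0;

            while (off < len) {
                    long ret = store_flag(buf + off, len - off, &flag);
                    if (ret <= 0)
                            break;      /* with "return 0" a real writer
                                         * would retry forever; bail out
                                         * to keep this demo finite */
                    off += ret;
            }
            printf("flag=%d consumed=%zu\n", flag, off);
            return 0;
    }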
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52a8765..692a757 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2204,7 +2204,7 @@
/* Clear outstanding commands array. */
for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
- if (!req)
+ if (!req || !test_bit(que, ha->req_qid_map))
continue;
req->out_ptr = (void *)(req->ring + req->length);
*req->out_ptr = 0;
@@ -2221,7 +2221,7 @@
for (que = 0; que < ha->max_rsp_queues; que++) {
rsp = ha->rsp_q_map[que];
- if (!rsp)
+ if (!rsp || !test_bit(que, ha->rsp_qid_map))
continue;
rsp->in_ptr = (void *)(rsp->ring + rsp->length);
*rsp->in_ptr = 0;
@@ -4981,7 +4981,7 @@
for (i = 1; i < ha->max_rsp_queues; i++) {
rsp = ha->rsp_q_map[i];
- if (rsp) {
+ if (rsp && test_bit(i, ha->rsp_qid_map)) {
rsp->options &= ~BIT_0;
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS)
@@ -4996,8 +4996,8 @@
}
for (i = 1; i < ha->max_req_queues; i++) {
req = ha->req_q_map[i];
- if (req) {
- /* Clear outstanding commands array. */
+ if (req && test_bit(i, ha->req_qid_map)) {
+ /* Clear outstanding commands array. */
req->options &= ~BIT_0;
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d4d65eb..4af9547 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3063,9 +3063,9 @@
"MSI-X: Failed to enable support "
"-- %d/%d\n Retry with %d vectors.\n",
ha->msix_count, ret, ret);
+ ha->msix_count = ret;
+ ha->max_rsp_queues = ha->msix_count - 1;
}
- ha->msix_count = ret;
- ha->max_rsp_queues = ha->msix_count - 1;
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
if (!ha->msix_entries) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c5dd594..cf7ba52 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -600,7 +600,7 @@
/* Delete request queues */
for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
req = ha->req_q_map[cnt];
- if (req) {
+ if (req && test_bit(cnt, ha->req_qid_map)) {
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@
/* Delete response queues */
for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
rsp = ha->rsp_q_map[cnt];
- if (rsp) {
+ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
ret = qla25xx_delete_rsp_que(vha, rsp);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00eb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f1788db..f6c7ce3 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -409,6 +409,9 @@
int cnt;
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+ if (!test_bit(cnt, ha->req_qid_map))
+ continue;
+
req = ha->req_q_map[cnt];
qla2x00_free_req_que(ha, req);
}
@@ -416,6 +419,9 @@
ha->req_q_map = NULL;
for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+ if (!test_bit(cnt, ha->rsp_qid_map))
+ continue;
+
rsp = ha->rsp_q_map[cnt];
qla2x00_free_rsp_que(ha, rsp);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8075a4c..ee967be 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -105,7 +105,7 @@
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
- *cmd, struct atio_from_isp *atio, int ha_locked);
+ *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
@@ -1756,7 +1756,7 @@
qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
0, 0, 0, 0, 0, 0);
else {
- if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
else
@@ -2665,7 +2665,7 @@
/* no need to terminate. FW already freed exchange. */
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
else
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return 0;
}
@@ -3173,7 +3173,8 @@
}
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
- struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
+ int ul_abort)
{
unsigned long flags = 0;
int rc;
@@ -3193,8 +3194,7 @@
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
done:
- if (cmd && (!cmd->aborted ||
- !cmd->cmd_sent_to_fw)) {
+ if (cmd && !ul_abort && !cmd->aborted) {
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
@@ -3253,21 +3253,38 @@
}
-void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
struct qla_tgt *tgt = cmd->tgt;
struct scsi_qla_host *vha = tgt->vha;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
"qla_target(%d): terminating exchange for aborted cmd=%p "
"(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
se_cmd->tag);
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ /*
+ * It's normal to see 2 calls in this path:
+ * 1) XFER Rdy completion + CMD_T_ABORT
+ * 2) TCM TMR - drain_state_list
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "multiple abort. %p transport_state %x, t_state %x,"
+ " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
+ return EIO;
+ }
cmd->aborted = 1;
cmd->cmd_flags |= BIT_6;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
+ return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
@@ -3282,6 +3299,9 @@
BUG_ON(cmd->cmd_in_wq);
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(cmd->vha, cmd);
+
if (!cmd->q_full)
qlt_decr_num_pend_cmds(cmd->vha);
@@ -3399,7 +3419,7 @@
term = 1;
if (term)
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
return term;
}
@@ -3580,12 +3600,13 @@
case CTIO_PORT_LOGGED_OUT:
case CTIO_PORT_UNAVAILABLE:
{
- int logged_out = (status & 0xFFFF);
+ int logged_out =
+ (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
"qla_target(%d): CTIO with %s status %x "
"received (state %x, se_cmd %p)\n", vha->vp_idx,
- (logged_out == CTIO_PORT_LOGGED_OUT) ?
- "PORT LOGGED OUT" : "PORT UNAVAILABLE",
+ logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
status, cmd->state, se_cmd);
if (logged_out && cmd->sess) {
@@ -3754,6 +3775,7 @@
goto out_term;
}
+ spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int(
@@ -3796,7 +3818,7 @@
*/
cmd->cmd_flags |= BIT_2;
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
@@ -3918,7 +3940,7 @@
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &op->atio, 1);
+ qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
@@ -3982,7 +4004,8 @@
cmd->cmd_in_wq = 1;
cmd->cmd_flags |= BIT_0;
- cmd->se_cmd.cpuid = -1;
+ cmd->se_cmd.cpuid = ha->msix_count ?
+ ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
spin_lock(&vha->cmd_list_lock);
list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
@@ -3990,7 +4013,6 @@
INIT_WORK(&cmd->work, qlt_do_work);
if (ha->msix_count) {
- cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
if (cmd->atio.u.isp24.fcp_cmnd.rddata)
queue_work_on(smp_processor_id(), qla_tgt_wq,
&cmd->work);
@@ -4771,7 +4793,7 @@
dump_stack();
} else {
cmd->cmd_flags |= BIT_9;
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -4950,7 +4972,7 @@
sctio, sctio->srr_id);
list_del(&sctio->srr_list_entry);
qlt_send_term_exchange(vha, sctio->cmd,
- &sctio->cmd->atio, 1);
+ &sctio->cmd->atio, 1, 0);
kfree(sctio);
}
}
@@ -5123,7 +5145,7 @@
atio->u.isp24.fcp_hdr.s_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
@@ -5406,7 +5428,7 @@
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
#endif
if (!ha_locked)
@@ -5523,7 +5545,7 @@
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
qlt_send_busy(vha, atio, 0);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
#endif
} else {
if (tgt->tgt_stop) {
@@ -5532,7 +5554,7 @@
"command to target, sending TERM "
"EXCHANGE for rsp\n");
qlt_send_term_exchange(vha, NULL,
- atio, 1);
+ atio, 1, 0);
} else {
ql_dbg(ql_dbg_tgt, vha, 0xe060,
"qla_target(%d): Unable to send "
@@ -5960,7 +5982,7 @@
return;
out_term:
- qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0);
+ qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 71b2865..22a6a76 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -943,6 +943,36 @@
qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
};
+typedef enum {
+ /*
+ * BIT_0 - Atio Arrival / schedule to work
+ * BIT_1 - qlt_do_work
+ * BIT_2 - qlt_do work failed
+ * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
+ * BIT_4 - read respond/tcm_qla2xxx_queue_data_in
+ * BIT_5 - status respond / tcm_qla2xxx_queue_status
+ * BIT_6 - tcm request to abort/Term exchange.
+ * pre_xmit_response->qlt_send_term_exchange
+ * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
+ * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+ * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
+ * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data
+
+ * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
+ * BIT_13 - Bad completion -
+ * qlt_ctio_do_completion --> qlt_term_ctio_exchange
+ * BIT_14 - Back end data received/sent.
+ * BIT_15 - SRR prepare ctio
+ * BIT_16 - complete free
+ * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+ * BIT_18 - completion w/abort status
+ * BIT_19 - completion w/unknown status
+ * BIT_20 - tcm_qla2xxx_free_cmd
+ */
+ CMD_FLAG_DATA_WORK = BIT_11,
+ CMD_FLAG_DATA_WORK_FREE = BIT_21,
+} cmd_flags_t;
+
struct qla_tgt_cmd {
struct se_cmd se_cmd;
struct qla_tgt_sess *sess;
@@ -952,6 +982,7 @@
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+ spinlock_t cmd_lock;
/* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
@@ -986,30 +1017,8 @@
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
- /* BIT_0 - Atio Arrival / schedule to work
- * BIT_1 - qlt_do_work
- * BIT_2 - qlt_do work failed
- * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
- * BIT_4 - read respond/tcm_qla2xx_queue_data_in
- * BIT_5 - status respond / tcm_qla2xx_queue_status
- * BIT_6 - tcm request to abort/Term exchange.
- * pre_xmit_response->qlt_send_term_exchange
- * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
- * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
- * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
- * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
- * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
- * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
- * BIT_13 - Bad completion -
- * qlt_ctio_do_completion --> qlt_term_ctio_exchange
- * BIT_14 - Back end data received/sent.
- * BIT_15 - SRR prepare ctio
- * BIT_16 - complete free
- * BIT_17 - flush - qlt_abort_cmd_on_host_reset
- * BIT_18 - completion w/abort status
- * BIT_19 - completion w/unknown status
- */
- uint32_t cmd_flags;
+
+ cmd_flags_t cmd_flags;
};
struct qla_tgt_sess_work_param {
@@ -1148,7 +1157,7 @@
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
-extern void qlt_abort_cmd(struct qla_tgt_cmd *);
+extern int qlt_abort_cmd(struct qla_tgt_cmd *);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index ddbe2e7..c3e6225 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -395,6 +395,10 @@
if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
+
+ if (!test_bit(i, vha->hw->req_qid_map))
+ continue;
+
if (req || !buf) {
length = req ?
req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@
} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+ if (!test_bit(i, vha->hw->rsp_qid_map))
+ continue;
+
if (rsp || !buf) {
length = rsp ?
rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@
if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
+
+ if (!test_bit(i, vha->hw->req_qid_map))
+ continue;
+
if (req || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@
} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+ if (!test_bit(i, vha->hw->rsp_qid_map))
+ continue;
+
if (rsp || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index faf0a12..1808a01 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -298,6 +298,10 @@
{
cmd->vha->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
+
+ BUG_ON(cmd->cmd_flags & BIT_20);
+ cmd->cmd_flags |= BIT_20;
+
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
@@ -374,6 +378,20 @@
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+
+ if (cmd->aborted) {
+ /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+ * can get ahead of this cmd and has already kicked
+ * off the free.
+ */
+ pr_debug("write_pending aborted cmd[%p] refcount %d "
+ "transport_state %x, t_state %x, se_cmd_flags %x\n",
+ cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+ cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
+ return 0;
+ }
cmd->cmd_flags |= BIT_3;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +423,7 @@
se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
- 3 * HZ);
+ 50);
return 0;
}
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -444,6 +462,9 @@
if (bidi)
flags |= TARGET_SCF_BIDI_OP;
+ if (se_cmd->cpuid != WORK_CPU_UNBOUND)
+ flags |= TARGET_SCF_USE_CPUID;
+
sess = cmd->sess;
if (!sess) {
pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
@@ -465,13 +486,25 @@
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
*/
cmd->cmd_in_wq = 0;
- cmd->cmd_flags |= BIT_11;
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
+ if (cmd->aborted) {
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
cmd->vha->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
@@ -546,6 +579,20 @@
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+ if (cmd->aborted) {
+ /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+ * can get ahead of this cmd and has already kicked
+ * off the free.
+ */
+ pr_debug("queue_data_in aborted cmd[%p] refcount %d "
+ "transport_state %x, t_state %x, se_cmd_flags %x\n",
+ cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+ cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
+ return 0;
+ }
+
cmd->cmd_flags |= BIT_4;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -637,11 +684,34 @@
qlt_xmit_tm_rsp(mcmd);
}
+
+#define DATA_WORK_NOT_FREE(_flags) \
+ ((_flags & (CMD_FLAG_DATA_WORK | CMD_FLAG_DATA_WORK_FREE)) == \
+ CMD_FLAG_DATA_WORK)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- qlt_abort_cmd(cmd);
+ unsigned long flags;
+
+ if (qlt_abort_cmd(cmd))
+ return;
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if ((cmd->state == QLA_TGT_STATE_NEW) ||
+ ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
+ DATA_WORK_NOT_FREE(cmd->cmd_flags))) {
+
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ /* Cmd has not reached firmware.
+ * Use this trigger to free it. */
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3327c49..713c63d9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -898,7 +898,7 @@
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
da->da_dev, flag);
- return 0;
+ return count;
}
/*
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cacd97a..da457e2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -828,6 +828,50 @@
return dev;
}
+/*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+ * in ATA and we need to set TPE=1
+ */
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+ struct request_queue *q, int block_size)
+{
+ if (!blk_queue_discard(q))
+ return false;
+
+ attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
+ block_size;
+ /*
+ * Currently hardcoded to 1 in Linux/SCSI code..
+ */
+ attrib->max_unmap_block_desc_count = 1;
+ attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+ attrib->unmap_granularity_alignment = q->limits.discard_alignment /
+ block_size;
+ attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
+ return true;
+}
+EXPORT_SYMBOL(target_configure_unmap_from_queue);
+
+/*
+ * Convert from blocksize advertised to the initiator to the 512 byte
+ * units unconditionally used by the Linux block layer.
+ */
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+{
+ switch (dev->dev_attrib.block_size) {
+ case 4096:
+ return lb << 3;
+ case 2048:
+ return lb << 2;
+ case 1024:
+ return lb << 1;
+ default:
+ return lb;
+ }
+}
+EXPORT_SYMBOL(target_to_linux_sector);
+
int target_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e319570..75f0f08 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -160,25 +160,11 @@
" block_device blocks: %llu logical_block_size: %d\n",
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
- */
- if (blk_queue_discard(q)) {
- dev->dev_attrib.max_unmap_lba_count =
- q->limits.max_discard_sectors;
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity =
- q->limits.discard_granularity >> 9;
- dev->dev_attrib.unmap_granularity_alignment =
- q->limits.discard_alignment;
+
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+ fd_dev->fd_block_size))
pr_debug("IFILE: BLOCK Discard support available,"
- " disabled by default\n");
- }
+ " disabled by default\n");
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@
if (S_ISBLK(inode->i_mode)) {
/* The backend is block device, use discard */
struct block_device *bdev = inode->i_bdev;
+ struct se_device *dev = cmd->se_dev;
- ret = blkdev_issue_discard(bdev, lba,
- nolb, GFP_KERNEL, 0);
+ ret = blkdev_issue_discard(bdev,
+ target_to_linux_sector(dev, lba),
+ target_to_linux_sector(dev, nolb),
+ GFP_KERNEL, 0);
if (ret < 0) {
pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
ret);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a2899f..abe4eb9 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,29 +121,11 @@
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
- */
- if (blk_queue_discard(q)) {
- dev->dev_attrib.max_unmap_lba_count =
- q->limits.max_discard_sectors;
-
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity =
- q->limits.discard_granularity >> 9;
- dev->dev_attrib.unmap_granularity_alignment =
- q->limits.discard_alignment;
- dev->dev_attrib.unmap_zeroes_data =
- q->limits.discard_zeroes_data;
-
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+ dev->dev_attrib.hw_block_size))
pr_debug("IBLOCK: BLOCK Discard support available,"
- " disabled by default\n");
- }
+ " disabled by default\n");
+
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -415,9 +397,13 @@
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+ struct se_device *dev = cmd->se_dev;
int ret;
- ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+ ret = blkdev_issue_discard(bdev,
+ target_to_linux_sector(dev, lba),
+ target_to_linux_sector(dev, nolb),
+ GFP_KERNEL, 0);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -433,8 +419,10 @@
struct scatterlist *sg;
struct bio *bio;
struct bio_list list;
- sector_t block_lba = cmd->t_task_lba;
- sector_t sectors = sbc_get_write_same_sectors(cmd);
+ struct se_device *dev = cmd->se_dev;
+ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+ sector_t sectors = target_to_linux_sector(dev,
+ sbc_get_write_same_sectors(cmd));
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -648,12 +636,12 @@
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
+ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
struct iblock_req *ibr;
struct bio *bio, *bio_start;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
- sector_t block_lba;
unsigned bio_cnt;
int rw = 0;
int i;
@@ -679,24 +667,6 @@
rw = READ;
}
- /*
- * Convert the blocksize advertised to the initiator to the 512 byte
- * units unconditionally used by the Linux block layer.
- */
- if (dev->dev_attrib.block_size == 4096)
- block_lba = (cmd->t_task_lba << 3);
- else if (dev->dev_attrib.block_size == 2048)
- block_lba = (cmd->t_task_lba << 2);
- else if (dev->dev_attrib.block_size == 1024)
- block_lba = (cmd->t_task_lba << 1);
- else if (dev->dev_attrib.block_size == 512)
- block_lba = cmd->t_task_lba;
- else {
- pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", dev->dev_attrib.block_size);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
goto fail;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dae0750c..db4412f 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -141,7 +141,6 @@
int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
void transport_clear_lun_ref(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index fcdcb11..82a663b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -68,23 +68,25 @@
if (dev) {
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del(&tmr->tmr_list);
+ list_del_init(&tmr->tmr_list);
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
}
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(
- struct se_node_acl *tmr_nacl,
- struct se_cmd *cmd,
- int tas)
+static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
- bool remove = true;
+ unsigned long flags;
+ bool remove = true, send_tas;
/*
* TASK ABORTED status (TAS) bit support
*/
- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ send_tas = (cmd->transport_state & CMD_T_TAS);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (send_tas) {
remove = false;
transport_send_task_abort(cmd);
}
@@ -107,6 +109,46 @@
return 1;
}
+static bool __target_check_io_state(struct se_cmd *se_cmd,
+ struct se_session *tmr_sess, int tas)
+{
+ struct se_session *sess = se_cmd->se_sess;
+
+ assert_spin_locked(&sess->sess_cmd_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+ /*
+ * If command already reached CMD_T_COMPLETE state within
+ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
+ * this se_cmd has been passed to fabric driver and will
+ * not be aborted.
+ *
+ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
+ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
+ * long as se_cmd->cmd_kref is still active unless zero.
+ */
+ spin_lock(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
+ pr_debug("Attempted to abort io tag: %llu already complete or"
+ " fabric stop, skipping\n", se_cmd->tag);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+ pr_debug("Attempted to abort io tag: %llu already shutdown,"
+ " skipping\n", se_cmd->tag);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ se_cmd->transport_state |= CMD_T_ABORTED;
+
+ if ((tmr_sess != se_cmd->se_sess) && tas)
+ se_cmd->transport_state |= CMD_T_TAS;
+
+ spin_unlock(&se_cmd->t_state_lock);
+
+ return kref_get_unless_zero(&se_cmd->cmd_kref);
+}
+
void core_tmr_abort_task(
struct se_device *dev,
struct se_tmr_req *tmr,
@@ -130,34 +172,22 @@
if (tmr->ref_task_tag != ref_tag)
continue;
- if (!kref_get_unless_zero(&se_cmd->cmd_kref))
- continue;
-
printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
se_cmd->se_tfo->get_fabric_name(), ref_tag);
- spin_lock(&se_cmd->t_state_lock);
- if (se_cmd->transport_state & CMD_T_COMPLETE) {
- printk("ABORT_TASK: ref_tag: %llu already complete,"
- " skipping\n", ref_tag);
- spin_unlock(&se_cmd->t_state_lock);
+ if (!__target_check_io_state(se_cmd, se_sess, 0)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
target_put_sess_cmd(se_cmd);
-
goto out;
}
- se_cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock(&se_cmd->t_state_lock);
-
list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- target_put_sess_cmd(se_cmd);
transport_cmd_finish_abort(se_cmd, true);
+ target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -178,9 +208,11 @@
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_tmr_list);
+ struct se_session *sess;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_cmd *cmd;
unsigned long flags;
+ bool rc;
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
@@ -206,17 +238,39 @@
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
+ continue;
+
+ spin_lock(&sess->sess_cmd_lock);
spin_lock(&cmd->t_state_lock);
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+ if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+ (cmd->transport_state & CMD_T_FABRIC_STOP)) {
spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
continue;
}
+ if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+ spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
+ cmd->transport_state |= CMD_T_ABORTED;
spin_unlock(&cmd->t_state_lock);
+ rc = kref_get_unless_zero(&cmd->cmd_kref);
+ if (!rc) {
+ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
+ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
+ spin_unlock(&sess->sess_cmd_lock);
+
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +284,26 @@
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
+ cancel_work_sync(&cmd->work);
+ transport_wait_for_tasks(cmd);
+
transport_cmd_finish_abort(cmd, 1);
+ target_put_sess_cmd(cmd);
}
}
static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
- struct se_node_acl *tmr_nacl,
+ struct se_session *tmr_sess,
int tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
+ struct se_session *sess;
struct se_cmd *cmd, *next;
unsigned long flags;
+ int rc;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +342,16 @@
if (prout_cmd == cmd)
continue;
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
+ continue;
+
+ spin_lock(&sess->sess_cmd_lock);
+ rc = __target_check_io_state(cmd, tmr_sess, tas);
+ spin_unlock(&sess->sess_cmd_lock);
+ if (!rc)
+ continue;
+
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
}
@@ -289,7 +359,7 @@
while (!list_empty(&drain_task_list)) {
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
- list_del(&cmd->state_list);
+ list_del_init(&cmd->state_list);
pr_debug("LUN_RESET: %s cmd: %p"
" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +383,11 @@
* loop above, but we do it down here given that
* cancel_work_sync may block.
*/
- if (cmd->t_state == TRANSPORT_COMPLETE)
- cancel_work_sync(&cmd->work);
+ cancel_work_sync(&cmd->work);
+ transport_wait_for_tasks(cmd);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- target_stop_cmd(cmd, &flags);
-
- cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
+ core_tmr_handle_tas_abort(cmd, tas);
+ target_put_sess_cmd(cmd);
}
}
@@ -334,6 +399,7 @@
{
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
+ struct se_session *tmr_sess = NULL;
int tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +418,9 @@
* or struct se_device passthrough..
*/
if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+ tmr_sess = tmr->task_cmd->se_sess;
+ tmr_nacl = tmr_sess->se_node_acl;
+ tmr_tpg = tmr_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
@@ -366,7 +433,7 @@
dev->transport->name, tas);
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
+ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
preempt_and_abort_list);
/*
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9f3608e..867bc6d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -534,9 +534,6 @@
}
EXPORT_SYMBOL(transport_deregister_session);
-/*
- * Called with cmd->t_state_lock held.
- */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -561,10 +558,6 @@
{
unsigned long flags;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (write_pending)
- cmd->t_state = TRANSPORT_WRITE_PENDING;
-
if (remove_from_lists) {
target_remove_from_state_list(cmd);
@@ -574,6 +567,10 @@
cmd->se_lun = NULL;
}
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (write_pending)
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
@@ -627,6 +624,8 @@
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
+ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
/*
@@ -638,7 +637,7 @@
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- if (remove)
+ if (remove && ack_kref)
transport_put_cmd(cmd);
}
@@ -694,19 +693,10 @@
}
/*
- * See if we are waiting to complete for an exception condition.
- */
- if (cmd->transport_state & CMD_T_REQUEST_STOP) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->task_stop_comp);
- return;
- }
-
- /*
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
*/
- if (cmd->transport_state & CMD_T_ABORTED &&
+ if (cmd->transport_state & CMD_T_ABORTED ||
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete_all(&cmd->t_transport_stop_comp);
@@ -721,10 +711,10 @@
cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (cmd->cpuid == -1)
- queue_work(target_completion_wq, &cmd->work);
- else
+ if (cmd->se_cmd_flags & SCF_USE_CPUID)
queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+ else
+ queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
@@ -1203,7 +1193,6 @@
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
init_completion(&cmd->cmd_wait_comp);
- init_completion(&cmd->task_stop_comp);
spin_lock_init(&cmd->t_state_lock);
kref_init(&cmd->cmd_kref);
cmd->transport_state = CMD_T_DEV_ACTIVE;
@@ -1437,6 +1426,12 @@
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
data_length, data_dir, task_attr, sense);
+
+ if (flags & TARGET_SCF_USE_CPUID)
+ se_cmd->se_cmd_flags |= SCF_USE_CPUID;
+ else
+ se_cmd->cpuid = WORK_CPU_UNBOUND;
+
if (flags & TARGET_SCF_UNKNOWN_SIZE)
se_cmd->unknown_data_length = 1;
/*
@@ -1635,33 +1630,6 @@
EXPORT_SYMBOL(target_submit_tmr);
/*
- * If the cmd is active, request it to be stopped and sleep until it
- * has completed.
- */
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
- __releases(&cmd->t_state_lock)
- __acquires(&cmd->t_state_lock)
-{
- bool was_active = false;
-
- if (cmd->transport_state & CMD_T_BUSY) {
- cmd->transport_state |= CMD_T_REQUEST_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
-
- pr_debug("cmd %p waiting to complete\n", cmd);
- wait_for_completion(&cmd->task_stop_comp);
- pr_debug("cmd %p stopped successfully\n", cmd);
-
- spin_lock_irqsave(&cmd->t_state_lock, *flags);
- cmd->transport_state &= ~CMD_T_REQUEST_STOP;
- cmd->transport_state &= ~CMD_T_BUSY;
- was_active = true;
- }
-
- return was_active;
-}
-
-/*
* Handle SAM-esque emulation for generic transport request failures.
*/
void transport_generic_request_failure(struct se_cmd *cmd,
@@ -1859,19 +1827,21 @@
return true;
}
+static int __transport_check_aborted_status(struct se_cmd *, int);
+
void target_execute_cmd(struct se_cmd *cmd)
{
/*
- * If the received CDB has aleady been aborted stop processing it here.
- */
- if (transport_check_aborted_status(cmd, 1))
- return;
-
- /*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
+ *
+ * If the received CDB has already been aborted, stop processing it here.
*/
spin_lock_irq(&cmd->t_state_lock);
+ if (__transport_check_aborted_status(cmd, 1)) {
+ spin_unlock_irq(&cmd->t_state_lock);
+ return;
+ }
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
__func__, __LINE__, cmd->tag);
@@ -2222,28 +2192,6 @@
}
/**
- * transport_release_cmd - free a command
- * @cmd: command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-static int transport_release_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
-
- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- core_tmr_release_req(cmd->se_tmr_req);
- if (cmd->t_task_cdb != cmd->__t_task_cdb)
- kfree(cmd->t_task_cdb);
- /*
- * If this cmd has been setup with target_get_sess_cmd(), drop
- * the kref and call ->release_cmd() in kref callback.
- */
- return target_put_sess_cmd(cmd);
-}
-
-/**
* transport_put_cmd - release a reference to a command
* @cmd: command to release
*
@@ -2251,8 +2199,12 @@
*/
static int transport_put_cmd(struct se_cmd *cmd)
{
- transport_free_pages(cmd);
- return transport_release_cmd(cmd);
+ BUG_ON(!cmd->se_tfo);
+ /*
+ * If this cmd has been setup with target_get_sess_cmd(), drop
+ * the kref and call ->release_cmd() in kref callback.
+ */
+ return target_put_sess_cmd(cmd);
}
void *transport_kmap_data_sg(struct se_cmd *cmd)
@@ -2450,34 +2402,58 @@
}
}
-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+static bool
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
+ unsigned long *flags);
+
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+{
int ret = 0;
+ bool aborted = false, tas = false;
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- transport_wait_for_tasks(cmd);
+ target_wait_free_cmd(cmd, &aborted, &tas);
- ret = transport_release_cmd(cmd);
+ if (!aborted || tas)
+ ret = transport_put_cmd(cmd);
} else {
if (wait_for_tasks)
- transport_wait_for_tasks(cmd);
+ target_wait_free_cmd(cmd, &aborted, &tas);
/*
* Handle WRITE failure case where transport_generic_new_cmd()
* has already added se_cmd to state_list, but fabric has
* failed command before I/O submission.
*/
- if (cmd->state_active) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->state_active)
target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- }
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
- ret = transport_put_cmd(cmd);
+ if (!aborted || tas)
+ ret = transport_put_cmd(cmd);
+ }
+ /*
+ * If the task has been internally aborted due to TMR ABORT_TASK
+ * or LUN_RESET, target_core_tmr.c is responsible for performing
+ * the remaining calls to target_put_sess_cmd(), and not the
+ * callers of this function.
+ */
+ if (aborted) {
+ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
+ wait_for_completion(&cmd->cmd_wait_comp);
+ cmd->se_tfo->release_cmd(cmd);
+ ret = 1;
}
return ret;
}
@@ -2517,26 +2493,46 @@
}
EXPORT_SYMBOL(target_get_sess_cmd);
+static void target_free_cmd_mem(struct se_cmd *cmd)
+{
+ transport_free_pages(cmd);
+
+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ core_tmr_release_req(cmd->se_tmr_req);
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
+ kfree(cmd->t_task_cdb);
+}
+
static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
unsigned long flags;
+ bool fabric_stop;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
return;
}
- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+
+ spin_lock(&se_cmd->t_state_lock);
+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+ spin_unlock(&se_cmd->t_state_lock);
+
+ if (se_cmd->cmd_wait_set || fabric_stop) {
+ list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
complete(&se_cmd->cmd_wait_comp);
return;
}
- list_del(&se_cmd->se_cmd_list);
+ list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
}
@@ -2548,6 +2544,7 @@
struct se_session *se_sess = se_cmd->se_sess;
if (!se_sess) {
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
return 1;
}
@@ -2564,6 +2561,7 @@
{
struct se_cmd *se_cmd;
unsigned long flags;
+ int rc;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2573,8 +2571,15 @@
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
- se_cmd->cmd_wait_set = 1;
+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+ rc = kref_get_unless_zero(&se_cmd->cmd_kref);
+ if (rc) {
+ se_cmd->cmd_wait_set = 1;
+ spin_lock(&se_cmd->t_state_lock);
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ spin_unlock(&se_cmd->t_state_lock);
+ }
+ }
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
@@ -2587,15 +2592,25 @@
{
struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
+ bool tas;
list_for_each_entry_safe(se_cmd, tmp_cmd,
&se_sess->sess_wait_list, se_cmd_list) {
- list_del(&se_cmd->se_cmd_list);
+ list_del_init(&se_cmd->se_cmd_list);
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
" %d\n", se_cmd, se_cmd->t_state,
se_cmd->se_tfo->get_cmd_state(se_cmd));
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ tas = (se_cmd->transport_state & CMD_T_TAS);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ if (!target_put_sess_cmd(se_cmd)) {
+ if (tas)
+ target_put_sess_cmd(se_cmd);
+ }
+
wait_for_completion(&se_cmd->cmd_wait_comp);
pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
" fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2617,6 +2632,58 @@
wait_for_completion(&lun->lun_ref_comp);
}
+static bool
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+ bool *aborted, bool *tas, unsigned long *flags)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
+{
+ assert_spin_locked(&cmd->t_state_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (fabric_stop)
+ cmd->transport_state |= CMD_T_FABRIC_STOP;
+
+ if (cmd->transport_state & CMD_T_ABORTED)
+ *aborted = true;
+
+ if (cmd->transport_state & CMD_T_TAS)
+ *tas = true;
+
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ return false;
+
+ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ return false;
+
+ if (!(cmd->transport_state & CMD_T_ACTIVE))
+ return false;
+
+ if (fabric_stop && *aborted)
+ return false;
+
+ cmd->transport_state |= CMD_T_STOP;
+
+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
+ " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+ wait_for_completion(&cmd->t_transport_stop_comp);
+
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+
+ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
+ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
+
+ return true;
+}
+
/**
* transport_wait_for_tasks - wait for completion to occur
* @cmd: command to wait
@@ -2627,43 +2694,13 @@
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
+ bool ret, aborted = false, tas = false;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return false;
- }
-
- if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return false;
- }
-
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return false;
- }
-
- cmd->transport_state |= CMD_T_STOP;
-
- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
- cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
-
+ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wait_for_completion(&cmd->t_transport_stop_comp);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
-
- pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
- cmd->tag);
-
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- return true;
+ return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
@@ -2845,28 +2882,49 @@
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
{
+ assert_spin_locked(&cmd->t_state_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+
if (!(cmd->transport_state & CMD_T_ABORTED))
return 0;
-
/*
* If cmd has been aborted but either no status is to be sent or it has
* already been sent, just return
*/
- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
+ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
+ if (send_status)
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
return 1;
+ }
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
- cmd->t_task_cdb[0], cmd->tag);
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
+ " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
trace_target_cmd_complete(cmd);
+
+ spin_unlock_irq(&cmd->t_state_lock);
cmd->se_tfo->queue_status(cmd);
+ spin_lock_irq(&cmd->t_state_lock);
return 1;
}
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+ int ret;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ ret = __transport_check_aborted_status(cmd, send_status);
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(transport_check_aborted_status);
void transport_send_task_abort(struct se_cmd *cmd)
@@ -2888,11 +2946,17 @@
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
- cmd->transport_state |= CMD_T_ABORTED;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto send_abort;
+ }
cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
}
+send_abort:
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
transport_lun_remove_cmd(cmd);
@@ -2909,8 +2973,17 @@
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ tmr->response = TMR_FUNCTION_REJECTED;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto check_stop;
+ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
switch (tmr->function) {
case TMR_ABORT_TASK:
core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2943,9 +3016,17 @@
break;
}
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto check_stop;
+ }
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
cmd->se_tfo->queue_tm_rsp(cmd);
+check_stop:
transport_cmd_check_stop_to_fabric(cmd);
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index dd600e5..94f5154 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -903,7 +903,7 @@
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
- info->mem[0].addr = (phys_addr_t) udev->mb_addr;
+ info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
info->mem[0].size = TCMU_RING_SIZE;
info->mem[0].memtype = UIO_MEM_VIRTUAL;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 56cf8e4..28ee5c2 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -94,5 +94,8 @@
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
bool target_sense_desc_format(struct se_device *dev);
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+ struct request_queue *q, int block_size);
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5d82816..e8c8c08 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -140,6 +140,8 @@
SCF_COMPARE_AND_WRITE = 0x00080000,
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
+ SCF_ACK_KREF = 0x00400000,
+ SCF_USE_CPUID = 0x00800000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -187,6 +189,7 @@
TARGET_SCF_BIDI_OP = 0x01,
TARGET_SCF_ACK_KREF = 0x02,
TARGET_SCF_UNKNOWN_SIZE = 0x04,
+ TARGET_SCF_USE_CPUID = 0x08,
};
/* fabric independent task management function values */
@@ -490,8 +493,9 @@
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
#define CMD_T_DEV_ACTIVE (1 << 7)
-#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
+#define CMD_T_TAS (1 << 10)
+#define CMD_T_FABRIC_STOP (1 << 11)
spinlock_t t_state_lock;
struct kref cmd_kref;
struct completion t_transport_stop_comp;
@@ -511,9 +515,6 @@
struct list_head state_list;
- /* old task stop completion, consider merging with some of the above */
- struct completion task_stop_comp;
-
/* backend private data */
void *priv;
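To summarize the CPU-affinity plumbing above: once the fabric passes
TARGET_SCF_USE_CPUID, target_complete_cmd() keys completion queuing off the new
SCF_USE_CPUID flag rather than the old cpuid == -1 convention, pinning the completion
work to se_cmd->cpuid via queue_work_on() only when explicitly requested. A minimal
userspace model of that dispatch decision (the struct and print stubs are illustrative
stand-ins, not the kernel API):

    #include <stdio.h>

    #define SCF_USE_CPUID    0x00800000
    #define WORK_CPU_UNBOUND (-1)       /* stand-in for the kernel constant */

    struct cmd_sketch {
            unsigned int se_cmd_flags;
            int cpuid;
    };

    /* Mirror of the new dispatch: pin the completion work to cmd->cpuid
     * only when the fabric explicitly asked for affinity. */
    static void complete_cmd(const struct cmd_sketch *cmd)
    {
            if (cmd->se_cmd_flags & SCF_USE_CPUID)
                    printf("queue_work_on(cpu %d)\n", cmd->cpuid);
            else
                    printf("queue_work(unbound)\n");
    }

    int main(void)
    {
            struct cmd_sketch pinned = { SCF_USE_CPUID, 2 };
            struct cmd_sketch unbound = { 0, WORK_CPU_UNBOUND };

            complete_cmd(&pinned);      /* -> queue_work_on(cpu 2) */
            complete_cmd(&unbound);     /* -> queue_work(unbound) */
            return 0;
    }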