
target: Fix LUN_RESET active TMR descriptor handling

This patch fixes a NULL pointer dereference / se_cmd->cmd_kref < 0
refcount bug during TMR LUN_RESET with active TMRs, triggered during
se_cmd + se_tmr_req descriptor shutdown and release via
core_tmr_drain_tmr_list().

To address this bug, obtain a local
kref_get_unless_zero(&se_cmd->cmd_kref) reference when marking active
I/O as CMD_T_ABORTED, then use transport_wait_for_tasks() followed by
a final target_put_sess_cmd() to drop the local ->cmd_kref.
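
As background, the value of kref_get_unless_zero() here is that the
reset path only pins a descriptor whose final release has not already
started; if the get fails, the command is skipped instead of being
touched after free. Below is a minimal userspace sketch of that
pattern, using C11 atomics in place of the kernel kref API; the names
fake_cmd, cmd_get_unless_zero() and cmd_put() are illustrative only,
not part of the target core.

/*
 * Sketch of the "take a reference only if the command is still alive"
 * pattern, with C11 atomics standing in for the kernel's kref API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_cmd {
        atomic_int refcount;    /* plays the role of se_cmd->cmd_kref */
        bool aborted;           /* plays the role of CMD_T_ABORTED */
};

/* Succeeds only while at least one reference is still held elsewhere. */
static bool cmd_get_unless_zero(struct fake_cmd *cmd)
{
        int old = atomic_load(&cmd->refcount);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&cmd->refcount, &old, old + 1))
                        return true;
        }
        return false;   /* final release already ran; do not touch the command */
}

static void cmd_put(struct fake_cmd *cmd)
{
        if (atomic_fetch_sub(&cmd->refcount, 1) == 1) {
                printf("last reference dropped, freeing command\n");
                free(cmd);
        }
}

int main(void)
{
        struct fake_cmd *cmd = malloc(sizeof(*cmd));

        atomic_init(&cmd->refcount, 1); /* fabric's initial reference */
        cmd->aborted = false;

        /* LUN_RESET path: pin the descriptor before marking it aborted. */
        if (cmd_get_unless_zero(cmd)) {
                cmd->aborted = true;
                /* ... wait for outstanding work to finish ... */
                cmd_put(cmd);   /* drop the local reference taken above */
        }

        cmd_put(cmd);   /* fabric drops its initial reference */
        return 0;
}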

Also add two new checks within target_tmr_work() so that a command
already marked CMD_T_ABORTED never reaches the TFO->queue_tm_rsp()
callback, and instead proceeds directly to the backend -> fabric put
in transport_cmd_check_stop_to_fabric().
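
Both checks have the same shape: test the abort flag under the state
lock and, if it is already set, skip the response callback entirely
and jump to the shared completion label. The following is a condensed,
runnable userspace sketch of that shape; tmr_work(), queue_response()
and check_stop_to_fabric() are stand-in names for illustration, not
the kernel functions.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_tmr {
        pthread_mutex_t state_lock;
        bool aborted;           /* stands in for CMD_T_ABORTED */
};

static void queue_response(struct fake_tmr *tmr)
{
        printf("queueing TM response to the fabric\n");
}

static void check_stop_to_fabric(struct fake_tmr *tmr)
{
        printf("dropping the command back to the fabric\n");
}

static void tmr_work(struct fake_tmr *tmr)
{
        pthread_mutex_lock(&tmr->state_lock);
        if (tmr->aborted) {
                /* LUN_RESET already claimed this TMR: no response callback. */
                pthread_mutex_unlock(&tmr->state_lock);
                goto check_stop;
        }
        pthread_mutex_unlock(&tmr->state_lock);

        queue_response(tmr);
check_stop:
        check_stop_to_fabric(tmr);
}

int main(void)
{
        struct fake_tmr tmr = {
                .state_lock = PTHREAD_MUTEX_INITIALIZER,
                .aborted = true,
        };

        tmr_work(&tmr); /* prints only the check_stop message */
        return 0;
}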

For good measure, also change core_tmr_release_req() to use
list_del_init() ahead of freeing the se_tmr_req memory.
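
The practical effect of list_del_init() is that the entry is left
pointing at itself rather than at its old neighbours, so a later
emptiness check or repeated unlink of the already-removed tmr_list
entry is harmless. A tiny userspace sketch of that behaviour follows,
with a hand-rolled node type standing in for the kernel's
struct list_head; the node_*() helpers are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *next, *prev;
};

static void node_init(struct node *n)
{
        n->next = n;
        n->prev = n;
}

static bool node_empty(const struct node *n)
{
        return n->next == n;
}

static void node_add_tail(struct node *n, struct node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static void node_del_init(struct node *n)
{
        n->next->prev = n->prev;        /* unlink from neighbours */
        n->prev->next = n->next;
        node_init(n);                   /* leave the entry self-pointing */
}

int main(void)
{
        struct node list, entry;

        node_init(&list);
        node_add_tail(&entry, &list);

        node_del_init(&entry);
        /* A plain unlink would leave entry->next/prev pointing at stale
         * neighbours; after del_init the entry reports itself as unqueued. */
        printf("entry empty after del_init: %s\n",
               node_empty(&entry) ? "yes" : "no");
        return 0;
}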

Reviewed-by: Quinn Tran <quinn.tran@qlogic.com>
Cc: Himanshu Madhani <himanshu.madhani@qlogic.com>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Andy Grover <agrover@redhat.com>
Cc: Mike Christie <mchristi@redhat.com>
Cc: stable@vger.kernel.org # 3.10+
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Nicholas Bellinger 2016-01-11 21:53:05 -08:00
parent febe562c20
commit a6d9bb1c96
2 changed files with 38 additions and 1 deletion

drivers/target/target_core_tmr.c

@@ -68,7 +68,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
 
         if (dev) {
                 spin_lock_irqsave(&dev->se_tmr_lock, flags);
-                list_del(&tmr->tmr_list);
+                list_del_init(&tmr->tmr_list);
                 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
         }
 
@@ -194,9 +194,11 @@ static void core_tmr_drain_tmr_list(
         struct list_head *preempt_and_abort_list)
 {
         LIST_HEAD(drain_tmr_list);
+        struct se_session *sess;
         struct se_tmr_req *tmr_p, *tmr_pp;
         struct se_cmd *cmd;
         unsigned long flags;
+        bool rc;
         /*
          * Release all pending and outgoing TMRs aside from the received
          * LUN_RESET tmr..
@@ -222,17 +224,31 @@ static void core_tmr_drain_tmr_list(
                 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                         continue;
 
+                sess = cmd->se_sess;
+                if (WARN_ON_ONCE(!sess))
+                        continue;
+
+                spin_lock(&sess->sess_cmd_lock);
                 spin_lock(&cmd->t_state_lock);
                 if (!(cmd->transport_state & CMD_T_ACTIVE)) {
                         spin_unlock(&cmd->t_state_lock);
+                        spin_unlock(&sess->sess_cmd_lock);
                         continue;
                 }
                 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
                         spin_unlock(&cmd->t_state_lock);
+                        spin_unlock(&sess->sess_cmd_lock);
                         continue;
                 }
+                cmd->transport_state |= CMD_T_ABORTED;
                 spin_unlock(&cmd->t_state_lock);
 
+                rc = kref_get_unless_zero(&cmd->cmd_kref);
+                spin_unlock(&sess->sess_cmd_lock);
+                if (!rc) {
+                        printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
+                        continue;
+                }
                 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
         }
         spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -246,7 +262,11 @@ static void core_tmr_drain_tmr_list(
                         (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
                         tmr_p->function, tmr_p->response, cmd->t_state);
 
+                cancel_work_sync(&cmd->work);
+                transport_wait_for_tasks(cmd);
+
                 transport_cmd_finish_abort(cmd, 1);
+                target_put_sess_cmd(cmd);
         }
 }

drivers/target/target_core_transport.c

@@ -2900,8 +2900,17 @@ static void target_tmr_work(struct work_struct *work)
         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
         struct se_device *dev = cmd->se_dev;
         struct se_tmr_req *tmr = cmd->se_tmr_req;
+        unsigned long flags;
         int ret;
 
+        spin_lock_irqsave(&cmd->t_state_lock, flags);
+        if (cmd->transport_state & CMD_T_ABORTED) {
+                tmr->response = TMR_FUNCTION_REJECTED;
+                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+                goto check_stop;
+        }
+        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
         switch (tmr->function) {
         case TMR_ABORT_TASK:
                 core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2934,9 +2943,17 @@ static void target_tmr_work(struct work_struct *work)
                 break;
         }
 
+        spin_lock_irqsave(&cmd->t_state_lock, flags);
+        if (cmd->transport_state & CMD_T_ABORTED) {
+                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+                goto check_stop;
+        }
         cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
         cmd->se_tfo->queue_tm_rsp(cmd);
+
+check_stop:
         transport_cmd_check_stop_to_fabric(cmd);
 }