1
0
Fork 0

SCSI fixes on 20210327

Seven fixes, all in drivers (qla2xxx, mpt3sas, qedi, target,
 ibmvscsi).  The most serious are the target pscsi oom and the qla2xxx
 revert which can otherwise cause a use after free.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYF/V2yYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pisha+AAQCSO7Wv
 Wshi7H9N8SQUJO9EcmkrSApGDtjHUfZYlnse0AD/UhbpCSzSEPzI23lfduMB3QTa
 o5wmwHEIG8ULneB57vE=
 =f44O
 -----END PGP SIGNATURE-----

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Seven fixes, all in drivers (qla2xxx, mpt3sas, qedi, target,
  ibmvscsi).

  The most serious are the target pscsi oom and the qla2xxx revert which
  can otherwise cause a use after free"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: target: pscsi: Clean up after failure in pscsi_map_sg()
  scsi: target: pscsi: Avoid OOM in pscsi_map_sg()
  scsi: mpt3sas: Fix error return code of mpt3sas_base_attach()
  scsi: qedi: Fix error return code of qedi_alloc_global_queues()
  scsi: Revert "qla2xxx: Make sure that aborted commands are freed"
  scsi: ibmvfc: Make ibmvfc_wait_for_ops() MQ aware
  scsi: ibmvfc: Fix potential race in ibmvfc_wait_for_ops()
rM2-mainline
Linus Torvalds 2021-03-28 11:34:47 -07:00
commit e8cfe8fa22
6 changed files with 74 additions and 28 deletions

View File

@ -2371,6 +2371,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
return 0; return 0;
} }
/**
* ibmvfc_event_is_free - Check if event is free or not
* @evt: ibmvfc event struct
*
* Linearly scans the free list of the queue that owns @evt and reports
* whether @evt is currently linked on it (i.e. not an in-flight command).
*
* NOTE(review): this walks evt->queue->free without taking any lock
* itself; the callers visible in this change hold the queue's l_lock
* around the call — confirm every caller does.
*
* Returns:
* true if @evt is on its queue's free list, false otherwise
**/
static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
{
struct ibmvfc_event *loop_evt;
/* O(n) membership test: compare each free-list entry's address to @evt. */
list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
if (loop_evt == evt)
return true;
return false;
}
/** /**
* ibmvfc_wait_for_ops - Wait for ops to complete * ibmvfc_wait_for_ops - Wait for ops to complete
* @vhost: ibmvfc host struct * @vhost: ibmvfc host struct
@ -2385,35 +2403,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
{ {
struct ibmvfc_event *evt; struct ibmvfc_event *evt;
DECLARE_COMPLETION_ONSTACK(comp); DECLARE_COMPLETION_ONSTACK(comp);
int wait; int wait, i, q_index, q_size;
unsigned long flags; unsigned long flags;
signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
struct ibmvfc_queue *queues;
ENTER; ENTER;
if (vhost->mq_enabled && vhost->using_channels) {
queues = vhost->scsi_scrqs.scrqs;
q_size = vhost->scsi_scrqs.active_queues;
} else {
queues = &vhost->crq;
q_size = 1;
}
do { do {
wait = 0; wait = 0;
spin_lock_irqsave(&vhost->crq.l_lock, flags); spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(evt, &vhost->crq.sent, queue_list) { for (q_index = 0; q_index < q_size; q_index++) {
if (match(evt, device)) { spin_lock(&queues[q_index].l_lock);
evt->eh_comp = &comp; for (i = 0; i < queues[q_index].evt_pool.size; i++) {
wait++; evt = &queues[q_index].evt_pool.events[i];
if (!ibmvfc_event_is_free(evt)) {
if (match(evt, device)) {
evt->eh_comp = &comp;
wait++;
}
}
} }
spin_unlock(&queues[q_index].l_lock);
} }
spin_unlock_irqrestore(&vhost->crq.l_lock, flags); spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait) { if (wait) {
timeout = wait_for_completion_timeout(&comp, timeout); timeout = wait_for_completion_timeout(&comp, timeout);
if (!timeout) { if (!timeout) {
wait = 0; wait = 0;
spin_lock_irqsave(&vhost->crq.l_lock, flags); spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(evt, &vhost->crq.sent, queue_list) { for (q_index = 0; q_index < q_size; q_index++) {
if (match(evt, device)) { spin_lock(&queues[q_index].l_lock);
evt->eh_comp = NULL; for (i = 0; i < queues[q_index].evt_pool.size; i++) {
wait++; evt = &queues[q_index].evt_pool.events[i];
if (!ibmvfc_event_is_free(evt)) {
if (match(evt, device)) {
evt->eh_comp = NULL;
wait++;
}
}
} }
spin_unlock(&queues[q_index].l_lock);
} }
spin_unlock_irqrestore(&vhost->crq.l_lock, flags); spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait) if (wait)
dev_err(vhost->dev, "Timed out waiting for aborted commands\n"); dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
LEAVE; LEAVE;

View File

@ -7806,14 +7806,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz++; ioc->pend_os_device_add_sz++;
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL); GFP_KERNEL);
if (!ioc->pend_os_device_add) if (!ioc->pend_os_device_add) {
r = -ENOMEM;
goto out_free_resources; goto out_free_resources;
}
ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
ioc->device_remove_in_progress = ioc->device_remove_in_progress =
kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
if (!ioc->device_remove_in_progress) if (!ioc->device_remove_in_progress) {
r = -ENOMEM;
goto out_free_resources; goto out_free_resources;
}
ioc->fwfault_debug = mpt3sas_fwfault_debug; ioc->fwfault_debug = mpt3sas_fwfault_debug;

View File

@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
if (!qedi->global_queues[i]) { if (!qedi->global_queues[i]) {
QEDI_ERR(&qedi->dbg_ctx, QEDI_ERR(&qedi->dbg_ctx,
"Unable to allocation global queue %d.\n", i); "Unable to allocation global queue %d.\n", i);
status = -ENOMEM;
goto mem_alloc_failure; goto mem_alloc_failure;
} }

View File

@ -3222,8 +3222,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) { (cmd->sess && cmd->sess->deleted)) {
cmd->state = QLA_TGT_STATE_PROCESSED; cmd->state = QLA_TGT_STATE_PROCESSED;
res = 0; return 0;
goto free;
} }
ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@ -3234,8 +3233,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt); &full_req_cnt);
if (unlikely(res != 0)) if (unlikely(res != 0)) {
goto free; return res;
}
spin_lock_irqsave(qpair->qp_lock_ptr, flags); spin_lock_irqsave(qpair->qp_lock_ptr, flags);
@ -3255,8 +3255,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
vha->flags.online, qla2x00_reset_active(vha), vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset); cmd->reset_count, qpair->chip_reset);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
res = 0; return 0;
goto free;
} }
/* Does F/W have an IOCBs for this request */ /* Does F/W have an IOCBs for this request */
@ -3359,8 +3358,6 @@ out_unmap_unlock:
qlt_unmap_sg(vha, cmd); qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
free:
vha->hw->tgt.tgt_ops->free_cmd(cmd);
return res; return res;
} }
EXPORT_SYMBOL(qlt_xmit_response); EXPORT_SYMBOL(qlt_xmit_response);

View File

@ -644,7 +644,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{ {
struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd); struct qla_tgt_cmd, se_cmd);
struct scsi_qla_host *vha = cmd->vha;
if (cmd->aborted) { if (cmd->aborted) {
/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
@ -657,7 +656,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->se_cmd.transport_state, cmd->se_cmd.transport_state,
cmd->se_cmd.t_state, cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags); cmd->se_cmd.se_cmd_flags);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
return 0; return 0;
} }
@ -685,7 +683,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{ {
struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd); struct qla_tgt_cmd, se_cmd);
struct scsi_qla_host *vha = cmd->vha;
int xmit_type = QLA_TGT_XMIT_STATUS; int xmit_type = QLA_TGT_XMIT_STATUS;
if (cmd->aborted) { if (cmd->aborted) {
@ -699,7 +696,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd, kref_read(&cmd->se_cmd.cmd_kref),
cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags); cmd->se_cmd.se_cmd_flags);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
return 0; return 0;
} }
cmd->bufflen = se_cmd->data_length; cmd->bufflen = se_cmd->data_length;

View File

@ -882,7 +882,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio) { if (!bio) {
new_bio: new_bio:
nr_vecs = bio_max_segs(nr_pages); nr_vecs = bio_max_segs(nr_pages);
nr_pages -= nr_vecs;
/* /*
* Calls bio_kmalloc() and sets bio->bi_end_io() * Calls bio_kmalloc() and sets bio->bi_end_io()
*/ */
@ -939,6 +938,14 @@ new_bio:
return 0; return 0;
fail: fail:
if (bio)
bio_put(bio);
while (req->bio) {
bio = req->bio;
req->bio = bio->bi_next;
bio_put(bio);
}
req->biotail = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
} }