
scsi: smartpqi: Avoid crashing kernel for controller issues

[ Upstream commit 9e68cccc8e ]

Eliminate kernel panics when the controller returns invalid responses.
Take the controller offline instead of panicking the kernel.

Link: https://lore.kernel.org/r/159622929306.30579.16523318707596752828.stgit@brunhilda
Reviewed-by: Scott Teel <scott.teel@microsemi.com>
Reviewed-by: Scott Benesh <scott.benesh@microsemi.com>
Reviewed-by: Prasad Munirathnam <Prasad.Munirathnam@microsemi.com>
Reviewed-by: Martin Wilck <mwilck@suse.com>
Signed-off-by: Kevin Barnett <kevin.barnett@microsemi.com>
Signed-off-by: Don Brace <don.brace@microsemi.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
5.4-rM2-2.2.x-imx-squashed
Kevin Barnett 2020-07-31 16:01:33 -05:00 committed by Greg Kroah-Hartman
parent d00555d225
commit 36df67bd00
2 changed files with 68 additions and 35 deletions
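The core of the change is mechanical: every place the driver used to WARN_ON() an impossible value coming back from the controller (producer index, request ID, I/O request refcount, IU type) now logs the value, calls a new pqi_invalid_response() helper that takes the controller offline, and returns -1 so the interrupt handler stops consuming the queue. Below is a minimal, standalone sketch of that check-log-offline-return pattern; all names (fake_ctrl, fake_take_ctrl_offline, fake_process_io_intr) are illustrative stand-ins, not the driver's actual API.

#include <stdio.h>

/* Hypothetical stand-in for the driver's controller state; not the real struct pqi_ctrl_info. */
struct fake_ctrl {
	unsigned int num_elements_per_oq;
	int offline;
};

/* Plays the role of pqi_take_ctrl_offline(): mark the controller unusable instead of panicking. */
static void fake_take_ctrl_offline(struct fake_ctrl *ctrl)
{
	ctrl->offline = 1;
}

/*
 * Validate a producer index reported by the hardware before using it.
 * Returns the number of responses to process, or -1 after taking the
 * controller offline when the value is impossible.
 */
static int fake_process_io_intr(struct fake_ctrl *ctrl, unsigned int oq_pi, unsigned int oq_ci)
{
	if (oq_pi >= ctrl->num_elements_per_oq) {
		fake_take_ctrl_offline(ctrl);
		fprintf(stderr,
			"producer index (%u) out of range (0-%u): consumer index: %u\n",
			oq_pi, ctrl->num_elements_per_oq - 1, oq_ci);
		return -1;
	}

	/* ...normal completion processing would run here... */
	return (int)((oq_pi - oq_ci) % ctrl->num_elements_per_oq);
}

int main(void)
{
	struct fake_ctrl ctrl = { .num_elements_per_oq = 32, .offline = 0 };

	/* A bogus index from a misbehaving controller no longer crashes; it offlines the device. */
	if (fake_process_io_intr(&ctrl, 64, 0) < 0)
		printf("controller offline: %d\n", ctrl.offline);

	return 0;
}

In the actual patch the same pattern guards the I/O queue producer index, the response's request ID, the matching request's refcount, the IU type, and the event queue producer index, as the hunks below show.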

--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h

@@ -357,7 +357,7 @@ struct pqi_event_response {
 	struct pqi_iu_header header;
 	u8	event_type;
 	u8	reserved2 : 7;
-	u8	request_acknowlege : 1;
+	u8	request_acknowledge : 1;
 	__le16	event_id;
 	__le32	additional_event_id;
 	union {

--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c

@@ -527,8 +527,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
 	default:
-		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
-			cmd);
+		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
 		break;
 	}
@@ -2450,7 +2449,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 		offload_to_mirror =
 			(offload_to_mirror >= layout_map_count - 1) ?
 				0 : offload_to_mirror + 1;
-		WARN_ON(offload_to_mirror >= layout_map_count);
 		device->offload_to_mirror = offload_to_mirror;
 		/*
 		 * Avoid direct use of device->offload_to_mirror within this
@@ -2903,10 +2901,14 @@ static int pqi_interpret_task_management_response(
 	return rc;
 }

-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
-	struct pqi_queue_group *queue_group)
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
 {
-	unsigned int num_responses;
+	pqi_take_ctrl_offline(ctrl_info);
+}
+
+static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
+{
+	int num_responses;
 	pqi_index_t oq_pi;
 	pqi_index_t oq_ci;
 	struct pqi_io_request *io_request;
@@ -2918,6 +2920,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
 	while (1) {
 		oq_pi = readl(queue_group->oq_pi);
+		if (oq_pi >= ctrl_info->num_elements_per_oq) {
+			pqi_invalid_response(ctrl_info);
+			dev_err(&ctrl_info->pci_dev->dev,
+				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
+			return -1;
+		}
 		if (oq_pi == oq_ci)
 			break;
@@ -2926,10 +2935,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
 			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

 		request_id = get_unaligned_le16(&response->request_id);
-		WARN_ON(request_id >= ctrl_info->max_io_slots);
+		if (request_id >= ctrl_info->max_io_slots) {
+			pqi_invalid_response(ctrl_info);
+			dev_err(&ctrl_info->pci_dev->dev,
+				"request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
+				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
+			return -1;
+		}

 		io_request = &ctrl_info->io_request_pool[request_id];
-		WARN_ON(atomic_read(&io_request->refcount) == 0);
+		if (atomic_read(&io_request->refcount) == 0) {
+			pqi_invalid_response(ctrl_info);
+			dev_err(&ctrl_info->pci_dev->dev,
+				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
+				request_id, oq_pi, oq_ci);
+			return -1;
+		}

 		switch (response->header.iu_type) {
 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
@@ -2959,24 +2980,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
 			io_request->error_info = ctrl_info->error_buffer +
 				(get_unaligned_le16(&response->error_index) *
 				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
-			pqi_process_io_error(response->header.iu_type,
-				io_request);
+			pqi_process_io_error(response->header.iu_type, io_request);
 			break;
 		default:
+			pqi_invalid_response(ctrl_info);
 			dev_err(&ctrl_info->pci_dev->dev,
-				"unexpected IU type: 0x%x\n",
-				response->header.iu_type);
-			break;
+				"unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
+				response->header.iu_type, oq_pi, oq_ci);
+			return -1;
 		}

-		io_request->io_complete_callback(io_request,
-			io_request->context);
+		io_request->io_complete_callback(io_request, io_request->context);

 		/*
 		 * Note that the I/O request structure CANNOT BE TOUCHED after
 		 * returning from the I/O completion callback!
 		 */
 		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
 	}
@@ -3289,9 +3308,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
 	}
 }

-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
 {
-	unsigned int num_events;
+	int num_events;
 	pqi_index_t oq_pi;
 	pqi_index_t oq_ci;
 	struct pqi_event_queue *event_queue;
@@ -3305,26 +3324,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
 	while (1) {
 		oq_pi = readl(event_queue->oq_pi);
+		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
+			pqi_invalid_response(ctrl_info);
+			dev_err(&ctrl_info->pci_dev->dev,
+				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
+			return -1;
+		}
+
 		if (oq_pi == oq_ci)
 			break;

 		num_events++;
-		response = event_queue->oq_element_array +
-			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

 		event_index =
 			pqi_event_type_to_event_index(response->event_type);

-		if (event_index >= 0) {
-			if (response->request_acknowlege) {
-				event = &ctrl_info->events[event_index];
-				event->pending = true;
-				event->event_type = response->event_type;
-				event->event_id = response->event_id;
-				event->additional_event_id =
-					response->additional_event_id;
+		if (event_index >= 0 && response->request_acknowledge) {
+			event = &ctrl_info->events[event_index];
+			event->pending = true;
+			event->event_type = response->event_type;
+			event->event_id = response->event_id;
+			event->additional_event_id = response->additional_event_id;
+			if (event->event_type == PQI_EVENT_TYPE_OFA)
 				pqi_ofa_capture_event_payload(event, response);
-			}
 		}

 		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
@@ -3439,7 +3463,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
 {
 	struct pqi_ctrl_info *ctrl_info;
 	struct pqi_queue_group *queue_group;
-	unsigned int num_responses_handled;
+	int num_io_responses_handled;
+	int num_events_handled;

 	queue_group = data;
 	ctrl_info = queue_group->ctrl_info;
@@ -3447,17 +3472,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
 	if (!pqi_is_valid_irq(ctrl_info))
 		return IRQ_NONE;

-	num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+	if (num_io_responses_handled < 0)
+		goto out;

-	if (irq == ctrl_info->event_irq)
-		num_responses_handled += pqi_process_event_intr(ctrl_info);
+	if (irq == ctrl_info->event_irq) {
+		num_events_handled = pqi_process_event_intr(ctrl_info);
+		if (num_events_handled < 0)
+			goto out;
+	} else {
+		num_events_handled = 0;
+	}

-	if (num_responses_handled)
+	if (num_io_responses_handled + num_events_handled > 0)
 		atomic_inc(&ctrl_info->num_interrupts);

 	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);

+out:
 	return IRQ_HANDLED;
 }