nvme: split nvme_trans_send_fw_cmd

This function handles two totally different opcodes, so split it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Author:    Christoph Hellwig <hch@lst.de>  2015-05-22 11:12:40 +02:00
Committer: Jens Axboe <axboe@fb.com>
parent e75ec752d7
commit b90c48d0c1


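The effect of the split is easiest to see at the call sites: the firmware-activate path no longer threads the unused length/offset arguments through. A minimal before/after sketch, distilled from the hunks below:

	/* before: one helper multiplexed on the admin opcode */
	res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
					parm_list_length, buffer_offset, buffer_id);

	/* after: a dedicated helper that only needs the buffer id */
	res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);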
@@ -1554,10 +1554,25 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	return res;
 }
 
-/* Write Buffer Helper Functions */
-/* Also using this for Format Unit with hdr passed as NULL, and buffer_id, 0 */
+static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					u8 buffer_id)
+{
+	struct nvme_command c;
+	int nvme_sc;
+	int res;
+
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = nvme_admin_activate_fw;
+	c.common.cdw10[0] = cpu_to_le32(buffer_id | NVME_FWACT_REPL_ACTV);
 
-static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		return res;
+	return nvme_sc;
+}
+
+static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 					u8 opcode, u32 tot_len, u32 offset,
 					u8 buffer_id)
 {
@@ -1569,37 +1584,30 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	unsigned length;
 
 	memset(&c, 0, sizeof(c));
-	c.common.opcode = opcode;
-	if (opcode == nvme_admin_download_fw) {
-		if (hdr->iovec_count > 0) {
-			/* Assuming SGL is not allowed for this command */
-			res = nvme_trans_completion(hdr,
-						SAM_STAT_CHECK_CONDITION,
-						ILLEGAL_REQUEST,
-						SCSI_ASC_INVALID_CDB,
-						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-			goto out;
-		}
-		iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
-				(unsigned long)hdr->dxferp, tot_len);
-		if (IS_ERR(iod)) {
-			res = PTR_ERR(iod);
-			goto out;
-		}
-		length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
-		if (length != tot_len) {
-			res = -ENOMEM;
-			goto out_unmap;
-		}
+	c.common.opcode = nvme_admin_download_fw;
 
-		c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-		c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
-		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
-		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
-	} else if (opcode == nvme_admin_activate_fw) {
-		u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
-		c.common.cdw10[0] = cpu_to_le32(cdw10);
+	if (hdr->iovec_count > 0) {
+		/* Assuming SGL is not allowed for this command */
+		return nvme_trans_completion(hdr,
+					SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST,
+					SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
 	}
 
+	iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
+			(unsigned long)hdr->dxferp, tot_len);
+	if (IS_ERR(iod))
+		return PTR_ERR(iod);
+	length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
+	if (length != tot_len) {
+		res = -ENOMEM;
+		goto out_unmap;
+	}
+
+	c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
+	c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
+	c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
 	nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c);
 	res = nvme_trans_status_code(hdr, nvme_sc);
@@ -1609,11 +1617,8 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	res = nvme_sc;
 
  out_unmap:
-	if (opcode == nvme_admin_download_fw) {
-		nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
-		nvme_free_iod(dev, iod);
-	}
- out:
+	nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
+	nvme_free_iod(dev, iod);
 	return res;
 }
@@ -2769,7 +2774,7 @@ static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	}
 
 	/* Attempt to activate any previously downloaded firmware image */
-	res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);
+	res = nvme_trans_send_activate_fw_cmd(ns, hdr, 0);
 
 	/* Determine Block size and count and send format command */
 	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
@@ -2829,24 +2834,20 @@ static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
 	switch (mode) {
 	case DOWNLOAD_SAVE_ACTIVATE:
-		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
 						parm_list_length, buffer_offset,
 						buffer_id);
 		if (res != SNTI_TRANSLATION_SUCCESS)
 			goto out;
-		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
-						parm_list_length, buffer_offset,
-						buffer_id);
+		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
 		break;
 	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
-		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
 						parm_list_length, buffer_offset,
 						buffer_id);
 		break;
 	case ACTIVATE_DEFERRED_MICROCODE:
-		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
-						parm_list_length, buffer_offset,
-						buffer_id);
+		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
 		break;
 	default:
 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,