Merge branch 'nvmf-4.8-rc' of git://git.infradead.org/nvme-fabrics into for-linus
Sagi writes: Mostly stability fixes and cleanups:
- NQN endianness fix from Daniel
- possible use-after-free fix from Vincent
- nvme-rdma connect semantics fixes from Jay
- removal of redundant variables in the rdma driver
- Kbuild fix from Christoph
- nvmf_host referencing fix from Christoph
- uninitialized variable fix from Colin
commit d8d8d9d789
@@ -31,7 +31,7 @@ config NVME_FABRICS
 config NVME_RDMA
 	tristate "NVM Express over Fabrics RDMA host driver"
 	depends on INFINIBAND
-	depends on BLK_DEV_NVME
+	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
 	help
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
 	mutex_lock(&nvmf_hosts_mutex);
 	host = __nvmf_host_find(hostnqn);
-	if (host)
+	if (host) {
+		kref_get(&host->ref);
 		goto out_unlock;
+	}
 
 	host = kmalloc(sizeof(*host), GFP_KERNEL);
 	if (!host)
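The hunk above restores the take-a-reference-before-unlock idiom: a cached object found under the lock must be pinned before the lock drops, or a concurrent put can free it out from under the caller. A hedged sketch of the pattern (struct host, host_lock, and __host_find are illustrative stand-ins, not the driver's real symbols):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct host {
	struct kref ref;
	struct list_head list;
};

static DEFINE_MUTEX(host_lock);

/* Hypothetical lookup; the list walk is omitted for brevity. */
static struct host *__host_find(const char *nqn);

static struct host *host_lookup_get(const char *nqn)
{
	struct host *h;

	mutex_lock(&host_lock);
	h = __host_find(nqn);
	if (h)
		kref_get(&h->ref);	/* take the reference while still
					 * under the lock, or a concurrent
					 * put can free h */
	mutex_unlock(&host_lock);
	return h;			/* caller now owns one reference */
}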
@@ -56,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
 	kref_init(&host->ref);
 	memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-	uuid_le_gen(&host->id);
+	uuid_be_gen(&host->id);
 
 	list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -73,9 +75,9 @@ static struct nvmf_host *nvmf_host_default(void)
 		return NULL;
 
 	kref_init(&host->ref);
-	uuid_le_gen(&host->id);
+	uuid_be_gen(&host->id);
 	snprintf(host->nqn, NVMF_NQN_SIZE,
-		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id);
+		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
 	mutex_lock(&nvmf_hosts_mutex);
 	list_add_tail(&host->list, &nvmf_hosts);
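The uuid_le to uuid_be switch matters because the hostid travels on the wire and the generated NQN embeds its text form. A standalone illustration (plain C, not kernel code) of how the same 16 bytes render under the two conventions, mirroring what the kernel's %pUb and %pUl format specifiers do:

#include <stdio.h>
#include <stdint.h>

/* Big-endian rendering (RFC 4122 byte order, like %pUb). */
static void print_uuid_be(const uint8_t *b)
{
	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
	       "%02x%02x%02x%02x%02x%02x\n",
	       b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
	       b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]);
}

/* Little-endian rendering (like %pUl): the first three fields are
 * byte-swapped, so the text form differs for the same bytes. */
static void print_uuid_le(const uint8_t *b)
{
	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
	       "%02x%02x%02x%02x%02x%02x\n",
	       b[3], b[2], b[1], b[0], b[5], b[4], b[7], b[6],
	       b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]);
}

int main(void)
{
	const uint8_t id[16] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc,
				 0xde, 0xf0, 0x01, 0x23, 0x45, 0x67,
				 0x89, 0xab, 0xcd, 0xef };

	print_uuid_be(id);	/* 12345678-9abc-def0-0123-456789abcdef */
	print_uuid_le(id);	/* 78563412-bc9a-f0de-0123-456789abcdef */
	return 0;
}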
@@ -363,7 +365,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	cmd.connect.opcode = nvme_fabrics_command;
 	cmd.connect.fctype = nvme_fabrics_type_connect;
 	cmd.connect.qid = 0;
-	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+	/*
+	 * fabrics spec sets a minimum of depth 32 for admin queue,
+	 * so set the queue with this depth always until
+	 * justification otherwise.
+	 */
+	cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
 	/*
 	 * Set keep-alive timeout in seconds granularity (ms * 1000)
 	 * and add a grace period for controller kato enforcement
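SQSIZE is a 0's-based field: a queue with room for N entries is encoded as N - 1 on the wire. With the Fabrics-mandated admin depth of 32 (NVMF_AQ_DEPTH), the encoded value is therefore 31. A tiny illustration, assuming only the spec convention:

#include <assert.h>
#include <stdint.h>

#define NVMF_AQ_DEPTH	32	/* minimum admin queue depth per the spec */

/* 0's-based encoding: a usable depth of N is carried as N - 1. */
static uint16_t sqsize_encode(uint16_t depth)
{
	return depth - 1;
}

int main(void)
{
	assert(sqsize_encode(NVMF_AQ_DEPTH) == 31);
	return 0;
}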
@@ -375,7 +384,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	if (!data)
 		return -ENOMEM;
 
-	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
 	data->cntlid = cpu_to_le16(0xffff);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -434,7 +443,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	if (!data)
 		return -ENOMEM;
 
-	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
 	data->cntlid = cpu_to_le16(ctrl->cntlid);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -34,7 +34,7 @@ struct nvmf_host {
 	struct kref		ref;
 	struct list_head	list;
 	char			nqn[NVMF_NQN_SIZE];
-	uuid_le			id;
+	uuid_be			id;
 };
 
 /**
@@ -43,10 +43,6 @@
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS	1
 
-#define NVME_RDMA_MAX_PAGES_PER_MR	512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY	20
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -77,7 +73,6 @@ struct nvme_rdma_request {
 	u32			num_sge;
 	int			nents;
 	bool			inline_data;
-	bool			need_inval;
 	struct ib_reg_wr	reg_wr;
 	struct ib_cqe		reg_cqe;
 	struct nvme_rdma_queue	*queue;
@@ -286,7 +281,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	int ret = 0;
 
-	if (!req->need_inval)
+	if (!req->mr->need_inval)
 		goto out;
 
 	ib_dereg_mr(req->mr);
@@ -298,7 +293,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 		req->mr = NULL;
 	}
 
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 out:
 	return ret;
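The repeated s/req->need_inval/req->mr->need_inval/ across these hunks is more than a rename: the MR is torn down and reallocated across reconnects, so invalidation state parked on the request can end up describing an MR that no longer exists. A hedged sketch of the ownership change (illustrative types only; the real ones are struct ib_mr and struct nvme_rdma_request):

#include <linux/types.h>

struct demo_mr {
	u32	rkey;
	bool	need_inval;	/* rkey registered, not yet invalidated */
};

struct demo_request {
	struct demo_mr	*mr;	/* swapped on reinit; the flag now
				 * travels with the object it describes */
};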
@@ -645,7 +640,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
-		ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+		ret = nvme_rdma_init_queue(ctrl, i,
+				ctrl->ctrl.opts->queue_size);
 		if (ret) {
 			dev_info(ctrl->ctrl.device,
 				"failed to initialize i/o queue: %d\n", ret);
@@ -849,7 +845,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	if (!blk_rq_bytes(rq))
 		return;
 
-	if (req->need_inval) {
+	if (req->mr->need_inval) {
 		res = nvme_rdma_inv_rkey(queue, req);
 		if (res < 0) {
 			dev_err(ctrl->ctrl.device,
@@ -935,7 +931,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 			IB_ACCESS_REMOTE_READ |
 			IB_ACCESS_REMOTE_WRITE;
 
-	req->need_inval = true;
+	req->mr->need_inval = true;
 
 	sg->addr = cpu_to_le64(req->mr->iova);
 	put_unaligned_le24(req->mr->length, sg->length);
@@ -958,7 +954,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	req->num_sge = 1;
 	req->inline_data = false;
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1145,7 +1141,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 
 	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
 	    wc->ex.invalidate_rkey == req->mr->rkey)
-		req->need_inval = false;
+		req->mr->need_inval = false;
 
 	blk_mq_complete_request(rq, status);
 
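The condition this hunk touches is a small protocol optimization: if the completion carries IB_WC_WITH_INVALIDATE and the invalidated rkey matches this request's MR, the target already invalidated it remotely (Send with Invalidate), so the host can skip its own local invalidate. Roughly, as a sketch against the standard verbs types:

#include <rdma/ib_verbs.h>

/* True if the controller invalidated our rkey via Send with
 * Invalidate, making a local IB_WR_LOCAL_INV work request redundant. */
static bool rkey_remotely_invalidated(const struct ib_wc *wc, u32 rkey)
{
	return (wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
	       wc->ex.invalidate_rkey == rkey;
}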
@@ -1278,8 +1274,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 
 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
 	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
-	priv.hrqsize = cpu_to_le16(queue->queue_size);
-	priv.hsqsize = cpu_to_le16(queue->queue_size);
+	/*
+	 * set the admin queue depth to the minimum size
+	 * specified by the Fabrics standard.
+	 */
+	if (priv.qid == 0) {
+		priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+		priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+	} else {
+		/*
+		 * current interpretation of the fabrics spec
+		 * is at minimum you make hrqsize sqsize+1, or a
+		 * 1's based representation of sqsize.
+		 */
+		priv.hrqsize = cpu_to_le16(queue->queue_size);
+		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+	}
 
 	ret = rdma_connect(queue->cm_id, &param);
 	if (ret) {
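The CM private data mixes conventions: hrqsize is 1's-based (the actual receive depth) while hsqsize is 0's-based, so a consistent pairing always satisfies hrqsize == hsqsize + 1; that is also why the target side below adds 1 back when sizing its receive queue. A worked sketch of the sizing logic above, under those assumptions:

#include <assert.h>
#include <stdint.h>

#define NVMF_AQ_DEPTH	32

static void conn_sizes(uint16_t qid, uint16_t queue_size,
		       uint16_t *hrqsize, uint16_t *hsqsize)
{
	if (qid == 0) {			/* admin queue: spec minimum */
		*hrqsize = NVMF_AQ_DEPTH;
		*hsqsize = NVMF_AQ_DEPTH - 1;
	} else {			/* I/O queue, from queue_size= */
		*hrqsize = queue_size;		/* 1's-based */
		*hsqsize = queue_size - 1;	/* 0's-based sqsize */
	}
}

int main(void)
{
	uint16_t rq, sq;

	conn_sizes(1, 128, &rq, &sq);
	assert(rq == sq + 1);		/* holds for the admin queue too */
	return 0;
}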
@@ -1319,7 +1329,7 @@ out_destroy_queue_ib:
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	int ret;
+	int ret = 0;
 
 	/* Own the controller deletion */
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
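This is the uninitialized-variable fix from the merge summary: on the early-exit path, ret was returned without ever being assigned. A schematic of the bug shape (illustrative names, not the driver's):

#include <stdbool.h>

struct demo_ctrl { int state; };

/* Stand-ins for the state-machine helpers. */
static bool try_own_deletion(struct demo_ctrl *c) { return c->state == 0; }
static int do_delete(struct demo_ctrl *c) { c->state = 1; return 0; }

static int demo_unplug(struct demo_ctrl *ctrl)
{
	int ret = 0;	/* was "int ret;": stack garbage on the early path */

	if (!try_own_deletion(ctrl))
		goto out;	/* previously returned uninitialized ret */

	ret = do_delete(ctrl);
out:
	return ret;
}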
@@ -1461,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
 		flush = true;
 	ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-			req->need_inval ? &req->reg_wr.wr : NULL, flush);
+			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
 	if (ret) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;
@@ -1816,7 +1826,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_rdma_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1914,7 +1924,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	spin_lock_init(&ctrl->lock);
 
 	ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
-	ctrl->ctrl.sqsize = opts->queue_size;
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
 	ret = -ENOMEM;
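Several hunks here (and the loop-driver ones below) draw the same line: opts->queue_size is the 1's-based depth the user asked for and is what the block layer tag set and queue allocation should see, while ctrl.sqsize is its 0's-based wire encoding. In sketch form (hypothetical names):

#include <stdint.h>

struct demo_sizes {
	uint16_t queue_size;	/* 1's-based: tag_set depth, queue alloc */
	uint16_t sqsize;	/* 0's-based: value sent in CONNECT */
};

static struct demo_sizes demo_sizes_from_opt(uint16_t opt_queue_size)
{
	return (struct demo_sizes){
		.queue_size	= opt_queue_size,	/* e.g. 128 */
		.sqsize		= opt_queue_size - 1,	/* e.g. 127 */
	};
}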
@@ -15,8 +15,8 @@ config NVME_TARGET
 
 config NVME_TARGET_LOOP
 	tristate "NVMe loopback device support"
-	depends on BLK_DEV_NVME
 	depends on NVME_TARGET
+	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
 	help
@@ -556,7 +556,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_loop_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -620,7 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 
 	ret = -ENOMEM;
 
-	ctrl->ctrl.sqsize = opts->queue_size;
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
 	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
@@ -978,10 +978,11 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
 		container_of(w, struct nvmet_rdma_queue, release_work);
 	struct rdma_cm_id *cm_id = queue->cm_id;
 	struct nvmet_rdma_device *dev = queue->dev;
+	enum nvmet_rdma_queue_state state = queue->state;
 
 	nvmet_rdma_free_queue(queue);
 
-	if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
 		rdma_destroy_id(cm_id);
 
 	kref_put(&dev->ref, nvmet_rdma_free_dev);
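This is the use-after-free fix from the summary: queue->state was read after nvmet_rdma_free_queue() may have released the memory backing it. Copying the field into a local before the free is the whole fix. Schematically (illustrative names, userspace stand-ins):

#include <stdlib.h>

enum demo_state { DEMO_LIVE, DEMO_IN_DEVICE_REMOVAL };

struct demo_queue { enum demo_state state; };

static void demo_destroy_id(void) { /* stand-in for rdma_destroy_id */ }

static void demo_release(struct demo_queue *queue)
{
	enum demo_state state = queue->state;	/* snapshot before free */

	free(queue);				/* queue is now invalid */

	if (state != DEMO_IN_DEVICE_REMOVAL)	/* safe: uses the copy */
		demo_destroy_id();
}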
@@ -1003,10 +1004,10 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
 	queue->host_qid = le16_to_cpu(req->qid);
 
 	/*
-	 * req->hsqsize corresponds to our recv queue size
+	 * req->hsqsize corresponds to our recv queue size plus 1
 	 * req->hrqsize corresponds to our send queue size
 	 */
-	queue->recv_queue_size = le16_to_cpu(req->hsqsize);
+	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
 	queue->send_queue_size = le16_to_cpu(req->hrqsize);
 
 	if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
@@ -794,7 +794,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-	uuid_le		hostid;
+	uuid_be		hostid;
 	__le16		cntlid;
 	char		resv4[238];
 	char		subsysnqn[NVMF_NQN_FIELD_LEN];