1
0
Fork 0

IB/srp: Add RDMA/CM support

Since the SRP_LOGIN_REQ defined in the SRP standard is larger than
what fits in the RDMA/CM login request private data, introduce a new
login request format for the RDMA/CM.

Note: since srp_daemon and ibsrpdm rely on the subnet manager and
since there is no equivalent of the IB subnet manager in non-IB
networks, login has to be performed manually for non-IB networks.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
hifive-unleashed-5.1
Bart Van Assche 2018-01-22 14:27:12 -08:00 committed by Doug Ledford
parent 172856eac7
commit 19f313438c
3 changed files with 603 additions and 151 deletions

View File

@ -41,6 +41,7 @@
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <linux/atomic.h>
@ -144,7 +145,9 @@ static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
@ -265,7 +268,7 @@ static void srp_qp_event(struct ib_event *event, void *context)
ib_event_msg(event->event), event->event);
}
static int srp_init_qp(struct srp_target_port *target,
static int srp_init_ib_qp(struct srp_target_port *target,
struct ib_qp *qp)
{
struct ib_qp_attr *attr;
@ -277,7 +280,7 @@ static int srp_init_qp(struct srp_target_port *target,
ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
target->srp_host->port,
be16_to_cpu(target->pkey),
be16_to_cpu(target->ib_cm.pkey),
&attr->pkey_index);
if (ret)
goto out;
@ -298,32 +301,110 @@ out:
return ret;
}
static int srp_new_cm_id(struct srp_rdma_ch *ch)
static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct ib_cm_id *new_cm_id;
new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
srp_cm_handler, ch);
srp_ib_cm_handler, ch);
if (IS_ERR(new_cm_id))
return PTR_ERR(new_cm_id);
if (ch->cm_id)
ib_destroy_cm_id(ch->cm_id);
ch->cm_id = new_cm_id;
if (ch->ib_cm.cm_id)
ib_destroy_cm_id(ch->ib_cm.cm_id);
ch->ib_cm.cm_id = new_cm_id;
if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
target->srp_host->port))
ch->path.rec_type = SA_PATH_REC_TYPE_OPA;
ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
else
ch->path.rec_type = SA_PATH_REC_TYPE_IB;
ch->path.sgid = target->sgid;
ch->path.dgid = target->orig_dgid;
ch->path.pkey = target->pkey;
ch->path.service_id = target->service_id;
ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
ch->ib_cm.path.sgid = target->sgid;
ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
ch->ib_cm.path.pkey = target->ib_cm.pkey;
ch->ib_cm.path.service_id = target->ib_cm.service_id;
return 0;
}
/*
 * Format the IPv4 or IPv6 address in @sa into @dst. Uses the kernel
 * printk extensions %pI4 / %pI6; an unrecognized address family yields
 * the literal string "???". Returns @dst for use inside printk argument
 * lists.
 */
static const char *inet_ntop(const void *sa, char *dst, unsigned int size)
{
	const struct sockaddr *addr = sa;

	if (addr->sa_family == AF_INET) {
		const struct sockaddr_in *a4 = sa;

		snprintf(dst, size, "%pI4", &a4->sin_addr);
	} else if (addr->sa_family == AF_INET6) {
		const struct sockaddr_in6 *a6 = sa;

		snprintf(dst, size, "%pI6", &a6->sin6_addr);
	} else {
		snprintf(dst, size, "???");
	}
	return dst;
}
/*
 * Allocate an RDMA/CM ID for @ch and resolve the target address.
 *
 * On success the new ID is installed in ch->rdma_cm.cm_id and any
 * previously installed ID is destroyed (via the swap + cleanup at "out").
 * On failure ch->rdma_cm.cm_id is left untouched. Returns 0 or a negative
 * errno.
 */
static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	char src_addr[64], dst_addr[64];
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		/* Clear so the cleanup path below does not destroy an ERR_PTR. */
		new_cm_id = NULL;
		goto out;
	}

	/* srp_rdma_cm_handler() completes ch->done on ADDR_RESOLVED/ADDR_ERROR. */
	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				(struct sockaddr *)&target->rdma_cm.src : NULL,
				(struct sockaddr *)&target->rdma_cm.dst,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %s to %s (%d)\n",
		       target->rdma_cm.src_specified ?
		       inet_ntop(&target->rdma_cm.src, src_addr,
				 sizeof(src_addr)) : "(any)",
		       inet_ntop(&target->rdma_cm.dst, dst_addr,
				 sizeof(dst_addr)),
		       ret);
		goto out;
	}
	/* Interrupted by a signal: bail out before reading ch->status. */
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	/* ch->status carries the outcome reported by the CM event handler. */
	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %s failed (%d)\n",
		       inet_ntop(&target->rdma_cm.dst, dst_addr,
				 sizeof(dst_addr)),
		       ret);
		goto out;
	}

	/* Install the new ID; the old one (if any) is destroyed below. */
	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}
/*
 * Create a fresh connection manager ID for @ch, dispatching on whether
 * the target was configured for the RDMA/CM or the IB/CM transport.
 */
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	if (ch->target->using_rdma_cm)
		return srp_new_rdma_cm_id(ch);

	return srp_new_ib_cm_id(ch);
}
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
struct srp_device *dev = target->srp_host->srp_dev;
@ -521,16 +602,25 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
if (target->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
qp = ch->rdma_cm.cm_id->qp;
} else {
qp = ib_create_qp(dev->pd, init_attr);
if (IS_ERR(qp)) {
if (!IS_ERR(qp)) {
ret = srp_init_ib_qp(target, qp);
if (ret)
ib_destroy_qp(qp);
} else {
ret = PTR_ERR(qp);
}
}
if (ret) {
pr_err("QP creation failed for dev %s: %d\n",
dev_name(&dev->dev->dev), ret);
goto err_send_cq;
}
ret = srp_init_qp(target, qp);
if (ret)
goto err_qp;
if (dev->use_fast_reg) {
fr_pool = srp_alloc_fr_pool(target);
if (IS_ERR(fr_pool)) {
@ -574,6 +664,9 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
return 0;
err_qp:
if (target->using_rdma_cm)
rdma_destroy_qp(ch->rdma_cm.cm_id);
else
ib_destroy_qp(qp);
err_send_cq:
@ -600,9 +693,16 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (!ch->target)
return;
if (ch->cm_id) {
ib_destroy_cm_id(ch->cm_id);
ch->cm_id = NULL;
if (target->using_rdma_cm) {
if (ch->rdma_cm.cm_id) {
rdma_destroy_id(ch->rdma_cm.cm_id);
ch->rdma_cm.cm_id = NULL;
}
} else {
if (ch->ib_cm.cm_id) {
ib_destroy_cm_id(ch->ib_cm.cm_id);
ch->ib_cm.cm_id = NULL;
}
}
/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
@ -658,16 +758,16 @@ static void srp_path_rec_completion(int status,
shost_printk(KERN_ERR, target->scsi_host,
PFX "Got failed path rec status %d\n", status);
else
ch->path = *pathrec;
ch->ib_cm.path = *pathrec;
complete(&ch->done);
}
static int srp_lookup_path(struct srp_rdma_ch *ch)
static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
int ret = -ENODEV;
ch->path.numb_path = 1;
ch->ib_cm.path.numb_path = 1;
init_completion(&ch->done);
@ -678,10 +778,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
if (!scsi_host_get(target->scsi_host))
goto out;
ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
&ch->path,
&ch->ib_cm.path,
IB_SA_PATH_REC_SERVICE_ID |
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
@ -690,8 +790,8 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
SRP_PATH_REC_TIMEOUT_MS,
GFP_KERNEL,
srp_path_rec_completion,
ch, &ch->path_query);
ret = ch->path_query_id;
ch, &ch->ib_cm.path_query);
ret = ch->ib_cm.path_query_id;
if (ret < 0)
goto put;
@ -703,9 +803,9 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
ch->path.sgid.raw, ch->path.dgid.raw,
be16_to_cpu(target->pkey),
be64_to_cpu(target->service_id));
ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
be16_to_cpu(target->ib_cm.pkey),
be64_to_cpu(target->ib_cm.service_id));
put:
scsi_host_put(target->scsi_host);
@ -714,6 +814,34 @@ out:
return ret;
}
/*
 * Resolve an RDMA route for the already address-resolved ch->rdma_cm.cm_id.
 *
 * srp_rdma_cm_handler() completes ch->done and sets ch->status when the
 * ROUTE_RESOLVED or ROUTE_ERROR event arrives. Returns 0 on success or a
 * negative errno.
 *
 * Fix: the return value of wait_for_completion_interruptible() was
 * previously ignored; if the wait was interrupted by a signal, ch->status
 * was read before the CM handler had written it, returning a stale value.
 * Propagate -ERESTARTSYS instead, matching srp_new_rdma_cm_id().
 */
static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	init_completion(&ch->done);

	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status != 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path resolution failed\n");

	return ch->status;
}
/*
 * Perform path/route lookup for @ch using whichever connection manager
 * the target was configured with.
 */
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	if (target->using_rdma_cm)
		return srp_rdma_lookup_path(ch);

	return srp_ib_lookup_path(ch);
}
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
struct ib_port_attr attr;
@ -735,8 +863,10 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
struct srp_target_port *target = ch->target;
struct {
struct ib_cm_req_param param;
struct srp_login_req priv;
struct rdma_conn_param rdma_param;
struct srp_login_req_rdma rdma_req;
struct ib_cm_req_param ib_param;
struct srp_login_req ib_req;
} *req = NULL;
char *ipi, *tpi;
int status;
@ -745,44 +875,62 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
if (!req)
return -ENOMEM;
req->param.flow_control = 1;
req->param.retry_count = target->tl_retry_count;
req->ib_param.flow_control = 1;
req->ib_param.retry_count = target->tl_retry_count;
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
req->param.responder_resources = 4;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;
req->ib_param.responder_resources = 4;
req->ib_param.rnr_retry_count = 7;
req->ib_param.max_cm_retries = 15;
req->priv.opcode = SRP_LOGIN_REQ;
req->priv.tag = 0;
req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
req->ib_req.opcode = SRP_LOGIN_REQ;
req->ib_req.tag = 0;
req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
SRP_MULTICHAN_SINGLE);
{
if (target->using_rdma_cm) {
req->rdma_param.flow_control = req->ib_param.flow_control;
req->rdma_param.responder_resources =
req->ib_param.responder_resources;
req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
req->rdma_param.retry_count = req->ib_param.retry_count;
req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
req->rdma_param.private_data = &req->rdma_req;
req->rdma_param.private_data_len = sizeof(req->rdma_req);
req->rdma_req.opcode = req->ib_req.opcode;
req->rdma_req.tag = req->ib_req.tag;
req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
req->rdma_req.req_flags = req->ib_req.req_flags;
ipi = req->rdma_req.initiator_port_id;
tpi = req->rdma_req.target_port_id;
} else {
u8 subnet_timeout;
subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req->param.primary_path = &ch->path;
req->param.alternate_path = NULL;
req->param.service_id = target->service_id;
get_random_bytes(&req->param.starting_psn, 4);
req->param.starting_psn &= 0xffffff;
req->param.qp_num = ch->qp->qp_num;
req->param.qp_type = ch->qp->qp_type;
req->param.local_cm_response_timeout = subnet_timeout + 2;
req->param.remote_cm_response_timeout = subnet_timeout + 2;
req->param.private_data = &req->priv;
req->param.private_data_len = sizeof(req->priv);
req->ib_param.primary_path = &ch->ib_cm.path;
req->ib_param.alternate_path = NULL;
req->ib_param.service_id = target->ib_cm.service_id;
get_random_bytes(&req->ib_param.starting_psn, 4);
req->ib_param.starting_psn &= 0xffffff;
req->ib_param.qp_num = ch->qp->qp_num;
req->ib_param.qp_type = ch->qp->qp_type;
req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
req->ib_param.private_data = &req->ib_req;
req->ib_param.private_data_len = sizeof(req->ib_req);
ipi = req->priv.initiator_port_id;
tpi = req->priv.target_port_id;
ipi = req->ib_req.initiator_port_id;
tpi = req->ib_req.target_port_id;
}
/*
@ -820,7 +968,10 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
}
status = ib_send_cm_req(ch->cm_id, &req->param);
if (target->using_rdma_cm)
status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
else
status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
kfree(req);
@ -847,14 +998,23 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
static void srp_disconnect_target(struct srp_target_port *target)
{
struct srp_rdma_ch *ch;
int i;
int i, ret;
/* XXX should send SRP_I_LOGOUT request */
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
ch->connected = false;
if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
ret = 0;
if (target->using_rdma_cm) {
if (ch->rdma_cm.cm_id)
rdma_disconnect(ch->rdma_cm.cm_id);
} else {
if (ch->ib_cm.cm_id)
ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
NULL, 0);
}
if (ret < 0) {
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Sending CM DREQ failed\n");
}
@ -968,6 +1128,7 @@ static void srp_remove_target(struct srp_target_port *target)
scsi_remove_host(target->scsi_host);
srp_stop_rport_timers(target->rport);
srp_disconnect_target(target);
kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
srp_free_ch_ib(target, ch);
@ -2355,7 +2516,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
struct srp_target_port *target = ch->target;
struct ib_qp_attr *qp_attr = NULL;
int attr_mask = 0;
int ret;
int ret = 0;
int i;
if (lrsp->opcode == SRP_LOGIN_RSP) {
@ -2385,8 +2546,17 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}
for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = ch->rx_ring[i];
ret = srp_post_recv(ch, iu);
if (ret)
goto error;
}
if (!target->using_rdma_cm) {
ret = -ENOMEM;
qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
if (!qp_attr)
goto error;
@ -2399,14 +2569,6 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (ret)
goto error_free;
for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = ch->rx_ring[i];
ret = srp_post_recv(ch, iu);
if (ret)
goto error_free;
}
qp_attr->qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
if (ret)
@ -2419,6 +2581,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error_free;
ret = ib_send_cm_rtu(cm_id, NULL, 0);
}
error_free:
kfree(qp_attr);
@ -2427,7 +2590,7 @@ error:
ch->status = ret;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event,
struct srp_rdma_ch *ch)
{
@ -2435,33 +2598,35 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
struct Scsi_Host *shost = target->scsi_host;
struct ib_class_port_info *cpi;
int opcode;
u16 dlid;
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
sa_path_set_dlid(&ch->path, ntohs(cpi->redirect_lid));
ch->path.pkey = cpi->redirect_pkey;
dlid = be16_to_cpu(cpi->redirect_lid);
sa_path_set_dlid(&ch->ib_cm.path, dlid);
ch->ib_cm.path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
ch->status = sa_path_get_dlid(&ch->path) ?
SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
break;
case IB_CM_REJ_PORT_REDIRECT:
if (srp_target_is_topspin(target)) {
union ib_gid *dgid = &ch->ib_cm.path.dgid;
/*
* Topspin/Cisco SRP gateways incorrectly send
* reject reason code 25 when they mean 24
* (port redirect).
*/
memcpy(ch->path.dgid.raw,
event->param.rej_rcvd.ari, 16);
memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
shost_printk(KERN_DEBUG, shost,
PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
be64_to_cpu(ch->path.dgid.global.subnet_prefix),
be64_to_cpu(ch->path.dgid.global.interface_id));
be64_to_cpu(dgid->global.subnet_prefix),
be64_to_cpu(dgid->global.interface_id));
ch->status = SRP_PORT_REDIRECT;
} else {
@ -2490,7 +2655,8 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
shost_printk(KERN_WARNING, shost, PFX
"SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
target->sgid.raw,
target->orig_dgid.raw, reason);
target->ib_cm.orig_dgid.raw,
reason);
} else
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
@ -2510,7 +2676,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
@ -2533,7 +2699,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
comp = 1;
srp_cm_rej_handler(cm_id, event, ch);
srp_ib_cm_rej_handler(cm_id, event, ch);
break;
case IB_CM_DREQ_RECEIVED:
@ -2571,6 +2737,135 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
/*
 * Handle an RDMA_CM_EVENT_REJECTED event: decode the reject reason in
 * event->status, log it, and record the connect outcome in ch->status
 * for the thread blocked in srp_send_req()/srp_connect_ch().
 */
static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
				    struct rdma_cm_event *event)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	int opcode;

	switch (event->status) {
	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;
	case IB_CM_REJ_CONSUMER_DEFINED:
		/* The target encoded an SRP-level reason in the private data. */
		opcode = *(u8 *) event->param.conn.private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej =
				(struct srp_login_rej *)
				event->param.conn.private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
				     opcode);
		}
		ch->status = -ECONNRESET;
		break;
	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: stale connection\n");
		/* Positive status: caller retries the login for stale connections. */
		ch->status = SRP_STALE_CONN;
		break;
	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->status);
		ch->status = -ECONNRESET;
		break;
	}
}
/*
 * RDMA/CM event callback for an SRP channel.
 *
 * Records the event outcome in ch->status and, for events that a waiter
 * is blocked on (address/route resolution, connect), completes ch->done
 * so srp_new_rdma_cm_id(), srp_rdma_lookup_path() and srp_send_req()
 * callers can proceed. Always returns 0 (event consumed).
 */
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;	/* set to 1 when a waiter must be woken via ch->done */

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ch->status = 0;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
		ch->status = -ENXIO;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ch->status = 0;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		ch->status = -EHOSTUNREACH;
		comp = 1;
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		comp = 1;
		/*
		 * cm_id argument is NULL: srp_cm_rep_handler() only uses it
		 * on the IB/CM path (!target->using_rdma_cm).
		 */
		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
		break;

	case RDMA_CM_EVENT_REJECTED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_rdma_cm_rej_handler(ch, event);
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		/* Remote-initiated disconnect: acknowledge and start recovery. */
		if (ch->connected) {
			shost_printk(KERN_WARNING, target->scsi_host,
				     PFX "received DREQ\n");
			rdma_disconnect(ch->rdma_cm.cm_id);
			comp = 1;
			ch->status = 0;
			queue_work(system_long_wq, &target->tl_err_work);
		}
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		ch->status = 0;
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}
/**
* srp_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
@ -2772,7 +3067,10 @@ static ssize_t show_service_id(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
if (target->using_rdma_cm)
return -ENOENT;
return sprintf(buf, "0x%016llx\n",
be64_to_cpu(target->ib_cm.service_id));
}
static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
@ -2780,7 +3078,9 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
if (target->using_rdma_cm)
return -ENOENT;
return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}
static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
@ -2797,7 +3097,9 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
struct srp_target_port *target = host_to_target(class_to_shost(dev));
struct srp_rdma_ch *ch = &target->ch[0];
return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
if (target->using_rdma_cm)
return -ENOENT;
return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}
static ssize_t show_orig_dgid(struct device *dev,
@ -2805,7 +3107,9 @@ static ssize_t show_orig_dgid(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
if (target->using_rdma_cm)
return -ENOENT;
return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}
static ssize_t show_req_lim(struct device *dev,
@ -3050,6 +3354,9 @@ static bool srp_conn_unique(struct srp_host *host,
if (t != target &&
target->id_ext == t->id_ext &&
target->ioc_guid == t->ioc_guid &&
(!target->using_rdma_cm ||
memcmp(&target->rdma_cm.dst, &t->rdma_cm.dst,
sizeof(target->rdma_cm.dst)) == 0) &&
target->initiator_ext == t->initiator_ext) {
ret = false;
break;
@ -3066,6 +3373,9 @@ out:
*
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
* pkey=<P_Key>,service_id=<service ID>
* or
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
* [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
*
* to the add_target sysfs attribute.
*/
@ -3086,11 +3396,19 @@ enum {
SRP_OPT_COMP_VECTOR = 1 << 12,
SRP_OPT_TL_RETRY_COUNT = 1 << 13,
SRP_OPT_QUEUE_SIZE = 1 << 14,
SRP_OPT_ALL = (SRP_OPT_ID_EXT |
SRP_OPT_IP_SRC = 1 << 15,
SRP_OPT_IP_DEST = 1 << 16,
};
static unsigned int srp_opt_mandatory[] = {
SRP_OPT_ID_EXT |
SRP_OPT_IOC_GUID |
SRP_OPT_DGID |
SRP_OPT_PKEY |
SRP_OPT_SERVICE_ID),
SRP_OPT_SERVICE_ID,
SRP_OPT_ID_EXT |
SRP_OPT_IOC_GUID |
SRP_OPT_IP_DEST,
};
static const match_table_t srp_opt_tokens = {
@ -3109,10 +3427,28 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
{ SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_ERR, NULL }
};
static int srp_parse_options(const char *buf, struct srp_target_port *target)
static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
const char *addr_port_str)
{
char *addr = kstrdup(addr_port_str, GFP_KERNEL);
char *port_str = addr;
int ret;
if (!addr)
return -ENOMEM;
strsep(&port_str, ":");
ret = inet_pton_with_scope(net, AF_UNSPEC, addr, port_str, sa);
kfree(addr);
return ret;
}
static int srp_parse_options(struct net *net, const char *buf,
struct srp_target_port *target)
{
char *options, *sep_opt;
char *p;
@ -3180,7 +3516,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
goto out;
}
ret = hex2bin(target->orig_dgid.raw, p, 16);
ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
kfree(p);
if (ret < 0)
goto out;
@ -3191,7 +3527,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("bad P_Key parameter '%s'\n", p);
goto out;
}
target->pkey = cpu_to_be16(token);
target->ib_cm.pkey = cpu_to_be16(token);
break;
case SRP_OPT_SERVICE_ID:
@ -3206,7 +3542,39 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
kfree(p);
goto out;
}
target->service_id = cpu_to_be64(ull);
target->ib_cm.service_id = cpu_to_be64(ull);
kfree(p);
break;
case SRP_OPT_IP_SRC:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
if (ret < 0) {
pr_warn("bad source parameter '%s'\n", p);
kfree(p);
goto out;
}
target->rdma_cm.src_specified = true;
kfree(p);
break;
case SRP_OPT_IP_DEST:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
if (ret < 0) {
pr_warn("bad dest parameter '%s'\n", p);
kfree(p);
goto out;
}
target->using_rdma_cm = true;
kfree(p);
break;
@ -3321,14 +3689,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
}
}
if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
ret = 0;
else
for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
!(srp_opt_tokens[i].token & opt_mask))
pr_warn("target creation request is missing parameter '%s'\n",
srp_opt_tokens[i].pattern);
break;
}
}
if (ret)
pr_warn("target creation request is missing one or more parameters\n");
if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
&& (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
@ -3369,6 +3737,7 @@ static ssize_t srp_create_target(struct device *dev,
target = host_to_target(target_host);
target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
@ -3390,18 +3759,29 @@ static ssize_t srp_create_target(struct device *dev,
if (ret < 0)
goto put;
ret = srp_parse_options(buf, target);
ret = srp_parse_options(target->net, buf, target);
if (ret)
goto out;
target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
if (!srp_conn_unique(target->srp_host, target)) {
if (target->using_rdma_cm) {
char dst_addr[64];
shost_printk(KERN_INFO, target->scsi_host,
PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%s\n",
be64_to_cpu(target->id_ext),
be64_to_cpu(target->ioc_guid),
inet_ntop(&target->rdma_cm.dst, dst_addr,
sizeof(dst_addr)));
} else {
shost_printk(KERN_INFO, target->scsi_host,
PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
be64_to_cpu(target->id_ext),
be64_to_cpu(target->ioc_guid),
be64_to_cpu(target->initiator_ext));
}
ret = -EEXIST;
goto out;
}
@ -3502,11 +3882,18 @@ static ssize_t srp_create_target(struct device *dev,
ret = srp_connect_ch(ch, multich);
if (ret) {
char dst[64];
if (target->using_rdma_cm)
inet_ntop(&target->rdma_cm.dst, dst,
sizeof(dst));
else
snprintf(dst, sizeof(dst), "%pI6",
target->ib_cm.orig_dgid.raw);
shost_printk(KERN_ERR, target->scsi_host,
PFX "Connection %d/%d to %pI6 failed\n",
PFX "Connection %d/%d to %s failed\n",
ch_start + cpu_idx,
target->ch_count,
ch->target->orig_dgid.raw);
target->ch_count, dst);
if (node_idx == 0 && cpu_idx == 0) {
goto free_ch;
} else {
@ -3531,13 +3918,25 @@ connected:
goto err_disconnect;
if (target->state != SRP_TARGET_REMOVED) {
if (target->using_rdma_cm) {
char dst[64];
inet_ntop(&target->rdma_cm.dst, dst, sizeof(dst));
shost_printk(KERN_DEBUG, target->scsi_host, PFX
"new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %s\n",
be64_to_cpu(target->id_ext),
be64_to_cpu(target->ioc_guid),
target->sgid.raw, dst);
} else {
shost_printk(KERN_DEBUG, target->scsi_host, PFX
"new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
be64_to_cpu(target->id_ext),
be64_to_cpu(target->ioc_guid),
be16_to_cpu(target->pkey),
be64_to_cpu(target->service_id),
target->sgid.raw, target->orig_dgid.raw);
be16_to_cpu(target->ib_cm.pkey),
be64_to_cpu(target->ib_cm.service_id),
target->sgid.raw,
target->ib_cm.orig_dgid.raw);
}
}
ret = count;
@ -3547,8 +3946,16 @@ out:
put:
scsi_host_put(target->scsi_host);
if (ret < 0)
if (ret < 0) {
/*
* If a call to srp_remove_target() has not been scheduled,
* drop the network namespace reference now that was obtained
* earlier in this function.
*/
if (target->state != SRP_TARGET_REMOVED)
kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
scsi_host_put(target->scsi_host);
}
return ret;

View File

@ -45,6 +45,7 @@
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
enum {
SRP_PATH_REC_TIMEOUT_MS = 1000,
@ -153,11 +154,18 @@ struct srp_rdma_ch {
struct completion done;
int status;
union {
struct ib_cm {
struct sa_path_rec path;
struct ib_sa_query *path_query;
int path_query_id;
struct ib_cm_id *cm_id;
} ib_cm;
struct rdma_cm {
struct rdma_cm_id *cm_id;
} rdma_cm;
};
struct srp_iu **tx_ring;
struct srp_iu **rx_ring;
struct srp_request *req_ring;
@ -182,6 +190,7 @@ struct srp_target_port {
/* read only in the hot path */
u32 global_rkey;
struct srp_rdma_ch *ch;
struct net *net;
u32 ch_count;
u32 lkey;
enum srp_target_state state;
@ -194,7 +203,6 @@ struct srp_target_port {
union ib_gid sgid;
__be64 id_ext;
__be64 ioc_guid;
__be64 service_id;
__be64 initiator_ext;
u16 io_class;
struct srp_host *srp_host;
@ -210,8 +218,28 @@ struct srp_target_port {
int comp_vector;
int tl_retry_count;
bool using_rdma_cm;
union {
struct {
__be64 service_id;
union ib_gid orig_dgid;
__be16 pkey;
} ib_cm;
struct {
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
struct sockaddr_storage ss;
} src;
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
struct sockaddr_storage ss;
} dst;
bool src_specified;
} rdma_cm;
};
u32 rq_tmo_jiffies;

View File

@ -129,6 +129,23 @@ struct srp_login_req {
u8 target_port_id[16];
};
/**
 * struct srp_login_req_rdma - RDMA/CM login parameters.
 *
 * RDMA/CM over InfiniBand can only carry 92 - 36 = 56 bytes of private
 * data. The %srp_login_req_rdma structure contains the same information as
 * %srp_login_req but with the reserved data removed, so that it fits in
 * the RDMA/CM connect request's private data. The initiator fills it in
 * by copying the corresponding %srp_login_req fields (see srp_send_req()).
 */
struct srp_login_req_rdma {
	u64	tag;			/* command tag; 0 for login */
	__be16	req_buf_fmt;		/* supported buffer formats (direct/indirect) */
	u8	req_flags;		/* SRP_MULTICHAN_SINGLE or SRP_MULTICHAN_MULTI */
	u8	opcode;			/* SRP_LOGIN_REQ */
	__be32	req_it_iu_len;		/* requested initiator-to-target IU length */
	u8	initiator_port_id[16];
	u8	target_port_id[16];
};
/*
* The SRP spec defines the size of the LOGIN_RSP structure to be 52
* bytes, so it needs to be packed to avoid having it padded to 56