IB/mad: Convert allocations from kmem_cache to kzalloc

This patch implements allocating alternate receive MAD buffers within the MAD
stack.  Support for OPA to send/recv variable sized MADs is implemented later.

    1) Convert MAD allocations from kmem_cache to kzalloc

       kzalloc is more flexible in supporting devices with different sized MADs,
       and research and testing showed that the current use of kmem_cache
       provides no performance benefit over kzalloc.

    2) Change struct ib_mad_private to use a flex array for the mad data
       (see the sketch after this list)
    3) Allocate ib_mad_private based on the size specified by devices in
       rdma_max_mad_size.
    4) Carry the allocated size in ib_mad_private to be used when processing
       ib_mad_private objects.
    5) Alter DMA mappings based on the mad_size of ib_mad_private.
    6) Replace the use of sizeof and static defines as appropriate
    7) Add appropriate casts for the MAD data when calling processing
       functions.

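The core of items 2) through 5) is the allocation pattern sketched below: the fixed-size union is replaced by a trailing flexible array, the allocator records how many MAD bytes were actually allocated, and later copies and DMA lengths use that recorded size instead of sizeof(struct ib_mad). This is only an illustrative userspace sketch, not the kernel code: it substitutes calloc for kzalloc, a hard-coded size for rdma_max_mad_size(), and simplified stand-in structures.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative stand-ins only; the real fields live in mad_priv.h. */
    struct grh { unsigned char raw[40]; };      /* placeholder for struct ib_grh */

    struct mad_private {
            size_t mad_size;                    /* bytes allocated for mad[] */
            struct grh grh;                     /* GRH precedes the MAD on receive */
            unsigned char mad[];                /* flex array: 256B IB or 2KB OPA MAD */
    };

    /* Mirrors alloc_mad_private(): one zeroed allocation sized per device. */
    static struct mad_private *alloc_mad_private(size_t mad_size)
    {
            struct mad_private *mp = calloc(1, sizeof(*mp) + mad_size);

            if (mp)
                    mp->mad_size = mad_size;    /* carried for later copies/DMA maps */
            return mp;
    }

    /* Mirrors mad_priv_dma_size(): the receive mapping covers GRH + MAD data. */
    static size_t mad_priv_dma_size(const struct mad_private *mp)
    {
            return sizeof(mp->grh) + mp->mad_size;
    }

    int main(void)
    {
            size_t max_mad_size = 256;          /* stand-in for rdma_max_mad_size() */
            struct mad_private *mp = alloc_mad_private(max_mad_size);

            if (!mp)
                    return 1;
            memset(mp->mad, 0, mp->mad_size);   /* size-based, not sizeof(struct ib_mad) */
            printf("DMA length: %zu\n", mad_priv_dma_size(mp));
            free(mp);
            return 0;
    }
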
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author:    Ira Weiny <ira.weiny@intel.com> (2015-06-06 14:38:30 -04:00)
Committer: Doug Ledford <dledford@redhat.com>
commit c9082e51b6
parent 337877a466
4 changed files with 97 additions and 92 deletions

drivers/infiniband/core/agent.c

@@ -78,9 +78,9 @@ ib_get_agent_port(const struct ib_device *device, int port_num)
         return entry;
 }
 
-void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
-                         const struct ib_wc *wc, const struct ib_device *device,
-                         int port_num, int qpn)
+void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
+                         const struct ib_wc *wc, const struct ib_device *device,
+                         int port_num, int qpn, size_t resp_mad_len)
 {
         struct ib_agent_port_private *port_priv;
         struct ib_mad_agent *agent;
@@ -107,7 +107,8 @@ void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
         }
 
         send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
-                                      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+                                      IB_MGMT_MAD_HDR,
+                                      resp_mad_len - IB_MGMT_MAD_HDR,
                                       GFP_KERNEL,
                                       IB_MGMT_BASE_VERSION);
         if (IS_ERR(send_buf)) {
@@ -115,7 +116,7 @@ void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
                 goto err1;
         }
 
-        memcpy(send_buf->mad, mad, sizeof *mad);
+        memcpy(send_buf->mad, mad_hdr, resp_mad_len);
         send_buf->ah = ah;
 
         if (device->node_type == RDMA_NODE_IB_SWITCH) {

drivers/infiniband/core/agent.h

@@ -44,8 +44,8 @@ extern int ib_agent_port_open(struct ib_device *device, int port_num);
 
 extern int ib_agent_port_close(struct ib_device *device, int port_num);
 
-extern void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
+extern void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
                                 const struct ib_wc *wc, const struct ib_device *device,
-                                int port_num, int qpn);
+                                int port_num, int qpn, size_t resp_mad_len);
 
 #endif  /* __AGENT_H_ */

drivers/infiniband/core/mad.c

@@ -59,8 +59,6 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
 
-static struct kmem_cache *ib_mad_cache;
-
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
@@ -717,6 +715,32 @@ static void build_smp_wc(struct ib_qp *qp,
         wc->port_num = port_num;
 }
 
+static size_t mad_priv_size(const struct ib_mad_private *mp)
+{
+        return sizeof(struct ib_mad_private) + mp->mad_size;
+}
+
+static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
+{
+        size_t size = sizeof(struct ib_mad_private) + mad_size;
+        struct ib_mad_private *ret = kzalloc(size, flags);
+
+        if (ret)
+                ret->mad_size = mad_size;
+
+        return ret;
+}
+
+static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
+{
+        return rdma_max_mad_size(port_priv->device, port_priv->port_num);
+}
+
+static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
+{
+        return sizeof(struct ib_grh) + mp->mad_size;
+}
+
 /*
  * Return 0 if SMP is to be sent
  * Return 1 if SMP was consumed locally (whether or not solicited)
@@ -736,6 +760,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         u8 port_num;
         struct ib_wc mad_wc;
         struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+        size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
 
         if (device->node_type == RDMA_NODE_IB_SWITCH &&
             smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
@@ -771,7 +796,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         }
         local->mad_priv = NULL;
         local->recv_mad_agent = NULL;
-        mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
+        mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
         if (!mad_priv) {
                 ret = -ENOMEM;
                 dev_err(&device->dev, "No memory for local response MAD\n");
@@ -786,12 +811,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 
         /* No GRH for DR SMP */
         ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
-                                  (struct ib_mad *)smp,
-                                  (struct ib_mad *)&mad_priv->mad);
+                                  (const struct ib_mad *)smp,
+                                  (struct ib_mad *)mad_priv->mad);
         switch (ret)
         {
         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-                if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) &&
+                if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
                     mad_agent_priv->agent.recv_handler) {
                         local->mad_priv = mad_priv;
                         local->recv_mad_agent = mad_agent_priv;
@@ -801,33 +826,33 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                          */
                         atomic_inc(&mad_agent_priv->refcount);
                 } else
-                        kmem_cache_free(ib_mad_cache, mad_priv);
+                        kfree(mad_priv);
                 break;
         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
-                kmem_cache_free(ib_mad_cache, mad_priv);
+                kfree(mad_priv);
                 break;
         case IB_MAD_RESULT_SUCCESS:
                 /* Treat like an incoming receive MAD */
                 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                             mad_agent_priv->agent.port_num);
                 if (port_priv) {
-                        memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
+                        memcpy(mad_priv->mad, smp, mad_priv->mad_size);
                         recv_mad_agent = find_mad_agent(port_priv,
-                                                        &mad_priv->mad.mad.mad_hdr);
+                                                        (const struct ib_mad_hdr *)mad_priv->mad);
                 }
                 if (!port_priv || !recv_mad_agent) {
                         /*
                          * No receiving agent so drop packet and
                          * generate send completion.
                          */
-                        kmem_cache_free(ib_mad_cache, mad_priv);
+                        kfree(mad_priv);
                         break;
                 }
                 local->mad_priv = mad_priv;
                 local->recv_mad_agent = recv_mad_agent;
                 break;
         default:
-                kmem_cache_free(ib_mad_cache, mad_priv);
+                kfree(mad_priv);
                 kfree(local);
                 ret = -EINVAL;
                 goto out;
@@ -877,7 +902,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
         struct ib_rmpp_segment *seg = NULL;
         int left, seg_size, pad;
 
-        send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
+        send_buf->seg_size = sizeof(struct ib_mad) - send_buf->hdr_len;
         seg_size = send_buf->seg_size;
         pad = send_wr->pad;
@@ -1238,7 +1263,7 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
                                     recv_wc);
                 priv = container_of(mad_priv_hdr, struct ib_mad_private,
                                     header);
-                kmem_cache_free(ib_mad_cache, priv);
+                kfree(priv);
         }
 }
 EXPORT_SYMBOL(ib_free_recv_mad);
@@ -1933,58 +1958,62 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
                                      struct ib_mad_private *response)
 {
         enum smi_forward_action retsmi;
+        struct ib_smp *smp = (struct ib_smp *)recv->mad;
 
-        if (smi_handle_dr_smp_recv(&recv->mad.smp,
+        if (smi_handle_dr_smp_recv(smp,
                                    port_priv->device->node_type,
                                    port_num,
                                    port_priv->device->phys_port_cnt) ==
             IB_SMI_DISCARD)
                 return IB_SMI_DISCARD;
 
-        retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
+        retsmi = smi_check_forward_dr_smp(smp);
         if (retsmi == IB_SMI_LOCAL)
                 return IB_SMI_HANDLE;
 
         if (retsmi == IB_SMI_SEND) { /* don't forward */
-                if (smi_handle_dr_smp_send(&recv->mad.smp,
+                if (smi_handle_dr_smp_send(smp,
                                            port_priv->device->node_type,
                                            port_num) == IB_SMI_DISCARD)
                         return IB_SMI_DISCARD;
 
-                if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
+                if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
                         return IB_SMI_DISCARD;
 
         } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
                 /* forward case for switches */
-                memcpy(response, recv, sizeof(*response));
+                memcpy(response, recv, mad_priv_size(response));
                 response->header.recv_wc.wc = &response->header.wc;
-                response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+                response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
                 response->header.recv_wc.recv_buf.grh = &response->grh;
 
-                agent_send_response(&response->mad.mad,
+                agent_send_response((const struct ib_mad_hdr *)response->mad,
                                     &response->grh, wc,
                                     port_priv->device,
-                                    smi_get_fwd_port(&recv->mad.smp),
-                                    qp_info->qp->qp_num);
+                                    smi_get_fwd_port(smp),
+                                    qp_info->qp->qp_num,
+                                    response->mad_size);
                 return IB_SMI_DISCARD;
         }
         return IB_SMI_HANDLE;
 }
 
-static bool generate_unmatched_resp(struct ib_mad_private *recv,
+static bool generate_unmatched_resp(const struct ib_mad_private *recv,
                                     struct ib_mad_private *response)
 {
-        if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
-            recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
-                memcpy(response, recv, sizeof *response);
+        const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
+        struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
+
+        if (recv_hdr->method == IB_MGMT_METHOD_GET ||
+            recv_hdr->method == IB_MGMT_METHOD_SET) {
+                memcpy(response, recv, mad_priv_size(response));
                 response->header.recv_wc.wc = &response->header.wc;
-                response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+                response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
                 response->header.recv_wc.recv_buf.grh = &response->grh;
-                response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
-                response->mad.mad.mad_hdr.status =
-                        cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
-                if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-                        response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
+                resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
+                resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+                if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+                        resp_hdr->status |= IB_SMP_DIRECTION;
 
                 return true;
         } else {
@@ -2011,25 +2040,24 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
         ib_dma_unmap_single(port_priv->device,
                             recv->header.mapping,
-                            sizeof(struct ib_mad_private) -
-                            sizeof(struct ib_mad_private_header),
+                            mad_priv_dma_size(recv),
                             DMA_FROM_DEVICE);
 
         /* Setup MAD receive work completion from "normal" work completion */
         recv->header.wc = *wc;
         recv->header.recv_wc.wc = &recv->header.wc;
         recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
-        recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
+        recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
         recv->header.recv_wc.recv_buf.grh = &recv->grh;
         if (atomic_read(&qp_info->snoop_count))
                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
 
         /* Validate MAD */
-        if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num))
+        if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info->qp->qp_num))
                 goto out;
 
-        response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
+        response = alloc_mad_private(recv->mad_size, GFP_ATOMIC);
         if (!response) {
                 dev_err(&port_priv->device->dev,
                         "ib_mad_recv_done_handler no memory for response buffer\n");
@@ -2041,7 +2069,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
         else
                 port_num = port_priv->port_num;
 
-        if (recv->mad.mad.mad_hdr.mgmt_class ==
+        if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                 if (handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
                                   response)
@@ -2054,23 +2082,24 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                 ret = port_priv->device->process_mad(port_priv->device, 0,
                                                      port_priv->port_num,
                                                      wc, &recv->grh,
-                                                     &recv->mad.mad,
-                                                     &response->mad.mad);
+                                                     (const struct ib_mad *)recv->mad,
+                                                     (struct ib_mad *)response->mad);
                 if (ret & IB_MAD_RESULT_SUCCESS) {
                         if (ret & IB_MAD_RESULT_CONSUMED)
                                 goto out;
                         if (ret & IB_MAD_RESULT_REPLY) {
-                                agent_send_response(&response->mad.mad,
+                                agent_send_response((const struct ib_mad_hdr *)response->mad,
                                                     &recv->grh, wc,
                                                     port_priv->device,
                                                     port_num,
-                                                    qp_info->qp->qp_num);
+                                                    qp_info->qp->qp_num,
+                                                    response->mad_size);
                                 goto out;
                         }
                 }
         }
 
-        mad_agent = find_mad_agent(port_priv, &recv->mad.mad.mad_hdr);
+        mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
         if (mad_agent) {
                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
                 /*
@@ -2080,16 +2109,16 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                 recv = NULL;
         } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
                    generate_unmatched_resp(recv, response)) {
-                agent_send_response(&response->mad.mad, &recv->grh, wc,
-                                    port_priv->device, port_num, qp_info->qp->qp_num);
+                agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
+                                    port_priv->device, port_num,
+                                    qp_info->qp->qp_num, response->mad_size);
         }
 
 out:
         /* Post another receive request for this QP */
         if (response) {
                 ib_mad_post_receive_mads(qp_info, response);
-                if (recv)
-                        kmem_cache_free(ib_mad_cache, recv);
+                kfree(recv);
         } else
                 ib_mad_post_receive_mads(qp_info, recv);
 }
@@ -2521,7 +2550,7 @@ static void local_completions(struct work_struct *work)
                                       &local->mad_priv->header.recv_wc.rmpp_list);
                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
                         local->mad_priv->header.recv_wc.recv_buf.mad =
-                                                &local->mad_priv->mad.mad;
+                                                (struct ib_mad *)local->mad_priv->mad;
                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
                                 snoop_recv(recv_mad_agent->qp_info,
                                            &local->mad_priv->header.recv_wc,
@@ -2549,7 +2578,7 @@ local_send_completion:
                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
                 atomic_dec(&mad_agent_priv->refcount);
                 if (free_mad)
-                        kmem_cache_free(ib_mad_cache, local->mad_priv);
+                        kfree(local->mad_priv);
                 kfree(local);
         }
         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
@@ -2664,7 +2693,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
 
         /* Initialize common scatter list fields */
-        sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
         sg_list.lkey = (*qp_info->port_priv->mr).lkey;
 
         /* Initialize common receive WR fields */
@@ -2678,7 +2706,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                         mad_priv = mad;
                         mad = NULL;
                 } else {
-                        mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
+                        mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
+                                                     GFP_ATOMIC);
                         if (!mad_priv) {
                                 dev_err(&qp_info->port_priv->device->dev,
                                         "No memory for receive buffer\n");
@@ -2686,10 +2715,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                 break;
                         }
                 }
+                sg_list.length = mad_priv_dma_size(mad_priv);
                 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
                                                  &mad_priv->grh,
-                                                 sizeof *mad_priv -
-                                                 sizeof mad_priv->header,
+                                                 mad_priv_dma_size(mad_priv),
                                                  DMA_FROM_DEVICE);
                 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                   sg_list.addr))) {
@@ -2713,10 +2742,9 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                         spin_unlock_irqrestore(&recv_queue->lock, flags);
                         ib_dma_unmap_single(qp_info->port_priv->device,
                                             mad_priv->header.mapping,
-                                            sizeof *mad_priv -
-                                            sizeof mad_priv->header,
+                                            mad_priv_dma_size(mad_priv),
                                             DMA_FROM_DEVICE);
-                        kmem_cache_free(ib_mad_cache, mad_priv);
+                        kfree(mad_priv);
                         dev_err(&qp_info->port_priv->device->dev,
                                 "ib_post_recv failed: %d\n", ret);
                         break;
@@ -2753,10 +2781,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
                 ib_dma_unmap_single(qp_info->port_priv->device,
                                     recv->header.mapping,
-                                    sizeof(struct ib_mad_private) -
-                                    sizeof(struct ib_mad_private_header),
+                                    mad_priv_dma_size(recv),
                                     DMA_FROM_DEVICE);
-                kmem_cache_free(ib_mad_cache, recv);
+                kfree(recv);
         }
 
         qp_info->recv_queue.count = 0;
@@ -3150,45 +3177,25 @@ static struct ib_client mad_client = {
 static int __init ib_mad_init_module(void)
 {
-        int ret;
-
         mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
         mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
 
         mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
         mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
 
-        ib_mad_cache = kmem_cache_create("ib_mad",
-                                         sizeof(struct ib_mad_private),
-                                         0,
-                                         SLAB_HWCACHE_ALIGN,
-                                         NULL);
-        if (!ib_mad_cache) {
-                pr_err("Couldn't create ib_mad cache\n");
-                ret = -ENOMEM;
-                goto error1;
-        }
-
         INIT_LIST_HEAD(&ib_mad_port_list);
 
         if (ib_register_client(&mad_client)) {
                 pr_err("Couldn't register ib_mad client\n");
-                ret = -EINVAL;
-                goto error2;
+                return -EINVAL;
         }
 
         return 0;
-
-error2:
-        kmem_cache_destroy(ib_mad_cache);
-error1:
-        return ret;
 }
 
 static void __exit ib_mad_cleanup_module(void)
 {
         ib_unregister_client(&mad_client);
-        kmem_cache_destroy(ib_mad_cache);
 }
 
 module_init(ib_mad_init_module);

drivers/infiniband/core/mad_priv.h

@@ -75,12 +75,9 @@ struct ib_mad_private_header {
 
 struct ib_mad_private {
         struct ib_mad_private_header header;
+        size_t mad_size;
         struct ib_grh grh;
-        union {
-                struct ib_mad mad;
-                struct ib_rmpp_mad rmpp_mad;
-                struct ib_smp smp;
-        } mad;
+        u8 mad[0];
 } __attribute__ ((packed));
 
 struct ib_rmpp_segment {