
mlx5: Add driver for Mellanox Connect-IB adapters

The driver comprises two kernel modules: mlx5_ib and mlx5_core.
This partitioning resembles what we have for mlx4, except that mlx5_ib
is the PCI device driver and not mlx5_core.

mlx5_core is essentially a library that provides general functionality
intended to be used by other Mellanox devices that will be introduced
in the future.  mlx5_ib fills the same role as any other hardware
device driver under drivers/infiniband/hw.

Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>

[ Merge in coccinelle fixes from Fengguang Wu <fengguang.wu@intel.com>.
  - Roland ]

Signed-off-by: Roland Dreier <roland@purestorage.com>
Eli Cohen 2013-07-07 17:25:49 +03:00 committed by Roland Dreier
parent 0134f16bc9
commit e126ba97db
45 changed files with 15779 additions and 0 deletions
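
The split is visible throughout the diff: mlx5_ib wraps its verbs-level
state around mlx5_core objects and defers every device command to the core
library. A minimal sketch of the pattern, reusing only identifiers that
appear in this commit (the function below is illustrative, not part of the
diff):

#include "mlx5_ib.h"

/* Illustrative only: struct mlx5_ib_dev embeds struct mlx5_core_dev as
* ->mdev, struct mlx5_ib_cq embeds struct mlx5_core_cq as ->mcq, and the
* firmware command itself is issued by the mlx5_core helper. */
static int sketch_destroy_cq(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
return mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
}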


@@ -5365,6 +5365,28 @@ W: http://linuxtv.org
S: Odd Fixes
F: drivers/media/radio/radio-miropcm20*

Mellanox MLX5 core VPI driver
M: Eli Cohen <eli@mellanox.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
T: git://openfabrics.org/~eli/connect-ib.git
S: Supported
F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/

Mellanox MLX5 IB driver
M: Eli Cohen <eli@mellanox.com>
L: linux-rdma@vger.kernel.org
W: http://www.mellanox.com
Q: http://patchwork.kernel.org/project/linux-rdma/list/
T: git://openfabrics.org/~eli/connect-ib.git
S: Supported
F: include/linux/mlx5/
F: drivers/infiniband/hw/mlx5/

MODULE SUPPORT
M: Rusty Russell <rusty@rustcorp.com.au>
S: Maintained


@@ -50,6 +50,7 @@ source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"


@@ -7,6 +7,7 @@ obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/


@@ -0,0 +1,10 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
depends on NETDEVICES && ETHERNET && PCI && X86
select NET_VENDOR_MELLANOX
select MLX5_CORE
---help---
This driver provides low-level InfiniBand support for
Mellanox Connect-IB PCI Express host channel adapters (HCAs).
This is required to use InfiniBand protocols such as
IP-over-IB or SRP with these devices.


@@ -0,0 +1,3 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o


@@ -0,0 +1,92 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "mlx5_ib.h"
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
struct mlx5_ib_ah *ah)
{
if (ah_attr->ah_flags & IB_AH_GRH) {
memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16);
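/* grh_gid_fl layout: flow label in bits 0-19, SGID index in bits
* 20-27, bit 30 set when a GRH is present (mlx5_ib_query_ah below
* decodes the same fields) */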
ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label |
(1 << 30) |
ah_attr->grh.sgid_index << 20);
ah->av.hop_limit = ah_attr->grh.hop_limit;
ah->av.tclass = ah_attr->grh.traffic_class;
}
ah->av.rlid = cpu_to_be16(ah_attr->dlid);
ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
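/* static rate in the high nibble, SL in the low nibble */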
ah->av.stat_rate_sl = (ah_attr->static_rate << 4) | (ah_attr->sl & 0xf);
return &ah->ibah;
}
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
struct mlx5_ib_ah *ah;
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
return create_ib_ah(ah_attr, ah); /* never fails */
}
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
struct mlx5_ib_ah *ah = to_mah(ibah);
u32 tmp;
memset(ah_attr, 0, sizeof(*ah_attr));
tmp = be32_to_cpu(ah->av.grh_gid_fl);
if (tmp & (1 << 30)) {
ah_attr->ah_flags = IB_AH_GRH;
ah_attr->grh.sgid_index = (tmp >> 20) & 0xff;
ah_attr->grh.flow_label = tmp & 0xfffff;
memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16);
ah_attr->grh.hop_limit = ah->av.hop_limit;
ah_attr->grh.traffic_class = ah->av.tclass;
}
ah_attr->dlid = be16_to_cpu(ah->av.rlid);
ah_attr->static_rate = ah->av.stat_rate_sl >> 4;
ah_attr->sl = ah->av.stat_rate_sl & 0xf;
return 0;
}
int mlx5_ib_destroy_ah(struct ib_ah *ah)
{
kfree(to_mah(ah));
return 0;
}


@@ -0,0 +1,843 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct ib_cq *ibcq = &cq->ibcq;
struct ib_event event;
if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
type, mcq->cqn);
return;
}
if (ibcq->event_handler) {
event.device = &dev->ib_dev;
event.event = IB_EVENT_CQ_ERR;
event.element.cq = ibcq;
ibcq->event_handler(&event, ibcq->cq_context);
}
}
static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
return mlx5_buf_offset(&buf->buf, n * size);
}
static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
struct mlx5_cqe64 *cqe64;
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
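/* A CQE is software-owned when its ownership bit matches the parity
* of the consumer index's passes around the ring; hardware-owned
* entries return NULL. */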
return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^
!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
return get_sw_cqe(cq, cq->mcq.cons_index);
}
static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
switch (wq->wr_data[idx]) {
case MLX5_IB_WR_UMR:
return 0;
case IB_WR_LOCAL_INV:
return IB_WC_LOCAL_INV;
case IB_WR_FAST_REG_MR:
return IB_WC_FAST_REG_MR;
default:
pr_warn("unknown completion status\n");
return 0;
}
}
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
struct mlx5_ib_wq *wq, int idx)
{
wc->wc_flags = 0;
switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
case MLX5_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
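/* fall through */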
case MLX5_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX5_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
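/* fall through */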
case MLX5_OPCODE_SEND:
case MLX5_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
break;
case MLX5_OPCODE_RDMA_READ:
wc->opcode = IB_WC_RDMA_READ;
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
break;
case MLX5_OPCODE_ATOMIC_CS:
wc->opcode = IB_WC_COMP_SWAP;
wc->byte_len = 8;
break;
case MLX5_OPCODE_ATOMIC_FA:
wc->opcode = IB_WC_FETCH_ADD;
wc->byte_len = 8;
break;
case MLX5_OPCODE_ATOMIC_MASKED_CS:
wc->opcode = IB_WC_MASKED_COMP_SWAP;
wc->byte_len = 8;
break;
case MLX5_OPCODE_ATOMIC_MASKED_FA:
wc->opcode = IB_WC_MASKED_FETCH_ADD;
wc->byte_len = 8;
break;
case MLX5_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
case MLX5_OPCODE_UMR:
wc->opcode = get_umr_comp(wq, idx);
break;
}
}
enum {
MLX5_GRH_IN_BUFFER = 1,
MLX5_GRH_IN_CQE = 2,
};
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
struct mlx5_ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
struct mlx5_ib_srq *srq;
struct mlx5_ib_wq *wq;
u16 wqe_ctr;
u8 g;
if (qp->ibqp.srq || qp->ibqp.xrcd) {
struct mlx5_core_srq *msrq = NULL;
if (qp->ibqp.xrcd) {
msrq = mlx5_core_get_srq(&dev->mdev,
be32_to_cpu(cqe->srqn));
srq = to_mibsrq(msrq);
} else {
srq = to_msrq(qp->ibqp.srq);
}
if (srq) {
wqe_ctr = be16_to_cpu(cqe->wqe_counter);
wc->wr_id = srq->wrid[wqe_ctr];
mlx5_ib_free_srq_wqe(srq, wqe_ctr);
if (msrq && atomic_dec_and_test(&msrq->refcount))
complete(&msrq->free);
}
} else {
wq = &qp->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
}
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
switch (cqe->op_own >> 4) {
case MLX5_CQE_RESP_WR_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
wc->ex.imm_data = cqe->imm_inval_pkey;
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
wc->wc_flags = 0;
break;
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
wc->ex.imm_data = cqe->imm_inval_pkey;
break;
case MLX5_CQE_RESP_SEND_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
break;
}
wc->slid = be16_to_cpu(cqe->slid);
wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
wc->dlid_path_bits = cqe->ml_path;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
}
static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
__be32 *p = (__be32 *)cqe;
int i;
mlx5_ib_warn(dev, "dump error cqe\n");
for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
be32_to_cpu(p[1]), be32_to_cpu(p[2]),
be32_to_cpu(p[3]));
}
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
struct mlx5_err_cqe *cqe,
struct ib_wc *wc)
{
int dump = 1;
switch (cqe->syndrome) {
case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
wc->status = IB_WC_LOC_LEN_ERR;
break;
case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
wc->status = IB_WC_LOC_QP_OP_ERR;
break;
case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
wc->status = IB_WC_LOC_PROT_ERR;
break;
case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
dump = 0;
wc->status = IB_WC_WR_FLUSH_ERR;
break;
case MLX5_CQE_SYNDROME_MW_BIND_ERR:
wc->status = IB_WC_MW_BIND_ERR;
break;
case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
wc->status = IB_WC_BAD_RESP_ERR;
break;
case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
wc->status = IB_WC_LOC_ACCESS_ERR;
break;
case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
wc->status = IB_WC_REM_INV_REQ_ERR;
break;
case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
wc->status = IB_WC_REM_ACCESS_ERR;
break;
case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
wc->status = IB_WC_REM_OP_ERR;
break;
case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
wc->status = IB_WC_RETRY_EXC_ERR;
dump = 0;
break;
case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
wc->status = IB_WC_RNR_RETRY_EXC_ERR;
dump = 0;
break;
case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
wc->status = IB_WC_REM_ABORT_ERR;
break;
default:
wc->status = IB_WC_GENERAL_ERR;
break;
}
wc->vendor_err = cqe->vendor_err_synd;
if (dump)
dump_cqe(dev, cqe);
}
static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
/* TBD: waiting decision
*/
return 0;
}
static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
struct mlx5_wqe_data_seg *dpseg;
void *addr;
dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg) +
sizeof(struct mlx5_wqe_atomic_seg);
addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
return addr;
}
static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
uint16_t idx)
{
void *addr;
int byte_count;
int i;
if (!is_atomic_response(qp, idx))
return;
byte_count = be32_to_cpu(cqe64->byte_cnt);
addr = mlx5_get_atomic_laddr(qp, idx);
if (byte_count == 4) {
*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
} else {
for (i = 0; i < byte_count; i += 8) {
*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
addr += 8;
}
}
return;
}
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
u16 tail, u16 head)
{
int idx;
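/* Walk the send work request list from the last polled WQE up to the
* one this CQE completes, converting any atomic response data to host
* byte order in place. */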
do {
idx = tail & (qp->sq.wqe_cnt - 1);
handle_atomic(qp, cqe64, idx);
if (idx == head)
break;
tail = qp->sq.w_list[idx].next;
} while (1);
tail = qp->sq.w_list[idx].next;
qp->sq.last_poll = tail;
}
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
{
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct mlx5_err_cqe *err_cqe;
struct mlx5_cqe64 *cqe64;
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
uint8_t opcode;
uint32_t qpn;
u16 wqe_ctr;
void *cqe;
int idx;
cqe = next_cqe_sw(cq);
if (!cqe)
return -EAGAIN;
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
++cq->mcq.cons_index;
/* Make sure we read CQ entry contents after we've checked the
* ownership bit.
*/
rmb();
/* TBD: resize CQ */
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
/* We do not have to take the QP table lock here,
* because CQs will be locked while QPs are removed
* from the table.
*/
mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
if (unlikely(!mqp)) {
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
cq->mcq.cqn, qpn);
return -EINVAL;
}
*cur_qp = to_mibqp(mqp);
}
wc->qp = &(*cur_qp)->ibqp;
opcode = cqe64->op_own >> 4;
switch (opcode) {
case MLX5_CQE_REQ:
wq = &(*cur_qp)->sq;
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
idx = wqe_ctr & (wq->wqe_cnt - 1);
handle_good_req(wc, cqe64, wq, idx);
handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
wc->wr_id = wq->wrid[idx];
wq->tail = wq->wqe_head[idx] + 1;
wc->status = IB_WC_SUCCESS;
break;
case MLX5_CQE_RESP_WR_IMM:
case MLX5_CQE_RESP_SEND:
case MLX5_CQE_RESP_SEND_IMM:
case MLX5_CQE_RESP_SEND_INV:
handle_responder(wc, cqe64, *cur_qp);
wc->status = IB_WC_SUCCESS;
break;
case MLX5_CQE_RESIZE_CQ:
break;
case MLX5_CQE_REQ_ERR:
case MLX5_CQE_RESP_ERR:
err_cqe = (struct mlx5_err_cqe *)cqe64;
mlx5_handle_error_cqe(dev, err_cqe, wc);
mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
opcode == MLX5_CQE_REQ_ERR ?
"Requestor" : "Responder", cq->mcq.cqn);
mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
err_cqe->syndrome, err_cqe->vendor_err_synd);
if (opcode == MLX5_CQE_REQ_ERR) {
wq = &(*cur_qp)->sq;
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
idx = wqe_ctr & (wq->wqe_cnt - 1);
wc->wr_id = wq->wrid[idx];
wq->tail = wq->wqe_head[idx] + 1;
} else {
struct mlx5_ib_srq *srq;
if ((*cur_qp)->ibqp.srq) {
srq = to_msrq((*cur_qp)->ibqp.srq);
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
wc->wr_id = srq->wrid[wqe_ctr];
mlx5_ib_free_srq_wqe(srq, wqe_ctr);
} else {
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
}
}
break;
}
return 0;
}
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_ib_qp *cur_qp = NULL;
unsigned long flags;
int npolled;
int err = 0;
spin_lock_irqsave(&cq->lock, flags);
for (npolled = 0; npolled < num_entries; npolled++) {
err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
if (err)
break;
}
if (npolled)
mlx5_cq_set_ci(&cq->mcq);
spin_unlock_irqrestore(&cq->lock, flags);
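/* -EAGAIN from mlx5_poll_one means the CQ drained before num_entries;
* that is not an error to the caller. */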
if (err == 0 || err == -EAGAIN)
return npolled;
else
return err;
}
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
mlx5_cq_arm(&to_mcq(ibcq)->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
return 0;
}
static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
int nent, int cqe_size)
{
int err;
err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
PAGE_SIZE * 2, &buf->buf);
if (err)
return err;
buf->cqe_size = cqe_size;
return 0;
}
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
mlx5_buf_free(&dev->mdev, &buf->buf);
}
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, struct mlx5_create_cq_mbox_in **cqb,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd;
int page_shift;
int npages;
int ncont;
int err;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
return -EINVAL;
*cqe_size = ucmd.cqe_size;
cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
entries * ucmd.cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
}
err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
&cq->db);
if (err)
goto err_umem;
mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
&ncont, NULL);
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_db;
}
mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
(*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
*index = to_mucontext(context)->uuari.uars[0].index;
return 0;
err_db:
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
err_umem:
ib_umem_release(cq->buf.umem);
return err;
}
static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
ib_umem_release(cq->buf.umem);
}
static void init_cq_buf(struct mlx5_ib_cq *cq, int nent)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
for (i = 0; i < nent; i++) {
cqe = get_cqe(cq, i);
cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64;
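/* 0xf1 = invalid opcode in the high nibble plus the ownership bit,
* so hardware owns every CQE until it writes a completion (see
* get_sw_cqe) */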
cqe64->op_own = 0xf1;
}
}
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size,
struct mlx5_create_cq_mbox_in **cqb,
int *index, int *inlen)
{
int err;
err = mlx5_db_alloc(&dev->mdev, &cq->db);
if (err)
return err;
cq->mcq.set_ci_db = cq->db.db;
cq->mcq.arm_db = cq->db.db + 1;
*cq->mcq.set_ci_db = 0;
*cq->mcq.arm_db = 0;
cq->mcq.cqe_sz = cqe_size;
err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
if (err)
goto err_db;
init_cq_buf(cq, entries);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen);
if (!*cqb) {
err = -ENOMEM;
goto err_buf;
}
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT;
*index = dev->mdev.priv.uuari.uars[0].index;
return 0;
err_buf:
free_cq_buf(dev, &cq->buf);
err_db:
mlx5_db_free(&dev->mdev, &cq->db);
return err;
}
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
free_cq_buf(dev, &cq->buf);
mlx5_db_free(&dev->mdev, &cq->db);
}
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
int vector, struct ib_ucontext *context,
struct ib_udata *udata)
{
struct mlx5_create_cq_mbox_in *cqb = NULL;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq;
int uninitialized_var(index);
int uninitialized_var(inlen);
int cqe_size;
int irqn;
int eqn;
int err;
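/* The CQ size is programmed as a log2, so round up to a power of two;
* one entry is reserved, and ibcq.cqe below reports entries - 1. */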
entries = roundup_pow_of_two(entries + 1);
if (entries < 1 || entries > dev->mdev.caps.max_cqes)
return ERR_PTR(-EINVAL);
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq)
return ERR_PTR(-ENOMEM);
cq->ibcq.cqe = entries - 1;
mutex_init(&cq->resize_mutex);
spin_lock_init(&cq->lock);
cq->resize_buf = NULL;
cq->resize_umem = NULL;
if (context) {
err = create_cq_user(dev, udata, context, cq, entries,
&cqb, &cqe_size, &index, &inlen);
if (err)
goto err_create;
} else {
/* for now choose 64 bytes till we have a proper interface */
cqe_size = 64;
err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
&index, &inlen);
if (err)
goto err_create;
}
cq->cqe_size = cqe_size;
cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
if (err)
goto err_cqb;
cqb->ctx.c_eqn = cpu_to_be16(eqn);
cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
if (err)
goto err_cqb;
mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
cq->mcq.irqn = irqn;
cq->mcq.comp = mlx5_ib_cq_comp;
cq->mcq.event = mlx5_ib_cq_event;
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
err = -EFAULT;
goto err_cmd;
}
mlx5_vfree(cqb);
return &cq->ibcq;
err_cmd:
mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
err_cqb:
mlx5_vfree(cqb);
if (context)
destroy_cq_user(cq, context);
else
destroy_cq_kernel(dev, cq);
err_create:
kfree(cq);
return ERR_PTR(err);
}
int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
struct ib_ucontext *context = NULL;
if (cq->uobject)
context = cq->uobject->context;
mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
if (context)
destroy_cq_user(mcq, context);
else
destroy_cq_kernel(dev, mcq);
kfree(mcq);
return 0;
}
static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq,
u32 rsn)
{
u32 lrsn;
if (srq)
lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff;
else
lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
return rsn == lrsn;
}
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
struct mlx5_cqe64 *cqe64, *dest64;
void *cqe, *dest;
u32 prod_index;
int nfreed = 0;
u8 owner_bit;
if (!cq)
return;
/* First we need to find the current producer index, so we
* know where to start cleaning from. It doesn't matter if HW
* adds new entries after this loop -- the QP we're worried
* about is already in RESET, so the new entries won't come
* from our QP and therefore don't need to be checked.
*/
for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
break;
/* Now sweep backwards through the CQ, removing CQ entries
* that match our QP by copying older entries on top of them.
*/
while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
if (is_equal_rsn(cqe64, srq, rsn)) {
if (srq)
mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
++nfreed;
} else if (nfreed) {
dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
memcpy(dest, cqe, cq->mcq.cqe_sz);
dest64->op_own = owner_bit |
(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
}
}
if (nfreed) {
cq->mcq.cons_index += nfreed;
/* Make sure update of buffer contents is done before
* updating consumer index.
*/
wmb();
mlx5_cq_set_ci(&cq->mcq);
}
}
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
if (!cq)
return;
spin_lock_irq(&cq->lock);
__mlx5_ib_cq_clean(cq, qpn, srq);
spin_unlock_irq(&cq->lock);
}
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
return -ENOSYS;
}
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
return -ENOSYS;
}
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
struct mlx5_ib_cq *cq;
if (!ibcq)
return 128;
cq = to_mcq(ibcq);
return cq->cqe_size;
}


@@ -0,0 +1,100 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kref.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
struct mlx5_ib_user_db_page {
struct list_head list;
struct ib_umem *umem;
unsigned long user_virt;
int refcnt;
};
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db)
{
struct mlx5_ib_user_db_page *page;
struct ib_umem_chunk *chunk;
int err = 0;
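/* Doorbell records share pinned user pages: if this page is already
* on the context's list, reuse the mapping and bump its refcount. */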
mutex_lock(&context->db_page_mutex);
list_for_each_entry(page, &context->db_page_list, list)
if (page->user_virt == (virt & PAGE_MASK))
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
if (!page) {
err = -ENOMEM;
goto out;
}
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
goto out;
}
list_add(&page->list, &context->db_page_list);
found:
chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
out:
mutex_unlock(&context->db_page_mutex);
return err;
}
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
{
mutex_lock(&context->db_page_mutex);
if (!--db->u.user_page->refcnt) {
list_del(&db->u.user_page->list);
ib_umem_release(db->u.user_page->umem);
kfree(db->u.user_page);
}
mutex_unlock(&context->db_page_mutex);
}


@@ -0,0 +1,139 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/cmd.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include "mlx5_ib.h"
enum {
MLX5_IB_VENDOR_CLASS1 = 0x9,
MLX5_IB_VENDOR_CLASS2 = 0xa
};
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad)
{
u8 op_modifier = 0;
/* Key check traps can't be generated unless we have in_wc to
* tell us where to send the trap.
*/
if (ignore_mkey || !in_wc)
op_modifier |= 0x1;
if (ignore_bkey || !in_wc)
op_modifier |= 0x2;
return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
}
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
u16 slid;
int err;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
return IB_MAD_RESULT_SUCCESS;
/* Don't process SMInfo queries -- the SMA can't handle them.
*/
if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
return IB_MAD_RESULT_SUCCESS;
} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
return IB_MAD_RESULT_SUCCESS;
} else {
return IB_MAD_RESULT_SUCCESS;
}
err = mlx5_MAD_IFC(to_mdev(ibdev),
mad_flags & IB_MAD_IGNORE_MKEY,
mad_flags & IB_MAD_IGNORE_BKEY,
port_num, in_wc, in_grh, in_mad, out_mad);
if (err)
return IB_MAD_RESULT_FAILURE;
/* set return bit in status of directed route responses */
if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
/* no response for trap repress */
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
u16 packet_error;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
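/* ignore_mkey and ignore_bkey are both set; the MAD is issued via
* port 1 while attr_mod above selects the port being queried */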
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
packet_error = be16_to_cpu(out_mad->status);
dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
out:
kfree(in_mad);
kfree(out_mad);
return err;
}

File diff suppressed because it is too large


@@ -0,0 +1,162 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
* @count: number of PAGE_SIZE pages covered by umem
* @shift: page shift for the compound pages found in the region
* @ncont: number of compound pages
* @order: log2 of the number of compound pages
*/
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int *ncont, int *order)
{
struct ib_umem_chunk *chunk;
unsigned long tmp;
unsigned long m;
int i, j, k;
u64 base = 0;
int p = 0;
int skip;
int mask;
u64 len;
u64 pfn;
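/* Pick the largest power-of-two page size the region can be mapped
* with: start from the alignment of the virtual address and shrink it
* whenever a physical run starts or breaks on a smaller boundary. */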
addr = addr >> PAGE_SHIFT;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, sizeof(tmp));
skip = 1 << m;
mask = skip - 1;
i = 0;
list_for_each_entry(chunk, &umem->chunk_list, list)
for (j = 0; j < chunk->nmap; j++) {
len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT;
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
m = min(m, find_first_bit(&tmp, sizeof(tmp)));
skip = 1 << m;
mask = skip - 1;
base = pfn;
p = 0;
} else {
if (base + p != pfn) {
tmp = (unsigned long)p;
m = find_first_bit(&tmp, sizeof(tmp));
skip = 1 << m;
mask = skip - 1;
base = pfn;
p = 0;
}
}
p++;
i++;
}
}
if (i) {
m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
if (order)
*order = ilog2(roundup_pow_of_two(i) >> m);
*ncont = DIV_ROUND_UP(i, (1 << m));
} else {
m = 0;
if (order)
*order = 0;
*ncont = 0;
}
*shift = PAGE_SHIFT + m;
*count = i;
}
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, __be64 *pas, int umr)
{
int shift = page_shift - PAGE_SHIFT;
int mask = (1 << shift) - 1;
struct ib_umem_chunk *chunk;
int i, j, k;
u64 cur = 0;
u64 base;
int len;
i = 0;
list_for_each_entry(chunk, &umem->chunk_list, list)
for (j = 0; j < chunk->nmap; j++) {
len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
base = sg_dma_address(&chunk->page_list[j]);
for (k = 0; k < len; k++) {
if (!(i & mask)) {
cur = base + (k << PAGE_SHIFT);
if (umr)
cur |= 3;
pas[i >> shift] = cpu_to_be64(cur);
mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
i >> shift, be64_to_cpu(pas[i >> shift]));
} else
mlx5_ib_dbg(dev, "=====> 0x%llx\n",
base + (k << PAGE_SHIFT));
i++;
}
}
}
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
u64 page_size;
u64 page_mask;
u64 off_size;
u64 off_mask;
u64 buf_off;
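/* The offset is expressed in units of page_size / 64 (off_size), so
* the byte offset within the page must be a multiple of that quantum. */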
page_size = 1 << page_shift;
page_mask = page_size - 1;
buf_off = addr & page_mask;
off_size = page_size >> 6;
off_mask = off_size - 1;
if (buf_off & off_mask)
return -EINVAL;
*offset = buf_off >> ilog2(off_size);
return 0;
}


@@ -0,0 +1,545 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_IB_H
#define MLX5_IB_H
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#define mlx5_ib_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
__LINE__, current->pid, ##arg)
#define mlx5_ib_err(dev, format, arg...) \
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
__LINE__, current->pid, ##arg)
#define mlx5_ib_warn(dev, format, arg...) \
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
__LINE__, current->pid, ##arg)
enum {
MLX5_IB_MMAP_CMD_SHIFT = 8,
MLX5_IB_MMAP_CMD_MASK = 0xff,
};
enum mlx5_ib_mmap_cmd {
MLX5_IB_MMAP_REGULAR_PAGE = 0,
MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1, /* always last */
};
enum {
MLX5_RES_SCAT_DATA32_CQE = 0x1,
MLX5_RES_SCAT_DATA64_CQE = 0x2,
MLX5_REQ_SCAT_DATA32_CQE = 0x11,
MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};
enum mlx5_ib_latency_class {
MLX5_IB_LATENCY_CLASS_LOW,
MLX5_IB_LATENCY_CLASS_MEDIUM,
MLX5_IB_LATENCY_CLASS_HIGH,
MLX5_IB_LATENCY_CLASS_FAST_PATH
};
enum mlx5_ib_mad_ifc_flags {
MLX5_MAD_IFC_IGNORE_MKEY = 1,
MLX5_MAD_IFC_IGNORE_BKEY = 2,
MLX5_MAD_IFC_NET_VIEW = 4,
};
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
struct list_head db_page_list;
/* protect doorbell record alloc/free
*/
struct mutex db_page_mutex;
struct mlx5_uuar_info uuari;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
struct mlx5_ib_pd {
struct ib_pd ibpd;
u32 pdn;
u32 pa_lkey;
};
/* Use macros here so that we don't have to duplicate
* enum ib_send_flags and enum ib_qp_type for low-level driver
*/
#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
struct wr_list {
u16 opcode;
u16 next;
};
struct mlx5_ib_wq {
u64 *wrid;
u32 *wr_data;
struct wr_list *w_list;
unsigned *wqe_head;
u16 unsig_count;
/* serialize post to the work queue
*/
spinlock_t lock;
int wqe_cnt;
int max_post;
int max_gs;
int offset;
int wqe_shift;
unsigned head;
unsigned tail;
u16 cur_post;
u16 last_poll;
void *qend;
};
enum {
MLX5_QP_USER,
MLX5_QP_KERNEL,
MLX5_QP_EMPTY
};
struct mlx5_ib_qp {
struct ib_qp ibqp;
struct mlx5_core_qp mqp;
struct mlx5_buf buf;
struct mlx5_db db;
struct mlx5_ib_wq rq;
u32 doorbell_qpn;
u8 sq_signal_bits;
u8 fm_cache;
int sq_max_wqes_per_wr;
int sq_spare_wqes;
struct mlx5_ib_wq sq;
struct ib_umem *umem;
int buf_size;
/* serialize qp state modifications
*/
struct mutex mutex;
u16 xrcdn;
u32 flags;
u8 port;
u8 alt_port;
u8 atomic_rd_en;
u8 resp_depth;
u8 state;
int mlx_type;
int wq_sig;
int scat_cqe;
int max_inline_data;
struct mlx5_bf *bf;
int has_rq;
/* only for user space QPs. For kernel
* we have it from the bf object
*/
int uuarn;
int create_type;
u32 pa_lkey;
};
struct mlx5_ib_cq_buf {
struct mlx5_buf buf;
struct ib_umem *umem;
int cqe_size;
};
enum mlx5_ib_qp_flags {
MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0,
MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
};
struct mlx5_shared_mr_info {
int mr_id;
struct ib_umem *umem;
};
struct mlx5_ib_cq {
struct ib_cq ibcq;
struct mlx5_core_cq mcq;
struct mlx5_ib_cq_buf buf;
struct mlx5_db db;
/* serialize access to the CQ
*/
spinlock_t lock;
/* protect resize cq
*/
struct mutex resize_mutex;
struct mlx5_ib_cq_resize *resize_buf;
struct ib_umem *resize_umem;
int cqe_size;
};
struct mlx5_ib_srq {
struct ib_srq ibsrq;
struct mlx5_core_srq msrq;
struct mlx5_buf buf;
struct mlx5_db db;
u64 *wrid;
/* protect SRQ handling
*/
spinlock_t lock;
int head;
int tail;
u16 wqe_ctr;
struct ib_umem *umem;
/* serialize arming a SRQ
*/
struct mutex mutex;
int wq_sig;
};
struct mlx5_ib_xrcd {
struct ib_xrcd ibxrcd;
u32 xrcdn;
};
struct mlx5_ib_mr {
struct ib_mr ibmr;
struct mlx5_core_mr mmr;
struct ib_umem *umem;
struct mlx5_shared_mr_info *smr_info;
struct list_head list;
int order;
int umred;
__be64 *pas;
dma_addr_t dma;
int npages;
struct completion done;
enum ib_wc_status status;
};
struct mlx5_ib_fast_reg_page_list {
struct ib_fast_reg_page_list ibfrpl;
__be64 *mapped_page_list;
dma_addr_t map;
};
struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
struct ib_mr *mr;
/* control access to UMR QP
*/
struct semaphore sem;
};
enum {
MLX5_FMR_INVALID,
MLX5_FMR_VALID,
MLX5_FMR_BUSY,
};
struct mlx5_ib_fmr {
struct ib_fmr ibfmr;
struct mlx5_core_mr mr;
int access_flags;
int state;
/* protect fmr state
*/
spinlock_t lock;
u64 wrid;
struct ib_send_wr wr[2];
u8 page_shift;
struct ib_fast_reg_page_list page_list;
};
struct mlx5_cache_ent {
struct list_head head;
/* sync access to the cache entry
*/
spinlock_t lock;
struct dentry *dir;
char name[4];
u32 order;
u32 size;
u32 cur;
u32 miss;
u32 limit;
struct dentry *fsize;
struct dentry *fcur;
struct dentry *fmiss;
struct dentry *flimit;
struct mlx5_ib_dev *dev;
struct work_struct work;
struct delayed_work dwork;
};
struct mlx5_mr_cache {
struct workqueue_struct *wq;
struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
int stopped;
struct dentry *root;
unsigned long last_add;
};
struct mlx5_ib_resources {
struct ib_cq *c0;
struct ib_xrcd *x0;
struct ib_xrcd *x1;
struct ib_pd *p0;
struct ib_srq *s0;
};
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev mdev;
MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
struct list_head eqs_list;
int num_ports;
int num_comp_vectors;
/* serialize update of capability mask
*/
struct mutex cap_mask_mutex;
bool ib_active;
struct umr_common umrc;
/* sync used page count stats
*/
spinlock_t mr_lock;
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
return container_of(mcq, struct mlx5_ib_cq, mcq);
}
static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}
static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}
static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
}
static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
return container_of(mqp, struct mlx5_ib_qp, mqp);
}
static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}
static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}
static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}
static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
return container_of(msrq, struct mlx5_ib_srq, msrq);
}
static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}
static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
}
struct mlx5_ib_ah {
struct ib_ah ibah;
struct mlx5_av av;
};
static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
return container_of(ibah, struct mlx5_ib_ah, ibah);
}
static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
{
return container_of(dev, struct mlx5_ib_dev, mdev);
}
static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
{
return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
struct mlx5_ib_ah *ah);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
int vector, struct ib_ucontext *context,
struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
int page_list_len);
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
struct ib_fmr_attr *fmr_attr);
int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
int npages, u64 iova);
int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
int *ncont, int *order);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, __be64 *pas, int umr);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
mad->class_version = 1;
mad->method = IB_MGMT_METHOD_GET;
}
static inline u8 convert_access(int acc)
{
return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
(acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
(acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
MLX5_PERM_LOCAL_READ;
}
#endif /* MLX5_IB_H */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,473 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"
/* not supported currently */
static int srq_signature;
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
struct ib_event event;
struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
if (ibsrq->event_handler) {
event.device = ibsrq->device;
event.element.srq = ibsrq;
switch (type) {
case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
event.event = IB_EVENT_SRQ_LIMIT_REACHED;
break;
case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
event.event = IB_EVENT_SRQ_ERR;
break;
default:
pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
type, srq->srqn);
return;
}
ibsrq->event_handler(&event, ibsrq->srq_context);
}
}
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in,
struct ib_udata *udata, int buf_size, int *inlen)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd;
int err;
int npages;
int page_shift;
int ncont;
u32 offset;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
mlx5_ib_dbg(dev, "failed copy udata\n");
return -EFAULT;
}
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
0, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
return err;
}
mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
&page_shift, &ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
&offset);
if (err) {
mlx5_ib_warn(dev, "bad offset\n");
goto err_umem;
}
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
*in = mlx5_vzalloc(*inlen);
if (!(*in)) {
err = -ENOMEM;
goto err_umem;
}
mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
ucmd.db_addr, &srq->db);
if (err) {
mlx5_ib_dbg(dev, "map doorbell failed\n");
goto err_in;
}
(*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
return 0;
err_in:
mlx5_vfree(*in);
err_umem:
ib_umem_release(srq->umem);
return err;
}
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in, int buf_size,
int *inlen)
{
int err;
int i;
struct mlx5_wqe_srq_next_seg *next;
int page_shift;
int npages;
err = mlx5_db_alloc(&dev->mdev, &srq->db);
if (err) {
mlx5_ib_warn(dev, "alloc dbell rec failed\n");
return err;
}
*srq->db.db = 0;
if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
mlx5_ib_dbg(dev, "buf alloc failed\n");
err = -ENOMEM;
goto err_db;
}
page_shift = srq->buf.page_shift;
srq->head = 0;
srq->tail = srq->msrq.max - 1;
srq->wqe_ctr = 0;
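/* Chain all WQEs into a circular free list via next_wqe_index. */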
for (i = 0; i < srq->msrq.max; i++) {
next = get_wqe(srq, i);
next->next_wqe_index =
cpu_to_be16((i + 1) & (srq->msrq.max - 1));
}
npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
buf_size, page_shift, srq->buf.npages, npages);
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
*in = mlx5_vzalloc(*inlen);
if (!*in) {
err = -ENOMEM;
goto err_buf;
}
mlx5_fill_page_array(&srq->buf, (*in)->pas);
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
(unsigned long)(srq->msrq.max * sizeof(u64)));
err = -ENOMEM;
goto err_in;
}
srq->wq_sig = !!srq_signature;
(*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
return 0;
err_in:
mlx5_vfree(*in);
err_buf:
mlx5_buf_free(&dev->mdev, &srq->buf);
err_db:
mlx5_db_free(&dev->mdev, &srq->db);
return err;
}
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
ib_umem_release(srq->umem);
}
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
kfree(srq->wrid);
mlx5_buf_free(&dev->mdev, &srq->buf);
mlx5_db_free(&dev->mdev, &srq->db);
}
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *init_attr,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_srq *srq;
int desc_size;
int buf_size;
int err;
struct mlx5_create_srq_mbox_in *uninitialized_var(in);
int uninitialized_var(inlen);
int is_xrc;
u32 flgs, xrcdn;
/* Sanity check SRQ size before proceeding */
if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) {
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
init_attr->attr.max_wr,
dev->mdev.caps.max_srq_wqes);
return ERR_PTR(-EINVAL);
}
srq = kmalloc(sizeof(*srq), GFP_KERNEL);
if (!srq)
return ERR_PTR(-ENOMEM);
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
srq->msrq.max_gs = init_attr->attr.max_sge;
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
desc_size = roundup_pow_of_two(desc_size);
desc_size = max_t(int, 32, desc_size);
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
else
err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
pd->uobject ? "user" : "kernel", err);
goto err_srq;
}
is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
in->ctx.state_log_sz = ilog2(srq->msrq.max);
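/* Editor's note (assumed bit layout, inferred from the expression below):
 * the byte assembled here is shifted into bits 31:24 of ctx.flags_xrcd,
 * with log2(WQE stride) - 4 in the low bits, the XRC flag at bit 5 and
 * the WQ-signature flag at bit 7 of that byte.
 */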
flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
xrcdn = 0;
if (is_xrc) {
xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
} else if (init_attr->srq_type == IB_SRQT_BASIC) {
xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
}
in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen);
mlx5_vfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_srq;
}
mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
srq->msrq.event = mlx5_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
if (pd->uobject && ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
mlx5_ib_dbg(dev, "copy to user failed\n");
err = -EFAULT;
goto err_core;
}
init_attr->attr.max_wr = srq->msrq.max - 1;
return &srq->ibsrq;
err_core:
mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
if (pd->uobject)
destroy_srq_user(pd, srq);
else
destroy_srq_kernel(dev, srq);
err_srq:
kfree(srq);
return ERR_PTR(err);
}
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
int ret;
/* We don't support resizing SRQs yet */
if (attr_mask & IB_SRQ_MAX_WR)
return -EINVAL;
if (attr_mask & IB_SRQ_LIMIT) {
if (attr->srq_limit >= srq->msrq.max)
return -EINVAL;
mutex_lock(&srq->mutex);
ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1);
mutex_unlock(&srq->mutex);
if (ret)
return ret;
}
return 0;
}
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
int ret;
struct mlx5_query_srq_mbox_out *out;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
return -ENOMEM;
ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out);
if (ret)
goto out_box;
srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
srq_attr->max_wr = srq->msrq.max - 1;
srq_attr->max_sge = srq->msrq.max_gs;
out_box:
kfree(out);
return ret;
}
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
struct mlx5_ib_dev *dev = to_mdev(srq->device);
struct mlx5_ib_srq *msrq = to_msrq(srq);
mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq);
if (srq->uobject) {
mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
ib_umem_release(msrq->umem);
} else {
kfree(msrq->wrid);
mlx5_buf_free(&dev->mdev, &msrq->buf);
mlx5_db_free(&dev->mdev, &msrq->db);
}
kfree(srq);
return 0;
}
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
struct mlx5_wqe_srq_next_seg *next;
/* always called with interrupts disabled. */
spin_lock(&srq->lock);
next = get_wqe(srq, srq->tail);
next->next_wqe_index = cpu_to_be16(wqe_index);
srq->tail = wqe_index;
spin_unlock(&srq->lock);
}
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next;
struct mlx5_wqe_data_seg *scat;
unsigned long flags;
int err = 0;
int nreq;
int i;
spin_lock_irqsave(&srq->lock, flags);
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
err = -EINVAL;
*bad_wr = wr;
break;
}
if (unlikely(srq->head == srq->tail)) {
err = -ENOMEM;
*bad_wr = wr;
break;
}
srq->wrid[srq->head] = wr->wr_id;
next = get_wqe(srq, srq->head);
srq->head = be16_to_cpu(next->next_wqe_index);
scat = (struct mlx5_wqe_data_seg *)(next + 1);
for (i = 0; i < wr->num_sge; i++) {
scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
}
if (i < srq->msrq.max_avail_gather) {
scat[i].byte_count = 0;
scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
scat[i].addr = 0;
}
}
if (likely(nreq)) {
srq->wqe_ctr += nreq;
/* Make sure that descriptors are written before
* doorbell record.
*/
wmb();
*srq->db.db = cpu_to_be32(srq->wqe_ctr);
}
spin_unlock_irqrestore(&srq->lock, flags);
return err;
}
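
/*
 * Editor's note: a minimal, hypothetical sketch of how a kernel ULP could
 * feed this SRQ through the generic verbs entry point that dispatches to
 * mlx5_ib_post_srq_recv() above. The wr_id, address, length and lkey
 * values are placeholders, not part of this patch.
 */
#include <rdma/ib_verbs.h>

static int example_post_one_recv(struct ib_srq *srq, u64 dma_addr, u32 len,
				 u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,	/* DMA address of the receive buffer */
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = 1,		/* opaque cookie echoed in the CQE */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	/* Fails with -ENOMEM once the ring is full (head == tail above). */
	return ib_post_srq_recv(srq, &wr, &bad_wr);
}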


@ -0,0 +1,121 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_IB_USER_H
#define MLX5_IB_USER_H
#include <linux/types.h>
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
};
enum {
MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};
/* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
#define MLX5_IB_UVERBS_ABI_VERSION 1
/* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* instead.
*/
struct mlx5_ib_alloc_ucontext_req {
__u32 total_num_uuars;
__u32 num_low_latency_uuars;
};
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
__u32 tot_uuars;
__u32 cache_line_size;
__u16 max_sq_desc_sz;
__u16 max_rq_desc_sz;
__u32 max_send_wqebb;
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
__u16 reserved;
};
struct mlx5_ib_alloc_pd_resp {
__u32 pdn;
};
struct mlx5_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
__u32 cqe_size;
};
struct mlx5_ib_create_cq_resp {
__u32 cqn;
__u32 reserved;
};
struct mlx5_ib_resize_cq {
__u64 buf_addr;
};
struct mlx5_ib_create_srq {
__u64 buf_addr;
__u64 db_addr;
__u32 flags;
};
struct mlx5_ib_create_srq_resp {
__u32 srqn;
__u32 reserved;
};
struct mlx5_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 flags;
};
struct mlx5_ib_create_qp_resp {
__u32 uuar_index;
};
#endif /* MLX5_IB_USER_H */
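
/*
 * Editor's note: a hypothetical userspace-side sketch illustrating the
 * rule stated above -- pointers cross the ABI as __u64, never as pointer
 * types. The buffer and doorbell variables are placeholders.
 */
#include <stdint.h>

static void example_fill_create_srq(struct mlx5_ib_create_srq *ucmd,
				    void *buf, void *db, int want_wq_sig)
{
	ucmd->buf_addr = (uintptr_t)buf;	/* queue buffer, passed as __u64 */
	ucmd->db_addr  = (uintptr_t)db;		/* doorbell record, as __u64 */
	ucmd->flags    = want_wq_sig ? MLX5_SRQ_FLAG_SIGNATURE : 0;
}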


@ -19,5 +19,6 @@ config NET_VENDOR_MELLANOX
if NET_VENDOR_MELLANOX
source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
endif # NET_VENDOR_MELLANOX


@ -3,3 +3,4 @@
#
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/


@ -0,0 +1,18 @@
#
# Mellanox driver configuration
#
config MLX5_CORE
tristate
depends on PCI && X86
default n
config MLX5_DEBUG
bool "Verbose debugging output" if (MLX5_CORE && EXPERT)
depends on MLX5_CORE
default y
---help---
This option causes debugging code to be compiled into the
mlx5_core driver. The output can be turned on via the
debug_mask module parameter (which can also be set after
the driver is loaded through sysfs).


@ -0,0 +1,5 @@
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o


@ -0,0 +1,238 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
/* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0. If the
* requested size is > max_direct, we split the allocation into
* multiple pages, so we don't require too much contiguous memory.
*/
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
struct mlx5_buf *buf)
{
dma_addr_t t;
buf->size = size;
if (size <= max_direct) {
buf->nbufs = 1;
buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
size, &t, GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
buf->direct.map = t;
while (t & ((1 << buf->page_shift) - 1)) {
--buf->page_shift;
buf->npages *= 2;
}
} else {
int i;
buf->direct.buf = NULL;
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
for (i = 0; i < buf->nbufs; i++) {
buf->page_list[i].buf =
dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
&t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
}
if (BITS_PER_LONG == 64) {
struct page **pages;
pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
if (!pages)
goto err_free;
for (i = 0; i < buf->nbufs; i++)
pages[i] = virt_to_page(buf->page_list[i].buf);
buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
kfree(pages);
if (!buf->direct.buf)
goto err_free;
}
}
return 0;
err_free:
mlx5_buf_free(dev, buf);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
int i;
if (buf->nbufs == 1)
dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
buf->direct.map);
else {
if (BITS_PER_LONG == 64 && buf->direct.buf)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; i++)
if (buf->page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
}
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
{
struct mlx5_db_pgdir *pgdir;
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
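/* A set bit in the bitmap marks a free doorbell slot in this page. */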
bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
&pgdir->db_dma, GFP_KERNEL);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
}
return pgdir;
}
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
struct mlx5_db *db)
{
int offset;
int i;
i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
if (i >= MLX5_DB_PER_PAGE)
return -ENOMEM;
__clear_bit(i, pgdir->bitmap);
db->u.pgdir = pgdir;
db->index = i;
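/* Each doorbell record gets its own cache line to avoid false sharing
 * between records that share the page.
 */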
offset = db->index * L1_CACHE_BYTES;
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
db->dma = pgdir->db_dma + offset;
return 0;
}
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
struct mlx5_db_pgdir *pgdir;
int ret = 0;
mutex_lock(&dev->priv.pgdir_mutex);
list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
if (!mlx5_alloc_db_from_pgdir(pgdir, db))
goto out;
pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
if (!pgdir) {
ret = -ENOMEM;
goto out;
}
list_add(&pgdir->list, &dev->priv.pgdir_list);
/* This should never fail -- we just allocated an empty page: */
WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));
out:
mutex_unlock(&dev->priv.pgdir_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
mutex_lock(&dev->priv.pgdir_mutex);
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
kfree(db->u.pgdir);
}
mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
u64 addr;
int i;
for (i = 0; i < buf->npages; i++) {
if (buf->nbufs == 1)
addr = buf->direct.map + (i << buf->page_shift);
else
addr = buf->page_list[i].map;
pas[i] = cpu_to_be64(addr);
}
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
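
/*
 * Editor's note: a minimal sketch, assuming a caller inside mlx5_core, of
 * the allocation pattern these helpers support: allocate a queue buffer,
 * copy its page addresses into a firmware mailbox, and unwind on error.
 * "example_mbox_in" is a hypothetical mailbox layout, not part of this
 * patch.
 */
struct example_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be64			pas[0];	/* page address list */
};

static int example_alloc_queue(struct mlx5_core_dev *dev, int size,
			       struct mlx5_buf *buf)
{
	struct example_mbox_in *in;
	int inlen;
	int err;

	/* Allow up to two contiguous pages before falling back to a page
	 * list -- the same policy the SRQ and EQ code in this patch uses.
	 */
	err = mlx5_buf_alloc(dev, size, 2 * PAGE_SIZE, buf);
	if (err)
		return err;

	inlen = sizeof(*in) + sizeof(in->pas[0]) * buf->npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_buf_free(dev, buf);
		return -ENOMEM;
	}
	mlx5_fill_page_array(buf, in->pas);
	/* ... set the opcode, execute the command, then ... */
	mlx5_vfree(in);
	return 0;
}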

File diff suppressed because it is too large


@ -0,0 +1,224 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
struct mlx5_core_cq *cq;
struct mlx5_cq_table *table = &dev->priv.cq_table;
spin_lock(&table->lock);
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
atomic_inc(&cq->refcount);
spin_unlock(&table->lock);
if (!cq) {
mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
return;
}
++cq->arm_sn;
cq->comp(cq);
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_core_cq *cq;
spin_lock(&table->lock);
cq = radix_tree_lookup(&table->tree, cqn);
if (cq)
atomic_inc(&cq->refcount);
spin_unlock(&table->lock);
if (!cq) {
mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
return;
}
cq->event(cq, event_type);
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_in *in, int inlen)
{
int err;
struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_create_cq_mbox_out out;
struct mlx5_destroy_cq_mbox_in din;
struct mlx5_destroy_cq_mbox_out dout;
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
memset(&out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
cq->cons_index = 0;
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
spin_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, cq->cqn, cq);
spin_unlock_irq(&table->lock);
if (err)
goto err_cmd;
cq->pid = current->pid;
err = mlx5_debug_cq_add(dev, cq);
if (err)
mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
cq->cqn);
return 0;
err_cmd:
memset(&din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout));
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_destroy_cq_mbox_in in;
struct mlx5_destroy_cq_mbox_out out;
struct mlx5_core_cq *tmp;
int err;
spin_lock_irq(&table->lock);
tmp = radix_tree_delete(&table->tree, cq->cqn);
spin_unlock_irq(&table->lock);
if (!tmp) {
mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
return -EINVAL;
}
if (tmp != cq) {
mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
return -EINVAL;
}
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
in.cqn = cpu_to_be32(cq->cqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
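/* Drop the initial reference taken at create time, then wait for any
 * event handler still holding a temporary reference (see
 * mlx5_cq_completion()/mlx5_cq_event()) to release it.
 */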
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out)
{
struct mlx5_query_cq_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
memset(out, 0, sizeof(*out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
in.cqn = cpu_to_be32(cq->cqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
}
EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int type, struct mlx5_cq_modify_params *params)
{
return -ENOSYS;
}
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
int err;
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
err = mlx5_cq_debugfs_init(dev);
return err;
}
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
mlx5_cq_debugfs_cleanup(dev);
}


@ -0,0 +1,587 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
enum {
QP_PID,
QP_STATE,
QP_XPORT,
QP_MTU,
QP_N_RECV,
QP_RECV_SZ,
QP_N_SEND,
QP_LOG_PG_SZ,
QP_RQPN,
};
static char *qp_fields[] = {
[QP_PID] = "pid",
[QP_STATE] = "state",
[QP_XPORT] = "transport",
[QP_MTU] = "mtu",
[QP_N_RECV] = "num_recv",
[QP_RECV_SZ] = "rcv_wqe_sz",
[QP_N_SEND] = "num_send",
[QP_LOG_PG_SZ] = "log2_page_sz",
[QP_RQPN] = "remote_qpn",
};
enum {
EQ_NUM_EQES,
EQ_INTR,
EQ_LOG_PG_SZ,
};
static char *eq_fields[] = {
[EQ_NUM_EQES] = "num_eqes",
[EQ_INTR] = "intr",
[EQ_LOG_PG_SZ] = "log_page_size",
};
enum {
CQ_PID,
CQ_NUM_CQES,
CQ_LOG_PG_SZ,
};
static char *cq_fields[] = {
[CQ_PID] = "pid",
[CQ_NUM_CQES] = "num_cqes",
[CQ_LOG_PG_SZ] = "log_page_size",
};
struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);
void mlx5_register_debugfs(void)
{
mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
if (IS_ERR_OR_NULL(mlx5_debugfs_root))
mlx5_debugfs_root = NULL;
}
void mlx5_unregister_debugfs(void)
{
debugfs_remove(mlx5_debugfs_root);
}
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
if (!mlx5_debugfs_root)
return 0;
atomic_set(&dev->num_qps, 0);
dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
if (!dev->priv.qp_debugfs)
return -ENOMEM;
return 0;
}
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
if (!mlx5_debugfs_root)
return;
debugfs_remove_recursive(dev->priv.qp_debugfs);
}
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
if (!mlx5_debugfs_root)
return 0;
dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
if (!dev->priv.eq_debugfs)
return -ENOMEM;
return 0;
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
if (!mlx5_debugfs_root)
return;
debugfs_remove_recursive(dev->priv.eq_debugfs);
}
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
loff_t *pos)
{
struct mlx5_cmd_stats *stats;
u64 field = 0;
int ret;
char tbuf[22];
if (*pos)
return 0;
stats = filp->private_data;
spin_lock(&stats->lock);
if (stats->n)
field = stats->sum / stats->n;
spin_unlock(&stats->lock);
ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
return -EFAULT;
}
*pos += ret;
return ret;
}
static ssize_t average_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_cmd_stats *stats;
stats = filp->private_data;
spin_lock(&stats->lock);
stats->sum = 0;
stats->n = 0;
spin_unlock(&stats->lock);
*pos += count;
return count;
}
static const struct file_operations stats_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = average_read,
.write = average_write,
};
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
struct dentry **cmd;
const char *namep;
int err;
int i;
if (!mlx5_debugfs_root)
return 0;
cmd = &dev->priv.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
if (!*cmd)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
stats->root = debugfs_create_dir(namep, *cmd);
if (!stats->root) {
mlx5_core_warn(dev, "failed adding command %d\n",
i);
err = -ENOMEM;
goto out;
}
stats->avg = debugfs_create_file("average", 0400,
stats->root, stats,
&stats_fops);
if (!stats->avg) {
mlx5_core_warn(dev, "failed creating debugfs file\n");
err = -ENOMEM;
goto out;
}
stats->count = debugfs_create_u64("n", 0400,
stats->root,
&stats->n);
if (!stats->count) {
mlx5_core_warn(dev, "failed creating debugfs file\n");
err = -ENOMEM;
goto out;
}
}
}
return 0;
out:
debugfs_remove_recursive(dev->priv.cmdif_debugfs);
return err;
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
if (!mlx5_debugfs_root)
return;
debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
if (!mlx5_debugfs_root)
return 0;
dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
if (!dev->priv.cq_debugfs)
return -ENOMEM;
return 0;
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
if (!mlx5_debugfs_root)
return;
debugfs_remove_recursive(dev->priv.cq_debugfs);
}
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index)
{
struct mlx5_query_qp_mbox_out *out;
struct mlx5_qp_context *ctx;
u64 param = 0;
int err;
int no_sq;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
return param;
err = mlx5_core_qp_query(dev, qp, out, sizeof(*out));
if (err) {
mlx5_core_warn(dev, "failed to query qp\n");
goto out;
}
ctx = &out->ctx;
switch (index) {
case QP_PID:
param = qp->pid;
break;
case QP_STATE:
param = be32_to_cpu(ctx->flags) >> 28;
break;
case QP_XPORT:
param = (be32_to_cpu(ctx->flags) >> 16) & 0xff;
break;
case QP_MTU:
param = ctx->mtu_msgmax >> 5;
break;
case QP_N_RECV:
param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
break;
case QP_RECV_SZ:
param = 1 << ((ctx->rq_size_stride & 7) + 4);
break;
case QP_N_SEND:
no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
if (!no_sq)
param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
else
param = 0;
break;
case QP_LOG_PG_SZ:
param = ((be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f);
param += 12;
break;
case QP_RQPN:
param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
break;
}
out:
kfree(out);
return param;
}
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
struct mlx5_query_eq_mbox_out *out;
struct mlx5_eq_context *ctx;
u64 param = 0;
int err;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
return param;
ctx = &out->ctx;
err = mlx5_core_eq_query(dev, eq, out, sizeof(*out));
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
}
switch (index) {
case EQ_NUM_EQES:
param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
break;
case EQ_INTR:
param = ctx->intr;
break;
case EQ_LOG_PG_SZ:
param = (ctx->log_page_size & 0x1f) + 12;
break;
}
out:
kfree(out);
return param;
}
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int index)
{
struct mlx5_query_cq_mbox_out *out;
struct mlx5_cq_context *ctx;
u64 param = 0;
int err;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
return param;
ctx = &out->ctx;
err = mlx5_core_query_cq(dev, cq, out);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
}
switch (index) {
case CQ_PID:
param = cq->pid;
break;
case CQ_NUM_CQES:
param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f);
break;
case CQ_LOG_PG_SZ:
param = (ctx->log_pg_sz & 0x1f) + 12;
break;
}
out:
kfree(out);
return param;
}
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
loff_t *pos)
{
struct mlx5_field_desc *desc;
struct mlx5_rsc_debug *d;
char tbuf[18];
u64 field;
int ret;
if (*pos)
return 0;
desc = filp->private_data;
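/* desc->i is this field's index within d->fields[], so stepping back
 * i entries lands on fields[0]; the mlx5_rsc_debug header sits
 * immediately before that array.
 */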
d = (void *)(desc - desc->i) - sizeof(*d);
switch (d->type) {
case MLX5_DBG_RSC_QP:
field = qp_read_field(d->dev, d->object, desc->i);
break;
case MLX5_DBG_RSC_EQ:
field = eq_read_field(d->dev, d->object, desc->i);
break;
case MLX5_DBG_RSC_CQ:
field = cq_read_field(d->dev, d->object, desc->i);
break;
default:
mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
return -EINVAL;
}
ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
if (ret > 0) {
if (copy_to_user(buf, tbuf, ret))
return -EFAULT;
}
*pos += ret;
return ret;
}
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = dbg_read,
};
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
struct dentry *root, struct mlx5_rsc_debug **dbg,
int rsn, char **field, int nfile, void *data)
{
struct mlx5_rsc_debug *d;
char resn[32];
int err;
int i;
d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
if (!d)
return -ENOMEM;
d->dev = dev;
d->object = data;
d->type = type;
sprintf(resn, "0x%x", rsn);
d->root = debugfs_create_dir(resn, root);
if (!d->root) {
err = -ENOMEM;
goto out_free;
}
for (i = 0; i < nfile; i++) {
d->fields[i].i = i;
d->fields[i].dent = debugfs_create_file(field[i], 0400,
d->root, &d->fields[i],
&fops);
if (!d->fields[i].dent) {
err = -ENOMEM;
goto out_rem;
}
}
*dbg = d;
return 0;
out_rem:
debugfs_remove_recursive(d->root);
out_free:
kfree(d);
return err;
}
static void rem_res_tree(struct mlx5_rsc_debug *d)
{
debugfs_remove_recursive(d->root);
kfree(d);
}
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
int err;
if (!mlx5_debugfs_root)
return 0;
err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
&qp->dbg, qp->qpn, qp_fields,
ARRAY_SIZE(qp_fields), qp);
if (err)
qp->dbg = NULL;
return err;
}
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
if (!mlx5_debugfs_root)
return;
if (qp->dbg)
rem_res_tree(qp->dbg);
}
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
int err;
if (!mlx5_debugfs_root)
return 0;
err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
&eq->dbg, eq->eqn, eq_fields,
ARRAY_SIZE(eq_fields), eq);
if (err)
eq->dbg = NULL;
return err;
}
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
if (!mlx5_debugfs_root)
return;
if (eq->dbg)
rem_res_tree(eq->dbg);
}
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
int err;
if (!mlx5_debugfs_root)
return 0;
err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
&cq->dbg, cq->cqn, cq_fields,
ARRAY_SIZE(cq_fields), cq);
if (err)
cq->dbg = NULL;
return err;
}
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
if (!mlx5_debugfs_root)
return;
if (cq->dbg)
rem_res_tree(cq->dbg);
}


@ -0,0 +1,521 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
enum {
MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
MLX5_EQE_OWNER_INIT_VAL = 0x1,
};
enum {
MLX5_EQ_STATE_ARMED = 0x9,
MLX5_EQ_STATE_FIRED = 0xa,
MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};
enum {
MLX5_NUM_SPARE_EQE = 0x80,
MLX5_NUM_ASYNC_EQE = 0x100,
MLX5_NUM_CMD_EQE = 32,
};
enum {
MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
(1ull << MLX5_EVENT_TYPE_COMM_EST) | \
(1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
(1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
(1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
struct map_eq_in {
u64 mask;
u32 reserved;
u32 unmap_eqn;
};
struct cre_des_eq {
u8 reserved[15];
u8 eqn;
};
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
struct mlx5_destroy_eq_mbox_in in;
struct mlx5_destroy_eq_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
in.eqn = eqn;
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
goto ex;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
ex:
return err;
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
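/* The owner bit alternates on every pass through the ring:
 * cons_index & nent gives the parity of the current pass, so the
 * entry belongs to software only when eqe->owner matches it.
 */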
return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
static const char *eqe_type_str(u8 type)
{
switch (type) {
case MLX5_EVENT_TYPE_COMP:
return "MLX5_EVENT_TYPE_COMP";
case MLX5_EVENT_TYPE_PATH_MIG:
return "MLX5_EVENT_TYPE_PATH_MIG";
case MLX5_EVENT_TYPE_COMM_EST:
return "MLX5_EVENT_TYPE_COMM_EST";
case MLX5_EVENT_TYPE_SQ_DRAINED:
return "MLX5_EVENT_TYPE_SQ_DRAINED";
case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
case MLX5_EVENT_TYPE_CQ_ERROR:
return "MLX5_EVENT_TYPE_CQ_ERROR";
case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
case MLX5_EVENT_TYPE_INTERNAL_ERROR:
return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
case MLX5_EVENT_TYPE_PORT_CHANGE:
return "MLX5_EVENT_TYPE_PORT_CHANGE";
case MLX5_EVENT_TYPE_GPIO_EVENT:
return "MLX5_EVENT_TYPE_GPIO_EVENT";
case MLX5_EVENT_TYPE_REMOTE_CONFIG:
return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
case MLX5_EVENT_TYPE_STALL_EVENT:
return "MLX5_EVENT_TYPE_STALL_EVENT";
case MLX5_EVENT_TYPE_CMD:
return "MLX5_EVENT_TYPE_CMD";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
default:
return "Unrecognized event";
}
}
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
switch (subtype) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
return MLX5_DEV_EVENT_PORT_DOWN;
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
return MLX5_DEV_EVENT_PORT_UP;
case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
return MLX5_DEV_EVENT_PORT_INITIALIZED;
case MLX5_PORT_CHANGE_SUBTYPE_LID:
return MLX5_DEV_EVENT_LID_CHANGE;
case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
return MLX5_DEV_EVENT_PKEY_CHANGE;
case MLX5_PORT_CHANGE_SUBTYPE_GUID:
return MLX5_DEV_EVENT_GUID_CHANGE;
case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
return MLX5_DEV_EVENT_CLIENT_REREG;
}
return -1;
}
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
__raw_writel((__force u32) cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
mb();
}
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
int eqes_found = 0;
int set_ci = 0;
u32 cqn;
u32 srqn;
u8 port;
while ((eqe = next_eqe_sw(eq))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
rmb();
mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
switch (eqe->type) {
case MLX5_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
mlx5_cq_completion(dev, cqn);
break;
case MLX5_EVENT_TYPE_PATH_MIG:
case MLX5_EVENT_TYPE_COMM_EST:
case MLX5_EVENT_TYPE_SQ_DRAINED:
case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
mlx5_core_dbg(dev, "event %s(%d) arrived\n",
eqe_type_str(eqe->type), eqe->type);
mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff,
eqe->type);
break;
case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
eqe_type_str(eqe->type), eqe->type, srqn);
mlx5_srq_event(dev, srqn, eqe->type);
break;
case MLX5_EVENT_TYPE_CMD:
mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
break;
case MLX5_EVENT_TYPE_PORT_CHANGE:
port = (eqe->data.port.port >> 4) & 0xf;
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
case MLX5_PORT_CHANGE_SUBTYPE_LID:
case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
case MLX5_PORT_CHANGE_SUBTYPE_GUID:
case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
dev->event(dev, port_subtype_event(eqe->sub_type), &port);
break;
default:
mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
port, eqe->sub_type);
}
break;
case MLX5_EVENT_TYPE_CQ_ERROR:
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
cqn, eqe->data.cq_err.syndrome);
mlx5_cq_event(dev, cqn, eqe->type);
break;
case MLX5_EVENT_TYPE_PAGE_REQUEST:
{
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
}
break;
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
break;
}
++eq->cons_index;
eqes_found = 1;
++set_ci;
/* The HCA will think the queue has overflowed if we
* don't tell it we've been processing events. We
* create our EQs with MLX5_NUM_SPARE_EQE extra
* entries, so we must update our consumer index at
* least that often.
*/
if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
eq_update_ci(eq, 0);
set_ci = 0;
}
}
eq_update_ci(eq, 1);
return eqes_found;
}
static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
struct mlx5_eq *eq = eq_ptr;
struct mlx5_core_dev *dev = eq->dev;
mlx5_eq_int(dev, eq);
/* MSI-X vectors always belong to us */
return IRQ_HANDLED;
}
static void init_eq_buf(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
int i;
for (i = 0; i < eq->nent; i++) {
eqe = get_eqe(eq, i);
eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
}
}
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
struct mlx5_create_eq_mbox_in *in;
struct mlx5_create_eq_mbox_out out;
int err;
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
&eq->buf);
if (err)
return err;
init_eq_buf(eq);
inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto err_buf;
}
memset(&out, 0, sizeof(out));
mlx5_fill_page_array(&eq->buf, in->pas);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
in->ctx.intr = vecidx;
in->ctx.log_page_size = PAGE_SHIFT - 12;
in->events_mask = cpu_to_be64(mask);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err)
goto err_in;
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
goto err_in;
}
eq->eqn = out.eq_number;
err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
name, eq);
if (err)
goto err_eq;
eq->irqn = vecidx;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
err = mlx5_debug_eq_add(dev, eq);
if (err)
goto err_irq;
/* EQs are created in ARMED state
*/
eq_update_ci(eq, 1);
mlx5_vfree(in);
return 0;
err_irq:
free_irq(table->msix_arr[vecidx].vector, eq);
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
err_in:
mlx5_vfree(in);
err_buf:
mlx5_buf_free(dev, &eq->buf);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
mlx5_debug_eq_remove(dev, eq);
free_irq(table->msix_arr[eq->irqn].vector, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
mlx5_buf_free(dev, &eq->buf);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
int err;
spin_lock_init(&dev->priv.eq_table.lock);
err = mlx5_eq_debugfs_init(dev);
return err;
}
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
mlx5_eq_debugfs_cleanup(dev);
}
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
return err;
}
mlx5_cmd_use_events(dev);
err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
"mlx5_async_eq", &dev->priv.uuari.uars[0]);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
goto err1;
}
err = mlx5_create_map_eq(dev, &table->pages_eq,
MLX5_EQ_VEC_PAGES,
dev->caps.max_vf + 1,
1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
&dev->priv.uuari.uars[0]);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
goto err2;
}
return err;
err2:
mlx5_destroy_unmap_eq(dev, &table->async_eq);
err1:
mlx5_cmd_use_polling(dev);
mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
return err;
}
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
if (err)
return err;
mlx5_destroy_unmap_eq(dev, &table->async_eq);
mlx5_cmd_use_polling(dev);
err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
if (err)
mlx5_cmd_use_events(dev);
return err;
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_query_eq_mbox_out *out, int outlen)
{
struct mlx5_query_eq_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
in.eqn = eq->eqn;
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
if (out->hdr.status)
err = mlx5_cmd_status_to_err(&out->hdr);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);


@ -0,0 +1,185 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <linux/module.h>
#include "mlx5_core.h"
int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_query_adapter_mbox_out *out;
struct mlx5_cmd_query_adapter_mbox_in in;
int err;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
return -ENOMEM;
memset(&in, 0, sizeof(in));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
goto out_out;
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
goto out_out;
}
memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
out_out:
kfree(out);
return err;
}
int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
struct mlx5_caps *caps)
{
struct mlx5_cmd_query_hca_cap_mbox_out *out;
struct mlx5_cmd_query_hca_cap_mbox_in in;
struct mlx5_query_special_ctxs_mbox_out ctx_out;
struct mlx5_query_special_ctxs_mbox_in ctx_in;
int err;
u16 t16;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
return -ENOMEM;
memset(&in, 0, sizeof(in));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
in.hdr.opmod = cpu_to_be16(0x1);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
goto out_out;
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
goto out_out;
}
caps->log_max_eq = out->hca_cap.log_max_eq & 0xf;
caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
caps->flags = be64_to_cpu(out->hca_cap.flags);
caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
caps->num_ports = out->hca_cap.num_ports & 0xf;
caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
if (caps->num_ports > MLX5_MAX_PORTS) {
mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n",
caps->num_ports, MLX5_MAX_PORTS);
err = -EINVAL;
goto out_out;
}
caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
caps->log_max_mcg = out->hca_cap.log_max_mcg;
caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
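/* Bit 15 of bf_log_bf_reg_size advertises BlueFlame support; when set,
 * the low 5 bits encode log2 of the BlueFlame register size.
 */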
if (t16 & 0x8000) {
caps->bf_reg_size = 1 << (t16 & 0x1f);
caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
} else {
caps->bf_reg_size = 0;
caps->bf_regs_per_page = 0;
}
caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1);
memset(&ctx_in, 0, sizeof(ctx_in));
memset(&ctx_out, 0, sizeof(ctx_out));
ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in),
&ctx_out, sizeof(ctx_out));
if (err)
goto out_out;
if (ctx_out.hdr.status)
err = mlx5_cmd_status_to_err(&ctx_out.hdr);
caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey);
out_out:
kfree(out);
return err;
}
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_init_hca_mbox_in in;
struct mlx5_cmd_init_hca_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
return err;
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_teardown_hca_mbox_in in;
struct mlx5_cmd_teardown_hca_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
return err;
}


@ -0,0 +1,217 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
enum {
MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
MAX_MISSES = 3,
};
enum {
MLX5_HEALTH_SYNDR_FW_ERR = 0x1,
MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7,
MLX5_HEALTH_SYNDR_CRC_ERR = 0x9,
MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa,
MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb,
MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc,
MLX5_HEALTH_SYNDR_EQ_ERR = 0xd,
MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf,
};
static DEFINE_SPINLOCK(health_lock);
static LIST_HEAD(health_list);
static struct work_struct health_work;
static health_handler_t reg_handler;
int mlx5_register_health_report_handler(health_handler_t handler)
{
spin_lock_irq(&health_lock);
if (reg_handler) {
spin_unlock_irq(&health_lock);
return -EEXIST;
}
reg_handler = handler;
spin_unlock_irq(&health_lock);
return 0;
}
EXPORT_SYMBOL(mlx5_register_health_report_handler);
void mlx5_unregister_health_report_handler(void)
{
spin_lock_irq(&health_lock);
reg_handler = NULL;
spin_unlock_irq(&health_lock);
}
EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
static void health_care(struct work_struct *work)
{
struct mlx5_core_health *health, *n;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
LIST_HEAD(tlist);
spin_lock_irq(&health_lock);
list_splice_init(&health_list, &tlist);
spin_unlock_irq(&health_lock);
list_for_each_entry_safe(health, n, &tlist, list) {
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
spin_lock_irq(&health_lock);
if (reg_handler)
reg_handler(dev->pdev, health->health,
sizeof(health->health));
list_del_init(&health->list);
spin_unlock_irq(&health_lock);
}
}
static const char *hsynd_str(u8 synd)
{
switch (synd) {
case MLX5_HEALTH_SYNDR_FW_ERR:
return "firmware internal error";
case MLX5_HEALTH_SYNDR_IRISC_ERR:
return "irisc not responding";
case MLX5_HEALTH_SYNDR_CRC_ERR:
return "firmware CRC error";
case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
return "ICM fetch PCI error";
case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
return "HW fatal error\n";
case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
return "async EQ buffer overrun";
case MLX5_HEALTH_SYNDR_EQ_ERR:
return "EQ error";
case MLX5_HEALTH_SYNDR_FFSER_ERR:
return "FFSER error";
default:
return "unrecognized error";
}
}
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
int i;
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
pr_info("assert_var[%d] 0x%08x\n", i, be32_to_cpu(h->assert_var[i]));
pr_info("assert_exit_ptr 0x%08x\n", be32_to_cpu(h->assert_exit_ptr));
pr_info("assert_callra 0x%08x\n", be32_to_cpu(h->assert_callra));
pr_info("fw_ver 0x%08x\n", be32_to_cpu(h->fw_ver));
pr_info("hw_id 0x%08x\n", be32_to_cpu(h->hw_id));
pr_info("irisc_index %d\n", h->irisc_index);
pr_info("synd 0x%x: %s\n", h->synd, hsynd_str(h->synd));
pr_info("ext_sync 0x%04x\n", be16_to_cpu(h->ext_sync));
}
static void poll_health(unsigned long data)
{
struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
struct mlx5_core_health *health = &dev->priv.health;
unsigned long next;
u32 count;
count = ioread32be(health->health_counter);
if (count == health->prev)
++health->miss_counter;
else
health->miss_counter = 0;
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised\n");
print_health_info(dev);
spin_lock_irq(&health_lock);
list_add_tail(&health->list, &health_list);
spin_unlock_irq(&health_lock);
queue_work(mlx5_core_wq, &health_work);
} else {
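/* Add up to one second of random jitter to the next poll so that
 * multiple devices don't poll in lockstep.
 */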
get_random_bytes(&next, sizeof(next));
next %= HZ;
next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
mod_timer(&health->timer, next);
}
}
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
INIT_LIST_HEAD(&health->list);
init_timer(&health->timer);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
health->timer.data = (unsigned long)dev;
health->timer.function = poll_health;
health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
add_timer(&health->timer);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
del_timer_sync(&health->timer);
spin_lock_irq(&health_lock);
if (!list_empty(&health->list))
list_del_init(&health->list);
spin_unlock_irq(&health_lock);
}
void mlx5_health_cleanup(void)
{
}
void __init mlx5_health_init(void)
{
INIT_WORK(&health_work, health_care);
}


@ -0,0 +1,78 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
u16 opmod, int port)
{
struct mlx5_mad_ifc_mbox_in *in = NULL;
struct mlx5_mad_ifc_mbox_out *out = NULL;
int err;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return -ENOMEM;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out) {
err = -ENOMEM;
goto out;
}
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
in->hdr.opmod = cpu_to_be16(opmod);
in->port = port;
memcpy(in->data, inb, sizeof(in->data));
err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
if (err)
goto out;
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
goto out;
}
memcpy(outb, out->data, sizeof(out->data));
out:
kfree(out);
kfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);

View File

@ -0,0 +1,475 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include "mlx5_core.h"
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "1.0"
#define DRIVER_RELDATE "June 2013"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
struct workqueue_struct *mlx5_core_wq;
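/*
 * Prefer 64-bit streaming and consistent DMA masks, falling back to
 * 32-bit masks if the platform cannot provide them, and cap the DMA
 * segment size at 2GB.
 */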
static int set_dma_caps(struct pci_dev *pdev)
{
int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
return err;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev,
"Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
"Can't set consistent PCI DMA mask, aborting.\n");
return err;
}
}
dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
return err;
}
static int request_bar(struct pci_dev *pdev)
{
int err = 0;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
return -ENODEV;
}
err = pci_request_regions(pdev, DRIVER_NAME);
if (err)
dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
return err;
}
static void release_bar(struct pci_dev *pdev)
{
pci_release_regions(pdev);
}
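/*
 * Request one completion vector per port per online CPU on top of the
 * MLX5_EQ_VEC_COMP_BASE control vectors (pages, commands, async events),
 * bounded by the number of EQs the device supports.
 */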
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
int num_eqs = 1 << dev->caps.log_max_eq;
int nvec;
int err;
int i;
nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
if (nvec <= MLX5_EQ_VEC_COMP_BASE)
return -ENOMEM;
table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
if (!table->msix_arr)
return -ENOMEM;
for (i = 0; i < nvec; i++)
table->msix_arr[i].entry = i;
retry:
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
if (err <= 0) {
return err;
} else if (err > MLX5_EQ_VEC_COMP_BASE) {
mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
nvec = err;
goto retry;
}
/* fewer vectors than the control vectors plus one completion
 * vector cannot be used; fail rather than report success with
 * MSI-X left disabled
 */
return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
pci_disable_msix(dev->pdev);
kfree(table->msix_arr);
}
struct mlx5_reg_host_endianess {
u8 he;
u8 rsvd[15];
};
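/*
 * Query the HCA capabilities, apply the adjustments requested by the
 * driver profile (command interface checksum, log_max_qp) and the
 * system UAR page size, then program the result with SET_HCA_CAP.
 */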
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
struct mlx5_cmd_set_hca_cap_mbox_out set_out;
struct mlx5_profile *prof = dev->profile;
u64 flags;
int csum = 1;
int err;
memset(&query_ctx, 0, sizeof(query_ctx));
query_out = kzalloc(sizeof(*query_out), GFP_KERNEL);
if (!query_out)
return -ENOMEM;
set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
if (!set_ctx) {
err = -ENOMEM;
goto query_ex;
}
query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
query_ctx.hdr.opmod = cpu_to_be16(0x1);
err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
query_out, sizeof(*query_out));
if (err)
goto query_ex;
err = mlx5_cmd_status_to_err(&query_out->hdr);
if (err) {
mlx5_core_warn(dev, "query hca cap failed, %d\n", err);
goto query_ex;
}
memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
sizeof(set_ctx->hca_cap));
if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
csum = !!prof->cmdif_csum;
flags = be64_to_cpu(set_ctx->hca_cap.flags);
if (csum)
flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
else
flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
set_ctx->hca_cap.flags = cpu_to_be64(flags);
}
if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
memset(&set_out, 0, sizeof(set_out));
set_ctx->hca_cap.uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx),
&set_out, sizeof(set_out));
if (err) {
mlx5_core_warn(dev, "set hca cap failed, %d\n", err);
goto query_ex;
}
err = mlx5_cmd_status_to_err(&set_out.hdr);
if (err)
goto query_ex;
if (!csum)
dev->cmd.checksum_disabled = 1;
query_ex:
kfree(query_out);
kfree(set_ctx);
return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
struct mlx5_reg_host_endianess he_in;
struct mlx5_reg_host_endianess he_out;
int err;
memset(&he_in, 0, sizeof(he_in));
he_in.he = MLX5_SET_HOST_ENDIANNESS;
err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
&he_out, sizeof(he_out),
MLX5_REG_HOST_ENDIANNESS, 0, 1);
return err;
}
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
int err;
dev->pdev = pdev;
pci_set_drvdata(dev->pdev, dev);
strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
mutex_init(&priv->pgdir_mutex);
INIT_LIST_HEAD(&priv->pgdir_list);
spin_lock_init(&priv->mkey_lock);
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
if (!priv->dbg_root)
return -ENOMEM;
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
goto err_dbg;
}
err = request_bar(pdev);
if (err) {
dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
goto err_disable;
}
pci_set_master(pdev);
err = set_dma_caps(pdev);
if (err) {
dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
goto err_clr_master;
}
dev->iseg_base = pci_resource_start(dev->pdev, 0);
dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
if (!dev->iseg) {
err = -ENOMEM;
dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
goto err_clr_master;
}
dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
fw_rev_min(dev), fw_rev_sub(dev));
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
goto err_unmap;
}
mlx5_pagealloc_init(dev);
err = set_hca_ctrl(dev);
if (err) {
dev_err(&pdev->dev, "set_hca_ctrl failed\n");
goto err_pagealloc_cleanup;
}
err = handle_hca_cap(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap failed\n");
goto err_pagealloc_cleanup;
}
err = mlx5_satisfy_startup_pages(dev);
if (err) {
dev_err(&pdev->dev, "failed to allocate startup pages\n");
goto err_pagealloc_cleanup;
}
err = mlx5_pagealloc_start(dev);
if (err) {
dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
goto err_reclaim_pages;
}
err = mlx5_cmd_init_hca(dev);
if (err) {
dev_err(&pdev->dev, "init hca failed\n");
goto err_pagealloc_stop;
}
mlx5_start_health_poll(dev);
err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
if (err) {
dev_err(&pdev->dev, "query hca failed\n");
goto err_stop_poll;
}
err = mlx5_cmd_query_adapter(dev);
if (err) {
dev_err(&pdev->dev, "query adapter failed\n");
goto err_stop_poll;
}
err = mlx5_enable_msix(dev);
if (err) {
dev_err(&pdev->dev, "enable msix failed\n");
goto err_stop_poll;
}
err = mlx5_eq_init(dev);
if (err) {
dev_err(&pdev->dev, "failed to initialize eq\n");
goto disable_msix;
}
err = mlx5_alloc_uuars(dev, &priv->uuari);
if (err) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
goto err_eq_cleanup;
}
err = mlx5_start_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
goto err_free_uar;
}
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
mlx5_init_cq_table(dev);
mlx5_init_qp_table(dev);
mlx5_init_srq_table(dev);
return 0;
err_free_uar:
mlx5_free_uuars(dev, &priv->uuari);
err_eq_cleanup:
mlx5_eq_cleanup(dev);
disable_msix:
mlx5_disable_msix(dev);
err_stop_poll:
mlx5_stop_health_poll(dev);
mlx5_cmd_teardown_hca(dev);
err_pagealloc_stop:
mlx5_pagealloc_stop(dev);
err_reclaim_pages:
mlx5_reclaim_startup_pages(dev);
err_pagealloc_cleanup:
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
err_unmap:
iounmap(dev->iseg);
err_clr_master:
pci_clear_master(dev->pdev);
release_bar(dev->pdev);
err_disable:
pci_disable_device(dev->pdev);
err_dbg:
debugfs_remove(priv->dbg_root);
return err;
}
EXPORT_SYMBOL(mlx5_dev_init);
void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
mlx5_stop_health_poll(dev);
mlx5_cmd_teardown_hca(dev);
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
iounmap(dev->iseg);
pci_clear_master(dev->pdev);
release_bar(dev->pdev);
pci_disable_device(dev->pdev);
debugfs_remove(priv->dbg_root);
}
EXPORT_SYMBOL(mlx5_dev_cleanup);
static int __init init(void)
{
int err;
mlx5_register_debugfs();
mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
if (!mlx5_core_wq) {
err = -ENOMEM;
goto err_debug;
}
mlx5_health_init();
return 0;
err_debug:
mlx5_unregister_debugfs();
return err;
}
static void __exit cleanup(void)
{
mlx5_health_cleanup();
destroy_workqueue(mlx5_core_wq);
mlx5_unregister_debugfs();
}
module_init(init);
module_exit(cleanup);

View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
struct mlx5_attach_mcg_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
__be32 rsvd;
u8 gid[16];
};
struct mlx5_attach_mcg_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvf[8];
};
struct mlx5_detach_mcg_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
__be32 rsvd;
u8 gid[16];
};
struct mlx5_detach_mcg_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvf[8];
};
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
struct mlx5_attach_mcg_mbox_in in;
struct mlx5_attach_mcg_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
return err;
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
struct mlx5_detach_mcg_mbox_in in;
struct mlx5_detach_mcg_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
return err;
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __MLX5_CORE_H__
#define __MLX5_CORE_H__
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
current->pid, ##arg)
#define mlx5_core_dbg_mask(dev, mask, format, arg...) \
do { \
if ((mask) & mlx5_core_debug_mask) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \
__func__, __LINE__, current->pid, ##arg); \
} while (0)
#define mlx5_core_err(dev, format, arg...) \
pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
current->pid, ##arg)
#define mlx5_core_warn(dev, format, arg...) \
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
current->pid, ##arg)
enum {
MLX5_CMD_DATA, /* print command payload only */
MLX5_CMD_TIME, /* print command execution time */
};
int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
struct mlx5_caps *caps);
int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
#endif /* __MLX5_CORE_H__ */

View File

@ -0,0 +1,136 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_create_mkey_mbox_in *in, int inlen)
{
struct mlx5_create_mkey_mbox_out out;
int err;
u8 key;
memset(&out, 0, sizeof(out));
spin_lock(&dev->priv.mkey_lock);
key = dev->priv.mkey_key++;
spin_unlock(&dev->priv.mkey_lock);
in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
return err;
}
if (out.hdr.status) {
mlx5_core_dbg(dev, "status %d\n", out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr);
}
mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key;
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
{
struct mlx5_destroy_mkey_mbox_in in;
struct mlx5_destroy_mkey_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_query_mkey_mbox_out *out, int outlen)
{
struct mlx5_destroy_mkey_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
u32 *mkey)
{
struct mlx5_query_special_ctxs_mbox_in in;
struct mlx5_query_special_ctxs_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
*mkey = be32_to_cpu(out.dump_fill_mkey);
return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);

View File

@ -0,0 +1,435 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <asm-generic/kmap_types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
enum {
MLX5_PAGES_CANT_GIVE = 0,
MLX5_PAGES_GIVE = 1,
MLX5_PAGES_TAKE = 2
};
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u32 func_id;
s16 npages;
struct work_struct work;
};
struct fw_page {
struct rb_node rb_node;
u64 addr;
struct page *page;
u16 func_id;
};
struct mlx5_query_pages_inbox {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_pages_outbox {
struct mlx5_outbox_hdr hdr;
u8 reserved[2];
__be16 func_id;
__be16 init_pages;
__be16 num_pages;
};
struct mlx5_manage_pages_inbox {
struct mlx5_inbox_hdr hdr;
__be16 rsvd0;
__be16 func_id;
__be16 rsvd1;
__be16 num_entries;
u8 rsvd2[16];
__be64 pas[0];
};
struct mlx5_manage_pages_outbox {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[2];
__be16 num_entries;
u8 rsvd1[20];
__be64 pas[0];
};
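/*
 * Pages handed to firmware are tracked in an rb-tree keyed by DMA
 * address so they can be unmapped and freed when reclaimed.
 */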
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
struct rb_root *root = &dev->priv.page_root;
struct rb_node **new = &root->rb_node;
struct rb_node *parent = NULL;
struct fw_page *nfp;
struct fw_page *tfp;
while (*new) {
parent = *new;
tfp = rb_entry(parent, struct fw_page, rb_node);
if (tfp->addr < addr)
new = &parent->rb_left;
else if (tfp->addr > addr)
new = &parent->rb_right;
else
return -EEXIST;
}
nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
if (!nfp)
return -ENOMEM;
nfp->addr = addr;
nfp->page = page;
nfp->func_id = func_id;
rb_link_node(&nfp->rb_node, parent, new);
rb_insert_color(&nfp->rb_node, root);
return 0;
}
static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
{
struct rb_root *root = &dev->priv.page_root;
struct rb_node *tmp = root->rb_node;
struct page *result = NULL;
struct fw_page *tfp;
while (tmp) {
tfp = rb_entry(tmp, struct fw_page, rb_node);
if (tfp->addr < addr) {
tmp = tmp->rb_left;
} else if (tfp->addr > addr) {
tmp = tmp->rb_right;
} else {
rb_erase(&tfp->rb_node, root);
result = tfp->page;
kfree(tfp);
break;
}
}
return result;
}
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s16 *pages, s16 *init_pages)
{
struct mlx5_query_pages_inbox in;
struct mlx5_query_pages_outbox out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
if (pages)
*pages = be16_to_cpu(out.num_pages);
if (init_pages)
*init_pages = be16_to_cpu(out.init_pages);
*func_id = be16_to_cpu(out.func_id);
return err;
}
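/*
 * Allocate, DMA-map and hand npages pages to firmware with a
 * MANAGE_PAGES(GIVE) command. On failure, optionally tell firmware
 * that the pages cannot be supplied, then undo the allocations.
 */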
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
struct page *page;
int inlen;
u64 addr;
int err;
int i;
inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
return -ENOMEM;
}
memset(&out, 0, sizeof(out));
for (i = 0; i < npages; i++) {
page = alloc_page(GFP_HIGHUSER);
if (!page) {
err = -ENOMEM;
mlx5_core_warn(dev, "failed to allocate page\n");
goto out_alloc;
}
addr = dma_map_page(&dev->pdev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->pdev->dev, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
__free_page(page);
err = -ENOMEM;
goto out_alloc;
}
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(page);
err = -ENOMEM;
goto out_alloc;
}
in->pas[i] = cpu_to_be64(addr);
}
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
in->func_id = cpu_to_be16(func_id);
in->num_entries = cpu_to_be16(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
mlx5_core_dbg(dev, "err %d\n", err);
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
goto out_alloc;
}
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
goto out_alloc;
}
}
/* account the pages only once firmware has accepted them */
dev->priv.fw_pages += npages;
mlx5_core_dbg(dev, "err %d\n", err);
goto out_free;
out_alloc:
if (notify_fail) {
memset(in, 0, inlen);
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)))
mlx5_core_warn(dev, "\n");
}
for (i--; i >= 0; i--) {
addr = be64_to_cpu(in->pas[i]);
page = remove_page(dev, addr);
if (!page) {
mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
addr);
continue;
}
dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(page);
}
out_free:
mlx5_vfree(in);
return err;
}
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
struct mlx5_manage_pages_inbox in;
struct mlx5_manage_pages_outbox *out;
struct page *page;
int num_claimed;
int outlen;
u64 addr;
int err;
int i;
memset(&in, 0, sizeof(in));
outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
out = mlx5_vzalloc(outlen);
if (!out)
return -ENOMEM;
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be16(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed recliaming pages\n");
goto out_free;
}
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
goto out_free;
}
num_claimed = be16_to_cpu(out->num_entries);
/* account only the pages firmware actually returned */
dev->priv.fw_pages -= num_claimed;
if (nclaimed)
*nclaimed = num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
page = remove_page(dev, addr);
if (!page) {
mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr);
} else {
dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(page);
}
}
out_free:
mlx5_vfree(out);
return err;
}
static void pages_work_handler(struct work_struct *work)
{
struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
struct mlx5_core_dev *dev = req->dev;
int err = 0;
if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
else if (req->npages > 0)
err = give_pages(dev, req->func_id, req->npages, 1);
if (err)
mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
"reclaim" : "give", err);
kfree(req);
}
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s16 npages)
{
struct mlx5_pages_req *req;
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
mlx5_core_warn(dev, "failed to allocate pages request\n");
return;
}
req->dev = dev;
req->func_id = func_id;
req->npages = npages;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
}
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
{
s16 uninitialized_var(init_pages);
u16 uninitialized_var(func_id);
int err;
err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
if (err)
return err;
mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
return give_pages(dev, func_id, init_pages, 0);
}
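/*
 * Reclaim in chunks sized so that the returned page addresses fit in
 * the command's immediate data area plus a single mailbox block.
 */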
static int optimal_reclaimed_pages(void)
{
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_layout *lay;
int ret;
ret = (sizeof(lay->in) + sizeof(block->data) -
sizeof(struct mlx5_manage_pages_outbox)) / 8;
return ret;
}
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
unsigned long end = jiffies + msecs_to_jiffies(5000);
struct fw_page *fwp;
struct rb_node *p;
int err;
do {
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
return err;
}
}
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
break;
}
} while (p);
return 0;
}
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
dev->priv.page_root = RB_ROOT;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
/* nothing */
}
int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
if (!dev->priv.pg_wq)
return -ENOMEM;
return 0;
}
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
destroy_workqueue(dev->priv.pg_wq);
}

View File

@ -0,0 +1,101 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
struct mlx5_alloc_pd_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_alloc_pd_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 pdn;
u8 rsvd[4];
};
struct mlx5_dealloc_pd_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 pdn;
u8 rsvd[4];
};
struct mlx5_dealloc_pd_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
struct mlx5_alloc_pd_mbox_in in;
struct mlx5_alloc_pd_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
*pdn = be32_to_cpu(out.pdn) & 0xffffff;
return err;
}
EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
struct mlx5_dealloc_pd_mbox_in in;
struct mlx5_dealloc_pd_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD);
in.pdn = cpu_to_be32(pdn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return err;
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);

View File

@ -0,0 +1,104 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write)
{
struct mlx5_access_reg_mbox_in *in = NULL;
struct mlx5_access_reg_mbox_out *out = NULL;
int err = -ENOMEM;
in = mlx5_vzalloc(sizeof(*in) + size_in);
if (!in)
return -ENOMEM;
out = mlx5_vzalloc(sizeof(*out) + size_out);
if (!out)
goto ex1;
memcpy(in->data, data_in, size_in);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
in->hdr.opmod = cpu_to_be16(!write);
in->arg = cpu_to_be32(arg);
in->register_id = cpu_to_be16(reg_num);
err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
sizeof(*out) + size_out);
if (err)
goto ex2;
if (out->hdr.status)
err = mlx5_cmd_status_to_err(&out->hdr);
if (!err)
memcpy(data_out, out->data, size_out);
ex2:
mlx5_vfree(out);
ex1:
mlx5_vfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
u8 rsvd1[2];
__be32 caps_127_96;
__be32 caps_95_64;
__be32 caps_63_32;
__be32 caps_31_0;
};
int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps)
{
struct mlx5_reg_pcap in;
struct mlx5_reg_pcap out;
int err;
memset(&in, 0, sizeof(in));
in.caps_127_96 = cpu_to_be32(caps);
in.port_num = port_num;
err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
sizeof(out), MLX5_REG_PCAP, 0, 1);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);

View File

@ -0,0 +1,301 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
struct mlx5_core_qp *qp;
spin_lock(&table->lock);
qp = radix_tree_lookup(&table->tree, qpn);
if (qp)
atomic_inc(&qp->refcount);
spin_unlock(&table->lock);
if (!qp) {
mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn);
return;
}
qp->event(qp, event_type);
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
}
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
int inlen)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
struct mlx5_create_qp_mbox_out out;
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
int err;
memset(&dout, 0, sizeof(dout));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "ret %d", err);
return err;
}
if (out.hdr.status) {
pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps));
return mlx5_cmd_status_to_err(&out.hdr);
}
qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
spin_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, qp->qpn, qp);
spin_unlock_irq(&table->lock);
if (err) {
mlx5_core_warn(dev, "err %d", err);
goto err_cmd;
}
err = mlx5_debug_qp_add(dev, qp);
if (err)
mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
qp->qpn);
qp->pid = current->pid;
atomic_set(&qp->refcount, 1);
atomic_inc(&dev->num_qps);
init_completion(&qp->free);
return 0;
err_cmd:
memset(&din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout));
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
din.qpn = cpu_to_be32(qp->qpn);
mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
struct mlx5_destroy_qp_mbox_in in;
struct mlx5_destroy_qp_mbox_out out;
struct mlx5_qp_table *table = &dev->priv.qp_table;
unsigned long flags;
int err;
mlx5_debug_qp_remove(dev, qp);
spin_lock_irqsave(&table->lock, flags);
radix_tree_delete(&table->tree, qp->qpn);
spin_unlock_irqrestore(&table->lock, flags);
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
in.qpn = cpu_to_be32(qp->qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
atomic_dec(&dev->num_qps);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
enum mlx5_qp_state new_state,
struct mlx5_modify_qp_mbox_in *in, int sqd_event,
struct mlx5_core_qp *qp)
{
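/* firmware opcode for each legal (current state, new state)
 * transition; zero entries denote illegal transitions
 */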
static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
[MLX5_QP_STATE_RST] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
},
[MLX5_QP_STATE_INIT] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
[MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
},
[MLX5_QP_STATE_RTS] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
[MLX5_QP_STATE_SQD] = MLX5_CMD_OP_RTS2SQD_QP,
},
[MLX5_QP_STATE_SQD] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD2RTS_QP,
[MLX5_QP_STATE_SQD] = MLX5_CMD_OP_SQD2SQD_QP,
},
[MLX5_QP_STATE_SQER] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
},
[MLX5_QP_STATE_ERR] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
}
};
struct mlx5_modify_qp_mbox_out out;
int err = 0;
u16 op;
if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
!optab[cur_state][new_state])
return -EINVAL;
memset(&out, 0, sizeof(out));
op = optab[cur_state][new_state];
in->hdr.opcode = cpu_to_be16(op);
in->qpn = cpu_to_be32(qp->qpn);
err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
if (err)
return err;
return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
mlx5_qp_debugfs_init(dev);
}
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
mlx5_qp_debugfs_cleanup(dev);
}
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
struct mlx5_query_qp_mbox_out *out, int outlen)
{
struct mlx5_query_qp_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
in.qpn = cpu_to_be32(qp->qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
struct mlx5_alloc_xrcd_mbox_in in;
struct mlx5_alloc_xrcd_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
*xrcdn = be32_to_cpu(out.xrcdn);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
struct mlx5_dealloc_xrcd_mbox_in in;
struct mlx5_dealloc_xrcd_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
in.xrcdn = cpu_to_be32(xrcdn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

View File

@ -0,0 +1,223 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *srq;
spin_lock(&table->lock);
srq = radix_tree_lookup(&table->tree, srqn);
if (srq)
atomic_inc(&srq->refcount);
spin_unlock(&table->lock);
if (!srq) {
mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
return;
}
srq->event(srq, event_type);
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
}
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *srq;
spin_lock(&table->lock);
srq = radix_tree_lookup(&table->tree, srqn);
if (srq)
atomic_inc(&srq->refcount);
spin_unlock(&table->lock);
return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen)
{
struct mlx5_create_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_destroy_srq_mbox_in din;
struct mlx5_destroy_srq_mbox_out dout;
int err;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
spin_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, srq->srqn, srq);
spin_unlock_irq(&table->lock);
if (err) {
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
goto err_cmd;
}
return 0;
err_cmd:
memset(&din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout));
din.srqn = cpu_to_be32(srq->srqn);
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
struct mlx5_destroy_srq_mbox_in in;
struct mlx5_destroy_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *tmp;
int err;
spin_lock_irq(&table->lock);
tmp = radix_tree_delete(&table->tree, srq->srqn);
spin_unlock_irq(&table->lock);
if (!tmp) {
mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
return -EINVAL;
}
if (tmp != srq) {
mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
return -EINVAL;
}
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
struct mlx5_query_srq_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
memset(out, 0, sizeof(*out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
struct mlx5_arm_srq_mbox_in in;
struct mlx5_arm_srq_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
in.hdr.opmod = cpu_to_be16(!!is_srq);
in.srqn = cpu_to_be32(srq->srqn);
in.lwm = cpu_to_be16(lwm);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return err;
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
/* nothing */
}

View File

@ -0,0 +1,223 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
enum {
NUM_DRIVER_UARS = 4,
NUM_LOW_LAT_UUARS = 4,
};
struct mlx5_alloc_uar_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_alloc_uar_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 uarn;
u8 rsvd[4];
};
struct mlx5_free_uar_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 uarn;
u8 rsvd[4];
};
struct mlx5_free_uar_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
struct mlx5_alloc_uar_mbox_in in;
struct mlx5_alloc_uar_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
goto ex;
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
goto ex;
}
*uarn = be32_to_cpu(out.uarn) & 0xffffff;
ex:
return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
struct mlx5_free_uar_mbox_in in;
struct mlx5_free_uar_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR);
in.uarn = cpu_to_be32(uarn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
goto ex;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
ex:
return err;
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
static int need_uuar_lock(int uuarn)
{
int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
/* uuar 0 and the NUM_LOW_LAT_UUARS uuars at the top of the range
 * are never shared, so they are used without locking
 */
if (uuarn == 0 || uuarn >= tot_uuars - NUM_LOW_LAT_UUARS)
return 0;
return 1;
}
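/*
 * Allocate NUM_DRIVER_UARS UAR pages from firmware, ioremap each one,
 * and carve every page into MLX5_BF_REGS_PER_PAGE blue-flame registers,
 * each split into two half-size buffers.
 */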
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
{
int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
struct mlx5_bf *bf;
phys_addr_t addr;
int err;
int i;
uuari->num_uars = NUM_DRIVER_UARS;
uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
mutex_init(&uuari->lock);
uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
if (!uuari->uars)
return -ENOMEM;
uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
if (!uuari->bfs) {
err = -ENOMEM;
goto out_uars;
}
uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
GFP_KERNEL);
if (!uuari->bitmap) {
err = -ENOMEM;
goto out_bfs;
}
uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
if (!uuari->count) {
err = -ENOMEM;
goto out_bitmap;
}
for (i = 0; i < uuari->num_uars; i++) {
err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
if (err)
goto out_count;
addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
if (!uuari->uars[i].map) {
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
err = -ENOMEM;
goto out_count;
}
mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
uuari->uars[i].index, uuari->uars[i].map);
}
for (i = 0; i < tot_uuars; i++) {
bf = &uuari->bfs[i];
bf->buf_size = dev->caps.bf_reg_size / 2;
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
bf->reg = NULL; /* Add WC support */
bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size +
MLX5_BF_OFFSET;
bf->need_lock = need_uuar_lock(i);
spin_lock_init(&bf->lock);
spin_lock_init(&bf->lock32);
bf->uuarn = i;
}
return 0;
out_count:
for (i--; i >= 0; i--) {
iounmap(uuari->uars[i].map);
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
}
kfree(uuari->count);
out_bitmap:
kfree(uuari->bitmap);
out_bfs:
kfree(uuari->bfs);
out_uars:
kfree(uuari->uars);
return err;
}
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
{
int i = uuari->num_uars;
for (i--; i >= 0; i--) {
iounmap(uuari->uars[i].map);
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
}
kfree(uuari->count);
kfree(uuari->bitmap);
kfree(uuari->bfs);
kfree(uuari->uars);
return 0;
}
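/*
 * Illustrative sketch, not part of this commit: how a caller could drive
 * the allocation pair above during bringup.  The function name is
 * hypothetical; "dev" is assumed to be an already initialized
 * mlx5_core_dev.
 */
static int example_uuar_bringup(struct mlx5_core_dev *dev,
				struct mlx5_uuar_info *uuari)
{
	int err;

	err = mlx5_alloc_uuars(dev, uuari);	/* UAR pages + blue flame regs */
	if (err)
		return err;

	/* post doorbells through uuari->bfs[n], honoring bf->need_lock */

	return mlx5_free_uuars(dev, uuari);	/* unmaps and frees everything */
}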

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_CMD_H
#define MLX5_CMD_H
#include <linux/types.h>
struct manage_pages_layout {
u64 ptr;
u32 reserved;
u16 num_entries;
u16 func_id;
};
struct mlx5_cmd_alloc_uar_imm_out {
u32 rsvd[3];
u32 uarn;
};
#endif /* MLX5_CMD_H */

View File

@ -0,0 +1,165 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H
#include <rdma/ib_verbs.h>
#include <linux/mlx5/driver.h>
struct mlx5_core_cq {
u32 cqn;
int cqe_sz;
__be32 *set_ci_db;
__be32 *arm_db;
atomic_t refcount;
struct completion free;
unsigned vector;
int irqn;
void (*comp) (struct mlx5_core_cq *);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
struct mlx5_uar *uar;
u32 cons_index;
unsigned arm_sn;
struct mlx5_rsc_debug *dbg;
int pid;
};
enum {
MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06,
MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
};
enum {
MLX5_CQE_OWNER_MASK = 1,
MLX5_CQE_REQ = 0,
MLX5_CQE_RESP_WR_IMM = 1,
MLX5_CQE_RESP_SEND = 2,
MLX5_CQE_RESP_SEND_IMM = 3,
MLX5_CQE_RESP_SEND_INV = 4,
MLX5_CQE_RESIZE_CQ = 0xff, /* TBD */
MLX5_CQE_REQ_ERR = 13,
MLX5_CQE_RESP_ERR = 14,
};
enum {
MLX5_CQ_MODIFY_RESEIZE = 0,
MLX5_CQ_MODIFY_MODER = 1,
MLX5_CQ_MODIFY_MAPPING = 2,
};
struct mlx5_cq_modify_params {
int type;
union {
struct {
u32 page_offset;
u8 log_cq_size;
} resize;
struct {
} moder;
struct {
} mapping;
} params;
};
enum {
CQE_SIZE_64 = 0,
CQE_SIZE_128 = 1,
};
static inline int cqe_sz_to_mlx_sz(u8 size)
{
return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
}
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
{
*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
}
enum {
MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24,
MLX5_CQ_DB_REQ_NOT = 0 << 24
};
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
void __iomem *uar_page,
spinlock_t *doorbell_lock)
{
__be32 doorbell[2];
u32 sn;
u32 ci;
sn = cq->arm_sn & 3;
ci = cq->cons_index & 0xffffff;
*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
/* Make sure that the doorbell record in host memory is
* written before ringing the doorbell via PCI MMIO.
*/
wmb();
doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
doorbell[1] = cpu_to_be32(cq->cqn);
mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
}
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_in *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int type, struct mlx5_cq_modify_params *params);
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
#endif /* MLX5_CORE_CQ_H */
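/*
 * Illustrative sketch, not part of this commit: the consume-then-rearm
 * sequence the inline helpers above are meant for.  The function name is
 * hypothetical; cq and db_lock are assumed to come from the user of the
 * CQ, and cq->cons_index has already been advanced past the polled CQEs.
 */
static void example_rearm_cq(struct mlx5_core_cq *cq, spinlock_t *db_lock)
{
	mlx5_cq_set_ci(cq);	/* publish the new consumer index */
	/* ask for an event on the next completion, solicited or not */
	mlx5_cq_arm(cq, MLX5_CQ_DB_REQ_NOT, cq->uar->map, db_lock);
}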

View File

@ -0,0 +1,893 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0x80
#else
#error Host endianness not defined
#endif
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
MLX5_PCI_CMD_XPORT = 7,
};
enum {
MLX5_EXTENDED_UD_AV = 0x80000000,
};
enum {
MLX5_CQ_STATE_ARMED = 9,
MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
MLX5_CQ_STATE_FIRED = 0xa,
};
enum {
MLX5_STAT_RATE_OFFSET = 5,
};
enum {
MLX5_INLINE_SEG = 0x80000000,
};
enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
MLX5_PERM_REMOTE_READ = 1 << 4,
MLX5_PERM_REMOTE_WRITE = 1 << 5,
MLX5_PERM_ATOMIC = 1 << 6,
MLX5_PERM_UMR_EN = 1 << 7,
};
enum {
MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
};
enum {
MLX5_ACCESS_MODE_PA = 0,
MLX5_ACCESS_MODE_MTT = 1,
MLX5_ACCESS_MODE_KLM = 2
};
enum {
MLX5_MKEY_REMOTE_INVAL = 1 << 24,
MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
MLX5_MKEY_BSF_EN = 1 << 30,
MLX5_MKEY_LEN64 = 1 << 31,
};
enum {
MLX5_EN_RD = (u64)1,
MLX5_EN_WR = (u64)2
};
enum {
MLX5_BF_REGS_PER_PAGE = 4,
MLX5_MAX_UAR_PAGES = 1 << 8,
MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_BF_REGS_PER_PAGE,
};
enum {
MLX5_MKEY_MASK_LEN = 1ull << 0,
MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
MLX5_MKEY_MASK_PD = 1ull << 7,
MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
MLX5_MKEY_MASK_KEY = 1ull << 13,
MLX5_MKEY_MASK_QPN = 1ull << 14,
MLX5_MKEY_MASK_LR = 1ull << 17,
MLX5_MKEY_MASK_LW = 1ull << 18,
MLX5_MKEY_MASK_RR = 1ull << 19,
MLX5_MKEY_MASK_RW = 1ull << 20,
MLX5_MKEY_MASK_A = 1ull << 21,
MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
MLX5_MKEY_MASK_FREE = 1ull << 29,
};
enum mlx5_event {
MLX5_EVENT_TYPE_COMP = 0x0,
MLX5_EVENT_TYPE_PATH_MIG = 0x01,
MLX5_EVENT_TYPE_COMM_EST = 0x02,
MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
MLX5_EVENT_TYPE_CMD = 0x0a,
MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
};
enum {
MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
};
enum {
MLX5_DEV_CAP_FLAG_RC = 1LL << 0,
MLX5_DEV_CAP_FLAG_UC = 1LL << 1,
MLX5_DEV_CAP_FLAG_UD = 1LL << 2,
MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6,
MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32,
MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46,
};
enum {
MLX5_OPCODE_NOP = 0x00,
MLX5_OPCODE_SEND_INVAL = 0x01,
MLX5_OPCODE_RDMA_WRITE = 0x08,
MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
MLX5_OPCODE_SEND = 0x0a,
MLX5_OPCODE_SEND_IMM = 0x0b,
MLX5_OPCODE_RDMA_READ = 0x10,
MLX5_OPCODE_ATOMIC_CS = 0x11,
MLX5_OPCODE_ATOMIC_FA = 0x12,
MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
MLX5_OPCODE_BIND_MW = 0x18,
MLX5_OPCODE_CONFIG_CMD = 0x1f,
MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
MLX5_RECV_OPCODE_SEND = 0x01,
MLX5_RECV_OPCODE_SEND_IMM = 0x02,
MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
MLX5_CQE_OPCODE_ERROR = 0x1e,
MLX5_CQE_OPCODE_RESIZE = 0x16,
MLX5_OPCODE_SET_PSV = 0x20,
MLX5_OPCODE_GET_PSV = 0x21,
MLX5_OPCODE_CHECK_PSV = 0x22,
MLX5_OPCODE_RGET_PSV = 0x26,
MLX5_OPCODE_RCHECK_PSV = 0x27,
MLX5_OPCODE_UMR = 0x25,
};
enum {
MLX5_SET_PORT_RESET_QKEY = 0,
MLX5_SET_PORT_GUID0 = 16,
MLX5_SET_PORT_NODE_GUID = 17,
MLX5_SET_PORT_SYS_GUID = 18,
MLX5_SET_PORT_GID_TABLE = 19,
MLX5_SET_PORT_PKEY_TABLE = 20,
};
enum {
MLX5_MAX_PAGE_SHIFT = 31
};
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
__be16 opmod;
};
struct mlx5_outbox_hdr {
u8 status;
u8 rsvd[3];
__be32 syndrome;
};
struct mlx5_cmd_query_adapter_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_cmd_query_adapter_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[24];
u8 intapin;
u8 rsvd1[13];
__be16 vsd_vendor_id;
u8 vsd[208];
u8 vsd_psid[16];
};
struct mlx5_hca_cap {
u8 rsvd1[16];
u8 log_max_srq_sz;
u8 log_max_qp_sz;
u8 rsvd2;
u8 log_max_qp;
u8 log_max_strq_sz;
u8 log_max_srqs;
u8 rsvd4[2];
u8 rsvd5;
u8 log_max_cq_sz;
u8 rsvd6;
u8 log_max_cq;
u8 log_max_eq_sz;
u8 log_max_mkey;
u8 rsvd7;
u8 log_max_eq;
u8 max_indirection;
u8 log_max_mrw_sz;
u8 log_max_bsf_list_sz;
u8 log_max_klm_list_sz;
u8 rsvd_8_0;
u8 log_max_ra_req_dc;
u8 rsvd_8_1;
u8 log_max_ra_res_dc;
u8 rsvd9;
u8 log_max_ra_req_qp;
u8 rsvd10;
u8 log_max_ra_res_qp;
u8 rsvd11[4];
__be16 max_qp_count;
__be16 rsvd12;
u8 rsvd13;
u8 local_ca_ack_delay;
u8 rsvd14;
u8 num_ports;
u8 log_max_msg;
u8 rsvd15[3];
__be16 stat_rate_support;
u8 rsvd16[2];
__be64 flags;
u8 rsvd17;
u8 uar_sz;
u8 rsvd18;
u8 log_pg_sz;
__be16 bf_log_bf_reg_size;
u8 rsvd19[4];
__be16 max_desc_sz_sq;
u8 rsvd20[2];
__be16 max_desc_sz_rq;
u8 rsvd21[2];
__be16 max_desc_sz_sq_dc;
u8 rsvd22[4];
__be16 max_qp_mcg;
u8 rsvd23;
u8 log_max_mcg;
u8 rsvd24;
u8 log_max_pd;
u8 rsvd25;
u8 log_max_xrcd;
u8 rsvd26[40];
__be32 uar_page_sz;
u8 rsvd27[28];
u8 log_msx_atomic_size_qp;
u8 rsvd28[2];
u8 log_msx_atomic_size_dc;
u8 rsvd29[76];
};
struct mlx5_cmd_query_hca_cap_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_cmd_query_hca_cap_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_hca_cap hca_cap;
};
struct mlx5_cmd_set_hca_cap_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
struct mlx5_hca_cap hca_cap;
};
struct mlx5_cmd_set_hca_cap_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_cmd_init_hca_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];
__be16 profile;
u8 rsvd1[4];
};
struct mlx5_cmd_init_hca_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_cmd_teardown_hca_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];
__be16 profile;
u8 rsvd1[4];
};
struct mlx5_cmd_teardown_hca_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_cmd_layout {
u8 type;
u8 rsvd0[3];
__be32 inlen;
__be64 in_ptr;
__be32 in[4];
__be32 out[4];
__be64 out_ptr;
__be32 outlen;
u8 token;
u8 sig;
u8 rsvd1;
u8 status_own;
};
struct health_buffer {
__be32 assert_var[5];
__be32 rsvd0[3];
__be32 assert_exit_ptr;
__be32 assert_callra;
__be32 rsvd1[2];
__be32 fw_ver;
__be32 hw_id;
__be32 rsvd2;
u8 irisc_index;
u8 synd;
__be16 ext_sync;
};
struct mlx5_init_seg {
__be32 fw_rev;
__be32 cmdif_rev_fw_sub;
__be32 rsvd0[2];
__be32 cmdq_addr_h;
__be32 cmdq_addr_l_sz;
__be32 cmd_dbell;
__be32 rsvd1[121];
struct health_buffer health;
__be32 rsvd2[884];
__be32 health_counter;
__be32 rsvd3[1023];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
};
struct mlx5_eqe_comp {
__be32 reserved[6];
__be32 cqn;
};
struct mlx5_eqe_qp_srq {
__be32 reserved[6];
__be32 qp_srq_n;
};
struct mlx5_eqe_cq_err {
__be32 cqn;
u8 reserved1[7];
u8 syndrome;
};
struct mlx5_eqe_dropped_packet {
};
struct mlx5_eqe_port_state {
u8 reserved0[8];
u8 port;
};
struct mlx5_eqe_gpio {
__be32 reserved0[2];
__be64 gpio_event;
};
struct mlx5_eqe_congestion {
u8 type;
u8 rsvd0;
u8 congestion_level;
};
struct mlx5_eqe_stall_vl {
u8 rsvd0[3];
u8 port_vl;
};
struct mlx5_eqe_cmd {
__be32 vector;
__be32 rsvd[6];
};
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 func_id;
u8 rsvd1[2];
__be16 num_pages;
__be32 rsvd2[5];
};
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
struct mlx5_eqe_comp comp;
struct mlx5_eqe_qp_srq qp_srq;
struct mlx5_eqe_cq_err cq_err;
struct mlx5_eqe_dropped_packet dp;
struct mlx5_eqe_port_state port;
struct mlx5_eqe_gpio gpio;
struct mlx5_eqe_congestion cong;
struct mlx5_eqe_stall_vl stall_vl;
struct mlx5_eqe_page_req req_pages;
} __packed;
struct mlx5_eqe {
u8 rsvd0;
u8 type;
u8 rsvd1;
u8 sub_type;
__be32 rsvd2[7];
union ev_data data;
__be16 rsvd3;
u8 signature;
u8 owner;
} __packed;
struct mlx5_cmd_prot_block {
u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
u8 rsvd0[48];
__be64 next;
__be32 block_num;
u8 rsvd1;
u8 token;
u8 ctrl_sig;
u8 sig;
};
struct mlx5_err_cqe {
u8 rsvd0[32];
__be32 srqn;
u8 rsvd1[18];
u8 vendor_err_synd;
u8 syndrome;
__be32 s_wqe_opcode_qpn;
__be16 wqe_counter;
u8 signature;
u8 op_own;
};
struct mlx5_cqe64 {
u8 rsvd0[17];
u8 ml_path;
u8 rsvd20[4];
__be16 slid;
__be32 flags_rqpn;
u8 rsvd28[4];
__be32 srqn;
__be32 imm_inval_pkey;
u8 rsvd40[4];
__be32 byte_cnt;
__be64 timestamp;
__be32 sop_drop_qpn;
__be16 wqe_counter;
u8 signature;
u8 op_own;
};
struct mlx5_wqe_srq_next_seg {
u8 rsvd0[2];
__be16 next_wqe_index;
u8 signature;
u8 rsvd1[11];
};
union mlx5_ext_cqe {
struct ib_grh grh;
u8 inl[64];
};
struct mlx5_cqe128 {
union mlx5_ext_cqe inl_grh;
struct mlx5_cqe64 cqe64;
};
struct mlx5_srq_ctx {
u8 state_log_sz;
u8 rsvd0[3];
__be32 flags_xrcd;
__be32 pgoff_cqn;
u8 rsvd1[4];
u8 log_pg_sz;
u8 rsvd2[7];
__be32 pd;
__be16 lwm;
__be16 wqe_cnt;
u8 rsvd3[8];
__be64 db_record;
};
struct mlx5_create_srq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_srqn;
u8 rsvd0[4];
struct mlx5_srq_ctx ctx;
u8 rsvd1[208];
__be64 pas[0];
};
struct mlx5_create_srq_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 srqn;
u8 rsvd[4];
};
struct mlx5_destroy_srq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 srqn;
u8 rsvd[4];
};
struct mlx5_destroy_srq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_srq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 srqn;
u8 rsvd0[4];
};
struct mlx5_query_srq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_srq_ctx ctx;
u8 rsvd1[32];
__be64 pas[0];
};
struct mlx5_arm_srq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 srqn;
__be16 rsvd;
__be16 lwm;
};
struct mlx5_arm_srq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_cq_context {
u8 status;
u8 cqe_sz_flags;
u8 st;
u8 rsvd3;
u8 rsvd4[6];
__be16 page_offset;
__be32 log_sz_usr_page;
__be16 cq_period;
__be16 cq_max_count;
__be16 rsvd20;
__be16 c_eqn;
u8 log_pg_sz;
u8 rsvd25[7];
__be32 last_notified_index;
__be32 solicit_producer_index;
__be32 consumer_counter;
__be32 producer_counter;
u8 rsvd48[8];
__be64 db_record_addr;
};
struct mlx5_create_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_cqn;
u8 rsvdx[4];
struct mlx5_cq_context ctx;
u8 rsvd6[192];
__be64 pas[0];
};
struct mlx5_create_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_destroy_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_destroy_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_query_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_query_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_cq_context ctx;
u8 rsvd6[16];
__be64 pas[0];
};
struct mlx5_eq_context {
u8 status;
u8 ec_oi;
u8 st;
u8 rsvd2[7];
__be16 page_pffset;
__be32 log_sz_usr_page;
u8 rsvd3[7];
u8 intr;
u8 log_page_size;
u8 rsvd4[15];
__be32 consumer_counter;
__be32 produser_counter;
u8 rsvd5[16];
};
struct mlx5_create_eq_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[3];
u8 input_eqn;
u8 rsvd1[4];
struct mlx5_eq_context ctx;
u8 rsvd2[8];
__be64 events_mask;
u8 rsvd3[176];
__be64 pas[0];
};
struct mlx5_create_eq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[3];
u8 eq_number;
u8 rsvd1[4];
};
struct mlx5_destroy_eq_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[3];
u8 eqn;
u8 rsvd1[4];
};
struct mlx5_destroy_eq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_map_eq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be64 mask;
u8 mu;
u8 rsvd0[2];
u8 eqn;
u8 rsvd1[24];
};
struct mlx5_map_eq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_eq_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[3];
u8 eqn;
u8 rsvd1[4];
};
struct mlx5_query_eq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
struct mlx5_eq_context ctx;
};
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
* bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation
*/
u8 status;
u8 pcie_control;
u8 flags;
u8 version;
__be32 qpn_mkey7_0;
u8 rsvd1[4];
__be32 flags_pd;
__be64 start_addr;
__be64 len;
__be32 bsfs_octo_size;
u8 rsvd2[16];
__be32 xlt_oct_size;
u8 rsvd3[3];
u8 log2_page_size;
u8 rsvd4[4];
};
struct mlx5_query_special_ctxs_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_special_ctxs_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 dump_fill_mkey;
__be32 reserved_lkey;
};
struct mlx5_create_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_mkey_index;
u8 rsvd0[4];
struct mlx5_mkey_seg seg;
u8 rsvd1[16];
__be32 xlat_oct_act_size;
__be32 bsf_coto_act_size;
u8 rsvd2[168];
__be64 pas[0];
};
struct mlx5_create_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 mkey;
u8 rsvd[4];
};
struct mlx5_destroy_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 mkey;
u8 rsvd[4];
};
struct mlx5_destroy_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 mkey;
};
struct mlx5_query_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
__be64 pas[0];
};
struct mlx5_modify_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 mkey;
__be64 pas[0];
};
struct mlx5_modify_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
};
struct mlx5_dump_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
};
struct mlx5_dump_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 mkey;
};
struct mlx5_mad_ifc_mbox_in {
struct mlx5_inbox_hdr hdr;
__be16 remote_lid;
u8 rsvd0;
u8 port;
u8 rsvd1[4];
u8 data[256];
};
struct mlx5_mad_ifc_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
u8 data[256];
};
struct mlx5_access_reg_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];
__be16 register_id;
__be32 arg;
__be32 data[0];
};
struct mlx5_access_reg_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
__be32 data[0];
};
#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};
#endif /* MLX5_DEVICE_H */

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_DOORBELL_H
#define MLX5_DOORBELL_H
#define MLX5_BF_OFFSET 0x800
#define MLX5_CQ_DOORBELL 0x20
#if BITS_PER_LONG == 64
/* Assume that we can just write a 64-bit doorbell atomically. s390
* actually doesn't have writeq() but S/390 systems don't even have
* PCI so we won't worry about it.
*/
#define MLX5_DECLARE_DOORBELL_LOCK(name)
#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
spinlock_t *doorbell_lock)
{
__raw_writeq(*(u64 *)val, dest);
}
#else
/* Just fall back to a spinlock to protect the doorbell if
* BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
* MMIO writes.
*/
#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
spinlock_t *doorbell_lock)
{
unsigned long flags;
spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
__raw_writel((__force u32) val[1], dest + 4);
spin_unlock_irqrestore(doorbell_lock, flags);
}
#endif
#endif /* MLX5_DOORBELL_H */
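/*
 * Illustrative sketch, not part of this commit: embedding a doorbell lock
 * so mlx5_write64() is safe on both 32-bit and 64-bit kernels.  The
 * structure and function names are hypothetical; on 64-bit builds the
 * lock declaration compiles away and MLX5_GET_DOORBELL_LOCK() yields NULL.
 */
struct example_db_ctx {
	void __iomem *uar_page;
	MLX5_DECLARE_DOORBELL_LOCK(db_lock);
};

static inline void example_ring_cq_db(struct example_db_ctx *ctx, __be32 db[2])
{
	/* MLX5_INIT_DOORBELL_LOCK(MLX5_GET_DOORBELL_LOCK(&ctx->db_lock))
	 * must have been called once at setup time.
	 */
	mlx5_write64(db, ctx->uar_page + MLX5_CQ_DOORBELL,
		     MLX5_GET_DOORBELL_LOCK(&ctx->db_lock));
}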

View File

@ -0,0 +1,769 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
enum {
MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
};
enum {
/* two hours for the sake of bringup. Generally, commands must always
* complete and we may need to revisit this timeout value
*/
MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
enum {
CMD_OWNER_SW = 0x0,
CMD_OWNER_HW = 0x1,
CMD_STATUS_SUCCESS = 0,
};
enum mlx5_sqp_t {
MLX5_SQP_SMI = 0,
MLX5_SQP_GSI = 1,
MLX5_SQP_IEEE_1588 = 2,
MLX5_SQP_SNIFFER = 3,
MLX5_SQP_SYNC_UMR = 4,
};
enum {
MLX5_MAX_PORTS = 2,
};
enum {
MLX5_EQ_VEC_PAGES = 0,
MLX5_EQ_VEC_CMD = 1,
MLX5_EQ_VEC_ASYNC = 2,
MLX5_EQ_VEC_COMP_BASE,
};
enum {
MLX5_MAX_EQ_NAME = 20
};
enum {
MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
MLX5_ATOMIC_MODE_CX = 2 << 16,
MLX5_ATOMIC_MODE_8B = 3 << 16,
MLX5_ATOMIC_MODE_16B = 4 << 16,
MLX5_ATOMIC_MODE_32B = 5 << 16,
MLX5_ATOMIC_MODE_64B = 6 << 16,
MLX5_ATOMIC_MODE_128B = 7 << 16,
MLX5_ATOMIC_MODE_256B = 8 << 16,
};
enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
MLX5_CMD_OP_QUERY_PAGES = 0x107,
MLX5_CMD_OP_MANAGE_PAGES = 0x108,
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
MLX5_CMD_OP_CREATE_EQ = 0x301,
MLX5_CMD_OP_DESTROY_EQ = 0x302,
MLX5_CMD_OP_QUERY_EQ = 0x303,
MLX5_CMD_OP_CREATE_CQ = 0x400,
MLX5_CMD_OP_DESTROY_CQ = 0x401,
MLX5_CMD_OP_QUERY_CQ = 0x402,
MLX5_CMD_OP_MODIFY_CQ = 0x403,
MLX5_CMD_OP_CREATE_QP = 0x500,
MLX5_CMD_OP_DESTROY_QP = 0x501,
MLX5_CMD_OP_RST2INIT_QP = 0x502,
MLX5_CMD_OP_INIT2RTR_QP = 0x503,
MLX5_CMD_OP_RTR2RTS_QP = 0x504,
MLX5_CMD_OP_RTS2RTS_QP = 0x505,
MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
MLX5_CMD_OP_2ERR_QP = 0x507,
MLX5_CMD_OP_RTS2SQD_QP = 0x508,
MLX5_CMD_OP_SQD2RTS_QP = 0x509,
MLX5_CMD_OP_2RST_QP = 0x50a,
MLX5_CMD_OP_QUERY_QP = 0x50b,
MLX5_CMD_OP_CONF_SQP = 0x50c,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
MLX5_CMD_OP_SUSPEND_QP = 0x50f,
MLX5_CMD_OP_UNSUSPEND_QP = 0x510,
MLX5_CMD_OP_SQD2SQD_QP = 0x511,
MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,
MLX5_CMD_OP_CREATE_PSV = 0x600,
MLX5_CMD_OP_DESTROY_PSV = 0x601,
MLX5_CMD_OP_QUERY_PSV = 0x602,
MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,
MLX5_CMD_OP_CREATE_SRQ = 0x700,
MLX5_CMD_OP_DESTROY_SRQ = 0x701,
MLX5_CMD_OP_QUERY_SRQ = 0x702,
MLX5_CMD_OP_ARM_RQ = 0x703,
MLX5_CMD_OP_RESIZE_SRQ = 0x704,
MLX5_CMD_OP_ALLOC_PD = 0x800,
MLX5_CMD_OP_DEALLOC_PD = 0x801,
MLX5_CMD_OP_ALLOC_UAR = 0x802,
MLX5_CMD_OP_DEALLOC_UAR = 0x803,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_MAX = 0x810,
};
enum {
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
MLX5_REG_PMLP = 0, /* TBD */
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
};
enum dbg_rsc_type {
MLX5_DBG_RSC_QP,
MLX5_DBG_RSC_EQ,
MLX5_DBG_RSC_CQ,
};
struct mlx5_field_desc {
struct dentry *dent;
int i;
};
struct mlx5_rsc_debug {
struct mlx5_core_dev *dev;
void *object;
enum dbg_rsc_type type;
struct dentry *root;
struct mlx5_field_desc fields[0];
};
enum mlx5_dev_event {
MLX5_DEV_EVENT_SYS_ERROR,
MLX5_DEV_EVENT_PORT_UP,
MLX5_DEV_EVENT_PORT_DOWN,
MLX5_DEV_EVENT_PORT_INITIALIZED,
MLX5_DEV_EVENT_LID_CHANGE,
MLX5_DEV_EVENT_PKEY_CHANGE,
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
};
struct mlx5_uuar_info {
struct mlx5_uar *uars;
int num_uars;
int num_low_latency_uuars;
unsigned long *bitmap;
unsigned int *count;
struct mlx5_bf *bfs;
/*
* protect uuar allocation data structs
*/
struct mutex lock;
};
struct mlx5_bf {
void __iomem *reg;
void __iomem *regreg;
int buf_size;
struct mlx5_uar *uar;
unsigned long offset;
int need_lock;
/* protect blue flame buffer selection when needed
*/
spinlock_t lock;
/* serialize 64 bit writes when done as two 32 bit accesses
*/
spinlock_t lock32;
int uuarn;
};
struct mlx5_cmd_first {
__be32 data[4];
};
struct mlx5_cmd_msg {
struct list_head list;
struct cache_ent *cache;
u32 len;
struct mlx5_cmd_first first;
struct mlx5_cmd_mailbox *next;
};
struct mlx5_cmd_debug {
struct dentry *dbg_root;
struct dentry *dbg_in;
struct dentry *dbg_out;
struct dentry *dbg_outlen;
struct dentry *dbg_status;
struct dentry *dbg_run;
void *in_msg;
void *out_msg;
u8 status;
u16 inlen;
u16 outlen;
};
struct cache_ent {
/* protect block chain allocations
*/
spinlock_t lock;
struct list_head head;
};
struct cmd_msg_cache {
struct cache_ent large;
struct cache_ent med;
};
struct mlx5_cmd_stats {
u64 sum;
u64 n;
struct dentry *root;
struct dentry *avg;
struct dentry *count;
/* protect command average calculations */
spinlock_t lock;
};
struct mlx5_cmd {
void *cmd_buf;
dma_addr_t dma;
u16 cmdif_rev;
u8 log_sz;
u8 log_stride;
int max_reg_cmds;
int events;
u32 __iomem *vector;
/* protect command queue allocations
*/
spinlock_t alloc_lock;
/* protect token allocations
*/
spinlock_t token_lock;
u8 token;
unsigned long bitmask;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct workqueue_struct *wq;
struct semaphore sem;
struct semaphore pages_sem;
int mode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct pci_pool *pool;
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache;
int checksum_disabled;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
int gid_table_len;
int pkey_table_len;
};
struct mlx5_caps {
u8 log_max_eq;
u8 log_max_cq;
u8 log_max_qp;
u8 log_max_mkey;
u8 log_max_pd;
u8 log_max_srq;
u32 max_cqes;
int max_wqes;
int max_sq_desc_sz;
int max_rq_desc_sz;
u64 flags;
u16 stat_rate_support;
int log_max_msg;
int num_ports;
int max_ra_res_qp;
int max_ra_req_qp;
int max_srq_wqes;
int bf_reg_size;
int bf_regs_per_page;
struct mlx5_port_caps port[MLX5_MAX_PORTS];
u8 ext_port_cap[MLX5_MAX_PORTS];
int max_vf;
u32 reserved_lkey;
u8 local_ca_ack_delay;
u8 log_max_mcg;
u16 max_qp_mcg;
int min_page_sz;
};
struct mlx5_cmd_mailbox {
void *buf;
dma_addr_t dma;
struct mlx5_cmd_mailbox *next;
};
struct mlx5_buf_list {
void *buf;
dma_addr_t map;
};
struct mlx5_buf {
struct mlx5_buf_list direct;
struct mlx5_buf_list *page_list;
int nbufs;
int npages;
int page_shift;
int size;
};
struct mlx5_eq {
struct mlx5_core_dev *dev;
__be32 __iomem *doorbell;
u32 cons_index;
struct mlx5_buf buf;
int size;
u8 irqn;
u8 eqn;
int nent;
u64 mask;
char name[MLX5_MAX_EQ_NAME];
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
};
struct mlx5_core_mr {
u64 iova;
u64 size;
u32 key;
u32 pd;
u32 access;
};
struct mlx5_core_srq {
u32 srqn;
int max;
int max_gs;
int max_avail_gather;
int wqe_shift;
void (*event) (struct mlx5_core_srq *, enum mlx5_event);
atomic_t refcount;
struct completion free;
};
struct mlx5_eq_table {
void __iomem *update_ci;
void __iomem *update_arm_ci;
struct list_head *comp_eq_head;
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
struct msix_entry *msix_arr;
int num_comp_vectors;
/* protect EQs list
*/
spinlock_t lock;
};
struct mlx5_uar {
u32 index;
struct list_head bf_list;
unsigned free_bf_bmap;
void __iomem *wc_map;
void __iomem *map;
};
struct mlx5_core_health {
struct health_buffer __iomem *health;
__be32 __iomem *health_counter;
struct timer_list timer;
struct list_head list;
u32 prev;
int miss_counter;
};
struct mlx5_cq_table {
/* protect radix tree
*/
spinlock_t lock;
struct radix_tree_root tree;
};
struct mlx5_qp_table {
/* protect radix tree
*/
spinlock_t lock;
struct radix_tree_root tree;
};
struct mlx5_srq_table {
/* protect radix tree
*/
spinlock_t lock;
struct radix_tree_root tree;
};
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
/* pages stuff */
struct workqueue_struct *pg_wq;
struct rb_root page_root;
int fw_pages;
int reg_pages;
struct mlx5_core_health health;
struct mlx5_srq_table srq_table;
/* start: qp stuff */
struct mlx5_qp_table qp_table;
struct dentry *qp_debugfs;
struct dentry *eq_debugfs;
struct dentry *cq_debugfs;
struct dentry *cmdif_debugfs;
/* end: qp stuff */
/* start: cq stuff */
struct mlx5_cq_table cq_table;
/* end: cq stuff */
/* start: alloc stuff */
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc stuff */
struct dentry *dbg_root;
/* protect mkey key part */
spinlock_t mkey_lock;
u8 mkey_key;
};
struct mlx5_core_dev {
struct pci_dev *pdev;
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
struct mlx5_caps caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
void (*event) (struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
void *data);
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
};
struct mlx5_db {
__be32 *db;
union {
struct mlx5_db_pgdir *pgdir;
struct mlx5_ib_user_db_page *user_page;
} u;
dma_addr_t dma;
int index;
};
enum {
MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};
enum {
MLX5_COMP_EQ_SIZE = 1024,
};
struct mlx5_db_pgdir {
struct list_head list;
DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
__be32 *db_page;
dma_addr_t db_dma;
};
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
struct mlx5_cmd_msg *in;
struct mlx5_cmd_msg *out;
mlx5_cmd_cbk_t callback;
void *context;
int idx;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
struct mlx5_cmd_layout *lay;
int ret;
int page_queue;
u8 status;
u8 token;
struct timespec ts1;
struct timespec ts2;
};
struct mlx5_pas {
u64 pa;
u8 log_sz;
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
return buf->direct.buf + offset;
else
return buf->page_list[offset >> PAGE_SHIFT].buf +
(offset & (PAGE_SIZE - 1));
}
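/*
 * Illustrative sketch, not part of this commit: mlx5_buf_offset() hides
 * whether an mlx5_buf is physically contiguous or a list of pages, so an
 * entry can be addressed the same way in both layouts.  The function name
 * and the 64-byte stride are assumptions for the example.
 */
static inline void *example_get_cqe(struct mlx5_buf *buf, int n)
{
	return mlx5_buf_offset(buf, n * 64);	/* n-th 64-byte CQE */
}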
extern struct workqueue_struct *mlx5_core_wq;
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
struct ib_field {
size_t struct_offset_bytes;
size_t struct_size_bytes;
int offset_bits;
int size_bits;
};
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
return pci_get_drvdata(pdev);
}
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}
static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->fw_rev) >> 16;
}
static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
static inline void *mlx5_vzalloc(unsigned long size)
{
void *rtn;
rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!rtn)
rtn = vzalloc(size);
return rtn;
}
static inline void mlx5_vfree(const void *addr)
{
if (addr && is_vmalloc_addr(addr))
vfree(addr);
else
kfree(addr);
}
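/*
 * Illustrative sketch, not part of this commit: the intended use of the
 * mlx5_vzalloc()/mlx5_vfree() pair for variably sized command mailboxes,
 * where a long page list can push the inbox past what kmalloc will give.
 * The function name is hypothetical; the buffer is released with
 * mlx5_vfree() regardless of which allocator satisfied it.
 */
static inline void *example_alloc_create_mkey_in(int npages, int *inlen)
{
	*inlen = sizeof(struct mlx5_create_mkey_mbox_in) +
		 npages * sizeof(__be64);
	return mlx5_vzalloc(*inlen);	/* kzalloc first, vzalloc fallback */
}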
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_create_mkey_mbox_in *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
u16 opmod, int port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s16 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_query_eq_mbox_out *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
typedef void (*health_handler_t)(struct pci_dev *pdev, void *buf, int size);
int mlx5_register_health_report_handler(health_handler_t handler);
void mlx5_unregister_health_report_handler(void);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
return mkey >> 8;
}
static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
return mkey_idx << 8;
}
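/*
 * Illustrative sketch, not part of this commit: an mkey as seen on the
 * wire is the 24-bit table index from mlx5_idx_to_mkey() combined with a
 * variant key in the low byte.  The function name is hypothetical.
 */
static inline u32 example_make_mkey(u32 idx, u8 variant_key)
{
	return mlx5_idx_to_mkey(idx) | variant_key;	/* (idx << 8) | key */
}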
enum {
MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1,
MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2,
};
enum {
MAX_MR_CACHE_ENTRIES = 16,
};
struct mlx5_profile {
u64 mask;
u32 log_max_qp;
int cmdif_csum;
struct {
int size;
int limit;
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
#endif /* MLX5_DRIVER_H */

View File

@ -0,0 +1,467 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_QP_H
#define MLX5_QP_H
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
#define MLX5_INVALID_LKEY 0x100
enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
MLX5_QP_OPTPAR_RRE = 1 << 1,
MLX5_QP_OPTPAR_RAE = 1 << 2,
MLX5_QP_OPTPAR_RWE = 1 << 3,
MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
MLX5_QP_OPTPAR_SRQN = 1 << 18,
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
MLX5_QP_OPTPAR_DC_HS = 1 << 20,
MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
};
enum mlx5_qp_state {
MLX5_QP_STATE_RST = 0,
MLX5_QP_STATE_INIT = 1,
MLX5_QP_STATE_RTR = 2,
MLX5_QP_STATE_RTS = 3,
MLX5_QP_STATE_SQER = 4,
MLX5_QP_STATE_SQD = 5,
MLX5_QP_STATE_ERR = 6,
MLX5_QP_STATE_SQ_DRAINING = 7,
MLX5_QP_STATE_SUSPENDED = 9,
MLX5_QP_NUM_STATE
};
enum {
MLX5_QP_ST_RC = 0x0,
MLX5_QP_ST_UC = 0x1,
MLX5_QP_ST_UD = 0x2,
MLX5_QP_ST_XRC = 0x3,
MLX5_QP_ST_MLX = 0x4,
MLX5_QP_ST_DCI = 0x5,
MLX5_QP_ST_DCT = 0x6,
MLX5_QP_ST_QP0 = 0x7,
MLX5_QP_ST_QP1 = 0x8,
MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
MLX5_QP_ST_RAW_IPV6 = 0xa,
MLX5_QP_ST_SNIFFER = 0xb,
MLX5_QP_ST_SYNC_UMR = 0xe,
MLX5_QP_ST_PTP_1588 = 0xd,
MLX5_QP_ST_REG_UMR = 0xc,
MLX5_QP_ST_MAX
};
enum {
MLX5_QP_PM_MIGRATED = 0x3,
MLX5_QP_PM_ARMED = 0x0,
MLX5_QP_PM_REARM = 0x1
};
enum {
MLX5_NON_ZERO_RQ = 0 << 24,
MLX5_SRQ_RQ = 1 << 24,
MLX5_CRQ_RQ = 2 << 24,
MLX5_ZERO_LEN_RQ = 3 << 24
};
enum {
/* params1 */
MLX5_QP_BIT_SRE = 1 << 15,
MLX5_QP_BIT_SWE = 1 << 14,
MLX5_QP_BIT_SAE = 1 << 13,
/* params2 */
MLX5_QP_BIT_RRE = 1 << 15,
MLX5_QP_BIT_RWE = 1 << 14,
MLX5_QP_BIT_RAE = 1 << 13,
MLX5_QP_BIT_RIC = 1 << 4,
};
enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
};
enum {
MLX5_SEND_WQE_BB = 64,
};
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
};
enum {
MLX5_FENCE_MODE_NONE = 0 << 5,
MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
};
enum {
MLX5_QP_LAT_SENSITIVE = 1 << 28,
MLX5_QP_ENABLE_SIG = 1 << 31,
};
enum {
MLX5_RCV_DBR = 0,
MLX5_SND_DBR = 1,
};
struct mlx5_wqe_fmr_seg {
__be32 flags;
__be32 mem_key;
__be64 buf_list;
__be64 start_addr;
__be64 reg_len;
__be32 offset;
__be32 page_size;
u32 reserved[2];
};
struct mlx5_wqe_ctrl_seg {
__be32 opmod_idx_opcode;
__be32 qpn_ds;
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
__be32 imm;
};
struct mlx5_wqe_xrc_seg {
__be32 xrc_srqn;
u8 rsvd[12];
};
struct mlx5_wqe_masked_atomic_seg {
__be64 swap_add;
__be64 compare;
__be64 swap_add_mask;
__be64 compare_mask;
};
struct mlx5_av {
union {
struct {
__be32 qkey;
__be32 reserved;
} qkey;
__be64 dc_key;
} key;
__be32 dqp_dct;
u8 stat_rate_sl;
u8 fl_mlid;
__be16 rlid;
u8 reserved0[10];
u8 tclass;
u8 hop_limit;
__be32 grh_gid_fl;
u8 rgid[16];
};
struct mlx5_wqe_datagram_seg {
struct mlx5_av av;
};
struct mlx5_wqe_raddr_seg {
__be64 raddr;
__be32 rkey;
u32 reserved;
};
struct mlx5_wqe_atomic_seg {
__be64 swap_add;
__be64 compare;
};
struct mlx5_wqe_data_seg {
__be32 byte_count;
__be32 lkey;
__be64 addr;
};
struct mlx5_wqe_umr_ctrl_seg {
u8 flags;
u8 rsvd0[3];
__be16 klm_octowords;
__be16 bsf_octowords;
__be64 mkey_mask;
u8 rsvd1[32];
};
struct mlx5_seg_set_psv {
__be32 psv_num;
__be16 syndrome;
__be16 status;
__be32 transient_sig;
__be32 ref_tag;
};
struct mlx5_seg_get_psv {
u8 rsvd[19];
u8 num_psv;
__be32 l_key;
__be64 va;
__be32 psv_index[4];
};
struct mlx5_seg_check_psv {
u8 rsvd0[2];
__be16 err_coalescing_op;
u8 rsvd1[2];
__be16 xport_err_op;
u8 rsvd2[2];
__be16 xport_err_mask;
u8 rsvd3[7];
u8 num_psv;
__be32 l_key;
__be64 va;
__be32 psv_index[4];
};
struct mlx5_rwqe_sig {
u8 rsvd0[4];
u8 signature;
u8 rsvd1[11];
};
struct mlx5_wqe_signature_seg {
u8 rsvd0[4];
u8 signature;
u8 rsvd1[11];
};
struct mlx5_wqe_inline_seg {
__be32 byte_count;
};
struct mlx5_core_qp {
void (*event) (struct mlx5_core_qp *, int);
int qpn;
atomic_t refcount;
struct completion free;
struct mlx5_rsc_debug *dbg;
int pid;
};
struct mlx5_qp_path {
u8 fl;
u8 rsvd3;
u8 free_ar;
u8 pkey_index;
u8 rsvd0;
u8 grh_mlid;
__be16 rlid;
u8 ackto_lt;
u8 mgid_index;
u8 static_rate;
u8 hop_limit;
__be32 tclass_flowlabel;
u8 rgid[16];
u8 rsvd1[4];
u8 sl;
u8 port;
u8 rsvd2[6];
};
struct mlx5_qp_context {
__be32 flags;
__be32 flags_pd;
u8 mtu_msgmax;
u8 rq_size_stride;
__be16 sq_crq_size;
__be32 qp_counter_set_usr_page;
__be32 wire_qpn;
__be32 log_pg_sz_remote_qpn;
struct mlx5_qp_path pri_path;
struct mlx5_qp_path alt_path;
__be32 params1;
u8 reserved2[4];
__be32 next_send_psn;
__be32 cqn_send;
u8 reserved3[8];
__be32 last_acked_psn;
__be32 ssn;
__be32 params2;
__be32 rnr_nextrecvpsn;
__be32 xrcd;
__be32 cqn_recv;
__be64 db_rec_addr;
__be32 qkey;
__be32 rq_type_srqn;
__be32 rmsn;
__be16 hw_sq_wqe_counter;
__be16 sw_sq_wqe_counter;
__be16 hw_rcyclic_byte_counter;
__be16 hw_rq_counter;
__be16 sw_rcyclic_byte_counter;
__be16 sw_rq_counter;
u8 rsvd0[5];
u8 cgs;
u8 cs_req;
u8 cs_res;
__be64 dc_access_key;
u8 rsvd1[24];
};
struct mlx5_create_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_qpn;
u8 rsvd0[4];
__be32 opt_param_mask;
u8 rsvd1[4];
struct mlx5_qp_context ctx;
u8 rsvd3[16];
__be64 pas[0];
};
struct mlx5_create_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 qpn;
u8 rsvd0[4];
};
struct mlx5_destroy_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
u8 rsvd0[4];
};
struct mlx5_destroy_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_modify_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
u8 rsvd1[4];
__be32 optparam;
u8 rsvd0[4];
struct mlx5_qp_context ctx;
};
struct mlx5_modify_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_query_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
u8 rsvd[4];
};
struct mlx5_query_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd1[8];
__be32 optparam;
u8 rsvd0[4];
struct mlx5_qp_context ctx;
u8 rsvd2[16];
__be64 pas[0];
};
struct mlx5_conf_sqp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
u8 rsvd[3];
u8 type;
};
struct mlx5_conf_sqp_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_alloc_xrcd_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_alloc_xrcd_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 xrcdn;
u8 rsvd[4];
};
struct mlx5_dealloc_xrcd_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 xrcdn;
u8 rsvd[4];
};
struct mlx5_dealloc_xrcd_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
enum mlx5_qp_state new_state,
struct mlx5_modify_qp_mbox_in *in, int sqd_event,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
struct mlx5_query_qp_mbox_out *out, int outlen);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
#endif /* MLX5_QP_H */
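/*
 * Illustrative sketch, not part of this commit: resolving a QP number to
 * its driver object with __mlx5_qp_lookup() while keeping the object
 * alive across the callback, following the refcount/free pattern of
 * struct mlx5_core_qp.  The function name is hypothetical, and a real
 * event path may need the irq-safe lock variants.
 */
static void example_dispatch_qp_event(struct mlx5_core_dev *dev, u32 qpn,
				      int event_type)
{
	struct mlx5_core_qp *qp;

	spin_lock(&dev->priv.qp_table.lock);
	qp = __mlx5_qp_lookup(dev, qpn);	/* radix tree keyed by qpn */
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->priv.qp_table.lock);

	if (!qp)
		return;

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);	/* last ref gone: wake the destroyer */
}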

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_SRQ_H
#define MLX5_SRQ_H
#include <linux/mlx5/driver.h>
void mlx5_init_srq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
#endif /* MLX5_SRQ_H */