Merge branch 'cxgb4-rdma'

Raju Rangoju says:

====================
Add support for RDMA enhancements in cxgb4

Allocate the HW resources and provide the necessary routines for the
upper-layer driver (rdma/iw_cxgb4) to enable RDMA SRQ support for
Chelsio adapters.

Advertise support for the write-with-immediate work request.
Advertise support for the write-with-completion work request.

v3: modified memory allocation as per Stefano's suggestion

v2: fixed the patching issues and also addressed the following
    review comments from Stefano Brivio:
 - use kvzalloc() instead of vzalloc()
 - use #define instead of enum
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2018-03-22 11:59:12 -04:00
commit bc48740bcd
9 changed files with 321 additions and 1 deletion
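For orientation, here is a minimal sketch of how an upper-layer driver could consume the two new capability flags exported through struct cxgb4_lld_info. The helper name and the surrounding ULD state are hypothetical; only the lld fields come from this series.

#include "cxgb4_uld.h"

/* Hypothetical ULD helper: cache the new firmware capability bits at
 * attach time so the RDMA code only issues WRITE_WITH_IMMEDIATE or
 * WRITE_CMPL work requests when the firmware advertises them.
 */
static void example_cache_rdma_caps(const struct cxgb4_lld_info *lld,
				    bool *use_write_w_imm,
				    bool *use_write_cmpl)
{
	*use_write_w_imm = lld->write_w_imm_support;
	*use_write_cmpl = lld->write_cmpl_support;
}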

drivers/net/ethernet/chelsio/cxgb4/Makefile

@ -6,7 +6,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
-	      cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
+	      cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
 	      cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
 	      cudbg_common.o cudbg_lib.o cudbg_zlib.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o

drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

@ -390,6 +390,8 @@ struct adapter_params {
* used by the Port
*/
u8 mps_bg_map[MAX_NPORTS]; /* MPS Buffer Group Map */
bool write_w_imm_support; /* FW supports WRITE_WITH_IMMEDIATE */
bool write_cmpl_support; /* FW supports WRITE_CMPL */
};
/* State needed to monitor the forward progress of SGE Ingress DMA activities
@ -960,6 +962,8 @@ struct adapter {
/* HMA */
struct hma_data hma;
struct srq_data *srq;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be

drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c

@ -75,6 +75,7 @@
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
@ -586,6 +587,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
@ -4467,6 +4472,20 @@ static int adap_init0(struct adapter *adap)
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SRQ_START);
		params[1] = FW_PARAM_PFVF(SRQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (!ret) {
			adap->vres.srq.start = val[0];
			adap->vres.srq.size = val[1] - val[0] + 1;
		}
		if (adap->vres.srq.size) {
			adap->srq = t4_init_srq(adap->vres.srq.size);
			if (!adap->srq)
				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
		}
		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
@ -4500,6 +4519,18 @@ static int adap_init0(struct adapter *adap)
"max_ordird_qp %d max_ird_adapter %d\n",
adap->params.max_ordird_qp,
adap->params.max_ird_adapter);
/* Enable write_with_immediate if FW supports it */
params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
val);
adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
/* Enable write_cmpl if FW supports it */
params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
val);
adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
adap->num_ofld_uld += 2;
}
if (caps_cmd.iscsicaps) {
@ -5135,6 +5166,7 @@ static void free_some_resources(struct adapter *adapter)
kvfree(adapter->smt);
kvfree(adapter->l2t);
kvfree(adapter->srq);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
cxgb4_cleanup_tc_flower(adapter);

drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c

@ -666,6 +666,8 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
lld->nodeid = dev_to_node(adap->pdev_dev);
lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
lld->write_w_imm_support = adap->params.write_w_imm_support;
lld->write_cmpl_support = adap->params.write_cmpl_support;
}
static void uld_attach(struct adapter *adap, unsigned int uld)

drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h

@ -284,6 +284,7 @@ struct cxgb4_virt_res { /* virtualized HW resources */
struct cxgb4_range iscsi;
struct cxgb4_range stag;
struct cxgb4_range rq;
struct cxgb4_range srq;
struct cxgb4_range pbl;
struct cxgb4_range qp;
struct cxgb4_range cq;
@ -353,6 +354,8 @@ struct cxgb4_lld_info {
void **iscsi_ppm; /* iscsi page pod manager */
int nodeid; /* device numa node id */
bool fr_nsmr_tpte_wr_support; /* FW supports FR_NSMR_TPTE_WR */
bool write_w_imm_support; /* FW supports WRITE_WITH_IMMEDIATE */
bool write_cmpl_support; /* FW supports WRITE_CMPL WR */
};
struct cxgb4_uld_info {

drivers/net/ethernet/chelsio/cxgb4/srq.c (new file)

@ -0,0 +1,138 @@
/*
* This file is part of the Chelsio T6 Ethernet driver for Linux.
*
* Copyright (c) 2017-2018 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "cxgb4.h"
#include "t4_msg.h"
#include "srq.h"
struct srq_data *t4_init_srq(int srq_size)
{
	struct srq_data *s;

	s = kvzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return NULL;

	s->srq_size = srq_size;
	init_completion(&s->comp);
	mutex_init(&s->lock);

	return s;
}
/* cxgb4_get_srq_entry: read the SRQ table entry
 * @dev: Pointer to the net_device
 * @srq_idx: Index of the srq table entry
 * @entryp: pointer to the srq entry
 *
 * Sends a CPL_SRQ_TABLE_REQ message for the given index.
 * The contents are returned in a CPL_SRQ_TABLE_RPL message.
 *
 * Returns zero if the read is successful, else an error
 * number is returned. The caller should not use the srq
 * entry if the return value is non-zero.
 */
int cxgb4_get_srq_entry(struct net_device *dev,
			int srq_idx, struct srq_entry *entryp)
{
	struct cpl_srq_table_req *req;
	struct adapter *adap;
	struct sk_buff *skb;
	struct srq_data *s;
	int rc = -ENODEV;

	adap = netdev2adap(dev);
	s = adap->srq;

	if (!(adap->flags & FULL_INIT_DONE) || !s)
		goto out;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_srq_table_req *)
		__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
					      TID_TID_V(srq_idx) |
					      TID_QID_V(adap->sge.fw_evtq.abs_id)));
	req->idx = srq_idx;

	mutex_lock(&s->lock);
	s->entryp = entryp;
	t4_mgmt_tx(adap, skb);

	rc = wait_for_completion_timeout(&s->comp, SRQ_WAIT_TO);
	if (rc)
		rc = 0;
	else /* !rc means we timed out */
		rc = -ETIMEDOUT;

	WARN_ON_ONCE(entryp->idx != srq_idx);
	mutex_unlock(&s->lock);
out:
	return rc;
}
EXPORT_SYMBOL(cxgb4_get_srq_entry);
void do_srq_table_rpl(struct adapter *adap,
		      const struct cpl_srq_table_rpl *rpl)
{
	unsigned int idx = TID_TID_G(GET_TID(rpl));
	struct srq_data *s = adap->srq;
	struct srq_entry *e;

	if (unlikely(rpl->status != CPL_CONTAINS_READ_RPL)) {
		dev_err(adap->pdev_dev,
			"Unexpected SRQ_TABLE_RPL status %u for entry %u\n",
			rpl->status, idx);
		goto out;
	}

	/* Store the read entry */
	e = s->entryp;
	e->valid = 1;
	e->idx = idx;
	e->pdid = SRQT_PDID_G(be64_to_cpu(rpl->rsvd_pdid));
	e->qlen = SRQT_QLEN_G(be32_to_cpu(rpl->qlen_qbase));
	e->qbase = SRQT_QBASE_G(be32_to_cpu(rpl->qlen_qbase));
	e->cur_msn = be16_to_cpu(rpl->cur_msn);
	e->max_msn = be16_to_cpu(rpl->max_msn);
out:
	complete(&s->comp);
}
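A rough usage sketch for the routine above (not part of this series; the caller and its error handling are illustrative): rdma/iw_cxgb4 would typically call cxgb4_get_srq_entry() to read back one SRQ table entry.

#include <linux/netdevice.h>
#include "cxgb4.h"
#include "srq.h"

/* Illustrative caller: read SRQ table entry 'idx' and log its contents.
 * cxgb4_get_srq_entry() blocks for up to SRQ_WAIT_TO jiffies waiting for
 * the CPL_SRQ_TABLE_RPL that do_srq_table_rpl() completes on.
 */
static int example_dump_srq_entry(struct net_device *dev, int idx)
{
	struct srq_entry entry = {};
	int ret;

	ret = cxgb4_get_srq_entry(dev, idx, &entry);
	if (ret) {
		pr_warn("reading SRQ entry %d failed: %d\n", idx, ret);
		return ret;
	}

	pr_info("SRQ %u: pdid %u qbase 0x%x qlen %u cur_msn %u max_msn %u\n",
		entry.idx, entry.pdid, entry.qbase, entry.qlen,
		entry.cur_msn, entry.max_msn);
	return 0;
}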

drivers/net/ethernet/chelsio/cxgb4/srq.h (new file)

@ -0,0 +1,65 @@
/*
* This file is part of the Chelsio T6 Ethernet driver for Linux.
*
* Copyright (c) 2017-2018 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_SRQ_H
#define __CXGB4_SRQ_H
struct adapter;
struct cpl_srq_table_rpl;
#define SRQ_WAIT_TO (HZ * 5)
struct srq_entry {
	u8 valid;
	u8 idx;
	u8 qlen;
	u16 pdid;
	u16 cur_msn;
	u16 max_msn;
	u32 qbase;
};

struct srq_data {
	unsigned int srq_size;
	struct srq_entry *entryp;
	struct completion comp;
	struct mutex lock; /* generic mutex for srq data */
};
struct srq_data *t4_init_srq(int srq_size);
int cxgb4_get_srq_entry(struct net_device *dev,
int srq_idx, struct srq_entry *entryp);
void do_srq_table_rpl(struct adapter *adap,
const struct cpl_srq_table_rpl *rpl);
#endif /* __CXGB4_SRQ_H */

drivers/net/ethernet/chelsio/cxgb4/t4_msg.h

@ -52,6 +52,7 @@ enum {
CPL_L2T_WRITE_REQ = 0x12,
CPL_SMT_WRITE_REQ = 0x14,
CPL_TID_RELEASE = 0x1A,
CPL_SRQ_TABLE_REQ = 0x1C,
CPL_TX_DATA_ISO = 0x1F,
CPL_CLOSE_LISTSRV_RPL = 0x20,
@ -102,6 +103,7 @@ enum {
CPL_FW4_MSG = 0xC0,
CPL_FW4_PLD = 0xC1,
CPL_FW4_ACK = 0xC3,
CPL_SRQ_TABLE_RPL = 0xCC,
CPL_RX_PHYS_DSGL = 0xD0,
@ -136,6 +138,8 @@ enum CPL_error {
CPL_ERR_KEEPALV_NEG_ADVICE = 37,
CPL_ERR_ABORT_FAILED = 42,
CPL_ERR_IWARP_FLM = 50,
CPL_CONTAINS_READ_RPL = 60,
CPL_CONTAINS_WRITE_RPL = 61,
};
enum {
@ -198,6 +202,7 @@ union opcode_tid {
/* partitioning of TID fields that also carry a queue id */
#define TID_TID_S 0
#define TID_TID_M 0x3fff
#define TID_TID_V(x) ((x) << TID_TID_S)
#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
#define TID_QID_S 14
@ -743,6 +748,22 @@ struct cpl_abort_req_rss {
u8 status;
};
struct cpl_abort_req_rss6 {
	WR_HDR;
	union opcode_tid ot;
	__u32 srqidx_status;
};
#define ABORT_RSS_STATUS_S 0
#define ABORT_RSS_STATUS_M 0xff
#define ABORT_RSS_STATUS_V(x) ((x) << ABORT_RSS_STATUS_S)
#define ABORT_RSS_STATUS_G(x) (((x) >> ABORT_RSS_STATUS_S) & ABORT_RSS_STATUS_M)
#define ABORT_RSS_SRQIDX_S 8
#define ABORT_RSS_SRQIDX_M 0xffffff
#define ABORT_RSS_SRQIDX_V(x) ((x) << ABORT_RSS_SRQIDX_S)
#define ABORT_RSS_SRQIDX_G(x) (((x) >> ABORT_RSS_SRQIDX_S) & ABORT_RSS_SRQIDX_M)
struct cpl_abort_req {
WR_HDR;
union opcode_tid ot;
@ -758,6 +779,11 @@ struct cpl_abort_rpl_rss {
u8 status;
};
struct cpl_abort_rpl_rss6 {
	union opcode_tid ot;
	__u32 srqidx_status;
};
struct cpl_abort_rpl {
WR_HDR;
union opcode_tid ot;
@ -2112,4 +2138,49 @@ enum {
X_CPL_RX_MPS_PKT_TYPE_QFC = 1 << 2,
X_CPL_RX_MPS_PKT_TYPE_PTP = 1 << 3
};
struct cpl_srq_table_req {
	WR_HDR;
	union opcode_tid ot;
	__u8 status;
	__u8 rsvd[2];
	__u8 idx;
	__be64 rsvd_pdid;
	__be32 qlen_qbase;
	__be16 cur_msn;
	__be16 max_msn;
};

struct cpl_srq_table_rpl {
	union opcode_tid ot;
	__u8 status;
	__u8 rsvd[2];
	__u8 idx;
	__be64 rsvd_pdid;
	__be32 qlen_qbase;
	__be16 cur_msn;
	__be16 max_msn;
};
/* cpl_srq_table_{req,rpl}.params fields */
#define SRQT_QLEN_S 28
#define SRQT_QLEN_M 0xF
#define SRQT_QLEN_V(x) ((x) << SRQT_QLEN_S)
#define SRQT_QLEN_G(x) (((x) >> SRQT_QLEN_S) & SRQT_QLEN_M)
#define SRQT_QBASE_S 0
#define SRQT_QBASE_M 0x3FFFFFF
#define SRQT_QBASE_V(x) ((x) << SRQT_QBASE_S)
#define SRQT_QBASE_G(x) (((x) >> SRQT_QBASE_S) & SRQT_QBASE_M)
#define SRQT_PDID_S 0
#define SRQT_PDID_M 0xFF
#define SRQT_PDID_V(x) ((x) << SRQT_PDID_S)
#define SRQT_PDID_G(x) (((x) >> SRQT_PDID_S) & SRQT_PDID_M)
#define SRQT_IDX_S 0
#define SRQT_IDX_M 0xF
#define SRQT_IDX_V(x) ((x) << SRQT_IDX_S)
#define SRQT_IDX_G(x) (((x) >> SRQT_IDX_S) & SRQT_IDX_M)
#endif /* __T4_MSG_H */
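As an aside, a small sketch of how the new ABORT_RSS_* field macros are meant to split the srqidx_status word of the six-byte abort messages. The helper name is illustrative (not from this patch), and the byte-order conversion assumes the word is carried big-endian on the wire like other CPL fields.

/* Illustrative helper: extract the abort status from the low 8 bits and
 * the SRQ index from the upper 24 bits of cpl_abort_req_rss6::srqidx_status,
 * using the definitions from t4_msg.h above.
 */
static inline void example_parse_abort_rss6(const struct cpl_abort_req_rss6 *p,
					    u8 *status, u32 *srqidx)
{
	u32 v = be32_to_cpu((__force __be32)p->srqidx_status);

	*status = ABORT_RSS_STATUS_G(v);
	*srqidx = ABORT_RSS_SRQIDX_G(v);
}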

drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h

@ -101,6 +101,7 @@ enum fw_wr_opcodes {
FW_RI_BIND_MW_WR = 0x18,
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_FR_NSMR_TPTE_WR = 0x20,
FW_RI_RDMA_WRITE_CMPL_WR = 0x21,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_ISCSI_TX_DATA_WR = 0x45,
FW_PTP_TX_PKT_WR = 0x46,
@ -1213,6 +1214,8 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D,
FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E,
FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20,
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
};
/*
@ -1244,6 +1247,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16,
FW_PARAMS_PARAM_PFVF_CQ_START = 0x17,
FW_PARAMS_PARAM_PFVF_CQ_END = 0x18,
FW_PARAMS_PARAM_PFVF_SRQ_START = 0x19,
FW_PARAMS_PARAM_PFVF_SRQ_END = 0x1A,
FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
FW_PARAMS_PARAM_PFVF_VIID = 0x24,
FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,