
net/mlx5_core: use set/get macros in device caps

Transform the device-capabilities-related commands to use set/get macros to
manipulate command mailboxes.

Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eli Cohen 2014-10-02 12:19:44 +03:00 committed by David S. Miller
parent d29b796ada
commit b775516b04
5 changed files with 297 additions and 167 deletions
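
The MLX5_SET()/MLX5_GET() macros referred to in the commit message treat a command mailbox as an opaque big-endian buffer and derive each field's dword index, shift and mask from the bit layouts declared in mlx5_ifc.h, instead of poking members of a hand-written mailbox struct. Below is a minimal, stand-alone C sketch of that idiom; the field descriptors, helper names and 16-byte layout are invented for illustration and are not the kernel's actual implementation.

/*
 * Simplified user-space illustration of the set/get mailbox idiom
 * (not the kernel's MLX5_SET/MLX5_GET implementation; layout and
 * helper names are made up for this sketch).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl/ntohl: command mailboxes are big-endian */

/* A field is a bit offset from the start of the buffer plus a width in
 * bits, which is exactly the information the mlx5_ifc_*_bits layouts
 * encode. */
struct field_desc {
	unsigned int bit_off;
	unsigned int bit_sz;
};

/* Hypothetical layout: 16-bit opcode at bit 0, 16-bit op_mod at bit 48. */
static const struct field_desc opcode = { .bit_off = 0,  .bit_sz = 16 };
static const struct field_desc op_mod = { .bit_off = 48, .bit_sz = 16 };

static void buf_set(void *buf, struct field_desc f, uint32_t val)
{
	uint32_t *dw = (uint32_t *)buf + f.bit_off / 32;
	unsigned int shift = 32 - f.bit_sz - (f.bit_off & 31);
	uint32_t fmask = (f.bit_sz >= 32) ? ~0u : ((1u << f.bit_sz) - 1);
	uint32_t host = ntohl(*dw);

	host = (host & ~(fmask << shift)) | ((val & fmask) << shift);
	*dw = htonl(host);
}

static uint32_t buf_get(const void *buf, struct field_desc f)
{
	const uint32_t *dw = (const uint32_t *)buf + f.bit_off / 32;
	unsigned int shift = 32 - f.bit_sz - (f.bit_off & 31);
	uint32_t fmask = (f.bit_sz >= 32) ? ~0u : ((1u << f.bit_sz) - 1);

	return (ntohl(*dw) >> shift) & fmask;
}

int main(void)
{
	uint32_t in[4];			/* cf. u8 in[MLX5_ST_SZ_BYTES(...)] */

	memset(in, 0, sizeof(in));
	buf_set(in, opcode, 0x100);	/* cf. MLX5_SET(..., opcode, ...) */
	buf_set(in, op_mod, 0x1);	/* cf. MLX5_SET(..., op_mod, opmod) */
	printf("opcode 0x%x, op_mod 0x%x\n",
	       buf_get(in, opcode), buf_get(in, op_mod));
	return 0;
}

The point of the idiom is that only the layout description changes when the firmware interface changes; callers keep issuing the same set/get calls, which is what the patch below achieves by describing the HCA capability page in mlx5_ifc_cmd_hca_cap_bits.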


@@ -1537,3 +1537,20 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
return cmd_status_to_err(hdr->status);
}
int mlx5_cmd_status_to_err_v2(void *ptr)
{
u32 syndrome;
u8 status;
status = be32_to_cpu(*(__be32 *)ptr) >> 24;
if (!status)
return 0;
syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
cmd_status_str(status), status, syndrome);
return cmd_status_to_err(status);
}


@@ -43,6 +43,7 @@
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#define DRIVER_NAME "mlx5_core"
@@ -277,18 +278,20 @@ static u16 to_fw_pkey_sz(u32 size)
/* selectively copy writable fields clearing any reserved area
*/
static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_general_caps *from)
static void copy_rw_fields(void *to, struct mlx5_caps *from)
{
__be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
u64 v64;
to->log_max_qp = from->log_max_qp & 0x1f;
to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f;
to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f;
to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f;
to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f;
to->pkey_table_size = cpu_to_be16(to_fw_pkey_sz(from->pkey_table_size));
v64 = from->flags & MLX5_CAP_BITS_RW_MASK;
to->flags = cpu_to_be64(v64);
MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
*flags_off = cpu_to_be64(v64);
}
static u16 get_pkey_table_size(int pkey)
@@ -299,55 +302,47 @@ static u16 get_pkey_table_size(int pkey)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
}
static void fw2drv_caps(struct mlx5_caps *caps,
struct mlx5_cmd_query_hca_cap_mbox_out *out)
static void fw2drv_caps(struct mlx5_caps *caps, void *out)
{
struct mlx5_general_caps *gen = &caps->gen;
u16 t16;
gen->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
gen->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
gen->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
gen->log_max_strq = out->hca_cap.log_max_strq_sz;
gen->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
gen->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
gen->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
gen->max_eqes = out->hca_cap.log_max_eq_sz;
gen->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
gen->log_max_eq = out->hca_cap.log_max_eq & 0xf;
gen->max_indirection = out->hca_cap.max_indirection;
gen->log_max_mrw_sz = out->hca_cap.log_max_mrw_sz;
gen->log_max_bsf_list_size = 0;
gen->log_max_klm_list_size = 0;
gen->log_max_ra_req_dc = out->hca_cap.log_max_ra_req_dc;
gen->log_max_ra_res_dc = out->hca_cap.log_max_ra_res_dc;
gen->log_max_ra_req_qp = out->hca_cap.log_max_ra_req_qp;
gen->log_max_ra_res_qp = out->hca_cap.log_max_ra_res_qp;
gen->max_qp_counters = be16_to_cpu(out->hca_cap.max_qp_count);
gen->pkey_table_size = get_pkey_table_size(be16_to_cpu(out->hca_cap.pkey_table_size));
gen->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
gen->num_ports = out->hca_cap.num_ports & 0xf;
gen->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
gen->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
gen->flags = be64_to_cpu(out->hca_cap.flags);
gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
pr_debug("flags = 0x%llx\n", gen->flags);
gen->uar_sz = out->hca_cap.uar_sz;
gen->min_log_pg_sz = out->hca_cap.log_pg_sz;
t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
if (t16 & 0x8000) {
gen->bf_reg_size = 1 << (t16 & 0x1f);
gen->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
} else {
gen->bf_reg_size = 0;
gen->bf_regs_per_page = 0;
}
gen->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
gen->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
gen->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
gen->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
gen->log_max_xrcd = out->hca_cap.log_max_xrcd;
gen->log_uar_page_sz = be16_to_cpu(out->hca_cap.log_uar_page_sz);
gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
}
static const char *caps_opmod_str(u16 opmod)
@@ -365,59 +360,61 @@ static const char *caps_opmod_str(u16 opmod)
int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
u16 opmod)
{
struct mlx5_cmd_query_hca_cap_mbox_out *out;
struct mlx5_cmd_query_hca_cap_mbox_in in;
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
void *out;
int err;
memset(&in, 0, sizeof(in));
out = kzalloc(sizeof(*out), GFP_KERNEL);
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto query_ex;
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
in.hdr.opmod = cpu_to_be16(opmod);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
err = mlx5_cmd_status_to_err(&out->hdr);
err = mlx5_cmd_status_to_err_v2(out);
if (err) {
mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
goto query_ex;
}
mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
fw2drv_caps(caps, out);
fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
query_ex:
kfree(out);
return err;
}
static int set_caps(struct mlx5_core_dev *dev,
struct mlx5_cmd_set_hca_cap_mbox_in *in)
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
struct mlx5_cmd_set_hca_cap_mbox_out out;
u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
int err;
memset(&out, 0, sizeof(out));
memset(out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
if (err)
return err;
err = mlx5_cmd_status_to_err(&out.hdr);
err = mlx5_cmd_status_to_err_v2(out);
return err;
}
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
struct mlx5_caps *cur_caps = NULL;
struct mlx5_caps *max_caps = NULL;
int err = -ENOMEM;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
goto query_ex;
@@ -446,8 +443,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
/* disable checksum */
cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
copy_rw_fields(&set_ctx->hca_cap, &cur_caps->gen);
err = set_caps(dev, set_ctx);
copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
cur_caps);
err = set_caps(dev, set_ctx, set_sz);
query_ex:
kfree(cur_caps);


@@ -327,98 +327,6 @@ struct mlx5_cmd_query_adapter_mbox_out {
u8 vsd_psid[16];
};
struct mlx5_hca_cap {
u8 rsvd1[16];
u8 log_max_srq_sz;
u8 log_max_qp_sz;
u8 rsvd2;
u8 log_max_qp;
u8 log_max_strq_sz;
u8 log_max_srqs;
u8 rsvd4[2];
u8 rsvd5;
u8 log_max_cq_sz;
u8 rsvd6;
u8 log_max_cq;
u8 log_max_eq_sz;
u8 log_max_mkey;
u8 rsvd7;
u8 log_max_eq;
u8 max_indirection;
u8 log_max_mrw_sz;
u8 log_max_bsf_list_sz;
u8 log_max_klm_list_sz;
u8 rsvd_8_0;
u8 log_max_ra_req_dc;
u8 rsvd_8_1;
u8 log_max_ra_res_dc;
u8 rsvd9;
u8 log_max_ra_req_qp;
u8 rsvd10;
u8 log_max_ra_res_qp;
u8 pad_cap;
u8 rsvd11[3];
__be16 max_qp_count;
__be16 pkey_table_size;
u8 rsvd13;
u8 local_ca_ack_delay;
u8 rsvd14;
u8 num_ports;
u8 log_max_msg;
u8 rsvd15[3];
__be16 stat_rate_support;
u8 rsvd16[2];
__be64 flags;
u8 rsvd17;
u8 uar_sz;
u8 rsvd18;
u8 log_pg_sz;
__be16 bf_log_bf_reg_size;
u8 rsvd19[4];
__be16 max_desc_sz_sq;
u8 rsvd20[2];
__be16 max_desc_sz_rq;
u8 rsvd21[2];
__be16 max_desc_sz_sq_dc;
__be32 max_qp_mcg;
u8 rsvd22[3];
u8 log_max_mcg;
u8 rsvd23;
u8 log_max_pd;
u8 rsvd24;
u8 log_max_xrcd;
u8 rsvd25[42];
__be16 log_uar_page_sz;
u8 rsvd26[108];
};
struct mlx5_cmd_query_hca_cap_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_cmd_query_hca_cap_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_hca_cap hca_cap;
};
struct mlx5_cmd_set_hca_cap_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
struct mlx5_hca_cap hca_cap;
};
struct mlx5_cmd_set_hca_cap_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_cmd_init_hca_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];


@@ -641,6 +641,7 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
u16 opmod);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,


@@ -140,4 +140,210 @@ enum {
MLX5_CMD_OP_MAX = 0x911
};
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_0[0x80];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
u8 reserved_1[0xb];
u8 log_max_qp[0x5];
u8 log_max_strq_sz[0x8];
u8 reserved_2[0x3];
u8 log_max_srqs[0x5];
u8 reserved_3[0x10];
u8 reserved_4[0x8];
u8 log_max_cq_sz[0x8];
u8 reserved_5[0xb];
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
u8 reserved_6[0x2];
u8 log_max_mkey[0x6];
u8 reserved_7[0xc];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
u8 reserved_8[0x1];
u8 log_max_mrw_sz[0x7];
u8 reserved_9[0x2];
u8 log_max_bsf_list_size[0x6];
u8 reserved_10[0x2];
u8 log_max_klm_list_size[0x6];
u8 reserved_11[0xa];
u8 log_max_ra_req_dc[0x6];
u8 reserved_12[0xa];
u8 log_max_ra_res_dc[0x6];
u8 reserved_13[0xa];
u8 log_max_ra_req_qp[0x6];
u8 reserved_14[0xa];
u8 log_max_ra_res_qp[0x6];
u8 pad_cap[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
u8 reserved_15[0x1d];
u8 reserved_16[0x6];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
u8 eswitch_owner[0x1];
u8 reserved_17[0xa];
u8 local_ca_ack_delay[0x5];
u8 reserved_18[0x8];
u8 num_ports[0x8];
u8 reserved_19[0x3];
u8 log_max_msg[0x5];
u8 reserved_20[0x18];
u8 stat_rate_support[0x10];
u8 reserved_21[0x10];
u8 reserved_22[0x10];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
u8 reserved_23[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
u8 reserved_24[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
u8 dc[0x1];
u8 reserved_25[0x2];
u8 roce[0x1];
u8 atomic[0x1];
u8 rsz_srq[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
u8 sniffer_rule_flow[0x1];
u8 sniffer_rule_vport[0x1];
u8 sniffer_rule_phy[0x1];
u8 reserved_26[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
u8 reserved_27[0x3];
u8 cd[0x1];
u8 reserved_28[0x1];
u8 apm[0x1];
u8 reserved_29[0x7];
u8 qkv[0x1];
u8 pkv[0x1];
u8 reserved_30[0x4];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
u8 rc[0x1];
u8 reserved_31[0xa];
u8 uar_sz[0x6];
u8 reserved_32[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
u8 reserved_33[0xa];
u8 log_bf_reg_size[0x5];
u8 reserved_34[0x10];
u8 reserved_35[0x10];
u8 max_wqe_sz_sq[0x10];
u8 reserved_36[0x10];
u8 max_wqe_sz_rq[0x10];
u8 reserved_37[0x10];
u8 max_wqe_sz_sq_dc[0x10];
u8 reserved_38[0x7];
u8 max_qp_mcg[0x19];
u8 reserved_39[0x18];
u8 log_max_mcg[0x8];
u8 reserved_40[0xb];
u8 log_max_pd[0x5];
u8 reserved_41[0xb];
u8 log_max_xrcd[0x5];
u8 reserved_42[0x20];
u8 reserved_43[0x3];
u8 log_max_rq[0x5];
u8 reserved_44[0x3];
u8 log_max_sq[0x5];
u8 reserved_45[0x3];
u8 log_max_tir[0x5];
u8 reserved_46[0x3];
u8 log_max_tis[0x5];
u8 reserved_47[0x13];
u8 log_max_rq_per_tir[0x5];
u8 reserved_48[0x3];
u8 log_max_tis_per_sq[0x5];
u8 reserved_49[0xe0];
u8 reserved_50[0x10];
u8 log_uar_page_sz[0x10];
u8 reserved_51[0x100];
u8 reserved_52[0x1f];
u8 cqe_zip[0x1];
u8 cqe_zip_timeout[0x10];
u8 cqe_zip_max_num[0x10];
u8 reserved_53[0x220];
};
struct mlx5_ifc_set_hca_cap_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
u8 reserved_1[0x10];
u8 op_mod[0x10];
u8 reserved_2[0x40];
struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
};
struct mlx5_ifc_query_hca_cap_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
u8 reserved_1[0x10];
u8 op_mod[0x10];
u8 reserved_2[0x40];
};
struct mlx5_ifc_query_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x40];
u8 capability_struct[256][0x8];
};
struct mlx5_ifc_set_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x40];
};
#endif /* MLX5_IFC_H */
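
A note on how layouts such as mlx5_ifc_cmd_hca_cap_bits are consumed: each field is declared as a u8 array whose length is the field's width in bits, so sizeof() and offsetof() on the struct yield bit widths and bit offsets directly, and the total struct size divided by 8 gives the mailbox size in bytes (the quantity MLX5_ST_SZ_BYTES() is built on). The toy struct and helper macros below illustrate the trick; they are written for this note and are not the kernel's definitions.

/* Stand-alone illustration of the "one u8 array element per bit" trick
 * used by the mlx5_ifc_*_bits layouts; struct and macro names here are
 * invented for the example. */
#include <stddef.h>
#include <stdio.h>

typedef unsigned char u8;

struct toy_cap_bits {
	u8 reserved_0[0x10];
	u8 log_max_qp[0x5];
	u8 reserved_1[0xb];
	u8 pkey_table_size[0x10];
};

#define TOY_BIT_SZ(typ, fld)	(sizeof(((struct typ *)0)->fld))	/* field width in bits */
#define TOY_BIT_OFF(typ, fld)	(offsetof(struct typ, fld))		/* field offset in bits */
#define TOY_ST_SZ_BYTES(typ)	(sizeof(struct typ) / 8)		/* layout size in bytes */

int main(void)
{
	printf("log_max_qp:      %zu bits at bit %zu\n",
	       TOY_BIT_SZ(toy_cap_bits, log_max_qp),
	       TOY_BIT_OFF(toy_cap_bits, log_max_qp));
	printf("pkey_table_size: %zu bits at bit %zu\n",
	       TOY_BIT_SZ(toy_cap_bits, pkey_table_size),
	       TOY_BIT_OFF(toy_cap_bits, pkey_table_size));
	printf("layout size:     %zu bytes\n", TOY_ST_SZ_BYTES(toy_cap_bits));
	return 0;
}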