Merge branches '4.7-rc-misc', 'hfi1-fixes', 'i40iw-rc-fixes' and 'mellanox-rc-fixes' into k.o/for-4.7-rc

Doug Ledford 2016-06-23 12:22:33 -04:00
commit 9903fd1374
27 changed files with 161 additions and 82 deletions


@@ -411,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 	for (ix = 0; ix < table->sz; ix++)
 		if (table->data_vec[ix].attr.ndev == ndev)
-			if (!del_gid(ib_dev, port, table, ix, false))
+			if (!del_gid(ib_dev, port, table, ix,
+				     !!(table->data_vec[ix].props &
+					GID_TABLE_ENTRY_DEFAULT)))
 				deleted = true;
 
 	write_unlock_irq(&table->rwlock);


@@ -3876,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
 	if (addr->sa_family == AF_INET) {
-		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
 			err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
					    true);
-		if (!err) {
-			mc->igmp_joined = true;
-			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+			if (!err)
+				mc->igmp_joined = true;
 		}
 	} else {
 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)


@@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file,
 	struct ib_srq *srq = NULL;
 	struct ib_qp *qp;
 	char *buf;
-	struct ib_qp_init_attr attr;
+	struct ib_qp_init_attr attr = {};
 	struct ib_uverbs_ex_create_qp_resp resp;
 	int ret;


@@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		ah_attr->grh.dgid = sgid;
 
 		if (!rdma_cap_eth_ah(device, port_num)) {
-			ret = ib_find_cached_gid_by_port(device, &dgid,
-							 IB_GID_TYPE_IB,
-							 port_num, NULL,
-							 &gid_index);
-			if (ret)
-				return ret;
+			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+				ret = ib_find_cached_gid_by_port(device, &dgid,
+								 IB_GID_TYPE_IB,
+								 port_num, NULL,
+								 &gid_index);
+				if (ret)
+					return ret;
+			} else {
+				gid_index = 0;
+			}
 		}
 
 		ah_attr->grh.sgid_index = (u8) gid_index;


@@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
 static void dc_start(struct hfi1_devdata *);
 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
 
 /*
  * Error interrupt table entry. This is used as input to the interrupt
@@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work)
 	}
 
 	reset_neighbor_info(ppd);
-	if (ppd->mgmt_allowed)
-		remove_full_mgmt_pkey(ppd);
 
 	/* disable the port */
 	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
		  __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
 	ppd->pkeys[2] = FULL_MGMT_P_KEY;
 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+	hfi1_event_pkey_change(ppd->dd, ppd->port);
 }
 
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
 {
-	ppd->pkeys[2] = 0;
-	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+	if (ppd->pkeys[2] != 0) {
+		ppd->pkeys[2] = 0;
+		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+		hfi1_event_pkey_change(ppd->dd, ppd->port);
+	}
 }
 
 /*
@@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd)
 		return 0;
 	}
 
+	/*
+	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
+	 * pkey table can be configured properly if the HFI unit is connected
+	 * to switch port with MgmtAllowed=NO
+	 */
+	clear_full_mgmt_pkey(ppd);
+
 	return set_link_state(ppd, HLS_DN_POLL);
 }
@@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
 	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
		   SEND_LEN_CHECK1_LEN_VL15_SHIFT;
-	int i;
+	int i, j;
 	u32 thres;
 
 	for (i = 0; i < ppd->vls_supported; i++) {
@@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd)
			    sc_mtu_to_threshold(dd->vld[i].sc,
						dd->vld[i].mtu,
						dd->rcd[0]->rcvhdrqentsize));
-		sc_set_cr_threshold(dd->vld[i].sc, thres);
+		for (j = 0; j < INIT_SC_PER_VL; j++)
+			sc_set_cr_threshold(
+					pio_select_send_context_vl(dd, j, i),
+					thres);
 	}
 	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
		    sc_mtu_to_threshold(dd->vld[15].sc,


@@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 
 	switch (cmd) {
 	case HFI1_IOCTL_ASSIGN_CTXT:
+		if (uctxt)
+			return -EINVAL;
+
 		if (copy_from_user(&uinfo,
				   (struct hfi1_user_info __user *)arg,
				   sizeof(uinfo)))


@@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int ret = 0, j, pidx, initfail;
-	struct hfi1_devdata *dd = NULL;
+	struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
 	struct hfi1_pportdata *ppd;
 
 	/* First, lock the non-writable module parameters */


@@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
 	memset(data, 0, size);
 }
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+{
+	struct ib_event event;
+
+	event.event = IB_EVENT_PKEY_CHANGE;
+	event.device = &dd->verbs_dev.rdi.ibdev;
+	event.element.port_num = port;
+	ib_dispatch_event(&event);
+}
+
 static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
 {
 	struct ib_mad_send_buf *send_buf;
@@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
 	}
 
 	if (changed) {
-		struct ib_event event;
-
 		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
-		event.event = IB_EVENT_PKEY_CHANGE;
-		event.device = &dd->verbs_dev.rdi.ibdev;
-		event.element.port_num = port;
-		ib_dispatch_event(&event);
+		hfi1_event_pkey_change(dd, port);
 	}
 
 	return 0;
 }


@@ -434,4 +434,6 @@ struct sc2vlnt {
		    COUNTER_MASK(1, 3) | \
		    COUNTER_MASK(1, 4))
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+
 #endif /* _HFI1_MAD_H */


@@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
 		/* counter is reset if occupancy count changes */
 		if (reg != reg_prev)
 			loop = 0;
-		if (loop > 500) {
+		if (loop > 50000) {
 			/* timed out - bounce the link */
 			dd_dev_err(dd,
				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
@@ -1797,6 +1797,21 @@ static void pio_map_rcu_callback(struct rcu_head *list)
 	pio_map_free(m);
 }
 
+/*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+	u32 thres;
+
+	thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+					    50),
+		    sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+					dd->vld[i].mtu,
+					dd->rcd[0]->rcvhdrqentsize));
+	sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
 /*
  * pio_map_init - called when #vls change
  * @dd: hfi1_devdata
@@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
 		if (!newmap->map[i])
 			goto bail;
 		newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
-		/* assign send contexts */
+		/*
+		 * assign send contexts and
+		 * adjust credit return threshold
+		 */
 		for (j = 0; j < sz; j++) {
-			if (dd->kernel_send_context[scontext])
+			if (dd->kernel_send_context[scontext]) {
				newmap->map[i]->ksc[j] =
					dd->kernel_send_context[scontext];
+				set_threshold(dd, scontext, i);
+			}
			if (++scontext >= first_scontext +
					  vl_scontexts[i])
				/* wrap back to first send context */


@@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
 
 	if (ppd->qsfp_info.cache_valid) {
 		if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
-			sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+			snprintf(lenstr, sizeof(lenstr), "%dM ",
+				 cache[QSFP_MOD_LEN_OFFS]);
 
 		power_byte = cache[QSFP_MOD_PWR_OFFS];
 		sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",


@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
 
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
				struct rvt_qp *qp)
+	__must_hold(&qp->s_lock)
 {
 	struct verbs_txreq *tx = ERR_PTR(-EBUSY);
-	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
 	write_seqlock(&dev->iowait_lock);
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
 		struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 	}
 out:
 	write_sequnlock(&dev->iowait_lock);
-	spin_unlock_irqrestore(&qp->s_lock, flags);
 	return tx;
 }


@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
					    struct rvt_qp *qp)
+	__must_hold(&qp->slock)
 {
 	struct verbs_txreq *tx;
 	struct hfi1_qp_priv *priv = qp->priv;


@@ -113,6 +113,8 @@
 
 #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
 #define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
 
 #define I40IW_MTU_TO_MSS 40
 #define I40IW_DEFAULT_MSS 1460


@@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
 	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
 	props->atomic_cap = IB_ATOMIC_NONE;
 	props->max_map_per_fmr = 1;
+	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
 	return 0;
 }
@@ -1527,7 +1528,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
 	mutex_lock(&iwdev->pbl_mutex);
 	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
 	mutex_unlock(&iwdev->pbl_mutex);
-	if (!status)
+	if (status)
 		goto err1;
 
 	if (palloc->level != I40IW_LEVEL_1)
@@ -2149,6 +2150,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
			struct i40iw_fast_reg_stag_info info;
 
+			memset(&info, 0, sizeof(info));
			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
			info.access_rights |= i40iw_get_user_access(flags);
			info.stag_key = reg_wr(ib_wr)->key & 0xff;
@@ -2158,10 +2160,14 @@ static int i40iw_post_send(struct ib_qp *ibqp,
			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
			info.total_len = iwmr->ibmr.length;
+			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
			info.first_pm_pbl_index = palloc->level1.idx;
			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
 
+			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+				info.chunk_size = 1;
+
			if (page_shift == 21)
				info.page_size = 1; /* 2M page */
@@ -2327,13 +2333,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
 {
 	struct i40iw_cq *iwcq;
 	struct i40iw_cq_uk *ukcq;
-	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+	unsigned long flags;
+	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
 
 	iwcq = (struct i40iw_cq *)ibcq;
 	ukcq = &iwcq->sc_cq.cq_uk;
-	if (notify_flags == IB_CQ_NEXT_COMP)
-		cq_notify = IW_CQ_COMPL_EVENT;
+	if (notify_flags == IB_CQ_SOLICITED)
+		cq_notify = IW_CQ_COMPL_SOLICITED;
+	spin_lock_irqsave(&iwcq->lock, flags);
 	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+	spin_unlock_irqrestore(&iwcq->lock, flags);
 	return 0;
 }


@@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
 	spin_unlock(&tun_qp->tx_lock);
 	if (ret)
-		goto out;
+		goto end;
 
 	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
 	if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	wr.wr.send_flags = IB_SEND_SIGNALED;
 
 	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
-	if (ret)
-		ib_destroy_ah(ah);
+	if (!ret)
+		return 0;
+out:
+	spin_lock(&tun_qp->tx_lock);
+	tun_qp->tx_ix_tail++;
+	spin_unlock(&tun_qp->tx_lock);
+	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+	ib_destroy_ah(ah);
 	return ret;
 }
@@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+	if (!ret)
+		return 0;
+
+	spin_lock(&sqp->tx_lock);
+	sqp->tx_ix_tail++;
+	spin_unlock(&sqp->tx_lock);
+	sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-	if (ret)
-		ib_destroy_ah(ah);
+	ib_destroy_ah(ah);
 	return ret;
 }


@@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
 	int is_bonded = mlx4_is_bonded(dev);
 
+	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+		return ERR_PTR(-EINVAL);
+
 	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
		return ERR_PTR(-EOPNOTSUPP);


@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
 	u32 max_pages;
 	struct mlx4_mr mmr;
 	struct ib_umem *umem;
-	void *pages_alloc;
+	size_t page_map_size;
 };
 
 struct mlx4_ib_mw {


@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }


@@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
			sizeof (struct mlx4_wqe_raddr_seg);
 	case MLX4_IB_QPT_RC:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
-			sizeof (struct mlx4_wqe_atomic_seg) +
+			sizeof (struct mlx4_wqe_masked_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
 	case MLX4_IB_QPT_SMI:
 	case MLX4_IB_QPT_GSI:
@@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	{
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
				       udata, 0, &qp, gfp);
-		if (err)
+		if (err) {
+			kfree(qp);
			return ERR_PTR(err);
+		}
 
 		qp->ibqp.qp_num = qp->mqp.qpn;
 		qp->xrcdn = xrcdn;


@@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
 	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
-	pma_cnt_ext->port_xmit_data =
+	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
 	pma_cnt_ext->port_xmit_packets =


@@ -3332,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
-	} else {
-		return 0;
+	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+		return MLX5_FENCE_MODE_FENCE;
 	}
+
+	return 0;
 }
 
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,


@@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
 
 	switch (cmd.type) {
 	case QIB_CMD_ASSIGN_CTXT:
+		if (rcd) {
+			ret = -EINVAL;
+			goto bail;
+		}
+
 		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
 		if (ret)
			goto bail;


@@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
 		}
-		/* there can be no bits at shift and below */
-		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+		/* there can be no set bits in low-order QoS bits */
+		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
 		qpn = mk_qpn(qpt, map, offset);
 	}
@@ -576,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 		qp->s_ssn = 1;
 		qp->s_lsn = 0;
 		qp->s_mig_state = IB_MIG_MIGRATED;
-		if (qp->s_ack_queue)
-			memset(
-				qp->s_ack_queue,
-				0,
-				rvt_max_atomic(rdi) *
-					sizeof(*qp->s_ack_queue));
 		qp->r_head_ack_queue = 0;
 		qp->s_tail_ack_queue = 0;
 		qp->s_num_rd_atomic = 0;
@@ -705,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
		 * initialization that is needed.
		 */
 		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
-		if (!priv)
+		if (IS_ERR(priv)) {
+			ret = priv;
			goto bail_qp;
+		}
 		qp->priv = priv;
 		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /


@@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
		    !rdi->driver_f.quiesce_qp ||
		    !rdi->driver_f.notify_error_qp ||
		    !rdi->driver_f.mtu_from_qp ||
-		    !rdi->driver_f.mtu_to_path_mtu ||
-		    !rdi->driver_f.shut_down_port ||
-		    !rdi->driver_f.cap_mask_chg)
+		    !rdi->driver_f.mtu_to_path_mtu)
			return -EINVAL;
		break;


@@ -172,6 +172,7 @@ enum {
 enum {
 	MLX5_FENCE_MODE_NONE = 0 << 5,
 	MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
+	MLX5_FENCE_MODE_FENCE = 2 << 5,
 	MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
 	MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
 };


@@ -203,7 +203,9 @@ struct rvt_driver_provided {
 
 	/*
	 * Allocate a private queue pair data structure for driver specific
-	 * information which is opaque to rdmavt.
+	 * information which is opaque to rdmavt. Errors are returned via
+	 * ERR_PTR(err). The driver is free to return NULL or a valid
+	 * pointer.
	 */
 	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				gfp_t gfp);